Compare commits

..

1 commit

Author SHA1 Message Date
18ab31ad66
update zig-previous workflow in zig 0.14.x branch
All checks were successful
AWS-Zig Build / build-zig-amd64-host (push) Successful in 2m51s
2025-08-22 11:59:17 -07:00
31 changed files with 1969 additions and 2187 deletions

View file

@ -18,9 +18,11 @@ jobs:
- name: Check out repository code - name: Check out repository code
uses: actions/checkout@v4 uses: actions/checkout@v4
- name: Setup Zig - name: Setup Zig
uses: https://github.com/mlugg/setup-zig@v2.0.5 uses: https://github.com/mlugg/setup-zig@v2.0.1
# We will let setup-zig use minimum_zig_version from build.zig.zon with:
# setup-zig also sets up the zig cache appropriately version: 0.14.0
- name: Restore Zig caches
uses: https://github.com/Hanaasagi/zig-action-cache@3954aae427f8b05914e08dfd79f15e1f2e435929
- name: Ulimit - name: Ulimit
run: ulimit -a run: ulimit -a
- name: Run smoke test - name: Run smoke test

View file

@ -26,9 +26,11 @@ jobs:
with: with:
ref: zig-develop ref: zig-develop
- name: Setup Zig - name: Setup Zig
uses: https://github.com/mlugg/setup-zig@v2.0.5 uses: https://github.com/mlugg/setup-zig@v2.0.1
with: with:
version: master version: master
- name: Restore Zig caches
uses: https://github.com/Hanaasagi/zig-action-cache@3954aae427f8b05914e08dfd79f15e1f2e435929
- name: Run smoke test - name: Run smoke test
run: zig build smoke-test --verbose run: zig build smoke-test --verbose
- name: Run full tests - name: Run full tests

View file

@ -1,5 +1,5 @@
[tools] [tools]
pre-commit = "latest" pre-commit = "latest"
"ubi:DonIsaac/zlint" = "latest" "ubi:DonIsaac/zlint" = "latest"
zig = "0.15.1" zig = "0.14.1"
zls = "0.15.0" zls = "0.14.0"

View file

@ -15,16 +15,19 @@ repos:
- id: zig-build - id: zig-build
- repo: local - repo: local
hooks: hooks:
- id: smoke-test - id: zlint
name: Run zig build smoke-test name: Run zig build smoke-test
entry: zig entry: zig
args: ["build", "--verbose", "smoke-test"] args: ["build", "--verbose", "smoke-test"]
language: system language: system
types: [file] types: [file]
pass_filenames: false pass_filenames: false
- id: zlint
name: Run zlint # - repo: local
entry: zlint # hooks:
args: ["--deny-warnings", "--fix"] # - id: zlint
language: system # name: Run zlint
types: [zig] # entry: zlint
# args: ["--deny-warnings", "--fix"]
# language: system
# types: [zig]

View file

@ -1,17 +1,18 @@
AWS SDK for Zig AWS SDK for Zig
=============== ===============
[Zig 0.15.1](https://ziglang.org/download/#release-0.15.1): [Zig 0.14](https://ziglang.org/download/#release-0.14.0):
[![Build Status: Zig 0.15.1](https://git.lerch.org/lobo/aws-sdk-for-zig/actions/workflows/build.yaml/badge.svg)](https://git.lerch.org/lobo/aws-sdk-for-zig/actions?workflow=build.yaml&state=closed) [![Build Status: Zig 0.14.0](https://git.lerch.org/lobo/aws-sdk-for-zig/actions/workflows/build.yaml/badge.svg)](https://git.lerch.org/lobo/aws-sdk-for-zig/actions?workflow=build.yaml&state=closed)
[Nightly Zig](https://ziglang.org/download/): [Nightly Zig](https://ziglang.org/download/):
[![Build Status: Zig Nightly](https://git.lerch.org/lobo/aws-sdk-for-zig/actions/workflows/zig-nightly.yaml/badge.svg)](https://git.lerch.org/lobo/aws-sdk-for-zig/actions?workflow=zig-nightly.yaml&state=closed) [![Build Status: Zig Nightly](https://git.lerch.org/lobo/aws-sdk-for-zig/actions/workflows/zig-nightly.yaml/badge.svg)](https://git.lerch.org/lobo/aws-sdk-for-zig/actions?workflow=zig-nightly.yaml&state=closed)
[Zig 0.14.1](https://ziglang.org/download/#release-0.14.1): [Zig 0.13](https://ziglang.org/download/#release-0.13.0):
[![Build Status: Zig 0.13.0](https://git.lerch.org/lobo/aws-sdk-for-zig/actions/workflows/zig-previous.yaml/badge.svg)](https://git.lerch.org/lobo/aws-sdk-for-zig/actions?workflow=zig-previous.yaml&state=closed)
[![Build Status: Zig 0.14.x](https://git.lerch.org/lobo/aws-sdk-for-zig/actions/workflows/zig-previous.yaml/badge.svg)](https://git.lerch.org/lobo/aws-sdk-for-zig/actions?workflow=zig-previous.yaml&state=closed)
Current executable size for the demo is 980k after compiling with -Doptimize=ReleaseSmall Current executable size for the demo is 980k after compiling with -Doptimize=ReleaseSmall
in x86_64-linux, and will vary based on services used. Tested targets: in x86_64-linux, and will vary based on services used. Tested targets:
@ -29,15 +30,15 @@ Tested targets are built, but not continuously tested, by CI.
Branches Branches
-------- --------
* **master**: This branch tracks the latest released zig version
* **zig-0.13**: This branch tracks the 0.13 released zig version.
Support for the previous version is best effort, generally
degrading over time. Fixes will generally appear in master, then
backported into the previous version.
* **zig-develop**: This branch tracks zig nightly, and is used mainly as a canary * **zig-develop**: This branch tracks zig nightly, and is used mainly as a canary
for breaking changes that will need to be dealt with when for breaking changes that will need to be dealt with when
a new zig release appears. Expect significant delays in any a new zig release appears. Expect significant delays in any
build failures (PRs always welcome!). build failures (PRs always welcome!).
* **master**: This branch tracks the latest released zig version
* **zig-0.14.x**: This branch tracks the 0.14/0.14.1 released zig versions.
Support for these previous version is best effort, generally
degrading over time. Fixes will generally appear in master, then
backported into the previous version.
Other branches/tags exist but are unsupported Other branches/tags exist but are unsupported

View file

@ -11,20 +11,19 @@
"README.md", "README.md",
"LICENSE", "LICENSE",
}, },
.minimum_zig_version = "0.15.1",
.dependencies = .{ .dependencies = .{
.smithy = .{ .smithy = .{
.url = "git+https://git.lerch.org/lobo/smithy.git#09c0a618877ebaf8e15fbfc505983876f4e063d5", .url = "https://git.lerch.org/lobo/smithy/archive/fd9be1afbfcc60d52896c077d8e9c963bb667bf1.tar.gz",
.hash = "smithy-1.0.0-uAyBgTnTAgBp2v6vypGcK5-YOCtxs2iEqR-4LfC5FTlS", .hash = "smithy-1.0.0-uAyBgZPSAgBHStx7nrj0u3sN66g8Ppnn3XFUEJhn00rP",
}, },
.models = .{ .models = .{
.url = "https://github.com/aws/aws-sdk-go-v2/archive/refs/tags/release-2025-05-05.tar.gz", .url = "https://github.com/aws/aws-sdk-go-v2/archive/refs/tags/release-2025-05-05.tar.gz",
.hash = "N-V-__8AAKWdeiawujEcrfukQbb8lLAiQIRT0uG5gCcm4b7W", .hash = "N-V-__8AAKWdeiawujEcrfukQbb8lLAiQIRT0uG5gCcm4b7W",
}, },
.zeit = .{ .zeit = .{
.url = "git+https://github.com/rockorager/zeit?ref=zig-0.15#ed2ca60db118414bda2b12df2039e33bad3b0b88", .url = "git+https://github.com/rockorager/zeit#f86d568b89a5922f084dae524a1eaf709855cd5e",
.hash = "zeit-0.6.0-5I6bk0J9AgCVa0nnyL0lNY9Xa9F68hHq-ZarhuXNV-Jb", .hash = "zeit-0.6.0-5I6bkzt5AgC1_BCuSzXkV0JHeF4Mhti1Z_jFC7E_nmD2",
}, },
.date = .{ .date = .{
.path = "lib/date", .path = "lib/date",
@ -33,8 +32,8 @@
.path = "lib/json", .path = "lib/json",
}, },
.case = .{ .case = .{
.url = "git+https://github.com/travisstaloch/case.git#f8003fe5f93b65f673d10d41323e347225e8cb87", .url = "git+https://github.com/travisstaloch/case.git#610caade88ca54d2745f115114b08e73e2c6fe02",
.hash = "case-0.0.1-chGYqx_EAADaGJjmoln5M1iMBDTrMdd8to5wdEVpfXm4", .hash = "N-V-__8AAIfIAAC_RzCtghVVBVdqUzB8AaaGIyvK2WWz38bC",
}, },
}, },
} }

View file

@ -1,19 +1,11 @@
.{ .{
.name = .codegen, .name = "aws-zig-codegen",
.version = "0.0.1", .version = "0.0.1",
.paths = .{
"build.zig",
"build.zig.zon",
"src",
"README.md",
"LICENSE",
},
.fingerprint = 0x41c2ec2d551fe279,
.dependencies = .{ .dependencies = .{
.smithy = .{ .smithy = .{
.url = "git+https://git.lerch.org/lobo/smithy.git#09c0a618877ebaf8e15fbfc505983876f4e063d5", .url = "https://git.lerch.org/lobo/smithy/archive/41b61745d25a65817209dd5dddbb5f9b66896a99.tar.gz",
.hash = "smithy-1.0.0-uAyBgTnTAgBp2v6vypGcK5-YOCtxs2iEqR-4LfC5FTlS", .hash = "122087deb0ae309b2258d59b40d82fe5921fdfc35b420bb59033244851f7f276fa34",
}, },
}, },
} }

View file

@ -12,7 +12,7 @@ allocator: std.mem.Allocator,
indent_level: u64, indent_level: u64,
pub fn appendToTypeStack(self: @This(), shape_info: *const smithy.ShapeInfo) !void { pub fn appendToTypeStack(self: @This(), shape_info: *const smithy.ShapeInfo) !void {
try self.type_stack.append(self.allocator, shape_info); try self.type_stack.append(shape_info);
} }
pub fn popFromTypeStack(self: @This()) void { pub fn popFromTypeStack(self: @This()) void {

View file

@ -107,9 +107,8 @@ pub fn computeDirectoryHash(
const arena = arena_instance.allocator(); const arena = arena_instance.allocator();
// Collect all files, recursively, then sort. // Collect all files, recursively, then sort.
// Normally we're looking at around 300 model files var all_files = std.ArrayList(*HashedFile).init(gpa);
var all_files = try std.ArrayList(*HashedFile).initCapacity(gpa, 300); defer all_files.deinit();
defer all_files.deinit(gpa);
var walker = try dir.walk(gpa); var walker = try dir.walk(gpa);
defer walker.deinit(); defer walker.deinit();
@ -140,7 +139,7 @@ pub fn computeDirectoryHash(
wait_group.start(); wait_group.start();
try thread_pool.spawn(workerHashFile, .{ dir, hashed_file, &wait_group }); try thread_pool.spawn(workerHashFile, .{ dir, hashed_file, &wait_group });
try all_files.append(gpa, hashed_file); try all_files.append(hashed_file);
} }
} }
@ -156,7 +155,7 @@ pub fn computeDirectoryHash(
hasher.update(&hashed_file.hash); hasher.update(&hashed_file.hash);
} }
if (any_failures) return error.DirectoryHashUnavailable; if (any_failures) return error.DirectoryHashUnavailable;
if (options.needFileHashes) options.fileHashes = try all_files.toOwnedSlice(gpa); if (options.needFileHashes) options.fileHashes = try all_files.toOwnedSlice();
return hasher.finalResult(); return hasher.finalResult();
} }
fn workerHashFile(dir: std.fs.Dir, hashed_file: *HashedFile, wg: *std.Thread.WaitGroup) void { fn workerHashFile(dir: std.fs.Dir, hashed_file: *HashedFile, wg: *std.Thread.WaitGroup) void {

View file

@ -17,9 +17,6 @@ const ServiceShape = smt.ServiceShape;
const ListShape = smt.ListShape; const ListShape = smt.ListShape;
const MapShape = smt.MapShape; const MapShape = smt.MapShape;
// manifest file 21k currently, but unbounded
var manifest_buf: [1024 * 32]u8 = undefined;
pub fn main() anyerror!void { pub fn main() anyerror!void {
const root_progress_node = std.Progress.start(.{}); const root_progress_node = std.Progress.start(.{});
defer root_progress_node.end(); defer root_progress_node.end();
@ -30,8 +27,7 @@ pub fn main() anyerror!void {
const args = try std.process.argsAlloc(allocator); const args = try std.process.argsAlloc(allocator);
defer std.process.argsFree(allocator, args); defer std.process.argsFree(allocator, args);
var stdout_writer = std.fs.File.stdout().writer(&.{}); const stdout = std.io.getStdOut().writer();
const stdout = &stdout_writer.interface;
var output_dir = std.fs.cwd(); var output_dir = std.fs.cwd();
defer if (output_dir.fd > 0) output_dir.close(); defer if (output_dir.fd > 0) output_dir.close();
@ -52,10 +48,11 @@ pub fn main() anyerror!void {
models_dir = try std.fs.cwd().openDir(args[i + 1], .{ .iterate = true }); models_dir = try std.fs.cwd().openDir(args[i + 1], .{ .iterate = true });
} }
var manifest_file = try output_dir.createFile("service_manifest.zig", .{}); // TODO: We need a different way to handle this file...
defer manifest_file.close(); const manifest_file_started = false;
var manifest = manifest_file.writer(&manifest_buf).interface; var manifest_file: std.fs.File = undefined;
defer manifest.flush() catch @panic("Could not flush service manifest"); defer if (manifest_file_started) manifest_file.close();
var manifest: std.fs.File.Writer = undefined;
var files_processed: usize = 0; var files_processed: usize = 0;
var skip_next = true; var skip_next = true;
for (args) |arg| { for (args) |arg| {
@ -74,7 +71,11 @@ pub fn main() anyerror!void {
skip_next = true; skip_next = true;
continue; continue;
} }
try processFile(arg, output_dir, &manifest); if (!manifest_file_started) {
manifest_file = try output_dir.createFile("service_manifest.zig", .{});
manifest = manifest_file.writer();
}
try processFile(arg, output_dir, manifest);
files_processed += 1; files_processed += 1;
} }
if (files_processed == 0) { if (files_processed == 0) {
@ -93,7 +94,7 @@ pub fn main() anyerror!void {
} }
if (args.len == 0) if (args.len == 0)
_ = try generateServices(allocator, ";", std.fs.File.stdin(), stdout); _ = try generateServices(allocator, ";", std.io.getStdIn(), stdout);
if (verbose) { if (verbose) {
const output_path = try output_dir.realpathAlloc(allocator, "."); const output_path = try output_dir.realpathAlloc(allocator, ".");
@ -132,8 +133,7 @@ fn processDirectories(models_dir: std.fs.Dir, output_dir: std.fs.Dir, parent_pro
// Do this in a brain dead fashion from here, no optimization // Do this in a brain dead fashion from here, no optimization
const manifest_file = try output_dir.createFile("service_manifest.zig", .{}); const manifest_file = try output_dir.createFile("service_manifest.zig", .{});
defer manifest_file.close(); defer manifest_file.close();
var manifest = manifest_file.writer(&manifest_buf); const manifest = manifest_file.writer();
defer manifest.interface.flush() catch @panic("Error flushing service_manifest.zig");
var mi = models_dir.iterate(); var mi = models_dir.iterate();
const generating_models_progress = parent_progress.start("generating models", count); const generating_models_progress = parent_progress.start("generating models", count);
@ -141,15 +141,18 @@ fn processDirectories(models_dir: std.fs.Dir, output_dir: std.fs.Dir, parent_pro
while (try mi.next()) |e| { while (try mi.next()) |e| {
if ((e.kind == .file or e.kind == .sym_link) and std.mem.endsWith(u8, e.name, ".json")) { if ((e.kind == .file or e.kind == .sym_link) and std.mem.endsWith(u8, e.name, ".json")) {
try processFile(e.name, output_dir, &manifest.interface); try processFile(e.name, output_dir, manifest);
generating_models_progress.completeOne(); generating_models_progress.completeOne();
} }
} }
// re-calculate so we can store the manifest // re-calculate so we can store the manifest
model_digest = calculated_manifest.model_dir_hash_digest; model_digest = calculated_manifest.model_dir_hash_digest;
_, calculated_manifest = try calculateDigests(models_dir, output_dir, &thread_pool); _, calculated_manifest = try calculateDigests(models_dir, output_dir, &thread_pool);
const data = try std.fmt.allocPrint(allocator, "{f}", .{std.json.fmt(calculated_manifest, .{ .whitespace = .indent_2 })}); try output_dir.writeFile(.{ .sub_path = "output_manifest.json", .data = try std.json.stringifyAlloc(
try output_dir.writeFile(.{ .sub_path = "output_manifest.json", .data = data }); allocator,
calculated_manifest,
.{ .whitespace = .indent_2 },
) });
} }
var model_digest: ?[Hasher.hex_multihash_len]u8 = null; var model_digest: ?[Hasher.hex_multihash_len]u8 = null;
@ -197,7 +200,7 @@ fn calculateDigests(models_dir: std.fs.Dir, output_dir: std.fs.Dir, thread_pool:
}, },
}; };
} }
fn processFile(file_name: []const u8, output_dir: std.fs.Dir, manifest: *std.Io.Writer) !void { fn processFile(file_name: []const u8, output_dir: std.fs.Dir, manifest: anytype) !void {
// It's probably best to create our own allocator here so we can deint at the end and // It's probably best to create our own allocator here so we can deint at the end and
// toss all allocations related to the services in this file // toss all allocations related to the services in this file
// I can't guarantee we're not leaking something, and at the end of the // I can't guarantee we're not leaking something, and at the end of the
@ -206,10 +209,11 @@ fn processFile(file_name: []const u8, output_dir: std.fs.Dir, manifest: *std.Io.
defer arena.deinit(); defer arena.deinit();
const allocator = arena.allocator(); const allocator = arena.allocator();
var output = try std.Io.Writer.Allocating.initCapacity(allocator, 1024 * 1024 * 2); var output = try std.ArrayListUnmanaged(u8).initCapacity(allocator, 1024 * 1024 * 2);
defer output.deinit(); defer output.deinit(allocator);
const writer = &output.writer; var counting_writer = std.io.countingWriter(output.writer(allocator));
var writer = counting_writer.writer();
_ = try writer.write("const std = @import(\"std\");\n"); _ = try writer.write("const std = @import(\"std\");\n");
_ = try writer.write("const smithy = @import(\"smithy\");\n"); _ = try writer.write("const smithy = @import(\"smithy\");\n");
@ -222,12 +226,7 @@ fn processFile(file_name: []const u8, output_dir: std.fs.Dir, manifest: *std.Io.
if (verbose) std.log.info("Processing file: {s}", .{file_name}); if (verbose) std.log.info("Processing file: {s}", .{file_name});
const service_names = generateServicesForFilePath( const service_names = generateServicesForFilePath(allocator, ";", file_name, writer) catch |err| {
allocator,
";",
file_name,
writer,
) catch |err| {
std.log.err("Error processing file: {s}", .{file_name}); std.log.err("Error processing file: {s}", .{file_name});
return err; return err;
}; };
@ -250,7 +249,7 @@ fn processFile(file_name: []const u8, output_dir: std.fs.Dir, manifest: *std.Io.
output_file_name = new_output_file_name; output_file_name = new_output_file_name;
} }
const unformatted: [:0]const u8 = try output.toOwnedSliceSentinel(0); const unformatted: [:0]const u8 = try output.toOwnedSliceSentinel(allocator, 0);
const formatted = try zigFmt(allocator, unformatted); const formatted = try zigFmt(allocator, unformatted);
// Dump our buffer out to disk // Dump our buffer out to disk
@ -267,17 +266,14 @@ fn zigFmt(allocator: std.mem.Allocator, buffer: [:0]const u8) ![]const u8 {
var tree = try std.zig.Ast.parse(allocator, buffer, .zig); var tree = try std.zig.Ast.parse(allocator, buffer, .zig);
defer tree.deinit(allocator); defer tree.deinit(allocator);
var aw = try std.Io.Writer.Allocating.initCapacity(allocator, buffer.len); return try tree.render(allocator);
defer aw.deinit();
try tree.render(allocator, &aw.writer, .{});
return aw.toOwnedSlice();
} }
fn generateServicesForFilePath( fn generateServicesForFilePath(
allocator: std.mem.Allocator, allocator: std.mem.Allocator,
comptime terminator: []const u8, comptime terminator: []const u8,
path: []const u8, path: []const u8,
writer: *std.Io.Writer, writer: anytype,
) ![][]const u8 { ) ![][]const u8 {
const file = try std.fs.cwd().openFile(path, .{}); const file = try std.fs.cwd().openFile(path, .{});
defer file.close(); defer file.close();
@ -292,34 +288,28 @@ fn addReference(id: []const u8, map: *std.StringHashMap(u64)) !void {
res.value_ptr.* = 1; res.value_ptr.* = 1;
} }
} }
fn countAllReferences(allocator: std.mem.Allocator, shape_ids: [][]const u8, shapes: std.StringHashMap(smithy.ShapeInfo), shape_references: *std.StringHashMap(u64), stack: *std.ArrayList([]const u8)) anyerror!void { fn countAllReferences(shape_ids: [][]const u8, shapes: std.StringHashMap(smithy.ShapeInfo), shape_references: *std.StringHashMap(u64), stack: *std.ArrayList([]const u8)) anyerror!void {
for (shape_ids) |id| { for (shape_ids) |id| {
const shape = shapes.get(id); const shape = shapes.get(id);
if (shape == null) { if (shape == null) {
std.log.err("Error - could not find shape with id {s}", .{id}); std.log.err("Error - could not find shape with id {s}", .{id});
return error.ShapeNotFound; return error.ShapeNotFound;
} }
try countReferences(allocator, shape.?, shapes, shape_references, stack); try countReferences(shape.?, shapes, shape_references, stack);
} }
} }
fn countTypeMembersReferences(allocator: std.mem.Allocator, type_members: []smithy.TypeMember, shapes: std.StringHashMap(smithy.ShapeInfo), shape_references: *std.StringHashMap(u64), stack: *std.ArrayList([]const u8)) anyerror!void { fn countTypeMembersReferences(type_members: []smithy.TypeMember, shapes: std.StringHashMap(smithy.ShapeInfo), shape_references: *std.StringHashMap(u64), stack: *std.ArrayList([]const u8)) anyerror!void {
for (type_members) |m| { for (type_members) |m| {
const target = shapes.get(m.target); const target = shapes.get(m.target);
if (target == null) { if (target == null) {
std.log.err("Error - could not find target {s}", .{m.target}); std.log.err("Error - could not find target {s}", .{m.target});
return error.TargetNotFound; return error.TargetNotFound;
} }
try countReferences(allocator, target.?, shapes, shape_references, stack); try countReferences(target.?, shapes, shape_references, stack);
} }
} }
fn countReferences( fn countReferences(shape: smithy.ShapeInfo, shapes: std.StringHashMap(smithy.ShapeInfo), shape_references: *std.StringHashMap(u64), stack: *std.ArrayList([]const u8)) anyerror!void {
allocator: std.mem.Allocator,
shape: smithy.ShapeInfo,
shapes: std.StringHashMap(smithy.ShapeInfo),
shape_references: *std.StringHashMap(u64),
stack: *std.ArrayList([]const u8),
) anyerror!void {
// Add ourselves as a reference, then we will continue down the tree // Add ourselves as a reference, then we will continue down the tree
try addReference(shape.id, shape_references); try addReference(shape.id, shape_references);
// Put ourselves on the stack. If we come back to ourselves, we want to end. // Put ourselves on the stack. If we come back to ourselves, we want to end.
@ -327,7 +317,7 @@ fn countReferences(
if (std.mem.eql(u8, shape.id, i)) if (std.mem.eql(u8, shape.id, i))
return; return;
} }
try stack.append(allocator, shape.id); try stack.append(shape.id);
defer _ = stack.pop(); defer _ = stack.pop();
// Well, this is a fun read: https://awslabs.github.io/smithy/1.0/spec/core/model.html#recursive-shape-definitions // Well, this is a fun read: https://awslabs.github.io/smithy/1.0/spec/core/model.html#recursive-shape-definitions
// Looks like recursion has special rules in the spec to accomodate Java. // Looks like recursion has special rules in the spec to accomodate Java.
@ -349,15 +339,15 @@ fn countReferences(
.unit, .unit,
=> {}, => {},
.document, .member, .resource => {}, // less sure about these? .document, .member, .resource => {}, // less sure about these?
.list => |i| try countReferences(allocator, shapes.get(i.member_target).?, shapes, shape_references, stack), .list => |i| try countReferences(shapes.get(i.member_target).?, shapes, shape_references, stack),
.set => |i| try countReferences(allocator, shapes.get(i.member_target).?, shapes, shape_references, stack), .set => |i| try countReferences(shapes.get(i.member_target).?, shapes, shape_references, stack),
.map => |i| { .map => |i| {
try countReferences(allocator, shapes.get(i.key).?, shapes, shape_references, stack); try countReferences(shapes.get(i.key).?, shapes, shape_references, stack);
try countReferences(allocator, shapes.get(i.value).?, shapes, shape_references, stack); try countReferences(shapes.get(i.value).?, shapes, shape_references, stack);
}, },
.structure => |m| try countTypeMembersReferences(allocator, m.members, shapes, shape_references, stack), .structure => |m| try countTypeMembersReferences(m.members, shapes, shape_references, stack),
.uniontype => |m| try countTypeMembersReferences(allocator, m.members, shapes, shape_references, stack), .uniontype => |m| try countTypeMembersReferences(m.members, shapes, shape_references, stack),
.service => |i| try countAllReferences(allocator, i.operations, shapes, shape_references, stack), .service => |i| try countAllReferences(i.operations, shapes, shape_references, stack),
.operation => |op| { .operation => |op| {
if (op.input) |i| { if (op.input) |i| {
const val = shapes.get(i); const val = shapes.get(i);
@ -365,7 +355,7 @@ fn countReferences(
std.log.err("Error processing shape with id \"{s}\". Input shape \"{s}\" was not found", .{ shape.id, i }); std.log.err("Error processing shape with id \"{s}\". Input shape \"{s}\" was not found", .{ shape.id, i });
return error.ShapeNotFound; return error.ShapeNotFound;
} }
try countReferences(allocator, val.?, shapes, shape_references, stack); try countReferences(val.?, shapes, shape_references, stack);
} }
if (op.output) |i| { if (op.output) |i| {
const val = shapes.get(i); const val = shapes.get(i);
@ -373,31 +363,27 @@ fn countReferences(
std.log.err("Error processing shape with id \"{s}\". Output shape \"{s}\" was not found", .{ shape.id, i }); std.log.err("Error processing shape with id \"{s}\". Output shape \"{s}\" was not found", .{ shape.id, i });
return error.ShapeNotFound; return error.ShapeNotFound;
} }
try countReferences(allocator, val.?, shapes, shape_references, stack); try countReferences(val.?, shapes, shape_references, stack);
} }
if (op.errors) |i| try countAllReferences(allocator, i, shapes, shape_references, stack); if (op.errors) |i| try countAllReferences(i, shapes, shape_references, stack);
}, },
.@"enum" => |m| try countTypeMembersReferences(allocator, m.members, shapes, shape_references, stack), .@"enum" => |m| try countTypeMembersReferences(m.members, shapes, shape_references, stack),
} }
} }
fn generateServices( fn generateServices(allocator: std.mem.Allocator, comptime _: []const u8, file: std.fs.File, writer: anytype) ![][]const u8 {
allocator: std.mem.Allocator,
comptime _: []const u8,
file: std.fs.File,
writer: *std.Io.Writer,
) ![][]const u8 {
const json = try file.readToEndAlloc(allocator, 1024 * 1024 * 1024); const json = try file.readToEndAlloc(allocator, 1024 * 1024 * 1024);
defer allocator.free(json); defer allocator.free(json);
const model = try smithy.parse(allocator, json); const model = try smithy.parse(allocator, json);
defer model.deinit(); defer model.deinit();
var shapes = std.StringHashMap(smithy.ShapeInfo).init(allocator); var shapes = std.StringHashMap(smithy.ShapeInfo).init(allocator);
defer shapes.deinit(); defer shapes.deinit();
var services = try std.ArrayList(smithy.ShapeInfo).initCapacity(allocator, model.shapes.len); var services = std.ArrayList(smithy.ShapeInfo).init(allocator);
defer services.deinit();
for (model.shapes) |shape| { for (model.shapes) |shape| {
try shapes.put(shape.id, shape); try shapes.put(shape.id, shape);
switch (shape.shape) { switch (shape.shape) {
.service => services.appendAssumeCapacity(shape), .service => try services.append(shape),
else => {}, else => {},
} }
} }
@ -406,15 +392,15 @@ fn generateServices(
// a reference count in case there are recursive data structures // a reference count in case there are recursive data structures
var shape_references = std.StringHashMap(u64).init(allocator); var shape_references = std.StringHashMap(u64).init(allocator);
defer shape_references.deinit(); defer shape_references.deinit();
var stack: std.ArrayList([]const u8) = .{}; var stack = std.ArrayList([]const u8).init(allocator);
defer stack.deinit(allocator); defer stack.deinit();
for (services.items) |service| for (services.items) |service|
try countReferences(allocator, service, shapes, &shape_references, &stack); try countReferences(service, shapes, &shape_references, &stack);
var constant_names = try std.ArrayList([]const u8).initCapacity(allocator, services.items.len); var constant_names = std.ArrayList([]const u8).init(allocator);
defer constant_names.deinit(allocator); defer constant_names.deinit();
var unresolved: std.ArrayList(smithy.ShapeInfo) = .{}; var unresolved = std.ArrayList(smithy.ShapeInfo).init(allocator);
defer unresolved.deinit(allocator); defer unresolved.deinit();
var generated = std.StringHashMap(void).init(allocator); var generated = std.StringHashMap(void).init(allocator);
defer generated.deinit(); defer generated.deinit();
@ -459,7 +445,7 @@ fn generateServices(
// name of the field will be snake_case of whatever comes in from // name of the field will be snake_case of whatever comes in from
// sdk_id. Not sure this will simple... // sdk_id. Not sure this will simple...
const constant_name = try support.constantName(allocator, sdk_id, .snake); const constant_name = try support.constantName(allocator, sdk_id, .snake);
constant_names.appendAssumeCapacity(constant_name); try constant_names.append(constant_name);
try writer.print("const Self = @This();\n", .{}); try writer.print("const Self = @This();\n", .{});
if (version) |v| if (version) |v|
try writer.print("pub const version: ?[]const u8 = \"{s}\";\n", .{v}) try writer.print("pub const version: ?[]const u8 = \"{s}\";\n", .{v})
@ -495,16 +481,16 @@ fn generateServices(
try generateOperation(allocator, shapes.get(op).?, state, writer); try generateOperation(allocator, shapes.get(op).?, state, writer);
} }
try generateAdditionalTypes(allocator, state, writer); try generateAdditionalTypes(allocator, state, writer);
return constant_names.toOwnedSlice(allocator); return constant_names.toOwnedSlice();
} }
fn generateAdditionalTypes(allocator: std.mem.Allocator, file_state: FileGenerationState, writer: *std.Io.Writer) !void { fn generateAdditionalTypes(allocator: std.mem.Allocator, file_state: FileGenerationState, writer: anytype) !void {
// More types may be added during processing // More types may be added during processing
while (file_state.additional_types_to_generate.pop()) |t| { while (file_state.additional_types_to_generate.pop()) |t| {
if (file_state.additional_types_generated.getEntry(t.name) != null) continue; if (file_state.additional_types_generated.getEntry(t.name) != null) continue;
// std.log.info("\t\t{s}", .{t.name}); // std.log.info("\t\t{s}", .{t.name});
var type_stack: std.ArrayList(*const smithy.ShapeInfo) = .{}; var type_stack = std.ArrayList(*const smithy.ShapeInfo).init(allocator);
defer type_stack.deinit(allocator); defer type_stack.deinit();
const state = GenerationState{ const state = GenerationState{
.type_stack = &type_stack, .type_stack = &type_stack,
.file_state = file_state, .file_state = file_state,
@ -524,9 +510,9 @@ fn generateAdditionalTypes(allocator: std.mem.Allocator, file_state: FileGenerat
} }
} }
fn outputIndent(state: GenerationState, writer: *std.Io.Writer) !void { fn outputIndent(state: GenerationState, writer: anytype) !void {
const n_chars = 4 * state.indent_level; const n_chars = 4 * state.indent_level;
try writer.splatBytesAll(" ", n_chars); try writer.writeByteNTimes(' ', n_chars);
} }
const StructType = enum { const StructType = enum {
@ -550,12 +536,12 @@ const operation_sub_types = [_]OperationSubTypeInfo{
}, },
}; };
fn generateOperation(allocator: std.mem.Allocator, operation: smithy.ShapeInfo, file_state: FileGenerationState, writer: *std.Io.Writer) !void { fn generateOperation(allocator: std.mem.Allocator, operation: smithy.ShapeInfo, file_state: FileGenerationState, writer: anytype) !void {
const snake_case_name = try support.constantName(allocator, operation.name, .snake); const snake_case_name = try support.constantName(allocator, operation.name, .snake);
defer allocator.free(snake_case_name); defer allocator.free(snake_case_name);
var type_stack: std.ArrayList(*const smithy.ShapeInfo) = .{}; var type_stack = std.ArrayList(*const smithy.ShapeInfo).init(allocator);
defer type_stack.deinit(allocator); defer type_stack.deinit();
const state = GenerationState{ const state = GenerationState{
.type_stack = &type_stack, .type_stack = &type_stack,
.file_state = file_state, .file_state = file_state,
@ -600,12 +586,7 @@ fn generateOperation(allocator: std.mem.Allocator, operation: smithy.ShapeInfo,
new_state.indent_level = 0; new_state.indent_level = 0;
std.debug.assert(new_state.type_stack.items.len == 0); std.debug.assert(new_state.type_stack.items.len == 0);
try serialization.json.generateToJsonFunction( try serialization.json.generateToJsonFunction(shape_id, writer.any(), new_state, generate_type_options.keyCase(.pascal));
shape_id,
writer,
new_state,
generate_type_options.keyCase(.pascal),
);
try writer.writeAll("\n"); try writer.writeAll("\n");
}, },
@ -657,7 +638,7 @@ fn generateOperation(allocator: std.mem.Allocator, operation: smithy.ShapeInfo,
_ = try writer.write("} = .{};\n"); _ = try writer.write("} = .{};\n");
} }
fn generateMetadataFunction(operation_name: []const u8, state: GenerationState, writer: *std.Io.Writer, options: GenerateTypeOptions) !void { fn generateMetadataFunction(operation_name: []const u8, state: GenerationState, writer: anytype, options: GenerateTypeOptions) !void {
// TODO: Shove these lines in here, and also the else portion // TODO: Shove these lines in here, and also the else portion
// pub fn metaInfo(self: @This()) struct { service: @TypeOf(sts), action: @TypeOf(sts.get_caller_identity) } { // pub fn metaInfo(self: @This()) struct { service: @TypeOf(sts), action: @TypeOf(sts.get_caller_identity) } {
// return .{ .service = sts, .action = sts.get_caller_identity }; // return .{ .service = sts, .action = sts.get_caller_identity };
@ -718,7 +699,7 @@ fn getTypeName(allocator: std.mem.Allocator, shape: smithy.ShapeInfo) ![]const u
} }
} }
fn reuseCommonType(shape: smithy.ShapeInfo, writer: *std.Io.Writer, state: GenerationState) !bool { fn reuseCommonType(shape: smithy.ShapeInfo, writer: anytype, state: GenerationState) !bool {
// We want to return if we're at the top level of the stack. There are three // We want to return if we're at the top level of the stack. There are three
// reasons for this: // reasons for this:
// 1. For operations, we have a request that includes a metadata function // 1. For operations, we have a request that includes a metadata function
@ -748,14 +729,14 @@ fn reuseCommonType(shape: smithy.ShapeInfo, writer: *std.Io.Writer, state: Gener
rc = true; rc = true;
_ = try writer.write(type_name); // This can't possibly be this easy... _ = try writer.write(type_name); // This can't possibly be this easy...
if (state.file_state.additional_types_generated.getEntry(shape.name) == null) if (state.file_state.additional_types_generated.getEntry(shape.name) == null)
try state.file_state.additional_types_to_generate.append(state.allocator, shape); try state.file_state.additional_types_to_generate.append(shape);
} }
} }
return rc; return rc;
} }
/// return type is anyerror!void as this is a recursive function, so the compiler cannot properly infer error types /// return type is anyerror!void as this is a recursive function, so the compiler cannot properly infer error types
fn generateTypeFor(shape_id: []const u8, writer: *std.Io.Writer, state: GenerationState, comptime options: GenerateTypeOptions) anyerror!bool { fn generateTypeFor(shape_id: []const u8, writer: anytype, state: GenerationState, comptime options: GenerateTypeOptions) anyerror!bool {
const end_structure = options.end_structure; const end_structure = options.end_structure;
var rc = false; var rc = false;
@ -827,8 +808,7 @@ fn generateTypeFor(shape_id: []const u8, writer: *std.Io.Writer, state: Generati
.float => |s| try generateSimpleTypeFor(s, "f32", writer), .float => |s| try generateSimpleTypeFor(s, "f32", writer),
.long => |s| try generateSimpleTypeFor(s, "i64", writer), .long => |s| try generateSimpleTypeFor(s, "i64", writer),
.map => |m| { .map => |m| {
var null_writer = std.Io.Writer.Discarding.init(&.{}).writer; if (!try reuseCommonType(shape_info, std.io.null_writer, state)) {
if (!try reuseCommonType(shape_info, &null_writer, state)) {
try generateMapTypeFor(m, writer, state, options); try generateMapTypeFor(m, writer, state, options);
rc = true; rc = true;
} else { } else {
@ -845,7 +825,7 @@ fn generateTypeFor(shape_id: []const u8, writer: *std.Io.Writer, state: Generati
return rc; return rc;
} }
fn generateMapTypeFor(map: anytype, writer: *std.Io.Writer, state: GenerationState, comptime options: GenerateTypeOptions) anyerror!void { fn generateMapTypeFor(map: anytype, writer: anytype, state: GenerationState, comptime options: GenerateTypeOptions) anyerror!void {
_ = try writer.write("struct {\n"); _ = try writer.write("struct {\n");
try writer.writeAll("pub const is_map_type = true;\n\n"); try writer.writeAll("pub const is_map_type = true;\n\n");
@ -868,12 +848,12 @@ fn generateMapTypeFor(map: anytype, writer: *std.Io.Writer, state: GenerationSta
_ = try writer.write("}"); _ = try writer.write("}");
} }
fn generateSimpleTypeFor(_: anytype, type_name: []const u8, writer: *std.Io.Writer) !void { fn generateSimpleTypeFor(_: anytype, type_name: []const u8, writer: anytype) !void {
_ = try writer.write(type_name); // This had required stuff but the problem was elsewhere. Better to leave as function just in case _ = try writer.write(type_name); // This had required stuff but the problem was elsewhere. Better to leave as function just in case
} }
const Mapping = struct { snake: []const u8, original: []const u8 }; const Mapping = struct { snake: []const u8, original: []const u8 };
fn generateComplexTypeFor(shape_id: []const u8, members: []smithy.TypeMember, type_type_name: []const u8, writer: *std.Io.Writer, state: GenerationState, comptime options: GenerateTypeOptions) anyerror!void { fn generateComplexTypeFor(shape_id: []const u8, members: []smithy.TypeMember, type_type_name: []const u8, writer: anytype, state: GenerationState, comptime options: GenerateTypeOptions) anyerror!void {
_ = shape_id; _ = shape_id;
var arena = std.heap.ArenaAllocator.init(state.allocator); var arena = std.heap.ArenaAllocator.init(state.allocator);
@ -881,7 +861,7 @@ fn generateComplexTypeFor(shape_id: []const u8, members: []smithy.TypeMember, ty
const allocator = arena.allocator(); const allocator = arena.allocator();
var field_name_mappings = try std.ArrayList(Mapping).initCapacity(allocator, members.len); var field_name_mappings = try std.ArrayList(Mapping).initCapacity(allocator, members.len);
defer field_name_mappings.deinit(allocator); defer field_name_mappings.deinit();
// There is an httpQueryParams trait as well, but nobody is using it. API GW // There is an httpQueryParams trait as well, but nobody is using it. API GW
// pretends to, but it's an empty map // pretends to, but it's an empty map
// //
@ -889,13 +869,13 @@ fn generateComplexTypeFor(shape_id: []const u8, members: []smithy.TypeMember, ty
// //
// httpLabel is interesting - right now we just assume anything can be used - do we need to track this? // httpLabel is interesting - right now we just assume anything can be used - do we need to track this?
var http_query_mappings = try std.ArrayList(Mapping).initCapacity(allocator, members.len); var http_query_mappings = try std.ArrayList(Mapping).initCapacity(allocator, members.len);
defer http_query_mappings.deinit(allocator); defer http_query_mappings.deinit();
var http_header_mappings = try std.ArrayList(Mapping).initCapacity(allocator, members.len); var http_header_mappings = try std.ArrayList(Mapping).initCapacity(allocator, members.len);
defer http_header_mappings.deinit(allocator); defer http_header_mappings.deinit();
var map_fields = try std.ArrayList([]const u8).initCapacity(allocator, members.len); var map_fields = std.ArrayList([]const u8).init(allocator);
defer map_fields.deinit(allocator); defer map_fields.deinit();
// prolog. We'll rely on caller to get the spacing correct here // prolog. We'll rely on caller to get the spacing correct here
_ = try writer.write(type_type_name); _ = try writer.write(type_type_name);
@ -950,7 +930,7 @@ fn generateComplexTypeFor(shape_id: []const u8, members: []smithy.TypeMember, ty
try writer.print("{s}: ", .{member_name}); try writer.print("{s}: ", .{member_name});
try writeOptional(member.traits, writer, null); try writeOptional(member.traits, writer, null);
if (try generateTypeFor(member.target, writer, child_state, options.endStructure(true))) if (try generateTypeFor(member.target, writer, child_state, options.endStructure(true)))
map_fields.appendAssumeCapacity(try std.fmt.allocPrint(allocator, "{s}", .{member_name})); try map_fields.append(try std.fmt.allocPrint(allocator, "{s}", .{member_name}));
if (!std.mem.eql(u8, "union", type_type_name)) if (!std.mem.eql(u8, "union", type_type_name))
try writeOptional(member.traits, writer, " = null"); try writeOptional(member.traits, writer, " = null");
@ -998,14 +978,7 @@ fn generateComplexTypeFor(shape_id: []const u8, members: []smithy.TypeMember, ty
_ = try writer.write("}\n"); _ = try writer.write("}\n");
} }
fn writeMappings( fn writeMappings(state: GenerationState, @"pub": []const u8, mapping_name: []const u8, mappings: anytype, force_output: bool, writer: anytype) !void {
state: GenerationState,
@"pub": []const u8,
mapping_name: []const u8,
mappings: anytype,
force_output: bool,
writer: *std.Io.Writer,
) !void {
if (mappings.items.len == 0 and !force_output) return; if (mappings.items.len == 0 and !force_output) return;
try outputIndent(state, writer); try outputIndent(state, writer);
if (mappings.items.len == 0) { if (mappings.items.len == 0) {
@ -1025,7 +998,7 @@ fn writeMappings(
_ = try writer.write("};\n"); _ = try writer.write("};\n");
} }
fn writeOptional(traits: ?[]smithy.Trait, writer: *std.Io.Writer, value: ?[]const u8) !void { fn writeOptional(traits: ?[]smithy.Trait, writer: anytype, value: ?[]const u8) !void {
if (traits) |ts| if (smt.hasTrait(.required, ts)) return; if (traits) |ts| if (smt.hasTrait(.required, ts)) return;
try writer.writeAll(value orelse "?"); try writer.writeAll(value orelse "?");
} }

View file

@ -17,7 +17,7 @@ const JsonMember = struct {
shape_info: smithy.ShapeInfo, shape_info: smithy.ShapeInfo,
}; };
pub fn generateToJsonFunction(shape_id: []const u8, writer: *std.Io.Writer, state: GenerationState, comptime options: GenerateTypeOptions) !void { pub fn generateToJsonFunction(shape_id: []const u8, writer: std.io.AnyWriter, state: GenerationState, comptime options: GenerateTypeOptions) !void {
_ = options; _ = options;
const allocator = state.allocator; const allocator = state.allocator;
@ -117,15 +117,15 @@ fn getMemberValueJson(allocator: std.mem.Allocator, source: []const u8, member:
const member_value = try std.fmt.allocPrint(allocator, "@field({s}, \"{s}\")", .{ source, member.field_name }); const member_value = try std.fmt.allocPrint(allocator, "@field({s}, \"{s}\")", .{ source, member.field_name });
defer allocator.free(member_value); defer allocator.free(member_value);
var output_block = std.Io.Writer.Allocating.init(allocator); var output_block = std.ArrayListUnmanaged(u8){};
defer output_block.deinit(); const writer = output_block.writer(allocator);
try writeMemberValue( try writeMemberValue(
&output_block.writer, writer,
member_value, member_value,
); );
return output_block.toOwnedSlice(); return output_block.toOwnedSlice(allocator);
} }
fn getShapeJsonValueType(shape: Shape) []const u8 { fn getShapeJsonValueType(shape: Shape) []const u8 {
@ -139,7 +139,7 @@ fn getShapeJsonValueType(shape: Shape) []const u8 {
} }
fn writeMemberValue( fn writeMemberValue(
writer: *std.Io.Writer, writer: anytype,
member_value: []const u8, member_value: []const u8,
) !void { ) !void {
try writer.writeAll(member_value); try writer.writeAll(member_value);
@ -153,7 +153,7 @@ const WriteMemberJsonParams = struct {
member: smithy.TypeMember, member: smithy.TypeMember,
}; };
fn writeStructureJson(params: WriteMemberJsonParams, writer: *std.Io.Writer) !void { fn writeStructureJson(params: WriteMemberJsonParams, writer: std.io.AnyWriter) !void {
const shape_type = "structure"; const shape_type = "structure";
const allocator = params.state.allocator; const allocator = params.state.allocator;
const state = params.state; const state = params.state;
@ -221,7 +221,7 @@ fn writeStructureJson(params: WriteMemberJsonParams, writer: *std.Io.Writer) !vo
} }
} }
fn writeListJson(list: smithy_tools.ListShape, params: WriteMemberJsonParams, writer: *std.Io.Writer) anyerror!void { fn writeListJson(list: smithy_tools.ListShape, params: WriteMemberJsonParams, writer: std.io.AnyWriter) anyerror!void {
const state = params.state; const state = params.state;
const allocator = state.allocator; const allocator = state.allocator;
@ -274,7 +274,7 @@ fn writeListJson(list: smithy_tools.ListShape, params: WriteMemberJsonParams, wr
} }
} }
fn writeMapJson(map: smithy_tools.MapShape, params: WriteMemberJsonParams, writer: *std.Io.Writer) anyerror!void { fn writeMapJson(map: smithy_tools.MapShape, params: WriteMemberJsonParams, writer: std.io.AnyWriter) anyerror!void {
const state = params.state; const state = params.state;
const name = params.field_name; const name = params.field_name;
const value = params.field_value; const value = params.field_value;
@ -351,11 +351,11 @@ fn writeMapJson(map: smithy_tools.MapShape, params: WriteMemberJsonParams, write
} }
} }
fn writeScalarJson(comment: []const u8, params: WriteMemberJsonParams, writer: *std.Io.Writer) anyerror!void { fn writeScalarJson(comment: []const u8, params: WriteMemberJsonParams, writer: std.io.AnyWriter) anyerror!void {
try writer.print("try jw.write({s}); // {s}\n\n", .{ params.field_value, comment }); try writer.print("try jw.write({s}); // {s}\n\n", .{ params.field_value, comment });
} }
fn writeMemberJson(params: WriteMemberJsonParams, writer: *std.Io.Writer) anyerror!void { fn writeMemberJson(params: WriteMemberJsonParams, writer: std.io.AnyWriter) anyerror!void {
const shape_id = params.shape_id; const shape_id = params.shape_id;
const state = params.state; const state = params.state;
const shape_info = try smithy_tools.getShapeInfo(shape_id, state.file_state.shapes); const shape_info = try smithy_tools.getShapeInfo(shape_id, state.file_state.shapes);

View file

@ -15,17 +15,15 @@ pub fn build(b: *std.Build) void {
// set a preferred release mode, allowing the user to decide how to optimize. // set a preferred release mode, allowing the user to decide how to optimize.
const optimize = b.standardOptimizeOption(.{}); const optimize = b.standardOptimizeOption(.{});
const mod_exe = b.createModule(.{ const exe = b.addExecutable(.{
.name = "tmp",
// In this case the main source file is merely a path, however, in more
// complicated build scripts, this could be a generated file.
.root_source_file = b.path("src/main.zig"), .root_source_file = b.path("src/main.zig"),
.target = target, .target = target,
.optimize = optimize, .optimize = optimize,
}); });
const exe = b.addExecutable(.{
.name = "tmp",
.root_module = mod_exe,
});
const aws_dep = b.dependency("aws", .{ const aws_dep = b.dependency("aws", .{
// These are the two arguments to the dependency. It expects a target and optimization level. // These are the two arguments to the dependency. It expects a target and optimization level.
.target = target, .target = target,
@ -61,15 +59,12 @@ pub fn build(b: *std.Build) void {
const run_step = b.step("run", "Run the app"); const run_step = b.step("run", "Run the app");
run_step.dependOn(&run_cmd.step); run_step.dependOn(&run_cmd.step);
const mod_unit_tests = b.createModule(.{
.root_source_file = b.path("src/main.zig"),
.target = target,
.optimize = optimize,
});
// Creates a step for unit testing. This only builds the test executable // Creates a step for unit testing. This only builds the test executable
// but does not run it. // but does not run it.
const unit_tests = b.addTest(.{ const unit_tests = b.addTest(.{
.root_module = mod_unit_tests, .root_source_file = b.path("src/main.zig"),
.target = target,
.optimize = optimize,
}); });
const run_unit_tests = b.addRunArtifact(unit_tests); const run_unit_tests = b.addRunArtifact(unit_tests);

View file

@ -6,8 +6,8 @@
.dependencies = .{ .dependencies = .{
.aws = .{ .aws = .{
.url = "https://git.lerch.org/api/packages/lobo/generic/aws-sdk-with-models/cfc8aee1a6b54eac4a58893674361f1ad58e8595/cfc8aee1a6b54eac4a58893674361f1ad58e8595-with-models.tar.gz", .url = "https://git.lerch.org/api/packages/lobo/generic/aws-sdk-with-models/7a6086447c1249b0e5b5b5f3873d2f7932bea56d/7a6086447c1249b0e5b5b5f3873d2f7932bea56d-with-models.tar.gz",
.hash = "aws-0.0.1-SbsFcK8HCgA-P7sjZP5z7J7ZfZLTkQ4osD0qgbyUgTzG", .hash = "aws-0.0.1-SbsFcGN_CQCBjurpc2GEMw4c_qAkGu6KpuVnLBLY4L4q",
}, },
}, },
} }

View file

@ -15,10 +15,10 @@ pub fn main() anyerror!void {
var gpa = std.heap.GeneralPurposeAllocator(.{}){}; var gpa = std.heap.GeneralPurposeAllocator(.{}){};
defer _ = gpa.deinit(); defer _ = gpa.deinit();
const allocator = gpa.allocator(); const allocator = gpa.allocator();
var stdout_buffer: [1024]u8 = undefined; const stdout_raw = std.io.getStdOut().writer();
var stdout_raw = std.fs.File.stdout().writer(&stdout_buffer); var bw = std.io.bufferedWriter(stdout_raw);
const stdout = &stdout_raw.interface; defer bw.flush() catch unreachable;
defer stdout.flush() catch unreachable; const stdout = bw.writer();
// To use a proxy, uncomment the following with your own configuration // To use a proxy, uncomment the following with your own configuration
// const proxy = std.http.Proxy{ // const proxy = std.http.Proxy{

View file

@ -5,8 +5,8 @@
.minimum_zig_version = "0.14.0", .minimum_zig_version = "0.14.0",
.dependencies = .{ .dependencies = .{
.zeit = .{ .zeit = .{
.url = "git+https://github.com/rockorager/zeit?ref=zig-0.15#ed2ca60db118414bda2b12df2039e33bad3b0b88", .url = "git+https://github.com/rockorager/zeit#f86d568b89a5922f084dae524a1eaf709855cd5e",
.hash = "zeit-0.6.0-5I6bk0J9AgCVa0nnyL0lNY9Xa9F68hHq-ZarhuXNV-Jb", .hash = "zeit-0.6.0-5I6bkzt5AgC1_BCuSzXkV0JHeF4Mhti1Z_jFC7E_nmD2",
}, },
.json = .{ .json = .{
.path = "../json", .path = "../json",

View file

@ -17,10 +17,10 @@ pub const Timestamp = enum(zeit.Nanoseconds) {
}) catch std.debug.panic("Failed to parse timestamp to instant: {d}", .{value}); }) catch std.debug.panic("Failed to parse timestamp to instant: {d}", .{value});
const fmt = "Mon, 02 Jan 2006 15:04:05 GMT"; const fmt = "Mon, 02 Jan 2006 15:04:05 GMT";
var buf: [fmt.len]u8 = undefined; var buf = std.mem.zeroes([fmt.len]u8);
var fbs = std.Io.Writer.fixed(&buf); var fbs = std.io.fixedBufferStream(&buf);
instant.time().gofmt(&fbs, fmt) catch std.debug.panic("Failed to format instant: {d}", .{instant.timestamp}); instant.time().gofmt(fbs.writer(), fmt) catch std.debug.panic("Failed to format instant: {d}", .{instant.timestamp});
try jw.write(&buf); try jw.write(&buf);
} }

View file

@ -1772,12 +1772,12 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
.slice => { .slice => {
switch (token) { switch (token) {
.ArrayBegin => { .ArrayBegin => {
var arraylist = std.ArrayList(ptrInfo.child){}; var arraylist = std.ArrayList(ptrInfo.child).init(allocator);
errdefer { errdefer {
while (arraylist.pop()) |v| { while (arraylist.pop()) |v| {
parseFree(ptrInfo.child, v, options); parseFree(ptrInfo.child, v, options);
} }
arraylist.deinit(allocator); arraylist.deinit();
} }
while (true) { while (true) {
@ -1787,11 +1787,11 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
else => {}, else => {},
} }
try arraylist.ensureTotalCapacity(allocator, arraylist.items.len + 1); try arraylist.ensureTotalCapacity(arraylist.items.len + 1);
const v = try parseInternal(ptrInfo.child, tok, tokens, options); const v = try parseInternal(ptrInfo.child, tok, tokens, options);
arraylist.appendAssumeCapacity(v); arraylist.appendAssumeCapacity(v);
} }
return arraylist.toOwnedSlice(allocator); return arraylist.toOwnedSlice();
}, },
.String => |stringToken| { .String => |stringToken| {
if (ptrInfo.child != u8) return error.UnexpectedToken; if (ptrInfo.child != u8) return error.UnexpectedToken;
@ -1817,12 +1817,12 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
if (key_type == null) return error.UnexpectedToken; if (key_type == null) return error.UnexpectedToken;
const value_type = typeForField(ptrInfo.child, "value"); const value_type = typeForField(ptrInfo.child, "value");
if (value_type == null) return error.UnexpectedToken; if (value_type == null) return error.UnexpectedToken;
var arraylist = std.ArrayList(ptrInfo.child){}; var arraylist = std.ArrayList(ptrInfo.child).init(allocator);
errdefer { errdefer {
while (arraylist.pop()) |v| { while (arraylist.pop()) |v| {
parseFree(ptrInfo.child, v, options); parseFree(ptrInfo.child, v, options);
} }
arraylist.deinit(allocator); arraylist.deinit();
} }
while (true) { while (true) {
const key = (try tokens.next()) orelse return error.UnexpectedEndOfJson; const key = (try tokens.next()) orelse return error.UnexpectedEndOfJson;
@ -1831,13 +1831,13 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
else => {}, else => {},
} }
try arraylist.ensureTotalCapacity(allocator, arraylist.items.len + 1); try arraylist.ensureTotalCapacity(arraylist.items.len + 1);
const key_val = try parseInternal(key_type.?, key, tokens, options); const key_val = try parseInternal(key_type.?, key, tokens, options);
const val = (try tokens.next()) orelse return error.UnexpectedEndOfJson; const val = (try tokens.next()) orelse return error.UnexpectedEndOfJson;
const val_val = try parseInternal(value_type.?, val, tokens, options); const val_val = try parseInternal(value_type.?, val, tokens, options);
arraylist.appendAssumeCapacity(.{ .key = key_val, .value = val_val }); arraylist.appendAssumeCapacity(.{ .key = key_val, .value = val_val });
} }
return arraylist.toOwnedSlice(allocator); return arraylist.toOwnedSlice();
}, },
else => return error.UnexpectedToken, else => return error.UnexpectedToken,
} }

File diff suppressed because it is too large Load diff

View file

@ -25,7 +25,7 @@ pub const Credentials = struct {
}; };
} }
pub fn deinit(self: Self) void { pub fn deinit(self: Self) void {
std.crypto.secureZero(u8, self.secret_key); std.crypto.utils.secureZero(u8, self.secret_key);
self.allocator.free(self.secret_key); self.allocator.free(self.secret_key);
self.allocator.free(self.access_key); self.allocator.free(self.access_key);
if (self.session_token) |t| self.allocator.free(t); if (self.session_token) |t| self.allocator.free(t);

View file

@ -173,12 +173,11 @@ fn getContainerCredentials(allocator: std.mem.Allocator) !?auth.Credentials {
var cl = std.http.Client{ .allocator = allocator }; var cl = std.http.Client{ .allocator = allocator };
defer cl.deinit(); // I don't belive connection pooling would help much here as it's non-ssl and local defer cl.deinit(); // I don't belive connection pooling would help much here as it's non-ssl and local
var aw: std.Io.Writer.Allocating = .init(allocator); var resp_payload = std.ArrayList(u8).init(allocator);
defer aw.deinit(); defer resp_payload.deinit();
const response_payload = &aw.writer;
const req = try cl.fetch(.{ const req = try cl.fetch(.{
.location = .{ .url = container_uri }, .location = .{ .url = container_uri },
.response_writer = response_payload, .response_storage = .{ .dynamic = &resp_payload },
}); });
if (req.status != .ok and req.status != .not_found) { if (req.status != .ok and req.status != .not_found) {
log.warn("Bad status code received from container credentials endpoint: {}", .{@intFromEnum(req.status)}); log.warn("Bad status code received from container credentials endpoint: {}", .{@intFromEnum(req.status)});
@ -186,8 +185,8 @@ fn getContainerCredentials(allocator: std.mem.Allocator) !?auth.Credentials {
} }
if (req.status == .not_found) return null; if (req.status == .not_found) return null;
log.debug("Read {d} bytes from container credentials endpoint", .{aw.written().len}); log.debug("Read {d} bytes from container credentials endpoint", .{resp_payload.items.len});
if (aw.written().len == 0) return null; if (resp_payload.items.len == 0) return null;
const CredsResponse = struct { const CredsResponse = struct {
AccessKeyId: []const u8, AccessKeyId: []const u8,
@ -197,8 +196,8 @@ fn getContainerCredentials(allocator: std.mem.Allocator) !?auth.Credentials {
Token: []const u8, Token: []const u8,
}; };
const creds_response = blk: { const creds_response = blk: {
const res = std.json.parseFromSlice(CredsResponse, allocator, aw.written(), .{}) catch |e| { const res = std.json.parseFromSlice(CredsResponse, allocator, resp_payload.items, .{}) catch |e| {
log.err("Unexpected Json response from container credentials endpoint: {s}", .{aw.written()}); log.err("Unexpected Json response from container credentials endpoint: {s}", .{resp_payload.items});
log.err("Error parsing json: {}", .{e}); log.err("Error parsing json: {}", .{e});
if (@errorReturnTrace()) |trace| { if (@errorReturnTrace()) |trace| {
std.debug.dumpStackTrace(trace.*); std.debug.dumpStackTrace(trace.*);
@ -225,27 +224,26 @@ fn getImdsv2Credentials(allocator: std.mem.Allocator) !?auth.Credentials {
defer cl.deinit(); // I don't belive connection pooling would help much here as it's non-ssl and local defer cl.deinit(); // I don't belive connection pooling would help much here as it's non-ssl and local
// Get token // Get token
{ {
var aw: std.Io.Writer.Allocating = .init(allocator); var resp_payload = std.ArrayList(u8).init(allocator);
defer aw.deinit(); defer resp_payload.deinit();
const response_payload = &aw.writer;
const req = try cl.fetch(.{ const req = try cl.fetch(.{
.method = .PUT, .method = .PUT,
.location = .{ .url = "http://169.254.169.254/latest/api/token" }, .location = .{ .url = "http://169.254.169.254/latest/api/token" },
.extra_headers = &[_]std.http.Header{ .extra_headers = &[_]std.http.Header{
.{ .name = "X-aws-ec2-metadata-token-ttl-seconds", .value = "21600" }, .{ .name = "X-aws-ec2-metadata-token-ttl-seconds", .value = "21600" },
}, },
.response_writer = response_payload, .response_storage = .{ .dynamic = &resp_payload },
}); });
if (req.status != .ok) { if (req.status != .ok) {
log.warn("Bad status code received from IMDS v2: {}", .{@intFromEnum(req.status)}); log.warn("Bad status code received from IMDS v2: {}", .{@intFromEnum(req.status)});
return null; return null;
} }
if (aw.written().len == 0) { if (resp_payload.items.len == 0) {
log.warn("Unexpected zero response from IMDS v2", .{}); log.warn("Unexpected zero response from IMDS v2", .{});
return null; return null;
} }
token = try aw.toOwnedSlice(); token = try resp_payload.toOwnedSlice();
errdefer if (token) |t| allocator.free(t); errdefer if (token) |t| allocator.free(t);
} }
std.debug.assert(token != null); std.debug.assert(token != null);
@ -267,16 +265,15 @@ fn getImdsRoleName(allocator: std.mem.Allocator, client: *std.http.Client, imds_
// "InstanceProfileArn" : "arn:aws:iam::550620852718:instance-profile/ec2-dev", // "InstanceProfileArn" : "arn:aws:iam::550620852718:instance-profile/ec2-dev",
// "InstanceProfileId" : "AIPAYAM4POHXCFNKZ7HU2" // "InstanceProfileId" : "AIPAYAM4POHXCFNKZ7HU2"
// } // }
var aw: std.Io.Writer.Allocating = .init(allocator); var resp_payload = std.ArrayList(u8).init(allocator);
defer aw.deinit(); defer resp_payload.deinit();
const response_payload = &aw.writer;
const req = try client.fetch(.{ const req = try client.fetch(.{
.method = .GET, .method = .GET,
.location = .{ .url = "http://169.254.169.254/latest/meta-data/iam/info" }, .location = .{ .url = "http://169.254.169.254/latest/meta-data/iam/info" },
.extra_headers = &[_]std.http.Header{ .extra_headers = &[_]std.http.Header{
.{ .name = "X-aws-ec2-metadata-token", .value = imds_token }, .{ .name = "X-aws-ec2-metadata-token", .value = imds_token },
}, },
.response_writer = response_payload, .response_storage = .{ .dynamic = &resp_payload },
}); });
if (req.status != .ok and req.status != .not_found) { if (req.status != .ok and req.status != .not_found) {
@ -284,7 +281,7 @@ fn getImdsRoleName(allocator: std.mem.Allocator, client: *std.http.Client, imds_
return null; return null;
} }
if (req.status == .not_found) return null; if (req.status == .not_found) return null;
if (aw.written().len == 0) { if (resp_payload.items.len == 0) {
log.warn("Unexpected empty response from IMDS endpoint post token", .{}); log.warn("Unexpected empty response from IMDS endpoint post token", .{});
return null; return null;
} }
@ -295,8 +292,8 @@ fn getImdsRoleName(allocator: std.mem.Allocator, client: *std.http.Client, imds_
InstanceProfileArn: []const u8, InstanceProfileArn: []const u8,
InstanceProfileId: []const u8, InstanceProfileId: []const u8,
}; };
const imds_response = std.json.parseFromSlice(ImdsResponse, allocator, aw.written(), .{}) catch |e| { const imds_response = std.json.parseFromSlice(ImdsResponse, allocator, resp_payload.items, .{}) catch |e| {
log.err("Unexpected Json response from IMDS endpoint: {s}", .{aw.written()}); log.err("Unexpected Json response from IMDS endpoint: {s}", .{resp_payload.items});
log.err("Error parsing json: {}", .{e}); log.err("Error parsing json: {}", .{e});
if (@errorReturnTrace()) |trace| { if (@errorReturnTrace()) |trace| {
std.debug.dumpStackTrace(trace.*); std.debug.dumpStackTrace(trace.*);
@ -318,16 +315,15 @@ fn getImdsRoleName(allocator: std.mem.Allocator, client: *std.http.Client, imds_
fn getImdsCredentials(allocator: std.mem.Allocator, client: *std.http.Client, role_name: []const u8, imds_token: []u8) !?auth.Credentials { fn getImdsCredentials(allocator: std.mem.Allocator, client: *std.http.Client, role_name: []const u8, imds_token: []u8) !?auth.Credentials {
const url = try std.fmt.allocPrint(allocator, "http://169.254.169.254/latest/meta-data/iam/security-credentials/{s}/", .{role_name}); const url = try std.fmt.allocPrint(allocator, "http://169.254.169.254/latest/meta-data/iam/security-credentials/{s}/", .{role_name});
defer allocator.free(url); defer allocator.free(url);
var aw: std.Io.Writer.Allocating = .init(allocator); var resp_payload = std.ArrayList(u8).init(allocator);
defer aw.deinit(); defer resp_payload.deinit();
const response_payload = &aw.writer;
const req = try client.fetch(.{ const req = try client.fetch(.{
.method = .GET, .method = .GET,
.location = .{ .url = url }, .location = .{ .url = url },
.extra_headers = &[_]std.http.Header{ .extra_headers = &[_]std.http.Header{
.{ .name = "X-aws-ec2-metadata-token", .value = imds_token }, .{ .name = "X-aws-ec2-metadata-token", .value = imds_token },
}, },
.response_writer = response_payload, .response_storage = .{ .dynamic = &resp_payload },
}); });
if (req.status != .ok and req.status != .not_found) { if (req.status != .ok and req.status != .not_found) {
@ -335,7 +331,7 @@ fn getImdsCredentials(allocator: std.mem.Allocator, client: *std.http.Client, ro
return null; return null;
} }
if (req.status == .not_found) return null; if (req.status == .not_found) return null;
if (aw.written().len == 0) { if (resp_payload.items.len == 0) {
log.warn("Unexpected empty response from IMDS role endpoint", .{}); log.warn("Unexpected empty response from IMDS role endpoint", .{});
return null; return null;
} }
@ -350,8 +346,8 @@ fn getImdsCredentials(allocator: std.mem.Allocator, client: *std.http.Client, ro
Token: []const u8, Token: []const u8,
Expiration: []const u8, Expiration: []const u8,
}; };
const imds_response = std.json.parseFromSlice(ImdsResponse, allocator, aw.written(), .{}) catch |e| { const imds_response = std.json.parseFromSlice(ImdsResponse, allocator, resp_payload.items, .{}) catch |e| {
log.err("Unexpected Json response from IMDS endpoint: {s}", .{aw.written()}); log.err("Unexpected Json response from IMDS endpoint: {s}", .{resp_payload.items});
log.err("Error parsing json: {}", .{e}); log.err("Error parsing json: {}", .{e});
if (@errorReturnTrace()) |trace| { if (@errorReturnTrace()) |trace| {
std.debug.dumpStackTrace(trace.*); std.debug.dumpStackTrace(trace.*);

View file

@ -90,37 +90,8 @@ pub const Options = struct {
dualstack: bool = false, dualstack: bool = false,
sigv4_service_name: ?[]const u8 = null, sigv4_service_name: ?[]const u8 = null,
mock: ?Mock = null, /// Used for testing to provide consistent signing. If null, will use current time
}; signing_time: ?i64 = null,
/// mocking methods for isolated testing
pub const Mock = struct {
/// Used to provide consistent signing
signing_time: ?i64,
/// context is desiged to be type-erased pointer (@intFromPtr)
context: usize = 0,
request_fn: *const fn (
usize,
std.http.Method,
std.Uri,
std.http.Client.RequestOptions,
) std.http.Client.RequestError!std.http.Client.Request,
send_body_complete: *const fn (usize, []u8) std.Io.Writer.Error!void,
receive_head: *const fn (usize) std.http.Client.Request.ReceiveHeadError!std.http.Client.Response,
reader_decompressing: *const fn (usize) *std.Io.Reader,
fn request(m: Mock, method: std.http.Method, uri: std.Uri, options: std.http.Client.RequestOptions) std.http.Client.RequestError!std.http.Client.Request {
return m.request_fn(m.context, method, uri, options);
}
fn sendBodyComplete(m: Mock, body: []u8) std.Io.Writer.Error!void {
return m.send_body_complete(m.context, body);
}
fn receiveHead(m: Mock) std.http.Client.Request.ReceiveHeadError!std.http.Client.Response {
return m.receive_head(m.context);
}
fn readerDecompressing(m: Mock) *std.Io.Reader {
return m.reader_decompressing(m.context);
}
}; };
pub const Header = std.http.Header; pub const Header = std.http.Header;
@ -192,9 +163,9 @@ pub const AwsHttp = struct {
.region = getRegion(service, options.region), .region = getRegion(service, options.region),
.service = options.sigv4_service_name orelse service, .service = options.sigv4_service_name orelse service,
.credentials = creds, .credentials = creds,
.signing_time = if (options.mock) |m| m.signing_time else null, .signing_time = options.signing_time,
}; };
return try self.makeRequest(endpoint, request, signing_config, options); return try self.makeRequest(endpoint, request, signing_config);
} }
/// makeRequest is a low level http/https function that can be used inside /// makeRequest is a low level http/https function that can be used inside
@ -213,13 +184,7 @@ pub const AwsHttp = struct {
/// Content-Length: (length of body) /// Content-Length: (length of body)
/// ///
/// Return value is an HttpResult, which will need the caller to deinit(). /// Return value is an HttpResult, which will need the caller to deinit().
pub fn makeRequest( pub fn makeRequest(self: Self, endpoint: EndPoint, request: HttpRequest, signing_config: ?signing.Config) !HttpResult {
self: Self,
endpoint: EndPoint,
request: HttpRequest,
signing_config: ?signing.Config,
options: Options,
) !HttpResult {
var request_cp = request; var request_cp = request;
log.debug("Request Path: {s}", .{request_cp.path}); log.debug("Request Path: {s}", .{request_cp.path});
@ -234,8 +199,8 @@ pub const AwsHttp = struct {
// We will use endpoint instead // We will use endpoint instead
request_cp.path = endpoint.path; request_cp.path = endpoint.path;
var request_headers = std.ArrayList(std.http.Header){}; var request_headers = std.ArrayList(std.http.Header).init(self.allocator);
defer request_headers.deinit(self.allocator); defer request_headers.deinit();
const len = try addHeaders(self.allocator, &request_headers, endpoint.host, request_cp.body, request_cp.content_type, request_cp.headers); const len = try addHeaders(self.allocator, &request_headers, endpoint.host, request_cp.body, request_cp.content_type, request_cp.headers);
defer if (len) |l| self.allocator.free(l); defer if (len) |l| self.allocator.free(l);
@ -248,10 +213,10 @@ pub const AwsHttp = struct {
} }
} }
var headers = std.ArrayList(std.http.Header){}; var headers = std.ArrayList(std.http.Header).init(self.allocator);
defer headers.deinit(self.allocator); defer headers.deinit();
for (request_cp.headers) |header| for (request_cp.headers) |header|
try headers.append(self.allocator, .{ .name = header.name, .value = header.value }); try headers.append(.{ .name = header.name, .value = header.value });
log.debug("All Request Headers:", .{}); log.debug("All Request Headers:", .{});
for (headers.items) |h| { for (headers.items) |h| {
log.debug("\t{s}: {s}", .{ h.name, h.value }); log.debug("\t{s}: {s}", .{ h.name, h.value });
@ -263,12 +228,18 @@ pub const AwsHttp = struct {
// TODO: Fix this proxy stuff. This is all a kludge just to compile, but std.http.Client has it all built in now // TODO: Fix this proxy stuff. This is all a kludge just to compile, but std.http.Client has it all built in now
var cl = std.http.Client{ .allocator = self.allocator, .https_proxy = if (self.proxy) |*p| @constCast(p) else null }; var cl = std.http.Client{ .allocator = self.allocator, .https_proxy = if (self.proxy) |*p| @constCast(p) else null };
defer cl.deinit(); // TODO: Connection pooling defer cl.deinit(); // TODO: Connection pooling
const method = std.meta.stringToEnum(std.http.Method, request_cp.method).?;
// Fetch API in 0.15.1 is insufficient as it does not provide const method = std.meta.stringToEnum(std.http.Method, request_cp.method).?;
// server headers. We'll construct and send the request ourselves var server_header_buffer: [16 * 1024]u8 = undefined;
const uri = try std.Uri.parse(url); var resp_payload = std.ArrayList(u8).init(self.allocator);
const req_options: std.http.Client.RequestOptions = .{ defer resp_payload.deinit();
const req = try cl.fetch(.{
.server_header_buffer = &server_header_buffer,
.method = method,
.payload = if (request_cp.body.len > 0) request_cp.body else null,
.response_storage = .{ .dynamic = &resp_payload },
.raw_uri = true,
.location = .{ .url = url },
// we need full control over most headers. I wish libraries would do a // we need full control over most headers. I wish libraries would do a
// better job of having default headers as an opt-in... // better job of having default headers as an opt-in...
.headers = .{ .headers = .{
@ -280,13 +251,7 @@ pub const AwsHttp = struct {
.content_type = .omit, .content_type = .omit,
}, },
.extra_headers = headers.items, .extra_headers = headers.items,
}; });
var req = if (options.mock) |m|
try m.request(method, uri, req_options) // This will call the test harness
else
try cl.request(method, uri, req_options);
defer req.deinit();
// TODO: Need to test for payloads > 2^14. I believe one of our tests does this, but not sure // TODO: Need to test for payloads > 2^14. I believe one of our tests does this, but not sure
// if (request_cp.body.len > 0) { // if (request_cp.body.len > 0) {
// // Workaround for https://github.com/ziglang/zig/issues/15626 // // Workaround for https://github.com/ziglang/zig/issues/15626
@ -301,69 +266,33 @@ pub const AwsHttp = struct {
// } // }
// try req.wait(); // try req.wait();
if (request_cp.body.len > 0) {
// This seems a bit silly, but we can't have a []const u8 here
// because when it sends, it's using a writer, and this becomes
// the buffer of the writer. It's conceivable that something
// in the chain then does actually modify the body of the request
// so we'll need to duplicate it here
const req_body = try self.allocator.dupe(u8, request_cp.body);
defer self.allocator.free(req_body); // docs for sendBodyComplete say it flushes, so no need to outlive this
if (options.mock) |m|
try m.sendBodyComplete(req_body)
else
try req.sendBodyComplete(req_body);
} else if (options.mock == null) try req.sendBodiless();
// if (options.mock == null) log.err("Request sent. Body len {d}, uri {f}", .{ request_cp.body.len, uri });
var response = if (options.mock) |m| try m.receiveHead() else try req.receiveHead(&.{});
// TODO: Timeout - is this now above us? // TODO: Timeout - is this now above us?
log.debug( log.debug(
"Request Complete. Response code {d}: {?s}", "Request Complete. Response code {d}: {?s}",
.{ @intFromEnum(response.head.status), response.head.status.phrase() }, .{ @intFromEnum(req.status), req.status.phrase() },
); );
log.debug("Response headers:", .{}); log.debug("Response headers:", .{});
var resp_headers = std.ArrayList(Header){}; var resp_headers = std.ArrayList(Header).init(
defer resp_headers.deinit(self.allocator); self.allocator,
var it = response.head.iterateHeaders(); );
defer resp_headers.deinit();
var it = std.http.HeaderIterator.init(server_header_buffer[0..]);
while (it.next()) |h| { // even though we don't expect to fill the buffer, while (it.next()) |h| { // even though we don't expect to fill the buffer,
// we don't get a length, but looks via stdlib source // we don't get a length, but looks via stdlib source
// it should be ok to call next on the undefined memory // it should be ok to call next on the undefined memory
log.debug(" {s}: {s}", .{ h.name, h.value }); log.debug(" {s}: {s}", .{ h.name, h.value });
try resp_headers.append(self.allocator, .{ try resp_headers.append(.{
.name = try (self.allocator.dupe(u8, h.name)), .name = try (self.allocator.dupe(u8, h.name)),
.value = try (self.allocator.dupe(u8, h.value)), .value = try (self.allocator.dupe(u8, h.value)),
}); });
} }
// This is directly lifted from fetch, as there is no function in
// 0.15.1 client to negotiate decompression
const decompress_buffer: []u8 = switch (response.head.content_encoding) {
.identity => &.{},
.zstd => try self.allocator.alloc(u8, std.compress.zstd.default_window_len),
.deflate, .gzip => try self.allocator.alloc(u8, std.compress.flate.max_window_len),
.compress => return error.UnsupportedCompressionMethod,
};
defer self.allocator.free(decompress_buffer);
var transfer_buffer: [64]u8 = undefined; log.debug("raw response body:\n{s}", .{resp_payload.items});
var decompress: std.http.Decompress = undefined;
const reader = response.readerDecompressing(&transfer_buffer, &decompress, decompress_buffer);
// Not sure on optimal size here, but should definitely be > 0
var aw = try std.Io.Writer.Allocating.initCapacity(self.allocator, 128);
defer aw.deinit();
const response_writer = &aw.writer;
_ = reader.streamRemaining(response_writer) catch |err| switch (err) {
error.ReadFailed => return response.bodyErr().?,
else => |e| return e,
};
log.debug("raw response body:\n{s}", .{aw.written()});
const rc = HttpResult{ const rc = HttpResult{
.response_code = @intFromEnum(response.head.status), .response_code = @intFromEnum(req.status),
.body = try aw.toOwnedSlice(), .body = try resp_payload.toOwnedSlice(),
.headers = try resp_headers.toOwnedSlice(self.allocator), .headers = try resp_headers.toOwnedSlice(),
.allocator = self.allocator, .allocator = self.allocator,
}; };
return rc; return rc;
@ -376,21 +305,15 @@ fn getRegion(service: []const u8, region: []const u8) []const u8 {
return region; return region;
} }
fn addHeaders( fn addHeaders(allocator: std.mem.Allocator, headers: *std.ArrayList(std.http.Header), host: []const u8, body: []const u8, content_type: []const u8, additional_headers: []const Header) !?[]const u8 {
allocator: std.mem.Allocator, // We don't need allocator and body because they were to add a
headers: *std.ArrayList(std.http.Header), // Content-Length header. But that is being added by the client send()
host: []const u8, // function, so we don't want it on the request twice. But I also feel
body: []const u8, // pretty strongly that send() should be providing us control, because
content_type: []const u8, // I think if we don't add it here, it won't get signed, and we would
additional_headers: []const Header, // really prefer it to be signed. So, we will wait and watch for this
) !?[]const u8 { // situation to change in stdlib
// We don't need body because they were to add a Content-Length header. But _ = allocator;
// that is being added by the client send() function, so we don't want it
// on the request twice. But I also feel pretty strongly that send() should
// be providing us control, because I think if we don't add it here, it
// won't get signed, and we would really prefer it to be signed. So, we
// will wait and watch for this situation to change in stdlib
_ = body; _ = body;
var has_content_type = false; var has_content_type = false;
for (additional_headers) |h| { for (additional_headers) |h| {
@ -399,12 +322,12 @@ fn addHeaders(
break; break;
} }
} }
try headers.append(allocator, .{ .name = "Accept", .value = "application/json" }); try headers.append(.{ .name = "Accept", .value = "application/json" });
try headers.append(allocator, .{ .name = "Host", .value = host }); try headers.append(.{ .name = "Host", .value = host });
try headers.append(allocator, .{ .name = "User-Agent", .value = "zig-aws 1.0" }); try headers.append(.{ .name = "User-Agent", .value = "zig-aws 1.0" });
if (!has_content_type) if (!has_content_type)
try headers.append(allocator, .{ .name = "Content-Type", .value = content_type }); try headers.append(.{ .name = "Content-Type", .value = content_type });
try headers.appendSlice(allocator, additional_headers); try headers.appendSlice(additional_headers);
return null; return null;
} }

View file

@ -157,7 +157,7 @@ pub const SigningError = error{
XAmzExpiresHeaderInRequest, XAmzExpiresHeaderInRequest,
/// Used if the request headers already includes x-amz-region-set /// Used if the request headers already includes x-amz-region-set
XAmzRegionSetHeaderInRequest, XAmzRegionSetHeaderInRequest,
} || error{OutOfMemory}; } || std.fmt.AllocPrintError;
const forbidden_headers = .{ const forbidden_headers = .{
.{ .name = "x-amz-content-sha256", .err = SigningError.XAmzContentSha256HeaderInRequest }, .{ .name = "x-amz-content-sha256", .err = SigningError.XAmzContentSha256HeaderInRequest },
@ -240,10 +240,6 @@ pub fn signRequest(allocator: std.mem.Allocator, request: base.Request, config:
// regardless of whether we're sticking the header on the request // regardless of whether we're sticking the header on the request
std.debug.assert(config.signed_body_header == .none or std.debug.assert(config.signed_body_header == .none or
config.signed_body_header == .sha256); config.signed_body_header == .sha256);
log.debug(
"Request body len: {d}. First 5 bytes (max): {s}",
.{ request.body.len, request.body[0..@min(request.body.len, 5)] },
);
const payload_hash = try hash(allocator, request.body, .sha256); const payload_hash = try hash(allocator, request.body, .sha256);
if (config.signed_body_header == .sha256) { if (config.signed_body_header == .sha256) {
// From the AWS nitro enclaves SDK, it appears that there is no reason // From the AWS nitro enclaves SDK, it appears that there is no reason
@ -316,12 +312,12 @@ pub fn signRequest(allocator: std.mem.Allocator, request: base.Request, config:
.name = "Authorization", .name = "Authorization",
.value = try std.fmt.allocPrint( .value = try std.fmt.allocPrint(
allocator, allocator,
"AWS4-HMAC-SHA256 Credential={s}/{s}, SignedHeaders={s}, Signature={x}", "AWS4-HMAC-SHA256 Credential={s}/{s}, SignedHeaders={s}, Signature={s}",
.{ .{
config.credentials.access_key, config.credentials.access_key,
scope, scope,
canonical_request.headers.signed_headers, canonical_request.headers.signed_headers,
signature, std.fmt.fmtSliceHexLower(signature),
}, },
), ),
}; };
@ -352,7 +348,7 @@ pub fn freeSignedRequest(allocator: std.mem.Allocator, request: *base.Request, c
pub const credentialsFn = *const fn ([]const u8) ?Credentials; pub const credentialsFn = *const fn ([]const u8) ?Credentials;
pub fn verifyServerRequest(allocator: std.mem.Allocator, request: *std.http.Server.Request, request_body_reader: *std.Io.Reader, credentials_fn: credentialsFn) !bool { pub fn verifyServerRequest(allocator: std.mem.Allocator, request: *std.http.Server.Request, request_body_reader: anytype, credentials_fn: credentialsFn) !bool {
var unverified_request = try UnverifiedRequest.init(allocator, request); var unverified_request = try UnverifiedRequest.init(allocator, request);
defer unverified_request.deinit(); defer unverified_request.deinit();
return verify(allocator, unverified_request, request_body_reader, credentials_fn); return verify(allocator, unverified_request, request_body_reader, credentials_fn);
@ -363,19 +359,17 @@ pub const UnverifiedRequest = struct {
target: []const u8, target: []const u8,
method: std.http.Method, method: std.http.Method,
allocator: std.mem.Allocator, allocator: std.mem.Allocator,
raw: *std.http.Server.Request,
pub fn init(allocator: std.mem.Allocator, request: *std.http.Server.Request) !UnverifiedRequest { pub fn init(allocator: std.mem.Allocator, request: *std.http.Server.Request) !UnverifiedRequest {
var al = std.ArrayList(std.http.Header){}; var al = std.ArrayList(std.http.Header).init(allocator);
defer al.deinit(allocator); defer al.deinit();
var it = request.iterateHeaders(); var it = request.iterateHeaders();
while (it.next()) |h| try al.append(allocator, h); while (it.next()) |h| try al.append(h);
return .{ return .{
.target = request.head.target, .target = request.head.target,
.method = request.head.method, .method = request.head.method,
.headers = try al.toOwnedSlice(allocator), .headers = try al.toOwnedSlice(),
.allocator = allocator, .allocator = allocator,
.raw = request,
}; };
} }
@ -393,7 +387,7 @@ pub const UnverifiedRequest = struct {
} }
}; };
pub fn verify(allocator: std.mem.Allocator, request: UnverifiedRequest, request_body_reader: *std.Io.Reader, credentials_fn: credentialsFn) !bool { pub fn verify(allocator: std.mem.Allocator, request: UnverifiedRequest, request_body_reader: anytype, credentials_fn: credentialsFn) !bool {
var arena = std.heap.ArenaAllocator.init(allocator); var arena = std.heap.ArenaAllocator.init(allocator);
defer arena.deinit(); defer arena.deinit();
const aa = arena.allocator(); const aa = arena.allocator();
@ -426,10 +420,10 @@ pub fn verify(allocator: std.mem.Allocator, request: UnverifiedRequest, request_
return verifyParsedAuthorization( return verifyParsedAuthorization(
aa, aa,
request, request,
request_body_reader,
credential.?, credential.?,
signed_headers.?, signed_headers.?,
signature.?, signature.?,
request_body_reader,
credentials_fn, credentials_fn,
); );
} }
@ -437,10 +431,10 @@ pub fn verify(allocator: std.mem.Allocator, request: UnverifiedRequest, request_
fn verifyParsedAuthorization( fn verifyParsedAuthorization(
allocator: std.mem.Allocator, allocator: std.mem.Allocator,
request: UnverifiedRequest, request: UnverifiedRequest,
request_body_reader: anytype,
credential: []const u8, credential: []const u8,
signed_headers: []const u8, signed_headers: []const u8,
signature: []const u8, signature: []const u8,
request_body_reader: *std.Io.Reader,
credentials_fn: credentialsFn, credentials_fn: credentialsFn,
) !bool { ) !bool {
// AWS4-HMAC-SHA256 // AWS4-HMAC-SHA256
@ -500,7 +494,7 @@ fn verifyParsedAuthorization(
.content_type = request.getFirstHeaderValue("content-type").?, .content_type = request.getFirstHeaderValue("content-type").?,
}; };
signed_request.query = request.target[signed_request.path.len..]; // TODO: should this be +1? query here would include '?' signed_request.query = request.target[signed_request.path.len..]; // TODO: should this be +1? query here would include '?'
signed_request.body = try request_body_reader.allocRemaining(allocator, .unlimited); signed_request.body = try request_body_reader.readAllAlloc(allocator, std.math.maxInt(usize));
defer allocator.free(signed_request.body); defer allocator.free(signed_request.body);
signed_request = try signRequest(allocator, signed_request, config); signed_request = try signRequest(allocator, signed_request, config);
defer freeSignedRequest(allocator, &signed_request, config); defer freeSignedRequest(allocator, &signed_request, config);
@ -551,7 +545,7 @@ fn getSigningKey(allocator: std.mem.Allocator, signing_date: []const u8, config:
defer { defer {
// secureZero avoids compiler optimizations that may say // secureZero avoids compiler optimizations that may say
// "WTF are you doing this thing? Looks like nothing to me. It's silly and we will remove it" // "WTF are you doing this thing? Looks like nothing to me. It's silly and we will remove it"
std.crypto.secureZero(u8, secret); // zero our copy of secret std.crypto.utils.secureZero(u8, secret); // zero our copy of secret
allocator.free(secret); allocator.free(secret);
} }
// log.debug("secret: {s}", .{secret}); // log.debug("secret: {s}", .{secret});
@ -679,7 +673,7 @@ fn canonicalUri(allocator: std.mem.Allocator, path: []const u8, double_encode: b
fn encodeParamPart(allocator: std.mem.Allocator, path: []const u8) ![]const u8 { fn encodeParamPart(allocator: std.mem.Allocator, path: []const u8) ![]const u8 {
const unreserved_marks = "-_.!~*'()"; const unreserved_marks = "-_.!~*'()";
var encoded = try std.ArrayList(u8).initCapacity(allocator, path.len); var encoded = try std.ArrayList(u8).initCapacity(allocator, path.len);
defer encoded.deinit(allocator); defer encoded.deinit();
for (path) |c| { for (path) |c| {
var should_encode = true; var should_encode = true;
for (unreserved_marks) |r| for (unreserved_marks) |r|
@ -691,16 +685,16 @@ fn encodeParamPart(allocator: std.mem.Allocator, path: []const u8) ![]const u8 {
should_encode = false; should_encode = false;
if (!should_encode) { if (!should_encode) {
try encoded.append(allocator, c); try encoded.append(c);
continue; continue;
} }
// Whatever remains, encode it // Whatever remains, encode it
try encoded.append(allocator, '%'); try encoded.append('%');
const hex = try std.fmt.allocPrint(allocator, "{X}", .{&[_]u8{c}}); const hex = try std.fmt.allocPrint(allocator, "{s}", .{std.fmt.fmtSliceHexUpper(&[_]u8{c})});
defer allocator.free(hex); defer allocator.free(hex);
try encoded.appendSlice(allocator, hex); try encoded.appendSlice(hex);
} }
return encoded.toOwnedSlice(allocator); return encoded.toOwnedSlice();
} }
// URI encode every byte except the unreserved characters: // URI encode every byte except the unreserved characters:
@ -721,7 +715,7 @@ fn encodeUri(allocator: std.mem.Allocator, path: []const u8) ![]u8 {
const reserved_characters = ";,/?:@&=+$#"; const reserved_characters = ";,/?:@&=+$#";
const unreserved_marks = "-_.!~*'()"; const unreserved_marks = "-_.!~*'()";
var encoded = try std.ArrayList(u8).initCapacity(allocator, path.len); var encoded = try std.ArrayList(u8).initCapacity(allocator, path.len);
defer encoded.deinit(allocator); defer encoded.deinit();
// if (std.mem.startsWith(u8, path, "/2017-03-31/tags/arn")) { // if (std.mem.startsWith(u8, path, "/2017-03-31/tags/arn")) {
// try encoded.appendSlice("/2017-03-31/tags/arn%25253Aaws%25253Alambda%25253Aus-west-2%25253A550620852718%25253Afunction%25253Aawsome-lambda-LambdaStackawsomeLambda"); // try encoded.appendSlice("/2017-03-31/tags/arn%25253Aaws%25253Alambda%25253Aus-west-2%25253A550620852718%25253Afunction%25253Aawsome-lambda-LambdaStackawsomeLambda");
// return encoded.toOwnedSlice(); // return encoded.toOwnedSlice();
@ -744,16 +738,16 @@ fn encodeUri(allocator: std.mem.Allocator, path: []const u8) ![]u8 {
should_encode = false; should_encode = false;
if (!should_encode) { if (!should_encode) {
try encoded.append(allocator, c); try encoded.append(c);
continue; continue;
} }
// Whatever remains, encode it // Whatever remains, encode it
try encoded.append(allocator, '%'); try encoded.append('%');
const hex = try std.fmt.allocPrint(allocator, "{X}", .{&[_]u8{c}}); const hex = try std.fmt.allocPrint(allocator, "{s}", .{std.fmt.fmtSliceHexUpper(&[_]u8{c})});
defer allocator.free(hex); defer allocator.free(hex);
try encoded.appendSlice(allocator, hex); try encoded.appendSlice(hex);
} }
return encoded.toOwnedSlice(allocator); return encoded.toOwnedSlice();
} }
fn canonicalQueryString(allocator: std.mem.Allocator, path: []const u8) ![]const u8 { fn canonicalQueryString(allocator: std.mem.Allocator, path: []const u8) ![]const u8 {
@ -806,25 +800,25 @@ fn canonicalQueryString(allocator: std.mem.Allocator, path: []const u8) ![]const
// Split this by component // Split this by component
var portions = std.mem.splitScalar(u8, query, '&'); var portions = std.mem.splitScalar(u8, query, '&');
var sort_me = std.ArrayList([]const u8){}; var sort_me = std.ArrayList([]const u8).init(allocator);
defer sort_me.deinit(allocator); defer sort_me.deinit();
while (portions.next()) |item| while (portions.next()) |item|
try sort_me.append(allocator, item); try sort_me.append(item);
std.sort.pdq([]const u8, sort_me.items, {}, lessThanBinary); std.sort.pdq([]const u8, sort_me.items, {}, lessThanBinary);
var normalized = try std.ArrayList(u8).initCapacity(allocator, path.len); var normalized = try std.ArrayList(u8).initCapacity(allocator, path.len);
defer normalized.deinit(allocator); defer normalized.deinit();
var first = true; var first = true;
for (sort_me.items) |i| { for (sort_me.items) |i| {
if (!first) try normalized.append(allocator, '&'); if (!first) try normalized.append('&');
first = false; first = false;
const first_equals = std.mem.indexOf(u8, i, "="); const first_equals = std.mem.indexOf(u8, i, "=");
if (first_equals == null) { if (first_equals == null) {
// Rare. This is "foo=" // Rare. This is "foo="
const normed_item = try encodeUri(allocator, i); const normed_item = try encodeUri(allocator, i);
defer allocator.free(normed_item); defer allocator.free(normed_item);
try normalized.appendSlice(allocator, i); // This should be encoded try normalized.appendSlice(i); // This should be encoded
try normalized.append(allocator, '='); try normalized.append('=');
continue; continue;
} }
@ -837,12 +831,12 @@ fn canonicalQueryString(allocator: std.mem.Allocator, path: []const u8) ![]const
// Double-encode any = in the value. But not anything else? // Double-encode any = in the value. But not anything else?
const weird_equals_in_value_thing = try replace(allocator, value, "%3D", "%253D"); const weird_equals_in_value_thing = try replace(allocator, value, "%3D", "%253D");
defer allocator.free(weird_equals_in_value_thing); defer allocator.free(weird_equals_in_value_thing);
try normalized.appendSlice(allocator, key); try normalized.appendSlice(key);
try normalized.append(allocator, '='); try normalized.append('=');
try normalized.appendSlice(allocator, weird_equals_in_value_thing); try normalized.appendSlice(weird_equals_in_value_thing);
} }
return normalized.toOwnedSlice(allocator); return normalized.toOwnedSlice();
} }
fn replace(allocator: std.mem.Allocator, haystack: []const u8, needle: []const u8, replacement_value: []const u8) ![]const u8 { fn replace(allocator: std.mem.Allocator, haystack: []const u8, needle: []const u8, replacement_value: []const u8) ![]const u8 {
@ -881,7 +875,7 @@ fn canonicalHeaders(allocator: std.mem.Allocator, headers: []const std.http.Head
allocator.free(h.name); allocator.free(h.name);
allocator.free(h.value); allocator.free(h.value);
} }
dest.deinit(allocator); dest.deinit();
} }
var total_len: usize = 0; var total_len: usize = 0;
var total_name_len: usize = 0; var total_name_len: usize = 0;
@ -911,15 +905,15 @@ fn canonicalHeaders(allocator: std.mem.Allocator, headers: []const std.http.Head
defer allocator.free(value); defer allocator.free(value);
const n = try std.ascii.allocLowerString(allocator, h.name); const n = try std.ascii.allocLowerString(allocator, h.name);
const v = try std.fmt.allocPrint(allocator, "{s}", .{value}); const v = try std.fmt.allocPrint(allocator, "{s}", .{value});
try dest.append(allocator, .{ .name = n, .value = v }); try dest.append(.{ .name = n, .value = v });
} }
std.sort.pdq(std.http.Header, dest.items, {}, lessThan); std.sort.pdq(std.http.Header, dest.items, {}, lessThan);
var dest_str = try std.ArrayList(u8).initCapacity(allocator, total_len); var dest_str = try std.ArrayList(u8).initCapacity(allocator, total_len);
defer dest_str.deinit(allocator); defer dest_str.deinit();
var signed_headers = try std.ArrayList(u8).initCapacity(allocator, total_name_len); var signed_headers = try std.ArrayList(u8).initCapacity(allocator, total_name_len);
defer signed_headers.deinit(allocator); defer signed_headers.deinit();
var first = true; var first = true;
for (dest.items) |h| { for (dest.items) |h| {
dest_str.appendSliceAssumeCapacity(h.name); dest_str.appendSliceAssumeCapacity(h.name);
@ -932,8 +926,8 @@ fn canonicalHeaders(allocator: std.mem.Allocator, headers: []const std.http.Head
signed_headers.appendSliceAssumeCapacity(h.name); signed_headers.appendSliceAssumeCapacity(h.name);
} }
return CanonicalHeaders{ return CanonicalHeaders{
.str = try dest_str.toOwnedSlice(allocator), .str = try dest_str.toOwnedSlice(),
.signed_headers = try signed_headers.toOwnedSlice(allocator), .signed_headers = try signed_headers.toOwnedSlice(),
}; };
} }
@ -978,7 +972,7 @@ fn hash(allocator: std.mem.Allocator, payload: []const u8, sig_type: SignatureTy
}; };
var out: [std.crypto.hash.sha2.Sha256.digest_length]u8 = undefined; var out: [std.crypto.hash.sha2.Sha256.digest_length]u8 = undefined;
std.crypto.hash.sha2.Sha256.hash(to_hash, &out, .{}); std.crypto.hash.sha2.Sha256.hash(to_hash, &out, .{});
return try std.fmt.allocPrint(allocator, "{x}", .{out}); return try std.fmt.allocPrint(allocator, "{s}", .{std.fmt.fmtSliceHexLower(&out)});
} }
// SignedHeaders + '\n' + // SignedHeaders + '\n' +
// HexEncode(Hash(RequestPayload)) // HexEncode(Hash(RequestPayload))
@ -1016,13 +1010,13 @@ test "canonical query" {
test "canonical headers" { test "canonical headers" {
const allocator = std.testing.allocator; const allocator = std.testing.allocator;
var headers = try std.ArrayList(std.http.Header).initCapacity(allocator, 5); var headers = try std.ArrayList(std.http.Header).initCapacity(allocator, 5);
defer headers.deinit(allocator); defer headers.deinit();
try headers.append(allocator, .{ .name = "Host", .value = "iam.amazonaws.com" }); try headers.append(.{ .name = "Host", .value = "iam.amazonaws.com" });
try headers.append(allocator, .{ .name = "Content-Type", .value = "application/x-www-form-urlencoded; charset=utf-8" }); try headers.append(.{ .name = "Content-Type", .value = "application/x-www-form-urlencoded; charset=utf-8" });
try headers.append(allocator, .{ .name = "User-Agent", .value = "This header should be skipped" }); try headers.append(.{ .name = "User-Agent", .value = "This header should be skipped" });
try headers.append(allocator, .{ .name = "My-header1", .value = " a b c " }); try headers.append(.{ .name = "My-header1", .value = " a b c " });
try headers.append(allocator, .{ .name = "X-Amz-Date", .value = "20150830T123600Z" }); try headers.append(.{ .name = "X-Amz-Date", .value = "20150830T123600Z" });
try headers.append(allocator, .{ .name = "My-header2", .value = " \"a b c\" " }); try headers.append(.{ .name = "My-header2", .value = " \"a b c\" " });
const expected = const expected =
\\content-type:application/x-www-form-urlencoded; charset=utf-8 \\content-type:application/x-www-form-urlencoded; charset=utf-8
\\host:iam.amazonaws.com \\host:iam.amazonaws.com
@ -1041,12 +1035,12 @@ test "canonical headers" {
test "canonical request" { test "canonical request" {
const allocator = std.testing.allocator; const allocator = std.testing.allocator;
var headers = try std.ArrayList(std.http.Header).initCapacity(allocator, 5); var headers = try std.ArrayList(std.http.Header).initCapacity(allocator, 5);
defer headers.deinit(allocator); defer headers.deinit();
try headers.append(allocator, .{ .name = "User-agent", .value = "c sdk v1.0" }); try headers.append(.{ .name = "User-agent", .value = "c sdk v1.0" });
// In contrast to AWS CRT (aws-c-auth), we add the date as part of the // In contrast to AWS CRT (aws-c-auth), we add the date as part of the
// signing operation. They add it as part of the canonicalization // signing operation. They add it as part of the canonicalization
try headers.append(allocator, .{ .name = "X-Amz-Date", .value = "20150830T123600Z" }); try headers.append(.{ .name = "X-Amz-Date", .value = "20150830T123600Z" });
try headers.append(allocator, .{ .name = "Host", .value = "example.amazonaws.com" }); try headers.append(.{ .name = "Host", .value = "example.amazonaws.com" });
const req = base.Request{ const req = base.Request{
.path = "/", .path = "/",
.method = "GET", .method = "GET",
@ -1101,10 +1095,10 @@ test "can sign" {
const allocator = std.testing.allocator; const allocator = std.testing.allocator;
var headers = try std.ArrayList(std.http.Header).initCapacity(allocator, 5); var headers = try std.ArrayList(std.http.Header).initCapacity(allocator, 5);
defer headers.deinit(allocator); defer headers.deinit();
try headers.append(allocator, .{ .name = "Content-Type", .value = "application/x-www-form-urlencoded; charset=utf-8" }); try headers.append(.{ .name = "Content-Type", .value = "application/x-www-form-urlencoded; charset=utf-8" });
try headers.append(allocator, .{ .name = "Content-Length", .value = "13" }); try headers.append(.{ .name = "Content-Length", .value = "13" });
try headers.append(allocator, .{ .name = "Host", .value = "example.amazonaws.com" }); try headers.append(.{ .name = "Host", .value = "example.amazonaws.com" });
const req = base.Request{ const req = base.Request{
.path = "/", .path = "/",
.query = "", .query = "",
@ -1171,27 +1165,25 @@ test "can verify server request" {
"X-Amz-Date: 20230908T170252Z\r\n" ++ "X-Amz-Date: 20230908T170252Z\r\n" ++
"x-amz-content-sha256: fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9\r\n" ++ "x-amz-content-sha256: fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9\r\n" ++
"Authorization: AWS4-HMAC-SHA256 Credential=ACCESS/20230908/us-west-2/s3/aws4_request, SignedHeaders=accept;content-length;content-type;host;x-amz-content-sha256;x-amz-date;x-amz-storage-class, Signature=fcc43ce73a34c9bd1ddf17e8a435f46a859812822f944f9eeb2aabcd64b03523\r\n\r\nbar"; "Authorization: AWS4-HMAC-SHA256 Credential=ACCESS/20230908/us-west-2/s3/aws4_request, SignedHeaders=accept;content-length;content-type;host;x-amz-content-sha256;x-amz-date;x-amz-storage-class, Signature=fcc43ce73a34c9bd1ddf17e8a435f46a859812822f944f9eeb2aabcd64b03523\r\n\r\nbar";
var reader = std.Io.Reader.fixed(req); var read_buffer: [1024]u8 = undefined;
var body_reader = std.Io.Reader.fixed("bar"); @memcpy(read_buffer[0..req.len], req);
var server: std.http.Server = .{ var server: std.http.Server = .{
.out = undefined, // We're not sending a response here .connection = undefined,
.reader = .{ .state = .ready,
.in = &reader, .read_buffer = &read_buffer,
.interface = undefined, .read_buffer_len = req.len,
.state = .received_head, .next_request_start = 0,
.max_head_len = req.len,
},
}; };
var request: std.http.Server.Request = .{ var request: std.http.Server.Request = .{
.server = &server, .server = &server,
.head = try std.http.Server.Request.Head.parse(req), .head_end = req.len - 3,
.head_buffer = req, .head = try std.http.Server.Request.Head.parse(read_buffer[0 .. req.len - 3]),
.reader_state = undefined,
}; };
// const old_level = std.testing.log_level;
// std.testing.log_level = .debug; // std.testing.log_level = .debug;
// defer std.testing.log_level = old_level; var fbs = std.io.fixedBufferStream("bar");
try std.testing.expect(try verifyServerRequest(allocator, &request, &body_reader, struct { try std.testing.expect(try verifyServerRequest(allocator, &request, fbs.reader(), struct {
cred: Credentials, cred: Credentials,
const Self = @This(); const Self = @This();
@ -1229,25 +1221,22 @@ test "can verify server request without x-amz-content-sha256" {
const req_data = head ++ body; const req_data = head ++ body;
var read_buffer: [2048]u8 = undefined; var read_buffer: [2048]u8 = undefined;
@memcpy(read_buffer[0..req_data.len], req_data); @memcpy(read_buffer[0..req_data.len], req_data);
var reader = std.Io.Reader.fixed(&read_buffer);
var body_reader = std.Io.Reader.fixed(body);
var server: std.http.Server = .{ var server: std.http.Server = .{
.out = undefined, // We're not sending a response here .connection = undefined,
.reader = .{ .state = .ready,
.interface = undefined, .read_buffer = &read_buffer,
.in = &reader, .read_buffer_len = req_data.len,
.state = .received_head, .next_request_start = 0,
.max_head_len = 1024,
},
}; };
var request: std.http.Server.Request = .{ var request: std.http.Server.Request = .{
.server = &server, .server = &server,
.head = try std.http.Server.Request.Head.parse(head), .head_end = head.len,
.head_buffer = head, .head = try std.http.Server.Request.Head.parse(read_buffer[0..head.len]),
.reader_state = undefined,
}; };
{ {
var h = try std.ArrayList(std.http.Header).initCapacity(allocator, 4); var h = std.ArrayList(std.http.Header).init(allocator);
defer h.deinit(allocator); defer h.deinit();
const signed_headers = &[_][]const u8{ "content-type", "host", "x-amz-date", "x-amz-target" }; const signed_headers = &[_][]const u8{ "content-type", "host", "x-amz-date", "x-amz-target" };
var it = request.iterateHeaders(); var it = request.iterateHeaders();
while (it.next()) |source| { while (it.next()) |source| {
@ -1256,7 +1245,7 @@ test "can verify server request without x-amz-content-sha256" {
match = std.ascii.eqlIgnoreCase(s, source.name); match = std.ascii.eqlIgnoreCase(s, source.name);
if (match) break; if (match) break;
} }
if (match) try h.append(allocator, .{ .name = source.name, .value = source.value }); if (match) try h.append(.{ .name = source.name, .value = source.value });
} }
const req = base.Request{ const req = base.Request{
.path = "/", .path = "/",
@ -1293,7 +1282,9 @@ test "can verify server request without x-amz-content-sha256" {
} }
{ // verification { // verification
try std.testing.expect(try verifyServerRequest(allocator, &request, &body_reader, struct { var fis = std.io.fixedBufferStream(body[0..]);
try std.testing.expect(try verifyServerRequest(allocator, &request, fis.reader(), struct {
cred: Credentials, cred: Credentials,
const Self = @This(); const Self = @This();

File diff suppressed because it is too large Load diff

47
src/case.zig Normal file
View file

@ -0,0 +1,47 @@
const std = @import("std");
const expectEqualStrings = std.testing.expectEqualStrings;
/// Converts a snake_case ASCII identifier to camelCase: underscores are
/// dropped and a lowercase letter that follows an underscore is uppercased.
/// Caller owns the returned slice and must free it with `allocator`.
/// Returns error.UnicodeNotSupported if `name` contains codepoints > 0xff.
/// NOTE(review): invalid UTF-8 input still panics via `catch unreachable`,
/// matching the original behavior.
pub fn snakeToCamel(allocator: std.mem.Allocator, name: []const u8) ![]u8 {
    var utf8_name = (std.unicode.Utf8View.init(name) catch unreachable).iterator();
    var target_inx: usize = 0;
    var previous_ascii: u8 = 0;
    var rc = try allocator.alloc(u8, name.len);
    // Don't leak the buffer when we bail out on a wide codepoint (or if the
    // fallback dupe below fails).
    errdefer allocator.free(rc);
    while (utf8_name.nextCodepoint()) |cp| {
        if (cp > 0xff) return error.UnicodeNotSupported;
        const ascii_char: u8 = @truncate(cp);
        if (ascii_char != '_') {
            if (previous_ascii == '_' and ascii_char >= 'a' and ascii_char <= 'z') {
                // Letter right after an underscore: uppercase it.
                rc[target_inx] = ascii_char - ('a' - 'A');
            } else {
                rc[target_inx] = ascii_char;
            }
            target_inx += 1;
        }
        previous_ascii = ascii_char;
    }
    if (target_inx == rc.len) return rc;
    // Shrink in place when the allocator allows it. If it refuses, we must NOT
    // return rc[0..target_inx] directly: freeing a slice whose length differs
    // from its allocation is invalid, so reallocate to the exact size instead.
    if (allocator.resize(rc, target_inx)) return rc[0..target_inx];
    const out = try allocator.dupe(u8, rc[0..target_inx]);
    allocator.free(rc);
    return out;
}
/// Converts a snake_case identifier to PascalCase by camel-casing it and then
/// uppercasing the first byte. Caller owns the returned slice and must free it
/// with `allocator`. Propagates any error from `snakeToCamel`.
pub fn snakeToPascal(allocator: std.mem.Allocator, name: []const u8) ![]u8 {
    const rc = try snakeToCamel(allocator, name);
    // Guard against empty output (e.g. "" or "_" input), which previously
    // panicked on the rc[0] index.
    if (rc.len > 0 and std.ascii.isLower(rc[0]))
        rc[0] = std.ascii.toUpper(rc[0]);
    return rc;
}
test "converts from snake to camelCase" {
    // "access_key_id" should drop underscores and capitalize the following letters.
    const alloc = std.testing.allocator;
    const result = try snakeToCamel(alloc, "access_key_id");
    defer alloc.free(result);
    try expectEqualStrings("accessKeyId", result);
}
test "single word" {
    // An input with no underscores must come back unchanged.
    const alloc = std.testing.allocator;
    const result = try snakeToCamel(alloc, "word");
    defer alloc.free(result);
    try expectEqualStrings("word", result);
}

View file

@ -34,8 +34,7 @@ pub fn log(
// Print the message to stderr, silently ignoring any errors // Print the message to stderr, silently ignoring any errors
std.debug.lockStdErr(); std.debug.lockStdErr();
defer std.debug.unlockStdErr(); defer std.debug.unlockStdErr();
var stderr_writer = std.fs.File.stderr().writer(&.{}); const stderr = std.io.getStdErr().writer();
const stderr = &stderr_writer.interface;
nosuspend stderr.print(prefix ++ format ++ "\n", args) catch return; nosuspend stderr.print(prefix ++ format ++ "\n", args) catch return;
} }
@ -63,14 +62,14 @@ pub fn main() anyerror!void {
var gpa = std.heap.GeneralPurposeAllocator(.{}){}; var gpa = std.heap.GeneralPurposeAllocator(.{}){};
defer _ = gpa.deinit(); defer _ = gpa.deinit();
const allocator = gpa.allocator(); const allocator = gpa.allocator();
var tests = try std.ArrayList(Tests).initCapacity(allocator, @typeInfo(Tests).@"enum".fields.len); var tests = std.ArrayList(Tests).init(allocator);
defer tests.deinit(allocator); defer tests.deinit();
var args = try std.process.argsWithAllocator(allocator); var args = try std.process.argsWithAllocator(allocator);
defer args.deinit(); defer args.deinit();
var stdout_buf: [4096]u8 = undefined; const stdout_raw = std.io.getStdOut().writer();
const stdout_raw = std.fs.File.stdout().writer(&stdout_buf); var bw = std.io.bufferedWriter(stdout_raw);
var stdout = stdout_raw.interface; defer bw.flush() catch unreachable;
defer stdout.flush() catch @panic("could not flush stdout"); const stdout = bw.writer();
var arg0: ?[]const u8 = null; var arg0: ?[]const u8 = null;
var proxy: ?std.http.Client.Proxy = null; var proxy: ?std.http.Client.Proxy = null;
while (args.next()) |arg| { while (args.next()) |arg| {
@ -100,14 +99,14 @@ pub fn main() anyerror!void {
} }
inline for (@typeInfo(Tests).@"enum".fields) |f| { inline for (@typeInfo(Tests).@"enum".fields) |f| {
if (std.mem.eql(u8, f.name, arg)) { if (std.mem.eql(u8, f.name, arg)) {
try tests.append(allocator, @field(Tests, f.name)); try tests.append(@field(Tests, f.name));
break; break;
} }
} }
} }
if (tests.items.len == 0) { if (tests.items.len == 0) {
inline for (@typeInfo(Tests).@"enum".fields) |f| inline for (@typeInfo(Tests).@"enum".fields) |f|
try tests.append(allocator, @field(Tests, f.name)); try tests.append(@field(Tests, f.name));
} }
std.log.info("Start\n", .{}); std.log.info("Start\n", .{});
@ -194,7 +193,7 @@ pub fn main() anyerror!void {
const arn = func.function_arn.?; const arn = func.function_arn.?;
// This is a bit ugly. Maybe a helper function in the library would help? // This is a bit ugly. Maybe a helper function in the library would help?
var tags = try std.ArrayList(aws.services.lambda.TagKeyValue).initCapacity(allocator, 1); var tags = try std.ArrayList(aws.services.lambda.TagKeyValue).initCapacity(allocator, 1);
defer tags.deinit(allocator); defer tags.deinit();
tags.appendAssumeCapacity(.{ .key = "Foo", .value = "Bar" }); tags.appendAssumeCapacity(.{ .key = "Foo", .value = "Bar" });
const req = services.lambda.tag_resource.Request{ .resource = arn, .tags = tags.items }; const req = services.lambda.tag_resource.Request{ .resource = arn, .tags = tags.items };
const addtag = try aws.Request(services.lambda.tag_resource).call(req, options); const addtag = try aws.Request(services.lambda.tag_resource).call(req, options);
@ -263,7 +262,7 @@ pub fn main() anyerror!void {
defer result.deinit(); defer result.deinit();
std.log.info("request id: {s}", .{result.response_metadata.request_id}); std.log.info("request id: {s}", .{result.response_metadata.request_id});
const list = result.response.key_group_list.?; const list = result.response.key_group_list.?;
std.log.info("key group list max: {d}", .{list.max_items}); std.log.info("key group list max: {?d}", .{list.max_items});
std.log.info("key group quantity: {d}", .{list.quantity}); std.log.info("key group quantity: {d}", .{list.quantity});
}, },
.rest_xml_work_with_s3 => { .rest_xml_work_with_s3 => {

View file

@ -14,7 +14,7 @@ pub fn Services(comptime service_imports: anytype) type {
.type = @TypeOf(import_field), .type = @TypeOf(import_field),
.default_value_ptr = &import_field, .default_value_ptr = &import_field,
.is_comptime = false, .is_comptime = false,
.alignment = std.meta.alignment(@TypeOf(import_field)), .alignment = 0,
}; };
} }

View file

@ -11,7 +11,7 @@ pub const EncodingOptions = struct {
field_name_transformer: fieldNameTransformerFn = defaultTransformer, field_name_transformer: fieldNameTransformerFn = defaultTransformer,
}; };
pub fn encode(allocator: std.mem.Allocator, obj: anytype, writer: *std.Io.Writer, comptime options: EncodingOptions) !void { pub fn encode(allocator: std.mem.Allocator, obj: anytype, writer: anytype, comptime options: EncodingOptions) !void {
_ = try encodeInternal(allocator, "", "", true, obj, writer, options); _ = try encodeInternal(allocator, "", "", true, obj, writer, options);
} }
@ -20,7 +20,7 @@ fn encodeStruct(
parent: []const u8, parent: []const u8,
first: bool, first: bool,
obj: anytype, obj: anytype,
writer: *std.Io.Writer, writer: anytype,
comptime options: EncodingOptions, comptime options: EncodingOptions,
) !bool { ) !bool {
var rc = first; var rc = first;
@ -41,7 +41,7 @@ pub fn encodeInternal(
field_name: []const u8, field_name: []const u8,
first: bool, first: bool,
obj: anytype, obj: anytype,
writer: *std.Io.Writer, writer: anytype,
comptime options: EncodingOptions, comptime options: EncodingOptions,
) !bool { ) !bool {
// @compileLog(@typeName(@TypeOf(obj))); // @compileLog(@typeName(@TypeOf(obj)));
@ -56,19 +56,10 @@ pub fn encodeInternal(
} else { } else {
if (!first) _ = try writer.write("&"); if (!first) _ = try writer.write("&");
// @compileLog(@typeInfo(@TypeOf(obj))); // @compileLog(@typeInfo(@TypeOf(obj)));
switch (ti.child) { if (ti.child == []const u8 or ti.child == u8)
// TODO: not sure this first one is valid. How should [][]const u8 be serialized here? try writer.print("{s}{s}={s}", .{ parent, field_name, obj })
[]const u8 => { else
// if (true) @panic("panic at the disco!"); try writer.print("{s}{s}={any}", .{ parent, field_name, obj });
std.log.warn(
"encoding object of type [][]const u8...pretty sure this is wrong {s}{s}={any}",
.{ parent, field_name, obj },
);
try writer.print("{s}{s}={any}", .{ parent, field_name, obj });
},
u8 => try writer.print("{s}{s}={s}", .{ parent, field_name, obj }),
else => try writer.print("{s}{s}={any}", .{ parent, field_name, obj }),
}
rc = false; rc = false;
}, },
.@"struct" => if (std.mem.eql(u8, "", field_name)) { .@"struct" => if (std.mem.eql(u8, "", field_name)) {
@ -104,29 +95,78 @@ pub fn encodeInternal(
return rc; return rc;
} }
/// Test helper: url-encodes `value` and verifies the bytes streamed by
/// `encode` exactly match `expected`, without buffering the whole output.
/// Fails with error.TooMuchData if more bytes are written than expected,
/// error.DifferentData if any written chunk diverges from `expected`, and
/// error.NotEnoughData if the encoder stops before producing all of it.
fn testencode(allocator: std.mem.Allocator, expected: []const u8, value: anytype, comptime options: EncodingOptions) !void {
// A writer that compares each incoming chunk against the remaining expected
// output instead of storing it (pre-0.15 std.io.Writer context API).
const ValidationWriter = struct {
const Self = @This();
pub const Writer = std.io.Writer(*Self, Error, write);
pub const Error = error{
TooMuchData,
DifferentData,
};
// Suffix of `expected` that has not been matched yet.
expected_remaining: []const u8,
fn init(exp: []const u8) Self {
return .{ .expected_remaining = exp };
}
pub fn writer(self: *Self) Writer {
return .{ .context = self };
}
// Compares `bytes` against the head of expected_remaining; on success,
// consumes the matched prefix and reports the full chunk as written.
fn write(self: *Self, bytes: []const u8) Error!usize {
// std.debug.print("{s}\n", .{bytes});
if (self.expected_remaining.len < bytes.len) {
std.log.warn(
\\====== expected this output: =========
\\{s}
\\======== instead found this: =========
\\{s}
\\======================================
, .{
self.expected_remaining,
bytes,
});
return error.TooMuchData;
}
if (!std.mem.eql(u8, self.expected_remaining[0..bytes.len], bytes)) {
std.log.warn(
\\====== expected this output: =========
\\{s}
\\======== instead found this: =========
\\{s}
\\======================================
, .{
self.expected_remaining[0..bytes.len],
bytes,
});
return error.DifferentData;
}
self.expected_remaining = self.expected_remaining[bytes.len..];
return bytes.len;
}
};
var vos = ValidationWriter.init(expected);
try encode(allocator, value, vos.writer(), options);
// Anything left unmatched means the encoder produced too little output.
if (vos.expected_remaining.len > 0) return error.NotEnoughData;
}
test "can urlencode an object" { test "can urlencode an object" {
const expected = "Action=GetCallerIdentity&Version=2021-01-01"; try testencode(
var aw = std.Io.Writer.Allocating.init(std.testing.allocator);
defer aw.deinit();
try encode(
std.testing.allocator, std.testing.allocator,
"Action=GetCallerIdentity&Version=2021-01-01",
.{ .Action = "GetCallerIdentity", .Version = "2021-01-01" }, .{ .Action = "GetCallerIdentity", .Version = "2021-01-01" },
&aw.writer,
.{}, .{},
); );
try std.testing.expectEqualStrings(expected, aw.written());
} }
test "can urlencode an object with integer" { test "can urlencode an object with integer" {
const expected = "Action=GetCallerIdentity&Duration=32"; try testencode(
var aw = std.Io.Writer.Allocating.init(std.testing.allocator);
defer aw.deinit();
try encode(
std.testing.allocator, std.testing.allocator,
"Action=GetCallerIdentity&Duration=32",
.{ .Action = "GetCallerIdentity", .Duration = 32 }, .{ .Action = "GetCallerIdentity", .Duration = 32 },
&aw.writer,
.{}, .{},
); );
try std.testing.expectEqualStrings(expected, aw.written());
} }
const UnsetValues = struct { const UnsetValues = struct {
action: ?[]const u8 = null, action: ?[]const u8 = null,
@ -135,28 +175,30 @@ const UnsetValues = struct {
val2: ?[]const u8 = null, val2: ?[]const u8 = null,
}; };
test "can urlencode an object with unset values" { test "can urlencode an object with unset values" {
const expected = "action=GetCallerIdentity&duration=32"; // var buffer = std.ArrayList(u8).init(std.testing.allocator);
var aw = std.Io.Writer.Allocating.init(std.testing.allocator); // defer buffer.deinit();
defer aw.deinit(); // const writer = buffer.writer();
try encode( // try encode(
// std.testing.allocator,
// UnsetValues{ .action = "GetCallerIdentity", .duration = 32 },
// writer,
// .{},
// );
// std.debug.print("\n\nEncoded as '{s}'\n", .{buffer.items});
try testencode(
std.testing.allocator, std.testing.allocator,
"action=GetCallerIdentity&duration=32",
UnsetValues{ .action = "GetCallerIdentity", .duration = 32 }, UnsetValues{ .action = "GetCallerIdentity", .duration = 32 },
&aw.writer,
.{}, .{},
); );
try std.testing.expectEqualStrings(expected, aw.written());
} }
test "can urlencode a complex object" { test "can urlencode a complex object" {
const expected = "Action=GetCallerIdentity&Version=2021-01-01&complex.innermember=foo"; try testencode(
var aw = std.Io.Writer.Allocating.init(std.testing.allocator);
defer aw.deinit();
try encode(
std.testing.allocator, std.testing.allocator,
"Action=GetCallerIdentity&Version=2021-01-01&complex.innermember=foo",
.{ .Action = "GetCallerIdentity", .Version = "2021-01-01", .complex = .{ .innermember = "foo" } }, .{ .Action = "GetCallerIdentity", .Version = "2021-01-01", .complex = .{ .innermember = "foo" } },
&aw.writer,
.{}, .{},
); );
try std.testing.expectEqualStrings(expected, aw.written());
} }
const Filter = struct { const Filter = struct {
@ -179,28 +221,26 @@ const Request: type = struct {
all_regions: ?bool = null, all_regions: ?bool = null,
}; };
test "can urlencode an EC2 Filter" { test "can urlencode an EC2 Filter" {
// TODO: This is a strange test, mainly to document current behavior // TODO: Fix this encoding...
// EC2 filters are supposed to be something like testencode(
// Filter.Name=foo&Filter.Values=bar or, when there is more, something like
// Filter.1.Name=instance-type&Filter.1.Value.1=m1.small&Filter.1.Value.2=m1.large&Filter.2.Name=block-device-mapping.status&Filter.2.Value.1=attached
//
// This looks like a real PITA, so until it is actually needed, this is
// a placeholder test to track what actual encoding is happening. This
// changed between zig 0.14.x and 0.15.1, and I'm not entirely sure why
// yet, but because the remaining functionality is fine, we're going with
// this
const zig_14x_expected = "filters={ url.Filter{ .name = { 102, 111, 111 }, .values = { { ... } } } }";
_ = zig_14x_expected;
const expected = "filters={ .{ .name = { 102, 111, 111 }, .values = { { ... } } } }";
var aw = std.Io.Writer.Allocating.init(std.testing.allocator);
defer aw.deinit();
try encode(
std.testing.allocator, std.testing.allocator,
"filters={ url.Filter{ .name = { 102, 111, 111 }, .values = { { ... } } } }",
Request{ Request{
.filters = @constCast(&[_]Filter{.{ .name = "foo", .values = @constCast(&[_][]const u8{"bar"}) }}), .filters = @constCast(&[_]Filter{.{ .name = "foo", .values = @constCast(&[_][]const u8{"bar"}) }}),
}, },
&aw.writer,
.{}, .{},
); ) catch |err| {
try std.testing.expectEqualStrings(expected, aw.written()); var al = std.ArrayList(u8).init(std.testing.allocator);
defer al.deinit();
try encode(
std.testing.allocator,
Request{
.filters = @constCast(&[_]Filter{.{ .name = "foo", .values = @constCast(&[_][]const u8{"bar"}) }}),
},
al.writer(),
.{},
);
std.log.warn("Error found. Full encoding is '{s}'", .{al.items});
return err;
};
} }

View file

@ -26,14 +26,12 @@ pub const Element = struct {
attributes: AttributeList, attributes: AttributeList,
children: ContentList, children: ContentList,
next_sibling: ?*Element = null, next_sibling: ?*Element = null,
allocator: std.mem.Allocator,
fn init(tag: []const u8, alloc: Allocator) Element { fn init(tag: []const u8, alloc: Allocator) Element {
return .{ return .{
.tag = tag, .tag = tag,
.attributes = AttributeList{}, .attributes = AttributeList.init(alloc),
.children = ContentList{}, .children = ContentList.init(alloc),
.allocator = alloc,
}; };
} }
@ -456,7 +454,7 @@ fn tryParseElement(ctx: *ParseContext, alloc: Allocator, parent: ?*Element) !?*E
while (ctx.eatWs()) { while (ctx.eatWs()) {
const attr = (try tryParseAttr(ctx, alloc)) orelse break; const attr = (try tryParseAttr(ctx, alloc)) orelse break;
try element.attributes.append(element.allocator, attr); try element.attributes.append(attr);
} }
if (ctx.eatStr("/>")) { if (ctx.eatStr("/>")) {
@ -473,7 +471,7 @@ fn tryParseElement(ctx: *ParseContext, alloc: Allocator, parent: ?*Element) !?*E
} }
const content = try parseContent(ctx, alloc, element); const content = try parseContent(ctx, alloc, element);
try element.children.append(element.allocator, content); try element.children.append(content);
} }
const closing_tag = try parseNameNoDupe(ctx); const closing_tag = try parseNameNoDupe(ctx);

View file

@ -53,7 +53,7 @@ pub const XmlSerializeError = error{
pub fn stringify( pub fn stringify(
value: anytype, value: anytype,
options: StringifyOptions, options: StringifyOptions,
writer: *std.Io.Writer, writer: anytype,
) !void { ) !void {
// Write XML declaration if requested // Write XML declaration if requested
if (options.include_declaration) if (options.include_declaration)
@ -62,9 +62,9 @@ pub fn stringify(
// Start serialization with the root element // Start serialization with the root element
const root_name = options.root_name; const root_name = options.root_name;
if (@typeInfo(@TypeOf(value)) != .optional or value == null) if (@typeInfo(@TypeOf(value)) != .optional or value == null)
try serializeValue(value, root_name, options, writer, 0) try serializeValue(value, root_name, options, writer.any(), 0)
else else
try serializeValue(value.?, root_name, options, writer, 0); try serializeValue(value.?, root_name, options, writer.any(), 0);
} }
/// Serializes a value to XML and returns an allocated string /// Serializes a value to XML and returns an allocated string
@ -73,10 +73,10 @@ pub fn stringifyAlloc(
value: anytype, value: anytype,
options: StringifyOptions, options: StringifyOptions,
) ![]u8 { ) ![]u8 {
var list = std.Io.Writer.Allocating.init(allocator); var list = std.ArrayList(u8).init(allocator);
defer list.deinit(); errdefer list.deinit();
try stringify(value, options, &list.writer); try stringify(value, options, list.writer());
return list.toOwnedSlice(); return list.toOwnedSlice();
} }
@ -85,7 +85,7 @@ fn serializeValue(
value: anytype, value: anytype,
element_name: ?[]const u8, element_name: ?[]const u8,
options: StringifyOptions, options: StringifyOptions,
writer: *std.Io.Writer, writer: anytype,
depth: usize, depth: usize,
) !void { ) !void {
const T = @TypeOf(value); const T = @TypeOf(value);
@ -274,7 +274,7 @@ fn serializeValue(
try writeClose(writer, element_name); try writeClose(writer, element_name);
} }
fn writeClose(writer: *std.Io.Writer, element_name: ?[]const u8) !void { fn writeClose(writer: anytype, element_name: ?[]const u8) !void {
// Close element tag // Close element tag
if (element_name) |n| { if (element_name) |n| {
try writer.writeAll("</"); try writer.writeAll("</");
@ -284,7 +284,7 @@ fn writeClose(writer: *std.Io.Writer, element_name: ?[]const u8) !void {
} }
/// Writes indentation based on depth and indent level /// Writes indentation based on depth and indent level
fn writeIndent(writer: *std.Io.Writer, depth: usize, whitespace: StringifyOptions.Whitespace) std.Io.Writer.Error!void { fn writeIndent(writer: anytype, depth: usize, whitespace: StringifyOptions.Whitespace) @TypeOf(writer).Error!void {
var char: u8 = ' '; var char: u8 = ' ';
const n_chars = switch (whitespace) { const n_chars = switch (whitespace) {
.minified => return, .minified => return,
@ -298,16 +298,16 @@ fn writeIndent(writer: *std.Io.Writer, depth: usize, whitespace: StringifyOption
break :blk depth; break :blk depth;
}, },
}; };
try writer.splatBytesAll(&.{char}, n_chars); try writer.writeByteNTimes(char, n_chars);
} }
fn serializeString( fn serializeString(
writer: *std.Io.Writer, writer: anytype,
element_name: ?[]const u8, element_name: ?[]const u8,
value: []const u8, value: []const u8,
options: StringifyOptions, options: StringifyOptions,
depth: usize, depth: usize,
) error{ WriteFailed, OutOfMemory }!void { ) @TypeOf(writer).Error!void {
if (options.emit_strings_as_arrays) { if (options.emit_strings_as_arrays) {
// if (true) return error.seestackrun; // if (true) return error.seestackrun;
for (value) |c| { for (value) |c| {
@ -333,7 +333,7 @@ fn serializeString(
try escapeString(writer, value); try escapeString(writer, value);
} }
/// Escapes special characters in XML strings /// Escapes special characters in XML strings
fn escapeString(writer: *std.Io.Writer, value: []const u8) std.Io.Writer.Error!void { fn escapeString(writer: anytype, value: []const u8) @TypeOf(writer).Error!void {
for (value) |c| { for (value) |c| {
switch (c) { switch (c) {
'&' => try writer.writeAll("&amp;"), '&' => try writer.writeAll("&amp;"),
@ -413,8 +413,7 @@ test "stringify basic types" {
{ {
const result = try stringifyAlloc(allocator, 3.14, .{}); const result = try stringifyAlloc(allocator, 3.14, .{});
defer allocator.free(result); defer allocator.free(result);
// zig 0.14.x outputs 3.14e0, but zig 0.15.1 outputs 3.14. Either *should* be acceptable try testing.expectEqualStrings("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<root>3.14e0</root>", result);
try testing.expectEqualStrings("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<root>3.14</root>", result);
} }
// Test string // Test string

View file

@ -381,17 +381,14 @@ fn parseInternal(comptime T: type, element: *xml.Element, options: ParseOptions)
log.debug("type = {s}, style = {s}, ptr_info.child == {s}, element = {s}", .{ @typeName(T), @tagName(array_style), @typeName(ptr_info.child), element.tag }); log.debug("type = {s}, style = {s}, ptr_info.child == {s}, element = {s}", .{ @typeName(T), @tagName(array_style), @typeName(ptr_info.child), element.tag });
var children = std.ArrayList(ptr_info.child){}; var children = std.ArrayList(ptr_info.child).init(allocator);
defer children.deinit(allocator); defer children.deinit();
switch (array_style) { switch (array_style) {
.collection => { .collection => {
var iterator = element.elements(); var iterator = element.elements();
while (iterator.next()) |child_element| { while (iterator.next()) |child_element| {
try children.append( try children.append(try parseInternal(ptr_info.child, child_element, options));
allocator,
try parseInternal(ptr_info.child, child_element, options),
);
} }
}, },
.repeated_root => { .repeated_root => {
@ -399,15 +396,12 @@ fn parseInternal(comptime T: type, element: *xml.Element, options: ParseOptions)
while (current) |el| : (current = el.next_sibling) { while (current) |el| : (current = el.next_sibling) {
if (!std.mem.eql(u8, el.tag, element.tag)) continue; if (!std.mem.eql(u8, el.tag, element.tag)) continue;
try children.append( try children.append(try parseInternal(ptr_info.child, el, options));
allocator,
try parseInternal(ptr_info.child, el, options),
);
} }
}, },
} }
return children.toOwnedSlice(allocator); return children.toOwnedSlice();
} }
return try allocator.dupe(u8, element.children.items[0].CharData); return try allocator.dupe(u8, element.children.items[0].CharData);
}, },

View file

@ -1,3 +0,0 @@
{
"ignore": ["lib/json/src/json.zig"]
}