From 0e63e501db1e9e3cba40c88aab8293ec67e222ee Mon Sep 17 00:00:00 2001 From: Emil Lerch Date: Thu, 6 Nov 2025 12:37:30 -0800 Subject: [PATCH 01/22] add note about nightly build --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index fdcf7d2..80eec97 100644 --- a/README.md +++ b/README.md @@ -9,6 +9,8 @@ AWS SDK for Zig [![Build Status: Zig Nightly](https://git.lerch.org/lobo/aws-sdk-for-zig/actions/workflows/zig-nightly.yaml/badge.svg)](https://git.lerch.org/lobo/aws-sdk-for-zig/actions?workflow=zig-nightly.yaml&state=closed) +**NOTE**: Nightly should be working currently, but a [bug in the http Client](https://github.com/ziglang/zig/issues/25811) is breaking the example + [Zig 0.14.1](https://ziglang.org/download/#release-0.14.1): [![Build Status: Zig 0.14.x](https://git.lerch.org/lobo/aws-sdk-for-zig/actions/workflows/zig-previous.yaml/badge.svg)](https://git.lerch.org/lobo/aws-sdk-for-zig/actions?workflow=zig-previous.yaml&state=closed) From 79213a991d288bdeb02c8fce27ff1dbd675c19d8 Mon Sep 17 00:00:00 2001 From: Emil Lerch Date: Thu, 6 Nov 2025 13:08:21 -0800 Subject: [PATCH 02/22] temporary force nightly to home server On the home server, git.lerch.org will resolve to an A record (split-horizon DNS). This works around https://github.com/ziglang/zig/issues/25811. --- .gitea/workflows/zig-nightly.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.gitea/workflows/zig-nightly.yaml b/.gitea/workflows/zig-nightly.yaml index c33ee59..d3f3a92 100644 --- a/.gitea/workflows/zig-nightly.yaml +++ b/.gitea/workflows/zig-nightly.yaml @@ -15,7 +15,8 @@ jobs: # TODO: Remove this after https://github.com/ziglang/zig/issues/21815 is # addressed options: --cap-add CAP_SYS_PTRACE - runs-on: ubuntu-latest + #runs-on: ubuntu-latest + runs-on: ubuntu-latest-with-hsm # Need to use the default container with node and all that, so we can # use JS-based actions like actions/checkout@v3... 
# container: From 0dc0154c16a6ccef74ddc0ca97083eea1b2a3c16 Mon Sep 17 00:00:00 2001 From: Emil Lerch Date: Sat, 15 Nov 2025 11:12:25 -0800 Subject: [PATCH 03/22] Revert "temporary force nightly to home server" This reverts commit 79213a991d288bdeb02c8fce27ff1dbd675c19d8. --- .gitea/workflows/zig-nightly.yaml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.gitea/workflows/zig-nightly.yaml b/.gitea/workflows/zig-nightly.yaml index d3f3a92..c33ee59 100644 --- a/.gitea/workflows/zig-nightly.yaml +++ b/.gitea/workflows/zig-nightly.yaml @@ -15,8 +15,7 @@ jobs: # TODO: Remove this after https://github.com/ziglang/zig/issues/21815 is # addressed options: --cap-add CAP_SYS_PTRACE - #runs-on: ubuntu-latest - runs-on: ubuntu-latest-with-hsm + runs-on: ubuntu-latest # Need to use the default container with node and all that, so we can # use JS-based actions like actions/checkout@v3... # container: From 6d80c2f56b69c12e68d6fec75ec4d3ab448b0a3d Mon Sep 17 00:00:00 2001 From: Emil Lerch Date: Sat, 15 Nov 2025 11:22:09 -0800 Subject: [PATCH 04/22] remove message about zig nightly - issue is resolved --- README.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/README.md b/README.md index 80eec97..fdcf7d2 100644 --- a/README.md +++ b/README.md @@ -9,8 +9,6 @@ AWS SDK for Zig [![Build Status: Zig Nightly](https://git.lerch.org/lobo/aws-sdk-for-zig/actions/workflows/zig-nightly.yaml/badge.svg)](https://git.lerch.org/lobo/aws-sdk-for-zig/actions?workflow=zig-nightly.yaml&state=closed) -**NOTE**: Nightly should be working currently, but a [bug in the http Client](https://github.com/ziglang/zig/issues/25811) is breaking the example - [Zig 0.14.1](https://ziglang.org/download/#release-0.14.1): [![Build Status: Zig 0.14.x](https://git.lerch.org/lobo/aws-sdk-for-zig/actions/workflows/zig-previous.yaml/badge.svg)](https://git.lerch.org/lobo/aws-sdk-for-zig/actions?workflow=zig-previous.yaml&state=closed) From d4fdd74f5b0496fb6af6765afddb3ca52ab6202f Mon Sep 17 
00:00:00 2001 From: Emil Lerch Date: Thu, 29 Jan 2026 15:06:28 -0800 Subject: [PATCH 05/22] uncomment pub const aws in build.zig as this seems to be fixed in 0.15.2 --- build.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.zig b/build.zig index 51b993b..962d8e9 100644 --- a/build.zig +++ b/build.zig @@ -3,7 +3,7 @@ const Builder = @import("std").Build; const models_subdir = "codegen/sdk-codegen/aws-models/"; // note will probably not work on windows -// UNCOMMENT AFTER MODEL GEN TO USE IN BUILD SCRIPTS //pub const aws = @import("src/aws.zig"); +pub const aws = @import("src/aws.zig"); const test_targets = [_]std.Target.Query{ .{}, // native From df963c53163fda1895a78bc7c5ca7d5bd47c693e Mon Sep 17 00:00:00 2001 From: Emil Lerch Date: Thu, 29 Jan 2026 15:35:11 -0800 Subject: [PATCH 06/22] fix project packaging I believe this broke in 4a6c84e, but have not bisected. --- .gitea/workflows/build.yaml | 6 +- .mise.toml | 2 +- build.zig | 142 ++++++++++++++++++++++++++++++++++++ 3 files changed, 146 insertions(+), 4 deletions(-) diff --git a/.gitea/workflows/build.yaml b/.gitea/workflows/build.yaml index d36045f..f5ea89f 100644 --- a/.gitea/workflows/build.yaml +++ b/.gitea/workflows/build.yaml @@ -44,11 +44,11 @@ jobs: # should be using git archive, but we need our generated code to be part of it - name: Package source code with generated models run: | - sed -i 's#// UNCOMMENT AFTER MODEL GEN TO USE IN BUILD SCRIPTS //##' build.zig + zig build package tar -czf ${{ runner.temp }}/${{ github.sha }}-with-models.tar.gz \ --format ustar \ - --exclude 'zig-*' \ - * + -C zig-out/package \ + . # Something in this PR broke this transform. I don't mind removing it, but # the PR attempts to handle situations with or without a prefix, but it # doesn't. 
I have not yet determined what the problem is, though diff --git a/.mise.toml b/.mise.toml index 52bb2e6..8c63293 100644 --- a/.mise.toml +++ b/.mise.toml @@ -1,5 +1,5 @@ [tools] pre-commit = "latest" "ubi:DonIsaac/zlint" = "latest" -zig = "0.15.1" +zig = "0.15.2" zls = "0.15.0" diff --git a/build.zig b/build.zig index 962d8e9..ab5a576 100644 --- a/build.zig +++ b/build.zig @@ -212,6 +212,13 @@ pub fn build(b: *Builder) !void { } else { b.installArtifact(exe); } + + // Package step - creates distribution source directory + const pkg_step = PackageStep.create(b, cg_output_dir); + pkg_step.step.dependOn(cg); + + const package = b.step("package", "Copy code to zig-out/package with generated models"); + package.dependOn(&pkg_step.step); } fn configure(compile: *std.Build.Module, modules: std.StringHashMap(*std.Build.Module), include_time: bool) void { @@ -251,3 +258,138 @@ fn getDependencyModules(b: *std.Build, args: anytype) !std.StringHashMap(*std.Bu return result; } + +/// Custom build step that creates a distribution source directory +/// This copies all source files plus the generated service models into a +/// package directory suitable for distribution +const PackageStep = struct { + step: std.Build.Step, + cg_output_dir: std.Build.LazyPath, + + const base_id: std.Build.Step.Id = .custom; + + /// Files to include in the package (relative to build root) + const package_files = [_][]const u8{ + "build.zig", + "build.zig.zon", + "README.md", + "LICENSE", + }; + + /// Directories to include in the package (relative to build root) + const package_dirs = [_][]const u8{ + "src", + "lib", + }; + + pub fn create(owner: *std.Build, cg_output_dir: std.Build.LazyPath) *PackageStep { + const self = owner.allocator.create(PackageStep) catch @panic("OOM"); + self.* = .{ + .step = std.Build.Step.init(.{ + .id = base_id, + .name = "copy generated files", + .owner = owner, + .makeFn = make, + }), + .cg_output_dir = cg_output_dir, + }; + return self; + } + + fn make(step: 
*std.Build.Step, options: std.Build.Step.MakeOptions) anyerror!void { + _ = options; + const self: *PackageStep = @fieldParentPtr("step", step); + const b = step.owner; + + // Get the path to generated models + const models_path = self.cg_output_dir.getPath2(b, &self.step); + + // Create output directory for packaging + const package_dir = b.pathJoin(&.{ "zig-out", "package" }); + const models_dest_dir = b.pathJoin(&.{ package_dir, "src", "models" }); + std.fs.cwd().makePath(models_dest_dir) catch |err| { + return step.fail("Failed to create package directory: {}", .{err}); + }; + + // Copy all source files to package directory + for (package_files) |file_name| + copyFile(b, b.build_root.handle, file_name, package_dir) catch {}; + + // Copy directories + for (package_dirs) |dir_name| + copyDirRecursive(b, b.build_root.handle, dir_name, package_dir) catch |err| { + return step.fail("Failed to copy directory '{s}': {}", .{ dir_name, err }); + }; + + // Copy generated models to src/models/ + copyGeneratedModels(b, models_path, models_dest_dir) catch |err| { + return step.fail("Failed to copy generated models: {}", .{err}); + }; + + step.result_cached = false; + } + + fn copyFile(b: *std.Build, src_dir: std.fs.Dir, file_path: []const u8, dest_prefix: []const u8) !void { + const dest_path = b.pathJoin(&.{ dest_prefix, file_path }); + + // Ensure parent directory exists + if (std.fs.path.dirname(dest_path)) |parent| + std.fs.cwd().makePath(parent) catch {}; + + src_dir.copyFile(file_path, std.fs.cwd(), dest_path, .{}) catch return; + } + + fn copyDirRecursive(b: *std.Build, src_base: std.fs.Dir, dir_path: []const u8, dest_prefix: []const u8) !void { + var src_dir = src_base.openDir(dir_path, .{ .iterate = true }) catch return; + defer src_dir.close(); + + var walker = try src_dir.walk(b.allocator); + defer walker.deinit(); + + while (try walker.next()) |entry| { + // Skip zig build artifact directories + if (std.mem.indexOf(u8, entry.path, "zig-out") != null or + 
std.mem.indexOf(u8, entry.path, ".zig-cache") != null or + std.mem.indexOf(u8, entry.path, "zig-cache") != null) + continue; + + const src_path = b.pathJoin(&.{ dir_path, entry.path }); + const dest_path = b.pathJoin(&.{ dest_prefix, dir_path, entry.path }); + + switch (entry.kind) { + .directory => std.fs.cwd().makePath(dest_path) catch {}, + .file => { + // Ensure parent directory exists + if (std.fs.path.dirname(dest_path)) |parent| { + std.fs.cwd().makePath(parent) catch {}; + } + src_base.copyFile(src_path, std.fs.cwd(), dest_path, .{}) catch {}; + }, + .sym_link => { + var link_buf: [std.fs.max_path_bytes]u8 = undefined; + const link_target = entry.dir.readLink(entry.basename, &link_buf) catch continue; + // Ensure parent directory exists + if (std.fs.path.dirname(dest_path)) |parent| { + std.fs.cwd().makePath(parent) catch {}; + } + std.fs.cwd().symLink(link_target, dest_path, .{}) catch {}; + }, + else => {}, + } + } + } + + fn copyGeneratedModels(b: *std.Build, models_path: []const u8, models_dest_dir: []const u8) !void { + var models_dir = std.fs.cwd().openDir(models_path, .{ .iterate = true }) catch + return error.ModelsNotFound; + defer models_dir.close(); + + var iter = models_dir.iterate(); + while (try iter.next()) |entry| { + if (entry.kind != .file) continue; + + const dest_path = b.pathJoin(&.{ models_dest_dir, entry.name }); + models_dir.copyFile(entry.name, std.fs.cwd(), dest_path, .{}) catch continue; + } + } +}; From 4fed9954cb0851c32acfc0f1f0aa3a5a2539cd62 Mon Sep 17 00:00:00 2001 From: Emil Lerch Date: Fri, 30 Jan 2026 08:31:16 -0800 Subject: [PATCH 07/22] avoid ./ prefix on tar paths See https://github.com/ziglang/zig/issues/23152 --- .gitea/workflows/build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitea/workflows/build.yaml b/.gitea/workflows/build.yaml index f5ea89f..f8ad8e9 100644 --- a/.gitea/workflows/build.yaml +++ b/.gitea/workflows/build.yaml @@ -48,7 +48,7 @@ jobs: tar -czf ${{ runner.temp }}/${{ 
github.sha }}-with-models.tar.gz \ --format ustar \ -C zig-out/package \ - . + * # Something in this PR broke this transform. I don't mind removing it, but # the PR attempts to handle situations with or without a prefix, but it # doesn't. I have not yet determined what the problem is, though From 615f92c6540a6b9ddd3f54e515946dcaf65f8167 Mon Sep 17 00:00:00 2001 From: Emil Lerch Date: Fri, 30 Jan 2026 08:41:06 -0800 Subject: [PATCH 08/22] glob expansion needs to be in target dir --- .gitea/workflows/build.yaml | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/.gitea/workflows/build.yaml b/.gitea/workflows/build.yaml index f8ad8e9..1a363f6 100644 --- a/.gitea/workflows/build.yaml +++ b/.gitea/workflows/build.yaml @@ -45,10 +45,7 @@ jobs: - name: Package source code with generated models run: | zig build package - tar -czf ${{ runner.temp }}/${{ github.sha }}-with-models.tar.gz \ - --format ustar \ - -C zig-out/package \ - * + (cd zig-out/package && tar -czf ${{ runner.temp }}/${{ github.sha }}-with-models.tar.gz --format ustar *) # Something in this PR broke this transform. I don't mind removing it, but # the PR attempts to handle situations with or without a prefix, but it # doesn't. I have not yet determined what the problem is, though From efdef66fdbb2500d33a79a0b8d1855dd1bb20d56 Mon Sep 17 00:00:00 2001 From: Emil Lerch Date: Fri, 30 Jan 2026 09:38:10 -0800 Subject: [PATCH 09/22] bifrucate the service_model module based pre-packaging --- build.zig | 43 +++++++++++++++++++++++++++++++++++++------ 1 file changed, 37 insertions(+), 6 deletions(-) diff --git a/build.zig b/build.zig index ab5a576..0501c49 100644 --- a/build.zig +++ b/build.zig @@ -114,12 +114,41 @@ pub fn build(b: *Builder) !void { cg.dependOn(&cg_cmd.step); - exe.step.dependOn(cg); + // Each module will need access to the generated AWS modules. These + // are all imported by service_manifest.zig, which is a generated list + // of services created by the codegen process. 
+ // + // First, we need to check if pre-generated models exist, which only happens + // for packaged distribution. + // + // The idea here is that if we have a packaged distibution (tarball with + // models available, we are pre-generated, do not need the codegen step + // (and in fact do not have that available), and our service_manifest + // module needs to be the pre-packaged file. + // + // If we do not have a packaged distribution, the file will not exist, + // because it is generated by codegen and will live in the zig cache directory, + // so we depend on the codegen step and the service_manifest module will + // be based on the codegen output itself. + // + // Most of this complication comes from the fact that we want to enable + // consuming build.zig files to be able to use the SDK at build time for + // things like code deployments, e.g. https://git.lerch.org/lobo/lambda-zig + const has_pre_generated = + if (b.build_root.handle.access("src/models/service_manifest.zig", .{})) true else |_| false; + + // Only depend on codegen if we don't have pre-generated models + if (!has_pre_generated) + exe.step.dependOn(cg); + + // Use pre-generated models if available, otherwise use codegen output + const service_manifest_source: std.Build.LazyPath = if (has_pre_generated) + b.path("src/models/service_manifest.zig") + else + cg_output_dir.path(b, "service_manifest.zig"); - // This allows us to have each module depend on the - // generated service manifest. 
const service_manifest_module = b.createModule(.{ - .root_source_file = cg_output_dir.path(b, "service_manifest.zig"), + .root_source_file = service_manifest_source, .target = target, .optimize = optimize, }); @@ -179,7 +208,8 @@ pub fn build(b: *Builder) !void { .filters = test_filters, }); - unit_tests.step.dependOn(cg); + if (!has_pre_generated) + unit_tests.step.dependOn(cg); unit_tests.use_llvm = !no_llvm; const run_unit_tests = b.addRunArtifact(unit_tests); @@ -202,7 +232,8 @@ pub fn build(b: *Builder) !void { .filters = test_filters, }); smoke_test.use_llvm = !no_llvm; - smoke_test.step.dependOn(cg); + if (!has_pre_generated) + smoke_test.step.dependOn(cg); const run_smoke_test = b.addRunArtifact(smoke_test); From b4eddb6f82551c906e56353112260bc72462794f Mon Sep 17 00:00:00 2001 From: Emil Lerch Date: Fri, 30 Jan 2026 10:01:52 -0800 Subject: [PATCH 10/22] update example --- example/build.zig.zon | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/example/build.zig.zon b/example/build.zig.zon index 491e016..7566cc8 100644 --- a/example/build.zig.zon +++ b/example/build.zig.zon @@ -6,8 +6,8 @@ .dependencies = .{ .aws = .{ - .url = "https://git.lerch.org/api/packages/lobo/generic/aws-sdk-with-models/e41f98b389539c8bc6b1a231d25e2980318e5ef4/e41f98b389539c8bc6b1a231d25e2980318e5ef4-with-models.tar.gz", - .hash = "aws-0.0.1-SbsFcI0RCgBdf1nak95gi1kAtI6sv3Ntb7BPETH30fpS", + .url = "https://git.lerch.org/api/packages/lobo/generic/aws-sdk-with-models/efdef66fdbb2500d33a79a0b8d1855dd1bb20d56/efdef66fdbb2500d33a79a0b8d1855dd1bb20d56-with-models.tar.gz", + .hash = "aws-0.0.1-SbsFcObojgODT_JTNT3eMlZr-BUOU5PT_0y1nyMzqUkW", }, }, } From f788eed35cdf9340d7803fe551ad473738608fcf Mon Sep 17 00:00:00 2001 From: Emil Lerch Date: Fri, 30 Jan 2026 11:58:27 -0800 Subject: [PATCH 11/22] update example --- example/build.zig.zon | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/example/build.zig.zon b/example/build.zig.zon index 7566cc8..3cba877 
100644 --- a/example/build.zig.zon +++ b/example/build.zig.zon @@ -6,8 +6,8 @@ .dependencies = .{ .aws = .{ - .url = "https://git.lerch.org/api/packages/lobo/generic/aws-sdk-with-models/efdef66fdbb2500d33a79a0b8d1855dd1bb20d56/efdef66fdbb2500d33a79a0b8d1855dd1bb20d56-with-models.tar.gz", - .hash = "aws-0.0.1-SbsFcObojgODT_JTNT3eMlZr-BUOU5PT_0y1nyMzqUkW", + .url = "git+https://git.lerch.org/lobo/aws-sdk-for-zig.git?ref=master#efdef66fdbb2500d33a79a0b8d1855dd1bb20d56", + .hash = "aws-0.0.1-SbsFcLgtCgAndtGhoOyzQfmFtUux4tadFZv0tC6TAnL8", }, }, } From fdc208996900f1b17824b436a73df884993533fb Mon Sep 17 00:00:00 2001 From: Emil Lerch Date: Fri, 30 Jan 2026 12:04:30 -0800 Subject: [PATCH 12/22] remove build.zig aws constant This was always problematic, and bypasses the build system. It broke earlier when moving to proper modules, and really, it is not worth using. The idea was to allow usage of the SDK in a build context, but that does not work anyway, because the build operates in a sandboxed environment that effectively bars things like connecting to TLS endpoints. That is a feature of the build system, not a bug, and issues like https://github.com/ziglang/zig/issues/14286 demonstrate that the zig team wants to sandbox even further. For downstream, the right idea here is actually to create an executable that depends on aws and run it as part of the build. This is where https://git.lerch.org/lobo/lambda-zig is heading. 
--- build.zig | 2 -- 1 file changed, 2 deletions(-) diff --git a/build.zig b/build.zig index 0501c49..321b8af 100644 --- a/build.zig +++ b/build.zig @@ -3,8 +3,6 @@ const Builder = @import("std").Build; const models_subdir = "codegen/sdk-codegen/aws-models/"; // note will probably not work on windows -pub const aws = @import("src/aws.zig"); - const test_targets = [_]std.Target.Query{ .{}, // native .{ .cpu_arch = .x86_64, .os_tag = .linux }, From 6e34e83933aaa1120b7d0049f458608fdd6fa27b Mon Sep 17 00:00:00 2001 From: Emil Lerch Date: Sun, 1 Feb 2026 16:22:52 -0800 Subject: [PATCH 13/22] do not emit null optional fields --- codegen/src/serialization/json.zig | 25 +++++++++++++++++++------ src/aws.zig | 30 ++++++++++++++++++++++++++---- src/aws_test.zig | 27 ++++++++++++++++++++------- 3 files changed, 65 insertions(+), 17 deletions(-) diff --git a/codegen/src/serialization/json.zig b/codegen/src/serialization/json.zig index 02d5b1c..499e850 100644 --- a/codegen/src/serialization/json.zig +++ b/codegen/src/serialization/json.zig @@ -34,9 +34,9 @@ pub fn generateToJsonFunction(shape_id: []const u8, writer: *std.Io.Writer, stat const member_value = try getMemberValueJson(allocator, "self", member); defer allocator.free(member_value); - try writer.print("try jw.objectField(\"{s}\");\n", .{member.json_key}); try writeMemberJson( .{ + .object_field_name = member.json_key, .shape_id = member.target, .field_name = member.field_name, .field_value = member_value, @@ -146,6 +146,8 @@ fn writeMemberValue( } const WriteMemberJsonParams = struct { + object_field_name: []const u8, + quote_object_field_name: bool = true, shape_id: []const u8, field_name: []const u8, field_value: []const u8, @@ -196,9 +198,9 @@ fn writeStructureJson(params: WriteMemberJsonParams, writer: *std.Io.Writer) !vo const member_value = try getMemberValueJson(allocator, object_value, member); defer allocator.free(member_value); - try writer.print("try jw.objectField(\"{s}\");\n", .{member.json_key}); try 
writeMemberJson( .{ + .object_field_name = member.json_key, .shape_id = member.target, .field_name = member.field_name, .field_value = member_value, @@ -214,7 +216,7 @@ fn writeStructureJson(params: WriteMemberJsonParams, writer: *std.Io.Writer) !vo if (is_optional) { try writer.writeAll("} else {\n"); - try writer.writeAll("try jw.write(null);\n"); + try writer.writeAll("//try jw.write(null);\n"); try writer.writeAll("}\n"); } } @@ -268,7 +270,7 @@ fn writeListJson(list: smithy_tools.ListShape, params: WriteMemberJsonParams, wr if (list_is_optional) { try writer.writeAll("} else {\n"); - try writer.writeAll("try jw.write(null);\n"); + try writer.writeAll("//try jw.write(null);\n"); try writer.writeAll("}\n"); } } @@ -327,9 +329,10 @@ fn writeMapJson(map: smithy_tools.MapShape, params: WriteMemberJsonParams, write // start loop try writer.print("for ({s}) |{s}|", .{ map_value, map_value_capture }); try writer.writeAll("{\n"); - try writer.print("try jw.objectField({s});\n", .{map_capture_key}); try writeMemberJson(.{ + .object_field_name = map_capture_key, + .quote_object_field_name = false, .shape_id = map.value, .field_name = "value", .field_value = map_capture_value, @@ -345,7 +348,7 @@ fn writeMapJson(map: smithy_tools.MapShape, params: WriteMemberJsonParams, write if (map_is_optional) { try writer.writeAll("} else {\n"); - try writer.writeAll("try jw.write(null);\n"); + try writer.writeAll("//try jw.write(null);\n"); try writer.writeAll("}\n"); } } @@ -361,7 +364,16 @@ fn writeMemberJson(params: WriteMemberJsonParams, writer: *std.Io.Writer) anyerr const shape_info = try smithy_tools.getShapeInfo(shape_id, state.file_state.shapes); const shape = shape_info.shape; + const quote = if (params.quote_object_field_name) "\"" else ""; + const is_optional = smithy_tools.shapeIsOptional(params.member.traits); + if (is_optional) { + try writer.print("if ({s}) |_|\n", .{params.field_value}); + try writer.writeAll("{\n"); + } + try writer.print("try 
jw.objectField({s}{s}{s});\n", .{ quote, params.object_field_name, quote }); + if (state.getTypeRecurrenceCount(shape_id) > 2) { + if (is_optional) try writer.writeAll("\n}\n"); return; } @@ -389,4 +401,5 @@ fn writeMemberJson(params: WriteMemberJsonParams, writer: *std.Io.Writer) anyerr .short => try writeScalarJson("short", params, writer), .service, .resource, .operation, .member, .set => std.debug.panic("Shape type not supported: {}", .{shape}), } + if (is_optional) try writer.writeAll("\n}\n"); } diff --git a/src/aws.zig b/src/aws.zig index f764cce..cf318e2 100644 --- a/src/aws.zig +++ b/src/aws.zig @@ -231,8 +231,18 @@ pub fn Request(comptime request_action: anytype) type { var buffer = std.Io.Writer.Allocating.init(options.client.allocator); defer buffer.deinit(); if (Self.service_meta.aws_protocol == .rest_json_1) { - if (std.mem.eql(u8, "PUT", aws_request.method) or std.mem.eql(u8, "POST", aws_request.method)) - try buffer.writer.print("{f}", .{std.json.fmt(request, .{ .whitespace = .indent_4 })}); + if (std.mem.eql(u8, "PUT", aws_request.method) or std.mem.eql(u8, "POST", aws_request.method)) { + // Buried in the tests are our answer here: + // https://github.com/smithy-lang/smithy/blob/main/smithy-aws-protocol-tests/model/restJson1/json-structs.smithy#L71C24-L71C78 + // documentation: "Rest Json should not serialize null structure values", + try buffer.writer.print( + "{f}", + .{std.json.fmt(request, .{ + .whitespace = .indent_4, + .emit_null_optional_fields = false, + })}, + ); + } } aws_request.body = buffer.written(); var rest_xml_body: ?[]const u8 = null; @@ -320,11 +330,20 @@ pub fn Request(comptime request_action: anytype) type { // smithy spec, "A null value MAY be provided or omitted // for a boxed member with no observable difference." 
But we're // seeing a lot of differences here between spec and reality + // + // This is deliciously unclear: + // https://github.com/smithy-lang/smithy/blob/main/smithy-aws-protocol-tests/model/awsJson1_1/null.smithy#L36 + // + // It looks like struct nulls are meant to be dropped, but sparse + // lists/maps included. We'll err here on the side of eliminating them const body = try std.fmt.allocPrint( options.client.allocator, "{f}", - .{std.json.fmt(request, .{ .whitespace = .indent_4 })}, + .{std.json.fmt(request, .{ + .whitespace = .indent_4, + .emit_null_optional_fields = false, + })}, ); defer options.client.allocator.free(body); @@ -1193,7 +1212,10 @@ fn buildPath( "{f}", .{std.json.fmt( @field(request, field.name), - .{ .whitespace = .indent_4 }, + .{ + .whitespace = .indent_4, + .emit_null_optional_fields = false, + }, )}, ); const trimmed_replacement_val = std.mem.trim(u8, replacement_buffer.written(), "\""); diff --git a/src/aws_test.zig b/src/aws_test.zig index 3e92483..a05967e 100644 --- a/src/aws_test.zig +++ b/src/aws_test.zig @@ -129,7 +129,7 @@ test "proper serialization for kms" { const parsed_body = try std.json.parseFromSlice(struct { KeyId: []const u8, Plaintext: []const u8, - EncryptionContext: ?struct {}, + EncryptionContext: ?struct {} = null, GrantTokens: [][]const u8, EncryptionAlgorithm: []const u8, DryRun: bool, @@ -166,7 +166,6 @@ test "basic json request serialization" { try buffer.writer.print("{f}", .{std.json.fmt(request, .{ .whitespace = .indent_4 })}); try std.testing.expectEqualStrings( \\{ - \\ "ExclusiveStartTableName": null, \\ "Limit": 1 \\} , buffer.written()); @@ -632,7 +631,7 @@ test "json_1_0_query_with_input: dynamodb listTables runtime" { try req_actuals.expectHeader("X-Amz-Target", "DynamoDB_20120810.ListTables"); const parsed_body = try std.json.parseFromSlice(struct { - ExclusiveStartTableName: ?[]const u8, + ExclusiveStartTableName: ?[]const u8 = null, Limit: u8, }, std.testing.allocator, req_actuals.body.?, .{}); 
defer parsed_body.deinit(); @@ -701,7 +700,7 @@ test "json_1_1_query_with_input: ecs listClusters runtime" { try req_actuals.expectHeader("X-Amz-Target", "AmazonEC2ContainerServiceV20141113.ListClusters"); const parsed_body = try std.json.parseFromSlice(struct { - nextToken: ?[]const u8, + nextToken: ?[]const u8 = null, maxResults: u8, }, std.testing.allocator, req_actuals.body.?, .{}); defer parsed_body.deinit(); @@ -741,8 +740,8 @@ test "json_1_1_query_no_input: ecs listClusters runtime" { try req_actuals.expectHeader("X-Amz-Target", "AmazonEC2ContainerServiceV20141113.ListClusters"); const parsed_body = try std.json.parseFromSlice(struct { - nextToken: ?[]const u8, - maxResults: ?u8, + nextToken: ?[]const u8 = null, + maxResults: ?u8 = null, }, std.testing.allocator, req_actuals.body.?, .{}); defer parsed_body.deinit(); @@ -1250,6 +1249,20 @@ test "jsonStringify" { try std.testing.expectEqualStrings("1234", json_parsed.value.arn); try std.testing.expectEqualStrings("bar", json_parsed.value.tags.foo); } +test "jsonStringify does not emit null values on serialization" { + { + const lambda = (Services(.{.lambda}){}).lambda; + const request = lambda.CreateFunctionRequest{ + .function_name = "foo", + .role = "bar", + .code = .{}, + }; + + const request_json = try std.fmt.allocPrint(std.testing.allocator, "{f}", .{std.json.fmt(request, .{})}); + defer std.testing.allocator.free(request_json); + try std.testing.expect(std.mem.indexOf(u8, request_json, "null") == null); + } +} test "jsonStringify nullable object" { // structure is not null @@ -1272,7 +1285,7 @@ test "jsonStringify nullable object" { FunctionVersion: []const u8, Name: []const u8, RoutingConfig: struct { - AdditionalVersionWeights: ?struct {}, + AdditionalVersionWeights: ?struct {} = null, }, }, std.testing.allocator, request_json, .{ .ignore_unknown_fields = true }); defer json_parsed.deinit(); From 742a820eebc5f2d447307d7b0c3afcf751398a21 Mon Sep 17 00:00:00 2001 From: Emil Lerch Date: Mon, 2 Feb 2026 
16:11:03 -0800 Subject: [PATCH 14/22] add credential options to aws options so profile can be passed --- src/aws.zig | 4 +++- src/aws_http.zig | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/src/aws.zig b/src/aws.zig index cf318e2..0afdd97 100644 --- a/src/aws.zig +++ b/src/aws.zig @@ -6,6 +6,7 @@ const date = @import("date"); const json = @import("json"); const zeit = @import("zeit"); +const credentials = @import("aws_credentials.zig"); const awshttp = @import("aws_http.zig"); const url = @import("url.zig"); const servicemodel = @import("servicemodel.zig"); @@ -19,7 +20,6 @@ const scoped_log = std.log.scoped(.aws); /// controls are insufficient (e.g. use in build script) pub fn globalLogControl(aws_level: std.log.Level, http_level: std.log.Level, signing_level: std.log.Level, off: bool) void { const signing = @import("aws_signing.zig"); - const credentials = @import("aws_credentials.zig"); logs_off = off; signing.logs_off = off; credentials.logs_off = off; @@ -84,6 +84,7 @@ pub const Options = struct { dualstack: bool = false, success_http_code: i64 = 200, client: Client, + credential_options: credentials.Options = .{}, diagnostics: ?*Diagnostics = null, @@ -415,6 +416,7 @@ pub fn Request(comptime request_action: anytype) type { .dualstack = options.dualstack, .sigv4_service_name = Self.service_meta.sigv4_name, .mock = options.mock, + .credential_options = options.credential_options, }, ); defer response.deinit(); diff --git a/src/aws_http.zig b/src/aws_http.zig index bf14d1d..89fefb4 100644 --- a/src/aws_http.zig +++ b/src/aws_http.zig @@ -90,6 +90,8 @@ pub const Options = struct { dualstack: bool = false, sigv4_service_name: ?[]const u8 = null, + credential_options: credentials.Options = .{}, + mock: ?Mock = null, }; @@ -186,7 +188,7 @@ pub const AwsHttp = struct { defer endpoint.deinit(); log.debug("Calling endpoint {s}", .{endpoint.uri}); // TODO: Should we allow customization here? 
- const creds = try credentials.getCredentials(self.allocator, .{}); + const creds = try credentials.getCredentials(self.allocator, options.credential_options); defer creds.deinit(); const signing_config: signing.Config = .{ .region = getRegion(service, options.region), From 686b18d1f4329e80cf6d9b916eaa0c231333edb9 Mon Sep 17 00:00:00 2001 From: Emil Lerch Date: Mon, 2 Feb 2026 16:13:09 -0800 Subject: [PATCH 15/22] use profile_name in credential options --- src/aws_credentials.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/aws_credentials.zig b/src/aws_credentials.zig index 11b620a..2e79e7c 100644 --- a/src/aws_credentials.zig +++ b/src/aws_credentials.zig @@ -408,7 +408,7 @@ fn getProfileCredentials(allocator: std.mem.Allocator, options: Profile) !?auth. // Get active profile const profile = (try getEnvironmentVariable(allocator, "AWS_PROFILE")) orelse - try allocator.dupe(u8, "default"); + try allocator.dupe(u8, options.profile_name orelse "default"); defer allocator.free(profile); log.debug("Looking for file credentials using profile '{s}'", .{profile}); log.debug("Checking credentials file: {s}", .{creds_file_path.evaluated_path}); From 31240cd21e6b6dde0156c7dc581c177ac63fb628 Mon Sep 17 00:00:00 2001 From: Emil Lerch Date: Mon, 2 Feb 2026 17:11:18 -0800 Subject: [PATCH 16/22] provide consumers a way to change order of precedence based on cli flag --- src/aws_credentials.zig | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/src/aws_credentials.zig b/src/aws_credentials.zig index 2e79e7c..cc8ad74 100644 --- a/src/aws_credentials.zig +++ b/src/aws_credentials.zig @@ -69,6 +69,11 @@ pub const Profile = struct { config_file: ?[]const u8 = null, /// Config file. Defaults to AWS_PROFILE or default profile_name: ?[]const u8 = null, + /// Profile name specified via command line should change precedence of operation, + /// moves credential file checking to the top. 
The SDK does not have a + /// way to know whether this is coming
default_path = default_path orelse creds_file_path.home; const config_file_path = try filePath( allocator, - options.credential_file, - "AWS_SHARED_CREDENTIALS_FILE", + options.config_file, + "AWS_CONFIG_FILE", default_path, "config", ); From fd568f26b976e5f84b27261fccd7b5c2fc9a14c0 Mon Sep 17 00:00:00 2001 From: Emil Lerch Date: Mon, 2 Feb 2026 18:00:00 -0800 Subject: [PATCH 18/22] refactor rest calls so all fields are included appropriately --- src/aws.zig | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/src/aws.zig b/src/aws.zig index 0afdd97..be50673 100644 --- a/src/aws.zig +++ b/src/aws.zig @@ -304,14 +304,9 @@ pub fn Request(comptime request_action: anytype) type { } } - return try Self.callAws(aws_request, .{ - .success_http_code = Action.http_config.success_code, - .region = options.region, - .dualstack = options.dualstack, - .client = options.client, - .diagnostics = options.diagnostics, - .mock = options.mock, - }); + var rest_options = options; + rest_options.success_http_code = Action.http_config.success_code; + return try Self.callAws(aws_request, rest_options); } /// Calls using one of the json protocols (json_1_0, json_1_1) From c1df6ef3a6f4eb4eb75608c3cc6488cffc300793 Mon Sep 17 00:00:00 2001 From: Emil Lerch Date: Tue, 3 Feb 2026 15:30:59 -0800 Subject: [PATCH 19/22] avoid outputting errors if diagnostics field provided --- src/aws.zig | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/src/aws.zig b/src/aws.zig index be50673..86e78a4 100644 --- a/src/aws.zig +++ b/src/aws.zig @@ -417,11 +417,21 @@ pub fn Request(comptime request_action: anytype) type { defer response.deinit(); if (response.response_code != options.success_http_code and response.response_code != 404) { - try reportTraffic(options.client.allocator, "Call Failed", aws_request, response, log.err); + // If the consumer prrovided diagnostics, they are likely handling + // this error themselves. 
We'll not spam them with log.err + // output. Note that we may need to add additional information + // in diagnostics, as reportTraffic provides more information + // than what exists in the diagnostics data if (options.diagnostics) |d| { d.http_code = response.response_code; d.response_body = try d.allocator.dupe(u8, response.body); - } + } else try reportTraffic( + options.client.allocator, + "Call Failed", + aws_request, + response, + log.err, + ); return error.HttpFailure; } From 5c7aed071f6251d53a1627080a21d604ff58f0a5 Mon Sep 17 00:00:00 2001 From: Emil Lerch Date: Wed, 4 Feb 2026 00:37:27 -0800 Subject: [PATCH 20/22] switch to http status enum --- codegen/src/main.zig | 2 +- src/aws.zig | 10 +++++----- src/aws_http.zig | 2 +- src/aws_http_base.zig | 2 +- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/codegen/src/main.zig b/codegen/src/main.zig index a8aa1c7..53918bd 100644 --- a/codegen/src/main.zig +++ b/codegen/src/main.zig @@ -645,7 +645,7 @@ fn generateOperation(allocator: std.mem.Allocator, operation: smithy.ShapeInfo, try outputIndent(child_state, writer); try writer.print(".uri = \"{s}\",\n", .{trait.http.uri}); try outputIndent(child_state, writer); - try writer.print(".success_code = {d},\n", .{trait.http.code}); + try writer.print(".success_code = @as(u10, {d}),\n", .{trait.http.code}); try outputIndent(state, writer); _ = try writer.write("};\n\n"); } diff --git a/src/aws.zig b/src/aws.zig index 86e78a4..91854aa 100644 --- a/src/aws.zig +++ b/src/aws.zig @@ -82,7 +82,7 @@ const log = struct { pub const Options = struct { region: []const u8 = "aws-global", dualstack: bool = false, - success_http_code: i64 = 200, + success_http_status: std.http.Status = .ok, client: Client, credential_options: credentials.Options = .{}, @@ -92,7 +92,7 @@ pub const Options = struct { }; pub const Diagnostics = struct { - http_code: i64, + response_status: std.http.Status, response_body: []const u8, allocator: std.mem.Allocator, @@ -305,7 +305,7 @@ pub 
fn Request(comptime request_action: anytype) type { } var rest_options = options; - rest_options.success_http_code = Action.http_config.success_code; + rest_options.success_http_status = @enumFromInt(Action.http_config.success_code); return try Self.callAws(aws_request, rest_options); } @@ -416,14 +416,14 @@ pub fn Request(comptime request_action: anytype) type { ); defer response.deinit(); - if (response.response_code != options.success_http_code and response.response_code != 404) { + if (response.response_code != options.success_http_status and response.response_code != .not_found) { // If the consumer prrovided diagnostics, they are likely handling // this error themselves. We'll not spam them with log.err // output. Note that we may need to add additional information // in diagnostics, as reportTraffic provides more information // than what exists in the diagnostics data if (options.diagnostics) |d| { - d.http_code = response.response_code; + d.response_status = response.response_code; d.response_body = try d.allocator.dupe(u8, response.body); } else try reportTraffic( options.client.allocator, diff --git a/src/aws_http.zig b/src/aws_http.zig index 89fefb4..33cec29 100644 --- a/src/aws_http.zig +++ b/src/aws_http.zig @@ -363,7 +363,7 @@ pub const AwsHttp = struct { log.debug("raw response body:\n{s}", .{aw.written()}); const rc = HttpResult{ - .response_code = @intFromEnum(response.head.status), + .response_code = response.head.status, .body = try aw.toOwnedSlice(), .headers = try resp_headers.toOwnedSlice(self.allocator), .allocator = self.allocator, diff --git a/src/aws_http_base.zig b/src/aws_http_base.zig index 4a8f8e4..7065c94 100644 --- a/src/aws_http_base.zig +++ b/src/aws_http_base.zig @@ -9,7 +9,7 @@ pub const Request = struct { headers: []const std.http.Header = &.{}, }; pub const Result = struct { - response_code: u16, // actually 3 digits can fit in u10 + response_code: std.http.Status, body: []const u8, headers: []const std.http.Header, allocator: 
std.mem.Allocator, From 45d9a369ae7b1e3513b3f3a5df824f8b7cff7675 Mon Sep 17 00:00:00 2001 From: Emil Lerch Date: Wed, 4 Feb 2026 16:49:17 -0800 Subject: [PATCH 21/22] update versions --- .gitea/workflows/build.yaml | 2 +- .gitea/workflows/zig-mach.yaml | 2 +- .gitea/workflows/zig-nightly.yaml | 2 +- .gitea/workflows/zig-previous.yaml | 2 +- .mise.toml | 6 +++--- .pre-commit-config.yaml | 2 +- 6 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.gitea/workflows/build.yaml b/.gitea/workflows/build.yaml index 1a363f6..a867ffe 100644 --- a/.gitea/workflows/build.yaml +++ b/.gitea/workflows/build.yaml @@ -18,7 +18,7 @@ jobs: - name: Check out repository code uses: actions/checkout@v4 - name: Setup Zig - uses: https://github.com/mlugg/setup-zig@v2.0.5 + uses: https://codeberg.org/mlugg/setup-zig@v2.2.1 # We will let setup-zig use minimum_zig_version from build.zig.zon # setup-zig also sets up the zig cache appropriately - name: Ulimit diff --git a/.gitea/workflows/zig-mach.yaml b/.gitea/workflows/zig-mach.yaml index 3de5440..70fa4f7 100644 --- a/.gitea/workflows/zig-mach.yaml +++ b/.gitea/workflows/zig-mach.yaml @@ -26,7 +26,7 @@ jobs: with: ref: zig-mach - name: Setup Zig - uses: https://github.com/mlugg/setup-zig@v2.0.1 + uses: https://codeberg.org/mlugg/setup-zig@v2.2.1 with: version: mach-latest - name: Restore Zig caches diff --git a/.gitea/workflows/zig-nightly.yaml b/.gitea/workflows/zig-nightly.yaml index c33ee59..decdd9f 100644 --- a/.gitea/workflows/zig-nightly.yaml +++ b/.gitea/workflows/zig-nightly.yaml @@ -26,7 +26,7 @@ jobs: with: ref: zig-develop - name: Setup Zig - uses: https://github.com/mlugg/setup-zig@v2.0.5 + uses: https://codeberg.org/mlugg/setup-zig@v2.2.1 with: version: master - name: Run smoke test diff --git a/.gitea/workflows/zig-previous.yaml b/.gitea/workflows/zig-previous.yaml index e2a14a0..b9d13f7 100644 --- a/.gitea/workflows/zig-previous.yaml +++ b/.gitea/workflows/zig-previous.yaml @@ -20,7 +20,7 @@ jobs: with: ref: zig-0.14.x 
- name: Setup Zig - uses: https://github.com/mlugg/setup-zig@v2.0.1 + uses: https://codeberg.org/mlugg/setup-zig@v2.2.1 with: version: 0.14.0 - name: Run smoke test diff --git a/.mise.toml b/.mise.toml index 8c63293..9927c37 100644 --- a/.mise.toml +++ b/.mise.toml @@ -1,5 +1,5 @@ [tools] -pre-commit = "latest" -"ubi:DonIsaac/zlint" = "latest" +prek = "0.3.1" +"ubi:DonIsaac/zlint" = "0.7.9" zig = "0.15.2" -zls = "0.15.0" +zls = "0.15.1" diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 8e6ed25..3c4395c 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -2,7 +2,7 @@ # See https://pre-commit.com/hooks.html for more hooks repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v3.2.0 + rev: v6.0.0 hooks: - id: trailing-whitespace - id: end-of-file-fixer From 1a03250fbeb2840ab8b6010f1ad4e899cdfc185a Mon Sep 17 00:00:00 2001 From: Emil Lerch Date: Thu, 5 Feb 2026 12:49:11 -0800 Subject: [PATCH 22/22] remove special exception for 404 (not found) responses These will now be handled just like any other error, allowing downstream to catch and deal with through diagnostics (that did not exist when that code was written) --- src/aws.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/aws.zig b/src/aws.zig index 91854aa..6f98d1f 100644 --- a/src/aws.zig +++ b/src/aws.zig @@ -416,7 +416,7 @@ pub fn Request(comptime request_action: anytype) type { ); defer response.deinit(); - if (response.response_code != options.success_http_status and response.response_code != .not_found) { + if (response.response_code != options.success_http_status) { // If the consumer prrovided diagnostics, they are likely handling // this error themselves. We'll not spam them with log.err // output. Note that we may need to add additional information