diff --git a/.gitea/workflows/build.yaml b/.gitea/workflows/build.yaml index d36045f..a867ffe 100644 --- a/.gitea/workflows/build.yaml +++ b/.gitea/workflows/build.yaml @@ -18,7 +18,7 @@ jobs: - name: Check out repository code uses: actions/checkout@v4 - name: Setup Zig - uses: https://github.com/mlugg/setup-zig@v2.0.5 + uses: https://codeberg.org/mlugg/setup-zig@v2.2.1 # We will let setup-zig use minimum_zig_version from build.zig.zon # setup-zig also sets up the zig cache appropriately - name: Ulimit @@ -44,11 +44,8 @@ jobs: # should be using git archive, but we need our generated code to be part of it - name: Package source code with generated models run: | - sed -i 's#// UNCOMMENT AFTER MODEL GEN TO USE IN BUILD SCRIPTS //##' build.zig - tar -czf ${{ runner.temp }}/${{ github.sha }}-with-models.tar.gz \ - --format ustar \ - --exclude 'zig-*' \ - * + zig build package + (cd zig-out/package && tar -czf ${{ runner.temp }}/${{ github.sha }}-with-models.tar.gz --format ustar *) # Something in this PR broke this transform. I don't mind removing it, but # the PR attempts to handle situations with or without a prefix, but it # doesn't. 
I have not yet determined what the problem is, though diff --git a/.gitea/workflows/zig-mach.yaml b/.gitea/workflows/zig-mach.yaml index 3de5440..70fa4f7 100644 --- a/.gitea/workflows/zig-mach.yaml +++ b/.gitea/workflows/zig-mach.yaml @@ -26,7 +26,7 @@ jobs: with: ref: zig-mach - name: Setup Zig - uses: https://github.com/mlugg/setup-zig@v2.0.1 + uses: https://codeberg.org/mlugg/setup-zig@v2.2.1 with: version: mach-latest - name: Restore Zig caches diff --git a/.gitea/workflows/zig-nightly.yaml b/.gitea/workflows/zig-nightly.yaml index c33ee59..decdd9f 100644 --- a/.gitea/workflows/zig-nightly.yaml +++ b/.gitea/workflows/zig-nightly.yaml @@ -26,7 +26,7 @@ jobs: with: ref: zig-develop - name: Setup Zig - uses: https://github.com/mlugg/setup-zig@v2.0.5 + uses: https://codeberg.org/mlugg/setup-zig@v2.2.1 with: version: master - name: Run smoke test diff --git a/.gitea/workflows/zig-previous.yaml b/.gitea/workflows/zig-previous.yaml index e2a14a0..b9d13f7 100644 --- a/.gitea/workflows/zig-previous.yaml +++ b/.gitea/workflows/zig-previous.yaml @@ -20,7 +20,7 @@ jobs: with: ref: zig-0.14.x - name: Setup Zig - uses: https://github.com/mlugg/setup-zig@v2.0.1 + uses: https://codeberg.org/mlugg/setup-zig@v2.2.1 with: version: 0.14.0 - name: Run smoke test diff --git a/.mise.toml b/.mise.toml index aeb1885..c338b44 100644 --- a/.mise.toml +++ b/.mise.toml @@ -1,5 +1,5 @@ [tools] -pre-commit = "latest" -"ubi:DonIsaac/zlint" = "latest" -zig = "master" -zls = "0.15.0" +prek = "0.3.1" +"ubi:DonIsaac/zlint" = "0.7.9" +zig = "0.16.0" +zls = "0.15.1" diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 8e6ed25..3c4395c 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -2,7 +2,7 @@ # See https://pre-commit.com/hooks.html for more hooks repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v3.2.0 + rev: v6.0.0 hooks: - id: trailing-whitespace - id: end-of-file-fixer diff --git a/build.zig b/build.zig index 51b993b..321b8af 100644 --- 
a/build.zig +++ b/build.zig @@ -3,8 +3,6 @@ const Builder = @import("std").Build; const models_subdir = "codegen/sdk-codegen/aws-models/"; // note will probably not work on windows -// UNCOMMENT AFTER MODEL GEN TO USE IN BUILD SCRIPTS //pub const aws = @import("src/aws.zig"); - const test_targets = [_]std.Target.Query{ .{}, // native .{ .cpu_arch = .x86_64, .os_tag = .linux }, @@ -114,12 +112,41 @@ pub fn build(b: *Builder) !void { cg.dependOn(&cg_cmd.step); - exe.step.dependOn(cg); + // Each module will need access to the generated AWS modules. These + // are all imported by service_manifest.zig, which is a generated list + // of services created by the codegen process. + // + // First, we need to check if pre-generated models exist, which only happens + // for packaged distribution. + // + // The idea here is that if we have a packaged distribution (tarball with + // models available), we are pre-generated, do not need the codegen step + // (and in fact do not have that available), and our service_manifest + // module needs to be the pre-packaged file. + // + // If we do not have a packaged distribution, the file will not exist, + // because it is generated by codegen and will live in the zig cache directory, + // so we depend on the codegen step and the service_manifest module will + // be based on the codegen output itself. + // + // Most of this complication comes from the fact that we want to enable + // consuming build.zig files to be able to use the SDK at build time for + // things like code deployments, e.g.
https://git.lerch.org/lobo/lambda-zig + const has_pre_generated = + if (b.build_root.handle.access("src/models/service_manifest.zig", .{})) true else |_| false; + + // Only depend on codegen if we don't have pre-generated models + if (!has_pre_generated) + exe.step.dependOn(cg); + + // Use pre-generated models if available, otherwise use codegen output + const service_manifest_source: std.Build.LazyPath = if (has_pre_generated) + b.path("src/models/service_manifest.zig") + else + cg_output_dir.path(b, "service_manifest.zig"); - // This allows us to have each module depend on the - // generated service manifest. const service_manifest_module = b.createModule(.{ - .root_source_file = cg_output_dir.path(b, "service_manifest.zig"), + .root_source_file = service_manifest_source, .target = target, .optimize = optimize, }); @@ -179,7 +206,8 @@ pub fn build(b: *Builder) !void { .filters = test_filters, }); - unit_tests.step.dependOn(cg); + if (!has_pre_generated) + unit_tests.step.dependOn(cg); unit_tests.use_llvm = !no_llvm; const run_unit_tests = b.addRunArtifact(unit_tests); @@ -202,7 +230,8 @@ pub fn build(b: *Builder) !void { .filters = test_filters, }); smoke_test.use_llvm = !no_llvm; - smoke_test.step.dependOn(cg); + if (!has_pre_generated) + smoke_test.step.dependOn(cg); const run_smoke_test = b.addRunArtifact(smoke_test); @@ -212,6 +241,13 @@ pub fn build(b: *Builder) !void { } else { b.installArtifact(exe); } + + // Package step - creates distribution source directory + const pkg_step = PackageStep.create(b, cg_output_dir); + pkg_step.step.dependOn(cg); + + const package = b.step("package", "Copy code to zig-out/package with generated models"); + package.dependOn(&pkg_step.step); } fn configure(compile: *std.Build.Module, modules: std.StringHashMap(*std.Build.Module), include_time: bool) void { @@ -251,3 +287,138 @@ fn getDependencyModules(b: *std.Build, args: anytype) !std.StringHashMap(*std.Bu return result; } + +/// Custom build step that creates a 
distribution source directory +/// This copies all source files plus the generated service models into a +/// package directory suitable for distribution +const PackageStep = struct { + step: std.Build.Step, + cg_output_dir: std.Build.LazyPath, + + const base_id: std.Build.Step.Id = .custom; + + /// Files to include in the package (relative to build root) + const package_files = [_][]const u8{ + "build.zig", + "build.zig.zon", + "README.md", + "LICENSE", + }; + + /// Directories to include in the package (relative to build root) + const package_dirs = [_][]const u8{ + "src", + "lib", + }; + + pub fn create(owner: *std.Build, cg_output_dir: std.Build.LazyPath) *PackageStep { + const self = owner.allocator.create(PackageStep) catch @panic("OOM"); + self.* = .{ + .step = std.Build.Step.init(.{ + .id = base_id, + .name = "copy generated files", + .owner = owner, + .makeFn = make, + }), + .cg_output_dir = cg_output_dir, + }; + return self; + } + + fn make(step: *std.Build.Step, options: std.Build.Step.MakeOptions) anyerror!void { + _ = options; + const self: *PackageStep = @fieldParentPtr("step", step); + const b = step.owner; + + // Get the path to generated models + const models_path = self.cg_output_dir.getPath2(b, &self.step); + + // Create output directory for packaging + const package_dir = b.pathJoin(&.{ "zig-out", "package" }); + const models_dest_dir = b.pathJoin(&.{ package_dir, "src", "models" }); + std.fs.cwd().makePath(models_dest_dir) catch |err| { + return step.fail("Failed to create package directory: {}", .{err}); + }; + + // Copy all source files to package directory + for (package_files) |file_name| + copyFile(b, b.build_root.handle, file_name, package_dir) catch {}; + + // Copy directories + for (package_dirs) |dir_name| + copyDirRecursive(b, b.build_root.handle, dir_name, package_dir) catch |err| { + return step.fail("Failed to copy directory '{s}': {}", .{ dir_name, err }); + }; + + // Copy generated models to src/models/ + copyGeneratedModels(b, 
models_path, models_dest_dir) catch |err| { + return step.fail("Failed to copy generated models: {}", .{err}); + }; + + step.result_cached = false; + } + + fn copyFile(b: *std.Build, src_dir: std.fs.Dir, file_path: []const u8, dest_prefix: []const u8) !void { + const dest_path = b.pathJoin(&.{ dest_prefix, file_path }); + + // Ensure parent directory exists + if (std.fs.path.dirname(dest_path)) |parent| + std.fs.cwd().makePath(parent) catch {}; + + src_dir.copyFile(file_path, std.fs.cwd(), dest_path, .{}) catch return; + } + + fn copyDirRecursive(b: *std.Build, src_base: std.fs.Dir, dir_path: []const u8, dest_prefix: []const u8) !void { + var src_dir = src_base.openDir(dir_path, .{ .iterate = true }) catch return; + defer src_dir.close(); + + var walker = try src_dir.walk(b.allocator); + defer walker.deinit(); + + while (try walker.next()) |entry| { + // Skip zig build artifact directories + if (std.mem.indexOf(u8, entry.path, "zig-out") != null or + std.mem.indexOf(u8, entry.path, ".zig-cache") != null or + std.mem.indexOf(u8, entry.path, "zig-cache") != null) + continue; + + const src_path = b.pathJoin(&.{ dir_path, entry.path }); + const dest_path = b.pathJoin(&.{ dest_prefix, dir_path, entry.path }); + + switch (entry.kind) { + .directory => std.fs.cwd().makePath(dest_path) catch {}, + .file => { + // Ensure parent directory exists + if (std.fs.path.dirname(dest_path)) |parent| { + std.fs.cwd().makePath(parent) catch {}; + } + src_base.copyFile(src_path, std.fs.cwd(), dest_path, .{}) catch {}; + }, + .sym_link => { + var link_buf: [std.fs.max_path_bytes]u8 = undefined; + const link_target = entry.dir.readLink(entry.basename, &link_buf) catch continue; + // Ensure parent directory exists + if (std.fs.path.dirname(dest_path)) |parent| { + std.fs.cwd().makePath(parent) catch {}; + } + std.fs.cwd().symLink(link_target, dest_path, .{}) catch {}; + }, + else => {}, + } + } + } + + fn copyGeneratedModels(b: *std.Build, models_path: []const u8, models_dest_dir: []const 
u8) !void { + var models_dir = std.fs.cwd().openDir(models_path, .{ .iterate = true }) catch + return error.ModelsNotFound; + defer models_dir.close(); + + var iter = models_dir.iterate(); + while (try iter.next()) |entry| { + if (entry.kind != .file) continue; + + const dest_path = b.pathJoin(&.{ models_dest_dir, entry.name }); + models_dir.copyFile(entry.name, std.fs.cwd(), dest_path, .{}) catch continue; + } + } +}; diff --git a/codegen/src/main.zig b/codegen/src/main.zig index 94a96cb..4b826ca 100644 --- a/codegen/src/main.zig +++ b/codegen/src/main.zig @@ -652,7 +652,7 @@ fn generateOperation(allocator: std.mem.Allocator, operation: smithy.ShapeInfo, try outputIndent(child_state, writer); try writer.print(".uri = \"{s}\",\n", .{trait.http.uri}); try outputIndent(child_state, writer); - try writer.print(".success_code = {d},\n", .{trait.http.code}); + try writer.print(".success_code = @as(u10, {d}),\n", .{trait.http.code}); try outputIndent(state, writer); _ = try writer.write("};\n\n"); } diff --git a/codegen/src/serialization/json.zig b/codegen/src/serialization/json.zig index 02d5b1c..499e850 100644 --- a/codegen/src/serialization/json.zig +++ b/codegen/src/serialization/json.zig @@ -34,9 +34,9 @@ pub fn generateToJsonFunction(shape_id: []const u8, writer: *std.Io.Writer, stat const member_value = try getMemberValueJson(allocator, "self", member); defer allocator.free(member_value); - try writer.print("try jw.objectField(\"{s}\");\n", .{member.json_key}); try writeMemberJson( .{ + .object_field_name = member.json_key, .shape_id = member.target, .field_name = member.field_name, .field_value = member_value, @@ -146,6 +146,8 @@ fn writeMemberValue( } const WriteMemberJsonParams = struct { + object_field_name: []const u8, + quote_object_field_name: bool = true, shape_id: []const u8, field_name: []const u8, field_value: []const u8, @@ -196,9 +198,9 @@ fn writeStructureJson(params: WriteMemberJsonParams, writer: *std.Io.Writer) !vo const member_value = try 
getMemberValueJson(allocator, object_value, member); defer allocator.free(member_value); - try writer.print("try jw.objectField(\"{s}\");\n", .{member.json_key}); try writeMemberJson( .{ + .object_field_name = member.json_key, .shape_id = member.target, .field_name = member.field_name, .field_value = member_value, @@ -214,7 +216,7 @@ fn writeStructureJson(params: WriteMemberJsonParams, writer: *std.Io.Writer) !vo if (is_optional) { try writer.writeAll("} else {\n"); - try writer.writeAll("try jw.write(null);\n"); + try writer.writeAll("//try jw.write(null);\n"); try writer.writeAll("}\n"); } } @@ -268,7 +270,7 @@ fn writeListJson(list: smithy_tools.ListShape, params: WriteMemberJsonParams, wr if (list_is_optional) { try writer.writeAll("} else {\n"); - try writer.writeAll("try jw.write(null);\n"); + try writer.writeAll("//try jw.write(null);\n"); try writer.writeAll("}\n"); } } @@ -327,9 +329,10 @@ fn writeMapJson(map: smithy_tools.MapShape, params: WriteMemberJsonParams, write // start loop try writer.print("for ({s}) |{s}|", .{ map_value, map_value_capture }); try writer.writeAll("{\n"); - try writer.print("try jw.objectField({s});\n", .{map_capture_key}); try writeMemberJson(.{ + .object_field_name = map_capture_key, + .quote_object_field_name = false, .shape_id = map.value, .field_name = "value", .field_value = map_capture_value, @@ -345,7 +348,7 @@ fn writeMapJson(map: smithy_tools.MapShape, params: WriteMemberJsonParams, write if (map_is_optional) { try writer.writeAll("} else {\n"); - try writer.writeAll("try jw.write(null);\n"); + try writer.writeAll("//try jw.write(null);\n"); try writer.writeAll("}\n"); } } @@ -361,7 +364,16 @@ fn writeMemberJson(params: WriteMemberJsonParams, writer: *std.Io.Writer) anyerr const shape_info = try smithy_tools.getShapeInfo(shape_id, state.file_state.shapes); const shape = shape_info.shape; + const quote = if (params.quote_object_field_name) "\"" else ""; + const is_optional = 
smithy_tools.shapeIsOptional(params.member.traits); + if (is_optional) { + try writer.print("if ({s}) |_|\n", .{params.field_value}); + try writer.writeAll("{\n"); + } + try writer.print("try jw.objectField({s}{s}{s});\n", .{ quote, params.object_field_name, quote }); + if (state.getTypeRecurrenceCount(shape_id) > 2) { + if (is_optional) try writer.writeAll("\n}\n"); return; } @@ -389,4 +401,5 @@ fn writeMemberJson(params: WriteMemberJsonParams, writer: *std.Io.Writer) anyerr .short => try writeScalarJson("short", params, writer), .service, .resource, .operation, .member, .set => std.debug.panic("Shape type not supported: {}", .{shape}), } + if (is_optional) try writer.writeAll("\n}\n"); } diff --git a/example/build.zig.zon b/example/build.zig.zon index d7c1ce8..491e016 100644 --- a/example/build.zig.zon +++ b/example/build.zig.zon @@ -6,8 +6,8 @@ .dependencies = .{ .aws = .{ - .url = "https://git.lerch.org/api/packages/lobo/generic/aws-sdk-with-models/9b870aa969124de05de2a71e0afb9050a2998b14/9b870aa969124de05de2a71e0afb9050a2998b14nightly-zig-with-models.tar.gz", - .hash = "aws-0.0.1-SbsFcFsaCgBDSmjnC9Lue34UN_csGkkAEBJ4EkUl9r6w", + .url = "https://git.lerch.org/api/packages/lobo/generic/aws-sdk-with-models/e41f98b389539c8bc6b1a231d25e2980318e5ef4/e41f98b389539c8bc6b1a231d25e2980318e5ef4-with-models.tar.gz", + .hash = "aws-0.0.1-SbsFcI0RCgBdf1nak95gi1kAtI6sv3Ntb7BPETH30fpS", }, }, } diff --git a/src/aws.zig b/src/aws.zig index 7a8d1ab..5d7cbe4 100644 --- a/src/aws.zig +++ b/src/aws.zig @@ -6,6 +6,7 @@ const date = @import("date"); const json = @import("json"); const zeit = @import("zeit"); +const credentials = @import("aws_credentials.zig"); const awshttp = @import("aws_http.zig"); const url = @import("url.zig"); const servicemodel = @import("servicemodel.zig"); @@ -19,7 +20,6 @@ const scoped_log = std.log.scoped(.aws); /// controls are insufficient (e.g. 
use in build script) pub fn globalLogControl(aws_level: std.log.Level, http_level: std.log.Level, signing_level: std.log.Level, off: bool) void { const signing = @import("aws_signing.zig"); - const credentials = @import("aws_credentials.zig"); logs_off = off; signing.logs_off = off; credentials.logs_off = off; @@ -82,8 +82,9 @@ const log = struct { pub const Options = struct { region: []const u8 = "aws-global", dualstack: bool = false, - success_http_code: i64 = 200, + success_http_status: std.http.Status = .ok, client: Client, + credential_options: credentials.Options = .{}, diagnostics: ?*Diagnostics = null, @@ -91,7 +92,7 @@ pub const Options = struct { }; pub const Diagnostics = struct { - http_code: i64, + response_status: std.http.Status, response_body: []const u8, allocator: std.mem.Allocator, @@ -232,8 +233,18 @@ pub fn Request(comptime request_action: anytype) type { var buffer = std.Io.Writer.Allocating.init(options.client.allocator); defer buffer.deinit(); if (Self.service_meta.aws_protocol == .rest_json_1) { - if (std.mem.eql(u8, "PUT", aws_request.method) or std.mem.eql(u8, "POST", aws_request.method)) - try buffer.writer.print("{f}", .{std.json.fmt(request, .{ .whitespace = .indent_4 })}); + if (std.mem.eql(u8, "PUT", aws_request.method) or std.mem.eql(u8, "POST", aws_request.method)) { + // Buried in the tests are our answer here: + // https://github.com/smithy-lang/smithy/blob/main/smithy-aws-protocol-tests/model/restJson1/json-structs.smithy#L71C24-L71C78 + // documentation: "Rest Json should not serialize null structure values", + try buffer.writer.print( + "{f}", + .{std.json.fmt(request, .{ + .whitespace = .indent_4, + .emit_null_optional_fields = false, + })}, + ); + } } aws_request.body = buffer.written(); var rest_xml_body: ?[]const u8 = null; @@ -294,14 +305,9 @@ pub fn Request(comptime request_action: anytype) type { } } - return try Self.callAws(aws_request, .{ - .success_http_code = Action.http_config.success_code, - .region = 
options.region, - .dualstack = options.dualstack, - .client = options.client, - .diagnostics = options.diagnostics, - .mock = options.mock, - }); + var rest_options = options; + rest_options.success_http_status = @enumFromInt(Action.http_config.success_code); + return try Self.callAws(aws_request, rest_options); } /// Calls using one of the json protocols (json_1_0, json_1_1) @@ -321,11 +327,20 @@ // smithy spec, "A null value MAY be provided or omitted // for a boxed member with no observable difference." But we're // seeing a lot of differences here between spec and reality + // + // This is deliciously unclear: + // https://github.com/smithy-lang/smithy/blob/main/smithy-aws-protocol-tests/model/awsJson1_1/null.smithy#L36 + // + // It looks like struct nulls are meant to be dropped, but sparse + // lists/maps included. We'll err here on the side of eliminating them const body = try std.fmt.allocPrint( options.client.allocator, "{f}", - .{std.json.fmt(request, .{ .whitespace = .indent_4 })}, + .{std.json.fmt(request, .{ + .whitespace = .indent_4, + .emit_null_optional_fields = false, + })}, ); defer options.client.allocator.free(body); @@ -397,16 +412,27 @@ .dualstack = options.dualstack, .sigv4_service_name = Self.service_meta.sigv4_name, .mock = options.mock, + .credential_options = options.credential_options, }, ); defer response.deinit(); - if (response.response_code != options.success_http_code and response.response_code != 404) { - try reportTraffic(options.client.allocator, "Call Failed", aws_request, response, log.err); + if (response.response_code != options.success_http_status) { + // If the consumer provided diagnostics, they are likely handling + // this error themselves. We'll not spam them with log.err + // output.
Note that we may need to add additional information + // in diagnostics, as reportTraffic provides more information + // than what exists in the diagnostics data if (options.diagnostics) |d| { - d.http_code = response.response_code; + d.response_status = response.response_code; d.response_body = try d.allocator.dupe(u8, response.body); - } + } else try reportTraffic( + options.client.allocator, + "Call Failed", + aws_request, + response, + log.err, + ); return error.HttpFailure; } @@ -1186,7 +1212,10 @@ fn buildPath( "{f}", .{std.json.fmt( @field(request, field.name), - .{ .whitespace = .indent_4 }, + .{ + .whitespace = .indent_4, + .emit_null_optional_fields = false, + }, )}, ); const trimmed_replacement_val = std.mem.trim(u8, replacement_buffer.written(), "\""); diff --git a/src/aws_credentials.zig b/src/aws_credentials.zig index 5cdc9b9..1079d82 100644 --- a/src/aws_credentials.zig +++ b/src/aws_credentials.zig @@ -69,6 +69,11 @@ pub const Profile = struct { config_file: ?[]const u8 = null, /// Config file. Defaults to AWS_PROFILE or default profile_name: ?[]const u8 = null, + /// Profile name specified via command line should change precedence of operation, + /// moves credential file checking to the top. The sdk does not have a + /// way to know if this is coming from a command line, so this field + /// serves as a way to accomplish that task + prefer_profile_from_file: bool = false, }; pub const Options = struct { @@ -79,6 +84,15 @@ pub var static_credentials: ?auth.Credentials = null; pub fn getCredentials(allocator: std.mem.Allocator, io: std.Io, options: Options) !auth.Credentials { if (static_credentials) |c| return c; + if (options.profile.prefer_profile_from_file) { + log.debug( + "Command line profile specified. Checking credentials file first. Profile name {s}", + .{options.profile.profile_name orelse "default"}, + ); + if (try getProfileCredentials(allocator, io, options.profile)) |cred| return cred; + // Profile not found. 
We'll mirror the cli here and bail early + return error.CredentialsNotFound; + } if (try getEnvironmentCredentials(allocator)) |cred| { log.debug("Found credentials in environment. Access key: {s}", .{cred.access_key}); return cred; @@ -398,8 +412,8 @@ fn getProfileCredentials(allocator: std.mem.Allocator, io: std.Io, options: Prof default_path = default_path orelse creds_file_path.home; const config_file_path = try filePath( allocator, - options.credential_file, - "AWS_SHARED_CREDENTIALS_FILE", + options.config_file, + "AWS_CONFIG_FILE", default_path, "config", ); @@ -408,7 +422,7 @@ fn getProfileCredentials(allocator: std.mem.Allocator, io: std.Io, options: Prof // Get active profile const profile = (try getEnvironmentVariable(allocator, "AWS_PROFILE")) orelse - try allocator.dupe(u8, "default"); + try allocator.dupe(u8, options.profile_name orelse "default"); defer allocator.free(profile); log.debug("Looking for file credentials using profile '{s}'", .{profile}); log.debug("Checking credentials file: {s}", .{creds_file_path.evaluated_path}); diff --git a/src/aws_http.zig b/src/aws_http.zig index d50555b..53cddf6 100644 --- a/src/aws_http.zig +++ b/src/aws_http.zig @@ -90,6 +90,8 @@ pub const Options = struct { dualstack: bool = false, sigv4_service_name: ?[]const u8 = null, + credential_options: credentials.Options = .{}, + mock: ?Mock = null, }; @@ -188,7 +190,7 @@ pub const AwsHttp = struct { defer endpoint.deinit(); log.debug("Calling endpoint {s}", .{endpoint.uri}); // TODO: Should we allow customization here? 
- const creds = try credentials.getCredentials(self.allocator, self.io, .{}); + const creds = try credentials.getCredentials(self.allocator, self.io, options.credential_options); defer creds.deinit(); const signing_config: signing.Config = .{ .region = getRegion(service, options.region), diff --git a/src/aws_http_base.zig b/src/aws_http_base.zig index 4a8f8e4..7065c94 100644 --- a/src/aws_http_base.zig +++ b/src/aws_http_base.zig @@ -9,7 +9,7 @@ pub const Request = struct { headers: []const std.http.Header = &.{}, }; pub const Result = struct { - response_code: u16, // actually 3 digits can fit in u10 + response_code: std.http.Status, body: []const u8, headers: []const std.http.Header, allocator: std.mem.Allocator, diff --git a/src/aws_test.zig b/src/aws_test.zig index 1d6a869..d4e7b77 100644 --- a/src/aws_test.zig +++ b/src/aws_test.zig @@ -129,7 +129,7 @@ test "proper serialization for kms" { const parsed_body = try std.json.parseFromSlice(struct { KeyId: []const u8, Plaintext: []const u8, - EncryptionContext: ?struct {}, + EncryptionContext: ?struct {} = null, GrantTokens: [][]const u8, EncryptionAlgorithm: []const u8, DryRun: bool, @@ -166,7 +166,6 @@ test "basic json request serialization" { try buffer.writer.print("{f}", .{std.json.fmt(request, .{ .whitespace = .indent_4 })}); try std.testing.expectEqualStrings( \\{ - \\ "ExclusiveStartTableName": null, \\ "Limit": 1 \\} , buffer.written()); @@ -650,7 +649,7 @@ test "json_1_0_query_with_input: dynamodb listTables runtime" { try req_actuals.expectHeader("X-Amz-Target", "DynamoDB_20120810.ListTables"); const parsed_body = try std.json.parseFromSlice(struct { - ExclusiveStartTableName: ?[]const u8, + ExclusiveStartTableName: ?[]const u8 = null, Limit: u8, }, std.testing.allocator, req_actuals.body.?, .{}); defer parsed_body.deinit(); @@ -719,7 +718,7 @@ test "json_1_1_query_with_input: ecs listClusters runtime" { try req_actuals.expectHeader("X-Amz-Target", "AmazonEC2ContainerServiceV20141113.ListClusters"); 
const parsed_body = try std.json.parseFromSlice(struct { - nextToken: ?[]const u8, + nextToken: ?[]const u8 = null, maxResults: u8, }, std.testing.allocator, req_actuals.body.?, .{}); defer parsed_body.deinit(); @@ -759,8 +758,8 @@ test "json_1_1_query_no_input: ecs listClusters runtime" { try req_actuals.expectHeader("X-Amz-Target", "AmazonEC2ContainerServiceV20141113.ListClusters"); const parsed_body = try std.json.parseFromSlice(struct { - nextToken: ?[]const u8, - maxResults: ?u8, + nextToken: ?[]const u8 = null, + maxResults: ?u8 = null, }, std.testing.allocator, req_actuals.body.?, .{}); defer parsed_body.deinit(); @@ -1268,6 +1267,20 @@ test "jsonStringify" { try std.testing.expectEqualStrings("1234", json_parsed.value.arn); try std.testing.expectEqualStrings("bar", json_parsed.value.tags.foo); } +test "jsonStringify does not emit null values on serialization" { + { + const lambda = (Services(.{.lambda}){}).lambda; + const request = lambda.CreateFunctionRequest{ + .function_name = "foo", + .role = "bar", + .code = .{}, + }; + + const request_json = try std.fmt.allocPrint(std.testing.allocator, "{f}", .{std.json.fmt(request, .{})}); + defer std.testing.allocator.free(request_json); + try std.testing.expect(std.mem.indexOf(u8, request_json, "null") == null); + } +} test "jsonStringify nullable object" { // structure is not null @@ -1290,7 +1303,7 @@ test "jsonStringify nullable object" { FunctionVersion: []const u8, Name: []const u8, RoutingConfig: struct { - AdditionalVersionWeights: ?struct {}, + AdditionalVersionWeights: ?struct {} = null, }, }, std.testing.allocator, request_json, .{ .ignore_unknown_fields = true }); defer json_parsed.deinit();