Compare commits
21 commits: 927871c59e ... 3d78705ea5

Commits:
3d78705ea5
1e2b3a6759
908c9d2d42
1fdff0bacd
1fe39007c5
c5cb3dde29
f5663fd84d
c056dbb0ff
9e8198cee4
43238a97eb
b048b1193d
f85eb4caf1
0bd583cae0
3b35936ac6
262cdefe12
238952d127
38b51c768b
86877ca264
e5b662873a
a9f99c0205
c1c40644ac

@@ -24,6 +24,12 @@ jobs:
           version: 0.13.0
       - name: Run tests
         run: zig build test --verbose
+      # Zig build scripts don't have the ability to import depenedencies directly
+      # (https://github.com/ziglang/zig/issues/18164). We can allow downstream
+      # build scripts to import aws with a few tweaks, but we can't @import("src/aws.zig")
+      # until we have our models built. So we have to have the build script
+      # basically modified, only during packaging, to allow this use case
+      #
       # Zig package manager expects everything to be inside a directory in the archive,
       # which it then strips out on download. So we need to shove everything inside a directory
       # the way GitHub/Gitea does for repo archives

@@ -33,6 +39,7 @@ jobs:
       # should be using git archive, but we need our generated code to be part of it
      - name: Package source code with generated models
        run: |
+          sed -i 's#// UNCOMMENT AFTER MODEL GEN TO USE IN BUILD SCRIPTS //##' build.zig
          tar -czf ${{ runner.temp }}/${{ github.sha }}-with-models.tar.gz \
            --format ustar \
            --exclude 'zig-*' \

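The sed command above strips a sentinel comment from build.zig while packaging, so the published tarball re-exports the SDK module even though the repository copy keeps it disabled. For illustration, this is the line it rewrites (the marker text is taken verbatim from the build.zig hunk further down):

    // As committed, the re-export stays disabled because src/aws.zig cannot be
    // imported until the service models have been generated:
    // UNCOMMENT AFTER MODEL GEN TO USE IN BUILD SCRIPTS //pub const aws = @import("src/aws.zig");

    // After the sed strips the marker during packaging, the same line becomes an
    // ordinary declaration that downstream build scripts can import:
    pub const aws = @import("src/aws.zig");
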
@@ -2,7 +2,7 @@ name: aws-zig nightly build
 run-name: ${{ github.actor }} building AWS Zig SDK
 on:
   schedule:
-    - cron: '0 12 30 * *' # 12:30 UTC, 4:30AM Pacific
+    - cron: '30 12 * * *' # 12:30 UTC, 4:30AM Pacific
   push:
     branches:
       - 'zig-develop*'

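Note on the schedule fix: cron fields are minute, hour, day-of-month, month, day-of-week, so the old expression '0 12 30 * *' actually fired at 12:00 UTC on the 30th of each month despite its comment. The replacement '30 12 * * *' matches the comment and runs daily at 12:30 UTC.
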
build.zig (61 changed lines)

@ -4,44 +4,22 @@ const Builder = @import("std").Build;
|
||||||
|
|
||||||
const models_subdir = "codegen/sdk-codegen/aws-models/"; // note will probably not work on windows
|
const models_subdir = "codegen/sdk-codegen/aws-models/"; // note will probably not work on windows
|
||||||
|
|
||||||
|
// UNCOMMENT AFTER MODEL GEN TO USE IN BUILD SCRIPTS //pub const aws = @import("src/aws.zig");
|
||||||
|
|
||||||
const test_targets = [_]std.Target.Query{
|
const test_targets = [_]std.Target.Query{
|
||||||
.{}, // native
|
.{}, // native
|
||||||
.{
|
.{ .cpu_arch = .x86_64, .os_tag = .linux },
|
||||||
.cpu_arch = .x86_64,
|
.{ .cpu_arch = .aarch64, .os_tag = .linux },
|
||||||
.os_tag = .linux,
|
|
||||||
},
|
|
||||||
.{
|
|
||||||
.cpu_arch = .aarch64,
|
|
||||||
.os_tag = .linux,
|
|
||||||
},
|
|
||||||
// The test executable linking process just spins forever in LLVM using nominated zig 0.13 May 2024
|
// The test executable linking process just spins forever in LLVM using nominated zig 0.13 May 2024
|
||||||
// This is likely a LLVM problem unlikely to be fixed in zig 0.13
|
// This is likely a LLVM problem unlikely to be fixed in zig 0.13
|
||||||
// Potentially this issue: https://github.com/llvm/llvm-project/issues/81440
|
// Potentially this issue: https://github.com/llvm/llvm-project/issues/81440
|
||||||
// Zig tracker: https://github.com/ziglang/zig/issues/18872
|
// Zig tracker: https://github.com/ziglang/zig/issues/18872
|
||||||
// .{
|
// .{ .cpu_arch = .riscv64, .os_tag = .linux },
|
||||||
// .cpu_arch = .riscv64,
|
.{ .cpu_arch = .arm, .os_tag = .linux },
|
||||||
// .os_tag = .linux,
|
.{ .cpu_arch = .x86_64, .os_tag = .windows },
|
||||||
// },
|
.{ .cpu_arch = .aarch64, .os_tag = .macos },
|
||||||
.{
|
.{ .cpu_arch = .x86_64, .os_tag = .macos },
|
||||||
.cpu_arch = .arm,
|
// .{ .cpu_arch = .wasm32, .os_tag = .wasi },
|
||||||
.os_tag = .linux,
|
|
||||||
},
|
|
||||||
.{
|
|
||||||
.cpu_arch = .x86_64,
|
|
||||||
.os_tag = .windows,
|
|
||||||
},
|
|
||||||
.{
|
|
||||||
.cpu_arch = .aarch64,
|
|
||||||
.os_tag = .macos,
|
|
||||||
},
|
|
||||||
.{
|
|
||||||
.cpu_arch = .x86_64,
|
|
||||||
.os_tag = .macos,
|
|
||||||
},
|
|
||||||
// .{
|
|
||||||
// .cpu_arch = .wasm32,
|
|
||||||
// .os_tag = .wasi,
|
|
||||||
// },
|
|
||||||
};
|
};
|
||||||
|
|
||||||
pub fn build(b: *Builder) !void {
|
pub fn build(b: *Builder) !void {
|
||||||
|
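The new one-line target queries are consumed the same way the old multi-line ones were. As a hedged sketch of the usual std.Build pattern (the loop below is illustrative, not this project's actual build() body; test_step stands in for a step created earlier with b.step):

    for (test_targets) |target_query| {
        // Resolve the query against the host and build the unit tests for that target.
        const resolved = b.resolveTargetQuery(target_query);
        const unit_tests = b.addTest(.{
            .root_source_file = b.path("src/aws.zig"),
            .target = resolved,
        });
        test_step.dependOn(&b.addRunArtifact(unit_tests).step);
    }
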
@@ -213,5 +191,24 @@ pub fn build(b: *Builder) !void {
     }
     const check = b.step("check", "Check compilation errors");
     check.dependOn(&exe.step);
+
+    // Similar to creating the run step earlier, this exposes a `test` step to
+    // the `zig build --help` menu, providing a way for the user to request
+    // running the unit tests.
+    const smoke_test_step = b.step("smoke-test", "Run unit tests");
+
+    // Creates a step for unit testing. This only builds the test executable
+    // but does not run it.
+    const smoke_test = b.addTest(.{
+        .root_source_file = b.path("src/aws.zig"),
+        .target = target,
+        .optimize = optimize,
+    });
+    smoke_test.root_module.addImport("smithy", smithy_dep.module("smithy"));
+    smoke_test.step.dependOn(gen_step);
+
+    const run_smoke_test = b.addRunArtifact(smoke_test);
+
+    smoke_test_step.dependOn(&run_smoke_test.step);
     b.installArtifact(exe);
 }

@@ -11,8 +11,8 @@

     .dependencies = .{
         .smithy = .{
-            .url = "https://git.lerch.org/lobo/smithy/archive/c419359a8c47027839bf0ad9a1adbc7e35795dbf.tar.gz",
-            .hash = "12208cba35178ab76d5a4d966df0394d8d3cd399642595d1126f02f1f21142f0ba6c",
+            .url = "https://git.lerch.org/lobo/smithy/archive/3ed98751bc414e005af6ad185feb213d4366c0db.tar.gz",
+            .hash = "12204a784751a4ad5ed6c8955ba91fcbc4a3cad6c5a7da38f39abf074ef801d13172",
         },
         .models = .{
             .url = "https://github.com/aws/aws-sdk-go-v2/archive/58cf6509525a12d64fd826da883bfdbacbd2f00e.tar.gz",

@ -2,6 +2,16 @@ const std = @import("std");
|
||||||
// options is a json.Options, but since we're using our hacked json.zig we don't want to
|
// options is a json.Options, but since we're using our hacked json.zig we don't want to
|
||||||
// specifically call this out
|
// specifically call this out
|
||||||
pub fn serializeMap(map: anytype, key: []const u8, options: anytype, out_stream: anytype) !bool {
|
pub fn serializeMap(map: anytype, key: []const u8, options: anytype, out_stream: anytype) !bool {
|
||||||
|
if (@typeInfo(@TypeOf(map)) == .optional) {
|
||||||
|
if (map == null)
|
||||||
|
return true
|
||||||
|
else
|
||||||
|
return serializeMapInternal(map.?, key, options, out_stream);
|
||||||
|
}
|
||||||
|
return serializeMapInternal(map, key, options, out_stream);
|
||||||
|
}
|
||||||
|
|
||||||
|
fn serializeMapInternal(map: anytype, key: []const u8, options: anytype, out_stream: anytype) !bool {
|
||||||
if (map.len == 0) return true;
|
if (map.len == 0) return true;
|
||||||
// TODO: Map might be [][]struct{key, value} rather than []struct{key, value}
|
// TODO: Map might be [][]struct{key, value} rather than []struct{key, value}
|
||||||
var child_options = options;
|
var child_options = options;
|
||||||
|
|
|
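The wrapper peels an optional map at comptime before delegating to the internal implementation: because the @typeInfo check is comptime-known, the null-handling branch is only analyzed when the argument really is an optional. A minimal self-contained sketch of the same pattern (standalone names; assumes a Zig version with lowercase @typeInfo tags, as targeted by this diff):

    const std = @import("std");

    // Accepts either T or ?T; a null optional is treated as "nothing to do".
    fn describe(value: anytype) []const u8 {
        if (@typeInfo(@TypeOf(value)) == .optional) {
            if (value == null) return "null, skipped";
            return describe(value.?);
        }
        return "present";
    }

    test "optional and non-optional inputs take the same path" {
        try std.testing.expectEqualStrings("null, skipped", describe(@as(?u32, null)));
        try std.testing.expectEqualStrings("present", describe(@as(?u32, 42)));
        try std.testing.expectEqualStrings("present", describe(@as(u32, 7)));
    }
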
@@ -5,8 +5,8 @@
     .dependencies = .{
         .aws = .{
-            .url = "https://git.lerch.org/lobo/-/packages/generic/aws-sdk-with-models/3e89ec468a493b239f01df81b1d0998ca925709f/files/510",
-            .hash = "122015b35d150e9c641a8f577ea07bb5cf52020038d052bcabc5d89214ac36e82c4e",
+            .url = "https://git.lerch.org/api/packages/lobo/generic/aws-sdk-with-models/e5b662873a6745a7e761643b1ca3d8637bf1222f/e5b662873a6745a7e761643b1ca3d8637bf1222f-with-models.tar.gz",
+            .hash = "12206394d50a9df1bf3fa6390cd5525bf97448d0f74a85113ef70c3bb60dcf4b7292",
         },
     },
 }

@@ -36,10 +36,8 @@ pub fn main() anyerror!void {
         .client = client,
     };

-    // As of 2023-08-28, only ECS from this list supports TLS v1.3
-    // AWS commitment is to enable all services by 2023-12-31
     const services = aws.Services(.{ .sts, .kms }){};
-    try stdout.print("Calling KMS ListKeys, a TLS 1.3 enabled service\n", .{});
+    try stdout.print("Calling KMS ListKeys\n", .{});
     try stdout.print("You likely have at least some AWS-generated keys in your account,\n", .{});
     try stdout.print("but if the account has not had many services used, this may return 0 keys\n\n", .{});
     const call_kms = try aws.Request(services.kms.list_keys).call(.{}, options);

@@ -51,8 +49,7 @@ pub fn main() anyerror!void {
     }
     defer call_kms.deinit();

-    try stdout.print("\n\n\nCalling STS GetCallerIdentity. This does not have TLS 1.3 in September 2023\n", .{});
-    try stdout.print("A failure may occur\n\n", .{});
+    try stdout.print("\n\n\nCalling STS GetCallerIdentity\n", .{});
     const call = try aws.Request(services.sts.get_caller_identity).call(.{}, options);
     defer call.deinit();
     try stdout.print("\tarn: {s}\n", .{call.response.arn.?});

src/aws.zig (278 changed lines)

@ -9,7 +9,72 @@ const date = @import("date.zig");
|
||||||
const servicemodel = @import("servicemodel.zig");
|
const servicemodel = @import("servicemodel.zig");
|
||||||
const xml_shaper = @import("xml_shaper.zig");
|
const xml_shaper = @import("xml_shaper.zig");
|
||||||
|
|
||||||
const log = std.log.scoped(.aws);
|
const scoped_log = std.log.scoped(.aws);
|
||||||
|
|
||||||
|
/// control all logs directly/indirectly used by aws sdk. Not recommended for
|
||||||
|
/// use under normal circumstances, but helpful for times when the zig logging
|
||||||
|
/// controls are insufficient (e.g. use in build script)
|
||||||
|
pub fn globalLogControl(aws_level: std.log.Level, http_level: std.log.Level, signing_level: std.log.Level, off: bool) void {
|
||||||
|
const signing = @import("aws_signing.zig");
|
||||||
|
const credentials = @import("aws_credentials.zig");
|
||||||
|
logs_off = off;
|
||||||
|
signing.logs_off = off;
|
||||||
|
credentials.logs_off = off;
|
||||||
|
awshttp.logs_off = off;
|
||||||
|
log_level = aws_level;
|
||||||
|
awshttp.log_level = http_level;
|
||||||
|
signing.log_level = signing_level;
|
||||||
|
credentials.log_level = signing_level;
|
||||||
|
}
|
||||||
|
/// Specifies logging level. This should not be touched unless the normal
|
||||||
|
/// zig logging capabilities are inaccessible (e.g. during a build)
|
||||||
|
pub var log_level: std.log.Level = .debug;
|
||||||
|
|
||||||
|
/// Turn off logging completely
|
||||||
|
pub var logs_off: bool = false;
|
||||||
|
const log = struct {
|
||||||
|
/// Log an error message. This log level is intended to be used
|
||||||
|
/// when something has gone wrong. This might be recoverable or might
|
||||||
|
/// be followed by the program exiting.
|
||||||
|
pub fn err(
|
||||||
|
comptime format: []const u8,
|
||||||
|
args: anytype,
|
||||||
|
) void {
|
||||||
|
if (!logs_off and @intFromEnum(std.log.Level.err) <= @intFromEnum(log_level))
|
||||||
|
scoped_log.err(format, args);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Log a warning message. This log level is intended to be used if
|
||||||
|
/// it is uncertain whether something has gone wrong or not, but the
|
||||||
|
/// circumstances would be worth investigating.
|
||||||
|
pub fn warn(
|
||||||
|
comptime format: []const u8,
|
||||||
|
args: anytype,
|
||||||
|
) void {
|
||||||
|
if (!logs_off and @intFromEnum(std.log.Level.warn) <= @intFromEnum(log_level))
|
||||||
|
scoped_log.warn(format, args);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Log an info message. This log level is intended to be used for
|
||||||
|
/// general messages about the state of the program.
|
||||||
|
pub fn info(
|
||||||
|
comptime format: []const u8,
|
||||||
|
args: anytype,
|
||||||
|
) void {
|
||||||
|
if (!logs_off and @intFromEnum(std.log.Level.info) <= @intFromEnum(log_level))
|
||||||
|
scoped_log.info(format, args);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Log a debug message. This log level is intended to be used for
|
||||||
|
/// messages which are only useful for debugging.
|
||||||
|
pub fn debug(
|
||||||
|
comptime format: []const u8,
|
||||||
|
args: anytype,
|
||||||
|
) void {
|
||||||
|
if (!logs_off and @intFromEnum(std.log.Level.debug) <= @intFromEnum(log_level))
|
||||||
|
scoped_log.debug(format, args);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
pub const Options = struct {
|
pub const Options = struct {
|
||||||
region: []const u8 = "aws-global",
|
region: []const u8 = "aws-global",
|
||||||
|
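The struct named `log` shadows the scoped logger with wrappers that honor the new runtime `log_level` and `logs_off` switches, so existing `log.debug(...)` call sites inside the module keep working unchanged. A hedged usage sketch of the new control, useful where std_options log filtering is not available, such as a build script (the import name `aws` is whatever the consumer mapped the package to):

    const aws = @import("aws");

    pub fn main() !void {
        // Quiet the SDK, HTTP, and signing/credential layers down to warnings;
        // pass true as the final argument to silence them entirely.
        aws.globalLogControl(.warn, .warn, .warn, false);
        // ... make SDK calls here ...
    }
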
@@ -19,6 +84,18 @@ pub const Options = struct {

     /// Used for testing to provide consistent signing. If null, will use current time
     signing_time: ?i64 = null,
+    diagnostics: ?*Diagnostics = null,
+};
+
+pub const Diagnostics = struct {
+    http_code: i64,
+    response_body: []const u8,
+    allocator: std.mem.Allocator,
+
+    pub fn deinit(self: *Diagnostics) void {
+        self.allocator.free(self.response_body);
+        self.response_body = undefined;
+    }
 };

 /// Using this constant may blow up build times. Recommed using Services()

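A hedged sketch of how a caller can use the new diagnostics hook; on error.HttpFailure the failure path later in this file copies the HTTP status and response body into the struct. This fragment could live in the example program's main(), with allocator, client, and services set up as shown there:

    var diagnostics = aws.Diagnostics{
        .http_code = undefined,
        .response_body = undefined,
        .allocator = allocator,
    };
    const options = aws.Options{
        .client = client,
        .diagnostics = &diagnostics,
    };
    const call = aws.Request(services.sts.get_caller_identity).call(.{}, options) catch |err| {
        if (err == error.HttpFailure) {
            defer diagnostics.deinit();
            std.log.err("HTTP {d}: {s}", .{ diagnostics.http_code, diagnostics.response_body });
        }
        return err;
    };
    defer call.deinit();
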
@@ -114,12 +191,15 @@ pub fn Request(comptime request_action: anytype) type {
            log.debug("Rest method: '{s}'", .{aws_request.method});
            log.debug("Rest success code: '{d}'", .{Action.http_config.success_code});
            log.debug("Rest raw uri: '{s}'", .{Action.http_config.uri});
+            var al = std.ArrayList([]const u8).init(options.client.allocator);
+            defer al.deinit();
            aws_request.path = try buildPath(
                options.client.allocator,
                Action.http_config.uri,
                ActionRequest,
                request,
                !std.mem.eql(u8, Self.service_meta.sdk_id, "S3"),
+                &al,
            );
            defer options.client.allocator.free(aws_request.path);
            log.debug("Rest processed uri: '{s}'", .{aws_request.path});

@@ -151,7 +231,7 @@ pub fn Request(comptime request_action: anytype) type {
            defer nameAllocator.deinit();
            if (Self.service_meta.aws_protocol == .rest_json_1) {
                if (std.mem.eql(u8, "PUT", aws_request.method) or std.mem.eql(u8, "POST", aws_request.method)) {
-                    try json.stringify(request, .{ .whitespace = .{} }, buffer.writer());
+                    try json.stringify(request, .{ .whitespace = .{}, .emit_null = false, .exclude_fields = al.items }, buffer.writer());
                }
            }
            aws_request.body = buffer.items;

@@ -175,6 +255,7 @@ pub fn Request(comptime request_action: anytype) type {
                .dualstack = options.dualstack,
                .client = options.client,
                .signing_time = options.signing_time,
+                .diagnostics = options.diagnostics,
            });
        }

@@ -272,6 +353,10 @@ pub fn Request(comptime request_action: anytype) type {
            defer response.deinit();
            if (response.response_code != options.success_http_code) {
                try reportTraffic(options.client.allocator, "Call Failed", aws_request, response, log.err);
+                if (options.diagnostics) |d| {
+                    d.http_code = response.response_code;
+                    d.response_body = try d.allocator.dupe(u8, response.body);
+                }
                return error.HttpFailure;
            }

@@ -353,7 +438,7 @@ pub fn Request(comptime request_action: anytype) type {
            // First, we need to determine if we care about a response at all
            // If the expected result has no fields, there's no sense in
            // doing any more work. Let's bail early
-            var expected_body_field_len = std.meta.fields(action.Response).len;
+            comptime var expected_body_field_len = std.meta.fields(action.Response).len;
            if (@hasDecl(action.Response, "http_header"))
                expected_body_field_len -= std.meta.fields(@TypeOf(action.Response.http_header)).len;
            if (@hasDecl(action.Response, "http_payload")) {

@@ -379,8 +464,6 @@ pub fn Request(comptime request_action: anytype) type {

            // We don't care about the body if there are no fields we expect there...
            if (std.meta.fields(action.Response).len == 0 or expected_body_field_len == 0) {
-                // ^^ This should be redundant, but is necessary. I suspect it's a compiler quirk
-                //
                // Do we care if an unexpected body comes in?
                return FullResponseType{
                    .response = .{},

@@ -434,9 +517,9 @@ pub fn Request(comptime request_action: anytype) type {
            // And the response property below will pull whatever is the ActionResult object
            // We can grab index [0] as structs are guaranteed by zig to be returned in the order
            // declared, and we're declaring in that order in ServerResponse().
-            const real_response = @field(parsed_response, @typeInfo(response_types.NormalResponse).Struct.fields[0].name);
+            const real_response = @field(parsed_response, @typeInfo(response_types.NormalResponse).@"struct".fields[0].name);
            return FullResponseType{
-                .response = @field(real_response, @typeInfo(@TypeOf(real_response)).Struct.fields[0].name),
+                .response = @field(real_response, @typeInfo(@TypeOf(real_response)).@"struct".fields[0].name),
                .response_metadata = .{
                    .request_id = try options.client.allocator.dupe(u8, real_response.ResponseMetadata.RequestId),
                },

@@ -679,7 +762,7 @@ pub fn Request(comptime request_action: anytype) type {
}

fn coerceFromString(comptime T: type, val: []const u8) anyerror!T {
-    if (@typeInfo(T) == .Optional) return try coerceFromString(@typeInfo(T).Optional.child, val);
+    if (@typeInfo(T) == .optional) return try coerceFromString(@typeInfo(T).optional.child, val);
    // TODO: This is terrible...fix it
    switch (T) {
        bool => return std.ascii.eqlIgnoreCase(val, "true"),

@@ -706,8 +789,8 @@ fn parseInt(comptime T: type, val: []const u8) !T {

fn generalAllocPrint(allocator: std.mem.Allocator, val: anytype) !?[]const u8 {
    switch (@typeInfo(@TypeOf(val))) {
-        .Optional => if (val) |v| return generalAllocPrint(allocator, v) else return null,
-        .Array, .Pointer => return try std.fmt.allocPrint(allocator, "{s}", .{val}),
+        .optional => if (val) |v| return generalAllocPrint(allocator, v) else return null,
+        .array, .pointer => return try std.fmt.allocPrint(allocator, "{s}", .{val}),
        else => return try std.fmt.allocPrint(allocator, "{any}", .{val}),
    }
}

@@ -826,7 +909,7 @@ fn ServerResponse(comptime action: anytype) type {
        RequestId: []u8,
    };
    const Result = @Type(.{
-        .Struct = .{
+        .@"struct" = .{
            .layout = .auto,
            .fields = &[_]std.builtin.Type.StructField{
                .{

@@ -849,7 +932,7 @@ fn ServerResponse(comptime action: anytype) type {
        },
    });
    return @Type(.{
-        .Struct = .{
+        .@"struct" = .{
            .layout = .auto,
            .fields = &[_]std.builtin.Type.StructField{
                .{

@@ -915,8 +998,8 @@ fn FullResponse(comptime action: anytype) type {
}
fn safeFree(allocator: std.mem.Allocator, obj: anytype) void {
    switch (@typeInfo(@TypeOf(obj))) {
-        .Pointer => allocator.free(obj),
-        .Optional => if (obj) |o| safeFree(allocator, o),
+        .pointer => allocator.free(obj),
+        .optional => if (obj) |o| safeFree(allocator, o),
        else => {},
    }
}

@@ -930,6 +1013,7 @@ fn buildPath(
    comptime ActionRequest: type,
    request: anytype,
    encode_slash: bool,
+    replaced_fields: *std.ArrayList([]const u8),
) ![]const u8 {
    var buffer = try std.ArrayList(u8).initCapacity(allocator, raw_uri.len);
    // const writer = buffer.writer();

@@ -951,6 +1035,7 @@ fn buildPath(
        const replacement_label = raw_uri[start..end];
        inline for (std.meta.fields(ActionRequest)) |field| {
            if (std.mem.eql(u8, request.fieldNameFor(field.name), replacement_label)) {
+                try replaced_fields.append(replacement_label);
                var replacement_buffer = try std.ArrayList(u8).initCapacity(allocator, raw_uri.len);
                defer replacement_buffer.deinit();
                var encoded_buffer = try std.ArrayList(u8).initCapacity(allocator, raw_uri.len);

@@ -1023,7 +1108,7 @@ fn buildQuery(allocator: std.mem.Allocator, request: anytype) ![]const u8 {
    var prefix = "?";
    if (@hasDecl(@TypeOf(request), "http_query")) {
        const query_arguments = @field(@TypeOf(request), "http_query");
-        inline for (@typeInfo(@TypeOf(query_arguments)).Struct.fields) |arg| {
+        inline for (@typeInfo(@TypeOf(query_arguments)).@"struct".fields) |arg| {
            const val = @field(request, arg.name);
            if (try addQueryArg(arg.type, prefix, @field(query_arguments, arg.name), val, writer))
                prefix = "&";

@@ -1034,13 +1119,13 @@ fn buildQuery(allocator: std.mem.Allocator, request: anytype) ![]const u8 {

fn addQueryArg(comptime ValueType: type, prefix: []const u8, key: []const u8, value: anytype, writer: anytype) !bool {
    switch (@typeInfo(@TypeOf(value))) {
-        .Optional => {
+        .optional => {
            if (value) |v|
                return try addQueryArg(ValueType, prefix, key, v, writer);
            return false;
        },
        // if this is a pointer, we want to make sure it is more than just a string
-        .Pointer => |ptr| {
+        .pointer => |ptr| {
            if (ptr.child == u8 or ptr.size != .Slice) {
                // This is just a string
                return try addBasicQueryArg(prefix, key, value, writer);

@@ -1052,7 +1137,7 @@ fn addQueryArg(comptime ValueType: type, prefix: []const u8, key: []const u8, value: anytype, writer: anytype) !bool {
            }
            return std.mem.eql(u8, "&", p);
        },
-        .Array => |arr| {
+        .array => |arr| {
            if (arr.child == u8)
                return try addBasicQueryArg(prefix, key, value, writer);
            var p = prefix;

@@ -1172,8 +1257,8 @@ fn reportTraffic(
fn typeForField(comptime T: type, comptime field_name: []const u8) !type {
    const ti = @typeInfo(T);
    switch (ti) {
-        .Struct => {
-            inline for (ti.Struct.fields) |field| {
+        .@"struct" => {
+            inline for (ti.@"struct".fields) |field| {
                if (std.mem.eql(u8, field.name, field_name))
                    return field.type;
            }

@ -1187,7 +1272,7 @@ test "custom serialization for map objects" {
|
||||||
const allocator = std.testing.allocator;
|
const allocator = std.testing.allocator;
|
||||||
var buffer = std.ArrayList(u8).init(allocator);
|
var buffer = std.ArrayList(u8).init(allocator);
|
||||||
defer buffer.deinit();
|
defer buffer.deinit();
|
||||||
var tags = try std.ArrayList(@typeInfo(try typeForField(services.lambda.tag_resource.Request, "tags")).Pointer.child).initCapacity(allocator, 2);
|
var tags = try std.ArrayList(@typeInfo(try typeForField(services.lambda.tag_resource.Request, "tags")).pointer.child).initCapacity(allocator, 2);
|
||||||
defer tags.deinit();
|
defer tags.deinit();
|
||||||
tags.appendAssumeCapacity(.{ .key = "Foo", .value = "Bar" });
|
tags.appendAssumeCapacity(.{ .key = "Foo", .value = "Bar" });
|
||||||
tags.appendAssumeCapacity(.{ .key = "Baz", .value = "Qux" });
|
tags.appendAssumeCapacity(.{ .key = "Baz", .value = "Qux" });
|
||||||
|
@ -1240,23 +1325,27 @@ test "REST Json v1 serializes lists in queries" {
|
||||||
}
|
}
|
||||||
test "REST Json v1 buildpath substitutes" {
|
test "REST Json v1 buildpath substitutes" {
|
||||||
const allocator = std.testing.allocator;
|
const allocator = std.testing.allocator;
|
||||||
|
var al = std.ArrayList([]const u8).init(allocator);
|
||||||
|
defer al.deinit();
|
||||||
const svs = Services(.{.lambda}){};
|
const svs = Services(.{.lambda}){};
|
||||||
const request = svs.lambda.list_functions.Request{
|
const request = svs.lambda.list_functions.Request{
|
||||||
.max_items = 1,
|
.max_items = 1,
|
||||||
};
|
};
|
||||||
const input_path = "https://myhost/{MaxItems}/";
|
const input_path = "https://myhost/{MaxItems}/";
|
||||||
const output_path = try buildPath(allocator, input_path, @TypeOf(request), request, true);
|
const output_path = try buildPath(allocator, input_path, @TypeOf(request), request, true, &al);
|
||||||
defer allocator.free(output_path);
|
defer allocator.free(output_path);
|
||||||
try std.testing.expectEqualStrings("https://myhost/1/", output_path);
|
try std.testing.expectEqualStrings("https://myhost/1/", output_path);
|
||||||
}
|
}
|
||||||
test "REST Json v1 buildpath handles restricted characters" {
|
test "REST Json v1 buildpath handles restricted characters" {
|
||||||
const allocator = std.testing.allocator;
|
const allocator = std.testing.allocator;
|
||||||
|
var al = std.ArrayList([]const u8).init(allocator);
|
||||||
|
defer al.deinit();
|
||||||
const svs = Services(.{.lambda}){};
|
const svs = Services(.{.lambda}){};
|
||||||
const request = svs.lambda.list_functions.Request{
|
const request = svs.lambda.list_functions.Request{
|
||||||
.marker = ":",
|
.marker = ":",
|
||||||
};
|
};
|
||||||
const input_path = "https://myhost/{Marker}/";
|
const input_path = "https://myhost/{Marker}/";
|
||||||
const output_path = try buildPath(allocator, input_path, @TypeOf(request), request, true);
|
const output_path = try buildPath(allocator, input_path, @TypeOf(request), request, true, &al);
|
||||||
defer allocator.free(output_path);
|
defer allocator.free(output_path);
|
||||||
try std.testing.expectEqualStrings("https://myhost/%3A/", output_path);
|
try std.testing.expectEqualStrings("https://myhost/%3A/", output_path);
|
||||||
}
|
}
|
||||||
|
@@ -1380,6 +1469,49 @@ const TestOptions = struct {

    const Self = @This();

+    /// Builtin hashmap for strings as keys.
+    /// Key memory is managed by the caller. Keys and values
+    /// will not automatically be freed.
+    pub fn StringCaseInsensitiveHashMap(comptime V: type) type {
+        return std.HashMap([]const u8, V, StringInsensitiveContext, std.hash_map.default_max_load_percentage);
+    }
+
+    pub const StringInsensitiveContext = struct {
+        pub fn hash(self: @This(), s: []const u8) u64 {
+            _ = self;
+            return hashString(s);
+        }
+        pub fn eql(self: @This(), a: []const u8, b: []const u8) bool {
+            _ = self;
+            return eqlString(a, b);
+        }
+    };
+
+    pub fn eqlString(a: []const u8, b: []const u8) bool {
+        return std.ascii.eqlIgnoreCase(a, b);
+    }
+
+    pub fn hashString(s: []const u8) u64 {
+        var buf: [1024]u8 = undefined;
+        if (s.len > buf.len) unreachable; // tolower has a debug assert, but we want non-debug check too
+        const lower_s = std.ascii.lowerString(buf[0..], s);
+        return std.hash.Wyhash.hash(0, lower_s);
+    }
+
+    fn expectNoDuplicateHeaders(self: *Self) !void {
+        // As header keys are
+        var hm = StringCaseInsensitiveHashMap(void).init(self.allocator);
+        try hm.ensureTotalCapacity(@intCast(self.request_headers.len));
+        defer hm.deinit();
+        for (self.request_headers) |h| {
+            if (hm.getKey(h.name)) |_| {
+                log.err("Duplicate key detected. Key name: {s}", .{h.name});
+                return error.duplicateKeyDetected;
+            }
+            try hm.put(h.name, {});
+        }
+    }
+
    fn expectHeader(self: *Self, name: []const u8, value: []const u8) !void {
        for (self.request_headers) |h|
            if (std.ascii.eqlIgnoreCase(name, h.name) and

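The case-insensitive map exists so duplicate-header detection treats "Content-Type" and "content-type" as the same key. A small illustrative usage of the declarations above (a hypothetical standalone test placed in the same file, not part of the diff):

    test "header lookups are case-insensitive (illustrative)" {
        var hm = TestOptions.StringCaseInsensitiveHashMap(void).init(std.testing.allocator);
        defer hm.deinit();
        try hm.put("Content-Type", {});
        // Differently-cased lookups hash and compare equal, so this finds the entry.
        try std.testing.expect(hm.getKey("content-type") != null);
    }
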
@ -1593,6 +1725,58 @@ test "query_no_input: sts getCallerIdentity comptime" {
|
||||||
try std.testing.expectEqualStrings("123456789012", call.response.account.?);
|
try std.testing.expectEqualStrings("123456789012", call.response.account.?);
|
||||||
try std.testing.expectEqualStrings("8f0d54da-1230-40f7-b4ac-95015c4b84cd", call.response_metadata.request_id);
|
try std.testing.expectEqualStrings("8f0d54da-1230-40f7-b4ac-95015c4b84cd", call.response_metadata.request_id);
|
||||||
}
|
}
|
||||||
|
test "query_with_input: iam getRole runtime" {
|
||||||
|
// sqs switched from query to json in aws sdk for go v2 commit f5a08768ef820ff5efd62a49ba50c61c9ca5dbcb
|
||||||
|
const allocator = std.testing.allocator;
|
||||||
|
var test_harness = TestSetup.init(.{
|
||||||
|
.allocator = allocator,
|
||||||
|
.server_response =
|
||||||
|
\\<GetRoleResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
|
||||||
|
\\<GetRoleResult>
|
||||||
|
\\ <Role>
|
||||||
|
\\ <Path>/application_abc/component_xyz/</Path>
|
||||||
|
\\ <Arn>arn:aws:iam::123456789012:role/application_abc/component_xyz/S3Access</Arn>
|
||||||
|
\\ <RoleName>S3Access</RoleName>
|
||||||
|
\\ <AssumeRolePolicyDocument>
|
||||||
|
\\ {"Version":"2012-10-17","Statement":[{"Effect":"Allow",
|
||||||
|
\\ "Principal":{"Service":["ec2.amazonaws.com"]},"Action":["sts:AssumeRole"]}]}
|
||||||
|
\\ </AssumeRolePolicyDocument>
|
||||||
|
\\ <CreateDate>2012-05-08T23:34:01Z</CreateDate>
|
||||||
|
\\ <RoleId>AROADBQP57FF2AEXAMPLE</RoleId>
|
||||||
|
\\ <RoleLastUsed>
|
||||||
|
\\ <LastUsedDate>2019-11-20T17:09:20Z</LastUsedDate>
|
||||||
|
\\ <Region>us-east-1</Region>
|
||||||
|
\\ </RoleLastUsed>
|
||||||
|
\\ </Role>
|
||||||
|
\\</GetRoleResult>
|
||||||
|
\\<ResponseMetadata>
|
||||||
|
\\ <RequestId>df37e965-9967-11e1-a4c3-270EXAMPLE04</RequestId>
|
||||||
|
\\</ResponseMetadata>
|
||||||
|
\\</GetRoleResponse>
|
||||||
|
,
|
||||||
|
.server_response_headers = &.{
|
||||||
|
.{ .name = "Content-Type", .value = "text/xml" },
|
||||||
|
.{ .name = "x-amzn-RequestId", .value = "df37e965-9967-11e1-a4c3-270EXAMPLE04" },
|
||||||
|
},
|
||||||
|
});
|
||||||
|
defer test_harness.deinit();
|
||||||
|
const options = try test_harness.start();
|
||||||
|
const iam = (Services(.{.iam}){}).iam;
|
||||||
|
const call = try test_harness.client.call(iam.get_role.Request{
|
||||||
|
.role_name = "S3Access",
|
||||||
|
}, options);
|
||||||
|
defer call.deinit();
|
||||||
|
test_harness.stop();
|
||||||
|
// Request expectations
|
||||||
|
try std.testing.expectEqual(std.http.Method.POST, test_harness.request_options.request_method);
|
||||||
|
try std.testing.expectEqualStrings("/", test_harness.request_options.request_target);
|
||||||
|
try std.testing.expectEqualStrings(
|
||||||
|
\\Action=GetRole&Version=2010-05-08&RoleName=S3Access
|
||||||
|
, test_harness.request_options.request_body);
|
||||||
|
// Response expectations
|
||||||
|
try std.testing.expectEqualStrings("arn:aws:iam::123456789012:role/application_abc/component_xyz/S3Access", call.response.role.arn);
|
||||||
|
try std.testing.expectEqualStrings("df37e965-9967-11e1-a4c3-270EXAMPLE04", call.response_metadata.request_id);
|
||||||
|
}
|
||||||
test "query_with_input: sts getAccessKeyInfo runtime" {
|
test "query_with_input: sts getAccessKeyInfo runtime" {
|
||||||
// sqs switched from query to json in aws sdk for go v2 commit f5a08768ef820ff5efd62a49ba50c61c9ca5dbcb
|
// sqs switched from query to json in aws sdk for go v2 commit f5a08768ef820ff5efd62a49ba50c61c9ca5dbcb
|
||||||
const allocator = std.testing.allocator;
|
const allocator = std.testing.allocator;
|
||||||
|
@ -1850,7 +2034,7 @@ test "rest_json_1_work_with_lambda: lambda tagResource (only), to excercise zig
|
||||||
defer test_harness.deinit();
|
defer test_harness.deinit();
|
||||||
const options = try test_harness.start();
|
const options = try test_harness.start();
|
||||||
const lambda = (Services(.{.lambda}){}).lambda;
|
const lambda = (Services(.{.lambda}){}).lambda;
|
||||||
var tags = try std.ArrayList(@typeInfo(try typeForField(lambda.tag_resource.Request, "tags")).Pointer.child).initCapacity(allocator, 1);
|
var tags = try std.ArrayList(@typeInfo(try typeForField(lambda.tag_resource.Request, "tags")).pointer.child).initCapacity(allocator, 1);
|
||||||
defer tags.deinit();
|
defer tags.deinit();
|
||||||
tags.appendAssumeCapacity(.{ .key = "Foo", .value = "Bar" });
|
tags.appendAssumeCapacity(.{ .key = "Foo", .value = "Bar" });
|
||||||
const req = services.lambda.tag_resource.Request{ .resource = "arn:aws:lambda:us-west-2:550620852718:function:awsome-lambda-LambdaStackawsomeLambda", .tags = tags.items };
|
const req = services.lambda.tag_resource.Request{ .resource = "arn:aws:lambda:us-west-2:550620852718:function:awsome-lambda-LambdaStackawsomeLambda", .tags = tags.items };
|
||||||
|
@ -1861,7 +2045,6 @@ test "rest_json_1_work_with_lambda: lambda tagResource (only), to excercise zig
|
||||||
try std.testing.expectEqual(std.http.Method.POST, test_harness.request_options.request_method);
|
try std.testing.expectEqual(std.http.Method.POST, test_harness.request_options.request_method);
|
||||||
try std.testing.expectEqualStrings(
|
try std.testing.expectEqualStrings(
|
||||||
\\{
|
\\{
|
||||||
\\ "Resource": "arn:aws:lambda:us-west-2:550620852718:function:awsome-lambda-LambdaStackawsomeLambda",
|
|
||||||
\\ "Tags": {
|
\\ "Tags": {
|
||||||
\\ "Foo": "Bar"
|
\\ "Foo": "Bar"
|
||||||
\\ }
|
\\ }
|
||||||
|
@@ -1872,6 +2055,45 @@
    // Response expectations
    try std.testing.expectEqualStrings("a521e152-6e32-4e67-9fb3-abc94e34551b", call.response_metadata.request_id);
}
+test "rest_json_1_url_parameters_not_in_request: lambda update_function_code" {
+    const allocator = std.testing.allocator;
+    var test_harness = TestSetup.init(.{
+        .allocator = allocator,
+        .server_response = "{\"CodeSize\": 42}",
+        .server_response_status = .ok,
+        .server_response_headers = &.{
+            .{ .name = "Content-Type", .value = "application/json" },
+            .{ .name = "x-amzn-RequestId", .value = "a521e152-6e32-4e67-9fb3-abc94e34551b" },
+        },
+    });
+    defer test_harness.deinit();
+    const options = try test_harness.start();
+    const lambda = (Services(.{.lambda}){}).lambda;
+    const architectures = [_][]const u8{"x86_64"};
+    const arches: [][]const u8 = @constCast(architectures[0..]);
+    const req = services.lambda.update_function_code.Request{
+        .function_name = "functionname",
+        .architectures = arches,
+        .zip_file = "zipfile",
+    };
+    const call = try Request(lambda.update_function_code).call(req, options);
+    defer call.deinit();
+    test_harness.stop();
+    // Request expectations
+    try std.testing.expectEqual(std.http.Method.PUT, test_harness.request_options.request_method);
+    try std.testing.expectEqualStrings(
+        \\{
+        \\ "ZipFile": "zipfile",
+        \\ "Architectures": [
+        \\ "x86_64"
+        \\ ]
+        \\}
+    , test_harness.request_options.request_body);
+    // Due to 17015, we see %253A instead of %3A
+    try std.testing.expectEqualStrings("/2015-03-31/functions/functionname/code", test_harness.request_options.request_target);
+    // Response expectations
+    try std.testing.expectEqualStrings("a521e152-6e32-4e67-9fb3-abc94e34551b", call.response_metadata.request_id);
+}
test "ec2_query_no_input: EC2 describe regions" {
    const allocator = std.testing.allocator;
    var test_harness = TestSetup.init(.{

@ -1992,6 +2214,9 @@ test "rest_xml_anything_but_s3: CloudFront list key groups" {
|
||||||
try std.testing.expectEqual(@as(i64, 100), call.response.key_group_list.?.max_items);
|
try std.testing.expectEqual(@as(i64, 100), call.response.key_group_list.?.max_items);
|
||||||
}
|
}
|
||||||
test "rest_xml_with_input: S3 put object" {
|
test "rest_xml_with_input: S3 put object" {
|
||||||
|
// const old = std.testing.log_level;
|
||||||
|
// defer std.testing.log_level = old;
|
||||||
|
// std.testing.log_level = .debug;
|
||||||
const allocator = std.testing.allocator;
|
const allocator = std.testing.allocator;
|
||||||
var test_harness = TestSetup.init(.{
|
var test_harness = TestSetup.init(.{
|
||||||
.allocator = allocator,
|
.allocator = allocator,
|
||||||
|
@ -2018,13 +2243,14 @@ test "rest_xml_with_input: S3 put object" {
|
||||||
.body = "bar",
|
.body = "bar",
|
||||||
.storage_class = "STANDARD",
|
.storage_class = "STANDARD",
|
||||||
}, s3opts);
|
}, s3opts);
|
||||||
|
defer result.deinit();
|
||||||
for (test_harness.request_options.request_headers) |header| {
|
for (test_harness.request_options.request_headers) |header| {
|
||||||
std.log.info("Request header: {s}: {s}", .{ header.name, header.value });
|
std.log.info("Request header: {s}: {s}", .{ header.name, header.value });
|
||||||
}
|
}
|
||||||
|
try test_harness.request_options.expectNoDuplicateHeaders();
|
||||||
std.log.info("PutObject Request id: {s}", .{result.response_metadata.request_id});
|
std.log.info("PutObject Request id: {s}", .{result.response_metadata.request_id});
|
||||||
std.log.info("PutObject etag: {s}", .{result.response.e_tag.?});
|
std.log.info("PutObject etag: {s}", .{result.response.e_tag.?});
|
||||||
//mysfitszj3t6webstack-hostingbucketa91a61fe-1ep3ezkgwpxr0.s3.us-west-2.amazonaws.com
|
//mysfitszj3t6webstack-hostingbucketa91a61fe-1ep3ezkgwpxr0.s3.us-west-2.amazonaws.com
|
||||||
defer result.deinit();
|
|
||||||
test_harness.stop();
|
test_harness.stop();
|
||||||
// Request expectations
|
// Request expectations
|
||||||
try std.testing.expectEqual(std.http.Method.PUT, test_harness.request_options.request_method);
|
try std.testing.expectEqual(std.http.Method.PUT, test_harness.request_options.request_method);
|
||||||
|
|
|
@ -11,7 +11,56 @@ const std = @import("std");
|
||||||
const builtin = @import("builtin");
|
const builtin = @import("builtin");
|
||||||
const auth = @import("aws_authentication.zig");
|
const auth = @import("aws_authentication.zig");
|
||||||
|
|
||||||
const log = std.log.scoped(.aws_credentials);
|
const scoped_log = std.log.scoped(.aws_credentials);
|
||||||
|
/// Specifies logging level. This should not be touched unless the normal
|
||||||
|
/// zig logging capabilities are inaccessible (e.g. during a build)
|
||||||
|
pub var log_level: std.log.Level = .debug;
|
||||||
|
|
||||||
|
/// Turn off logging completely
|
||||||
|
pub var logs_off: bool = false;
|
||||||
|
const log = struct {
|
||||||
|
/// Log an error message. This log level is intended to be used
|
||||||
|
/// when something has gone wrong. This might be recoverable or might
|
||||||
|
/// be followed by the program exiting.
|
||||||
|
pub fn err(
|
||||||
|
comptime format: []const u8,
|
||||||
|
args: anytype,
|
||||||
|
) void {
|
||||||
|
if (!logs_off and @intFromEnum(std.log.Level.err) <= @intFromEnum(log_level))
|
||||||
|
scoped_log.err(format, args);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Log a warning message. This log level is intended to be used if
|
||||||
|
/// it is uncertain whether something has gone wrong or not, but the
|
||||||
|
/// circumstances would be worth investigating.
|
||||||
|
pub fn warn(
|
||||||
|
comptime format: []const u8,
|
||||||
|
args: anytype,
|
||||||
|
) void {
|
||||||
|
if (!logs_off and @intFromEnum(std.log.Level.warn) <= @intFromEnum(log_level))
|
||||||
|
scoped_log.warn(format, args);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Log an info message. This log level is intended to be used for
|
||||||
|
/// general messages about the state of the program.
|
||||||
|
pub fn info(
|
||||||
|
comptime format: []const u8,
|
||||||
|
args: anytype,
|
||||||
|
) void {
|
||||||
|
if (!logs_off and @intFromEnum(std.log.Level.info) <= @intFromEnum(log_level))
|
||||||
|
scoped_log.info(format, args);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Log a debug message. This log level is intended to be used for
|
||||||
|
/// messages which are only useful for debugging.
|
||||||
|
pub fn debug(
|
||||||
|
comptime format: []const u8,
|
||||||
|
args: anytype,
|
||||||
|
) void {
|
||||||
|
if (!logs_off and @intFromEnum(std.log.Level.debug) <= @intFromEnum(log_level))
|
||||||
|
scoped_log.debug(format, args);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
pub const Profile = struct {
|
pub const Profile = struct {
|
||||||
/// Credential file. Defaults to AWS_SHARED_CREDENTIALS_FILE or ~/.aws/credentials
|
/// Credential file. Defaults to AWS_SHARED_CREDENTIALS_FILE or ~/.aws/credentials
|
||||||
|
|
|
@ -17,7 +17,57 @@ const CN_NORTHWEST_1_HASH = std.hash_map.hashString("cn-northwest-1");
|
||||||
const US_ISO_EAST_1_HASH = std.hash_map.hashString("us-iso-east-1");
|
const US_ISO_EAST_1_HASH = std.hash_map.hashString("us-iso-east-1");
|
||||||
const US_ISOB_EAST_1_HASH = std.hash_map.hashString("us-isob-east-1");
|
const US_ISOB_EAST_1_HASH = std.hash_map.hashString("us-isob-east-1");
|
||||||
|
|
||||||
const log = std.log.scoped(.awshttp);
|
const scoped_log = std.log.scoped(.awshttp);
|
||||||
|
|
||||||
|
/// Specifies logging level. This should not be touched unless the normal
|
||||||
|
/// zig logging capabilities are inaccessible (e.g. during a build)
|
||||||
|
pub var log_level: std.log.Level = .debug;
|
||||||
|
|
||||||
|
/// Turn off logging completely
|
||||||
|
pub var logs_off: bool = false;
|
||||||
|
const log = struct {
|
||||||
|
/// Log an error message. This log level is intended to be used
|
||||||
|
/// when something has gone wrong. This might be recoverable or might
|
||||||
|
/// be followed by the program exiting.
|
||||||
|
pub fn err(
|
||||||
|
comptime format: []const u8,
|
||||||
|
args: anytype,
|
||||||
|
) void {
|
||||||
|
if (!logs_off and @intFromEnum(std.log.Level.err) <= @intFromEnum(log_level))
|
||||||
|
scoped_log.err(format, args);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Log a warning message. This log level is intended to be used if
|
||||||
|
/// it is uncertain whether something has gone wrong or not, but the
|
||||||
|
/// circumstances would be worth investigating.
|
||||||
|
pub fn warn(
|
||||||
|
comptime format: []const u8,
|
||||||
|
args: anytype,
|
||||||
|
) void {
|
||||||
|
if (!logs_off and @intFromEnum(std.log.Level.warn) <= @intFromEnum(log_level))
|
||||||
|
scoped_log.warn(format, args);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Log an info message. This log level is intended to be used for
|
||||||
|
/// general messages about the state of the program.
|
||||||
|
pub fn info(
|
||||||
|
comptime format: []const u8,
|
||||||
|
args: anytype,
|
||||||
|
) void {
|
||||||
|
if (!logs_off and @intFromEnum(std.log.Level.info) <= @intFromEnum(log_level))
|
||||||
|
scoped_log.info(format, args);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Log a debug message. This log level is intended to be used for
|
||||||
|
/// messages which are only useful for debugging.
|
||||||
|
pub fn debug(
|
||||||
|
comptime format: []const u8,
|
||||||
|
args: anytype,
|
||||||
|
) void {
|
||||||
|
if (!logs_off and @intFromEnum(std.log.Level.debug) <= @intFromEnum(log_level))
|
||||||
|
scoped_log.debug(format, args);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
pub const AwsError = error{
|
pub const AwsError = error{
|
||||||
AddHeaderError,
|
AddHeaderError,
|
||||||
|
@@ -190,6 +240,16 @@ pub const AwsHttp = struct {
            .response_storage = .{ .dynamic = &resp_payload },
            .raw_uri = true,
            .location = .{ .url = url },
+            // we need full control over most headers. I wish libraries would do a
+            // better job of having default headers as an opt-in...
+            .headers = .{
+                .host = .omit,
+                .authorization = .omit,
+                .user_agent = .omit,
+                .connection = .default, // we can let the client manage this...it has no impact to us
+                .accept_encoding = .default, // accept encoding (gzip, deflate) *should* be ok
+                .content_type = .omit,
+            },
            .extra_headers = headers.items,
        });
        // TODO: Need to test for payloads > 2^14. I believe one of our tests does this, but not sure

@@ -241,6 +301,7 @@ pub const AwsHttp = struct {

fn getRegion(service: []const u8, region: []const u8) []const u8 {
    if (std.mem.eql(u8, service, "cloudfront")) return "us-east-1";
+    if (std.mem.eql(u8, service, "iam")) return "us-east-1";
    return region;
}

@@ -328,6 +389,26 @@ fn endpointException(
    dualstack: []const u8,
    domain: []const u8,
) !?EndPoint {
+    // Global endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#global-endpoints):
+    // ✓ Amazon CloudFront
+    // AWS Global Accelerator
+    // ✓ AWS Identity and Access Management (IAM)
+    // AWS Network Manager
+    // AWS Organizations
+    // Amazon Route 53
+    // AWS Shield Advanced
+    // AWS WAF Classic
+
+    if (std.mem.eql(u8, service, "iam")) {
+        return EndPoint{
+            .uri = try allocator.dupe(u8, "https://iam.amazonaws.com"),
+            .host = try allocator.dupe(u8, "iam.amazonaws.com"),
+            .scheme = "https",
+            .port = 443,
+            .allocator = allocator,
+            .path = try allocator.dupe(u8, request.path),
+        };
+    }
    if (std.mem.eql(u8, service, "cloudfront")) {
        return EndPoint{
            .uri = try allocator.dupe(u8, "https://cloudfront.amazonaws.com"),

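Taken together with the getRegion change above, this routes IAM, which has no regional endpoints, to the global iam.amazonaws.com host while signing the request for us-east-1; CloudFront already followed the same pattern.
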
@@ -22,7 +22,7 @@ pub const Result = struct {
            self.allocator.free(h.value);
        }
        self.allocator.free(self.headers);
-        log.debug("http result deinit complete", .{});
+        //log.debug("http result deinit complete", .{});
        return;
    }
};

@ -3,8 +3,57 @@ const base = @import("aws_http_base.zig");
|
||||||
const auth = @import("aws_authentication.zig");
|
const auth = @import("aws_authentication.zig");
|
||||||
const date = @import("date.zig");
|
const date = @import("date.zig");
|
||||||
|
|
||||||
const log = std.log.scoped(.aws_signing);
|
const scoped_log = std.log.scoped(.aws_signing);
|
||||||
|
|
||||||
|
/// Specifies logging level. This should not be touched unless the normal
|
||||||
|
/// zig logging capabilities are inaccessible (e.g. during a build)
|
||||||
|
pub var log_level: std.log.Level = .debug;
|
||||||
|
|
||||||
|
/// Turn off logging completely
|
||||||
|
pub var logs_off: bool = false;
|
||||||
|
const log = struct {
|
||||||
|
/// Log an error message. This log level is intended to be used
|
||||||
|
/// when something has gone wrong. This might be recoverable or might
|
||||||
|
/// be followed by the program exiting.
|
||||||
|
pub fn err(
|
||||||
|
comptime format: []const u8,
|
||||||
|
args: anytype,
|
||||||
|
) void {
|
||||||
|
if (!logs_off and @intFromEnum(std.log.Level.err) <= @intFromEnum(log_level))
|
||||||
|
scoped_log.err(format, args);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Log a warning message. This log level is intended to be used if
|
||||||
|
/// it is uncertain whether something has gone wrong or not, but the
|
||||||
|
/// circumstances would be worth investigating.
|
||||||
|
pub fn warn(
|
||||||
|
comptime format: []const u8,
|
||||||
|
args: anytype,
|
||||||
|
) void {
|
||||||
|
if (!logs_off and @intFromEnum(std.log.Level.warn) <= @intFromEnum(log_level))
|
||||||
|
scoped_log.warn(format, args);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Log an info message. This log level is intended to be used for
|
||||||
|
/// general messages about the state of the program.
|
||||||
|
pub fn info(
|
||||||
|
comptime format: []const u8,
|
||||||
|
args: anytype,
|
||||||
|
) void {
|
||||||
|
if (!logs_off and @intFromEnum(std.log.Level.info) <= @intFromEnum(log_level))
|
||||||
|
scoped_log.info(format, args);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Log a debug message. This log level is intended to be used for
|
||||||
|
/// messages which are only useful for debugging.
|
||||||
|
pub fn debug(
|
||||||
|
comptime format: []const u8,
|
||||||
|
args: anytype,
|
||||||
|
) void {
|
||||||
|
if (!logs_off and @intFromEnum(std.log.Level.debug) <= @intFromEnum(log_level))
|
||||||
|
scoped_log.debug(format, args);
|
||||||
|
}
|
||||||
|
};
|
||||||
// TODO: Remove this?! This is an aws_signing, so we should know a thing
|
// TODO: Remove this?! This is an aws_signing, so we should know a thing
|
||||||
// or two about aws. So perhaps the right level of abstraction here
|
// or two about aws. So perhaps the right level of abstraction here
|
||||||
// is to have our service signing idiosyncracies dealt with in this
|
// is to have our service signing idiosyncracies dealt with in this
|
||||||
|
|
src/json.zig (142 changed lines)

@@ -1560,21 +1560,21 @@ fn skipValue(tokens: *TokenStream) SkipValueError!void {
 
 fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options: ParseOptions) !T {
     switch (@typeInfo(T)) {
-        .Bool => {
+        .bool => {
             return switch (token) {
                 .True => true,
                 .False => false,
                 else => error.UnexpectedToken,
             };
         },
-        .Float, .ComptimeFloat => {
+        .float, .comptime_float => {
             const numberToken = switch (token) {
                 .Number => |n| n,
                 else => return error.UnexpectedToken,
             };
             return try std.fmt.parseFloat(T, numberToken.slice(tokens.slice, tokens.i - 1));
         },
-        .Int, .ComptimeInt => {
+        .int, .comptime_int => {
             const numberToken = switch (token) {
                 .Number => |n| n,
                 else => return error.UnexpectedToken,
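Most of the json.zig churn in this hunk and the ones below is mechanical: newer Zig (0.14-era; the exact version boundary is approximate) renamed the `std.builtin.Type` union fields from capitalized (`.Bool`, `.Pointer`, `.Struct`) to lower-case, with keyword names quoted (`.@"struct"`, `.@"enum"`, `.@"union"`). A minimal sketch of the pattern against the newer spelling, not taken from this repository:

    const std = @import("std");

    // Returns the number of fields for struct types, 0 otherwise.
    fn fieldCount(comptime T: type) usize {
        return switch (@typeInfo(T)) {
            // Older compilers spelled this `.Struct` and accessed `info.Struct.fields`.
            .@"struct" => |info| info.fields.len,
            else => 0,
        };
    }

    test "fieldCount" {
        const Point = struct { x: f32, y: f32 };
        try std.testing.expectEqual(@as(usize, 2), fieldCount(Point));
        try std.testing.expectEqual(@as(usize, 0), fieldCount(u32));
    }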
@@ -1587,14 +1587,14 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
             if (std.math.round(float) != float) return error.InvalidNumber;
             return @as(T, @intFromFloat(float));
         },
-        .Optional => |optionalInfo| {
+        .optional => |optionalInfo| {
             if (token == .Null) {
                 return null;
             } else {
                 return try parseInternal(optionalInfo.child, token, tokens, options);
             }
         },
-        .Enum => |enumInfo| {
+        .@"enum" => |enumInfo| {
             switch (token) {
                 .Number => |numberToken| {
                     if (!numberToken.is_integer) return error.UnexpectedToken;
@@ -1618,7 +1618,7 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
                 else => return error.UnexpectedToken,
             }
         },
-        .Union => |unionInfo| {
+        .@"union" => |unionInfo| {
            if (unionInfo.tag_type) |_| {
                // try each of the union fields until we find one that matches
                inline for (unionInfo.fields) |u_field| {
@@ -1642,7 +1642,7 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
                 @compileError("Unable to parse into untagged union '" ++ @typeName(T) ++ "'");
             }
         },
-        .Struct => |structInfo| {
+        .@"struct" => |structInfo| {
             switch (token) {
                 .ObjectBegin => {},
                 else => return error.UnexpectedToken,
@@ -1736,7 +1736,7 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
             }
             return r;
         },
-        .Array => |arrayInfo| {
+        .array => |arrayInfo| {
             switch (token) {
                 .ArrayBegin => {
                     var r: T = undefined;
@@ -1770,7 +1770,7 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
                 else => return error.UnexpectedToken,
             }
         },
-        .Pointer => |ptrInfo| {
+        .pointer => |ptrInfo| {
             const allocator = options.allocator orelse return error.AllocatorRequired;
             switch (ptrInfo.size) {
                 .One => {
@@ -1863,8 +1863,8 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
 fn typeForField(comptime T: type, comptime field_name: []const u8) ?type {
     const ti = @typeInfo(T);
     switch (ti) {
-        .Struct => {
-            inline for (ti.Struct.fields) |field| {
+        .@"struct" => {
+            inline for (ti.@"struct".fields) |field| {
                 if (std.mem.eql(u8, field.name, field_name))
                     return field.type;
             }
@@ -1878,14 +1878,14 @@ fn isMapPattern(comptime T: type) bool {
     // We should be getting a type that is a pointer to a slice.
     // Let's just double check before proceeding
     const ti = @typeInfo(T);
-    if (ti != .Pointer) return false;
-    if (ti.Pointer.size != .Slice) return false;
-    const ti_child = @typeInfo(ti.Pointer.child);
-    if (ti_child != .Struct) return false;
-    if (ti_child.Struct.fields.len != 2) return false;
+    if (ti != .pointer) return false;
+    if (ti.pointer.size != .Slice) return false;
+    const ti_child = @typeInfo(ti.pointer.child);
+    if (ti_child != .@"struct") return false;
+    if (ti_child.@"struct".fields.len != 2) return false;
     var key_found = false;
     var value_found = false;
-    inline for (ti_child.Struct.fields) |field| {
+    inline for (ti_child.@"struct".fields) |field| {
         if (std.mem.eql(u8, "key", field.name))
             key_found = true;
         if (std.mem.eql(u8, "value", field.name))
@@ -1903,13 +1903,13 @@ pub fn parse(comptime T: type, tokens: *TokenStream, options: ParseOptions) !T {
 /// Should be called with the same type and `ParseOptions` that were passed to `parse`
 pub fn parseFree(comptime T: type, value: T, options: ParseOptions) void {
     switch (@typeInfo(T)) {
-        .Bool, .Float, .ComptimeFloat, .Int, .ComptimeInt, .Enum => {},
-        .Optional => {
+        .bool, .float, .comptime_float, .int, .comptime_int, .@"enum" => {},
+        .optional => {
             if (value) |v| {
                 return parseFree(@TypeOf(v), v, options);
             }
         },
-        .Union => |unionInfo| {
+        .@"union" => |unionInfo| {
             if (unionInfo.tag_type) |UnionTagType| {
                 inline for (unionInfo.fields) |u_field| {
                     if (value == @field(UnionTagType, u_field.name)) {
@@ -1921,17 +1921,17 @@ pub fn parseFree(comptime T: type, value: T, options: ParseOptions) void {
                 unreachable;
             }
         },
-        .Struct => |structInfo| {
+        .@"struct" => |structInfo| {
             inline for (structInfo.fields) |field| {
                 parseFree(field.type, @field(value, field.name), options);
             }
         },
-        .Array => |arrayInfo| {
+        .array => |arrayInfo| {
             for (value) |v| {
                 parseFree(arrayInfo.child, v, options);
             }
         },
-        .Pointer => |ptrInfo| {
+        .pointer => |ptrInfo| {
             const allocator = options.allocator orelse unreachable;
             switch (ptrInfo.size) {
                 .One => {
@@ -2756,6 +2756,10 @@ pub const StringifyOptions = struct {
         }
     };
 
+    emit_null: bool = true,
+
+    exclude_fields: ?[][]const u8 = null,
+
     /// Controls the whitespace emitted
     whitespace: ?Whitespace = null,
 
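A rough usage sketch for the two new options, assuming the `stringify(value, options, out_stream)` call shape used throughout this file; the import name `json.zig` and the expected output are illustrative assumptions. With `emit_null = false`, optional fields that are currently null are skipped; `exclude_fields` drops fields by their serialized name:

    const std = @import("std");
    const json = @import("json.zig"); // assumed local import name for this file

    const Demo = struct {
        name: []const u8,
        nickname: ?[]const u8 = null,
        secret: []const u8,
    };

    pub fn main() !void {
        var buf = std.ArrayList(u8).init(std.heap.page_allocator);
        defer buf.deinit();

        var exclude = [_][]const u8{"secret"};
        try json.stringify(
            Demo{ .name = "aws-zig", .secret = "do-not-print" },
            .{ .emit_null = false, .exclude_fields = &exclude },
            buf.writer(),
        );
        // Expected shape (hypothetical): {"name":"aws-zig"}
        std.debug.print("{s}\n", .{buf.items});
    }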
@@ -2807,38 +2811,38 @@ pub fn stringify(
 ) !void {
     const T = @TypeOf(value);
     switch (@typeInfo(T)) {
-        .Float, .ComptimeFloat => {
+        .float, .comptime_float => {
             return std.fmt.format(out_stream, "{e}", .{value});
         },
-        .Int, .ComptimeInt => {
+        .int, .comptime_int => {
             return std.fmt.formatIntValue(value, "", std.fmt.FormatOptions{}, out_stream);
         },
-        .Bool => {
+        .bool => {
             return out_stream.writeAll(if (value) "true" else "false");
         },
-        .Null => {
+        .null => {
             return out_stream.writeAll("null");
         },
-        .Optional => {
+        .optional => {
             if (value) |payload| {
                 return try stringify(payload, options, out_stream);
             } else {
                 return try stringify(null, options, out_stream);
             }
         },
-        .Enum => {
+        .@"enum" => {
             if (comptime std.meta.hasFn(T, "jsonStringify")) {
                 return value.jsonStringify(options, out_stream);
             }
 
             @compileError("Unable to stringify enum '" ++ @typeName(T) ++ "'");
         },
-        .Union => {
+        .@"union" => {
             if (comptime std.meta.hasFn(T, "jsonStringify")) {
                 return value.jsonStringify(options, out_stream);
             }
 
-            const info = @typeInfo(T).Union;
+            const info = @typeInfo(T).@"union";
             if (info.tag_type) |UnionTagType| {
                 inline for (info.fields) |u_field| {
                     if (value == @field(UnionTagType, u_field.name)) {
@@ -2849,13 +2853,13 @@ pub fn stringify(
             @compileError("Unable to stringify untagged union '" ++ @typeName(T) ++ "'");
             }
         },
-        .Struct => |S| {
+        .@"struct" => |S| {
             if (comptime std.meta.hasFn(T, "jsonStringify")) {
                 return value.jsonStringify(options, out_stream);
             }
 
             try out_stream.writeByte('{');
-            comptime var field_output = false;
+            var field_output = false;
             var child_options = options;
             if (child_options.whitespace) |*child_whitespace| {
                 child_whitespace.indent_level += 1;
@@ -2864,34 +2868,46 @@ pub fn stringify(
                 // don't include void fields
                 if (Field.type == void) continue;
 
-                if (!field_output) {
-                    field_output = true;
-                } else {
-                    try out_stream.writeByte(',');
-                }
-                if (child_options.whitespace) |child_whitespace| {
-                    try out_stream.writeByte('\n');
-                    try child_whitespace.outputIndent(out_stream);
-                }
-                var field_written = false;
-                if (comptime std.meta.hasFn(T, "jsonStringifyField"))
-                    field_written = try value.jsonStringifyField(Field.name, child_options, out_stream);
+                var output_this_field = true;
+                if (!options.emit_null and @typeInfo(Field.type) == .optional and @field(value, Field.name) == null) output_this_field = false;
 
-                if (!field_written) {
-                    if (comptime std.meta.hasFn(T, "fieldNameFor")) {
-                        const name = value.fieldNameFor(Field.name);
-                        try stringify(name, options, out_stream);
-                    } else {
-                        try stringify(Field.name, options, out_stream);
-                    }
-
-                    try out_stream.writeByte(':');
-                    if (child_options.whitespace) |child_whitespace| {
-                        if (child_whitespace.separator) {
-                            try out_stream.writeByte(' ');
-                        }
-                    }
-                }
-                try stringify(@field(value, Field.name), child_options, out_stream);
+                const final_name = if (comptime std.meta.hasFn(T, "fieldNameFor"))
+                    value.fieldNameFor(Field.name)
+                else
+                    Field.name;
+                if (options.exclude_fields) |exclude_fields| {
+                    for (exclude_fields) |exclude_field| {
+                        if (std.mem.eql(u8, final_name, exclude_field)) {
+                            output_this_field = false;
+                        }
+                    }
+                }
+
+                if (!field_output) {
+                    field_output = output_this_field;
+                } else {
+                    if (output_this_field) try out_stream.writeByte(',');
+                }
+                if (child_options.whitespace) |child_whitespace| {
+                    if (output_this_field) try out_stream.writeByte('\n');
+                    if (output_this_field) try child_whitespace.outputIndent(out_stream);
+                }
+                var field_written = false;
+                if (comptime std.meta.hasFn(T, "jsonStringifyField")) {
+                    if (output_this_field) field_written = try value.jsonStringifyField(Field.name, child_options, out_stream);
+                }
+
+                if (!field_written) {
+                    if (output_this_field) {
+                        try stringify(final_name, options, out_stream);
+                        try out_stream.writeByte(':');
+                    }
+                    if (child_options.whitespace) |child_whitespace| {
+                        if (child_whitespace.separator) {
+                            if (output_this_field) try out_stream.writeByte(' ');
+                        }
+                    }
+                    if (output_this_field) try stringify(@field(value, Field.name), child_options, out_stream);
+                }
             }
         }
         if (field_output) {
@@ -2903,10 +2919,10 @@ pub fn stringify(
             try out_stream.writeByte('}');
             return;
         },
-        .ErrorSet => return stringify(@as([]const u8, @errorName(value)), options, out_stream),
-        .Pointer => |ptr_info| switch (ptr_info.size) {
+        .error_set => return stringify(@as([]const u8, @errorName(value)), options, out_stream),
+        .pointer => |ptr_info| switch (ptr_info.size) {
             .One => switch (@typeInfo(ptr_info.child)) {
-                .Array => {
+                .array => {
                     const Slice = []const std.meta.Elem(ptr_info.child);
                     return stringify(@as(Slice, value), options, out_stream);
                 },
@@ -2985,8 +3001,8 @@ pub fn stringify(
             },
             else => @compileError("Unable to stringify type '" ++ @typeName(T) ++ "'"),
         },
-        .Array => return stringify(&value, options, out_stream),
-        .Vector => |info| {
+        .array => return stringify(&value, options, out_stream),
+        .vector => |info| {
             const array: [info.len]info.child = value;
             return stringify(&array, options, out_stream);
         },
@@ -20,7 +20,7 @@ pub fn Services(comptime service_imports: anytype) type {
 
     // finally, generate the type
     return @Type(.{
-        .Struct = .{
+        .@"struct" = .{
             .layout = .auto,
             .fields = &fields,
             .decls = &[_]std.builtin.Type.Declaration{},
src/url.zig (12 changed lines)
@@ -24,7 +24,7 @@ fn encodeStruct(
     comptime options: EncodingOptions,
 ) !bool {
     var rc = first;
-    inline for (@typeInfo(@TypeOf(obj)).Struct.fields) |field| {
+    inline for (@typeInfo(@TypeOf(obj)).@"struct".fields) |field| {
         const field_name = try options.field_name_transformer(allocator, field.name);
         defer if (options.field_name_transformer.* != defaultTransformer)
             allocator.free(field_name);
@@ -47,10 +47,10 @@ pub fn encodeInternal(
     // @compileLog(@typeInfo(@TypeOf(obj)));
     var rc = first;
     switch (@typeInfo(@TypeOf(obj))) {
-        .Optional => if (obj) |o| {
+        .optional => if (obj) |o| {
            rc = try encodeInternal(allocator, parent, field_name, first, o, writer, options);
        },
-        .Pointer => |ti| if (ti.size == .One) {
+        .pointer => |ti| if (ti.size == .One) {
            rc = try encodeInternal(allocator, parent, field_name, first, obj.*, writer, options);
        } else {
            if (!first) _ = try writer.write("&");
@@ -61,7 +61,7 @@ pub fn encodeInternal(
             try writer.print("{s}{s}={any}", .{ parent, field_name, obj });
             rc = false;
         },
-        .Struct => if (std.mem.eql(u8, "", field_name)) {
+        .@"struct" => if (std.mem.eql(u8, "", field_name)) {
             rc = try encodeStruct(allocator, parent, first, obj, writer, options);
         } else {
             // TODO: It would be lovely if we could concat at compile time or allocPrint at runtime
@@ -73,12 +73,12 @@ pub fn encodeInternal(
             rc = try encodeStruct(allocator, new_parent, first, obj, writer, options);
             // try encodeStruct(parent ++ field_name ++ ".", first, obj, writer, options);
         },
-        .Array => {
+        .array => {
             if (!first) _ = try writer.write("&");
             try writer.print("{s}{s}={s}", .{ parent, field_name, obj });
             rc = false;
         },
-        .Int, .ComptimeInt, .Float, .ComptimeFloat => {
+        .int, .comptime_int, .float, .comptime_float => {
             if (!first) _ = try writer.write("&");
             try writer.print("{s}{s}={d}", .{ parent, field_name, obj });
             rc = false;
@@ -96,14 +96,14 @@ pub fn parse(comptime T: type, source: []const u8, options: ParseOptions) !Parse
 
 fn parseInternal(comptime T: type, element: *xml.Element, options: ParseOptions) !T {
     switch (@typeInfo(T)) {
-        .Bool => {
+        .bool => {
             if (std.ascii.eqlIgnoreCase("true", element.children.items[0].CharData))
                 return true;
             if (std.ascii.eqlIgnoreCase("false", element.children.items[0].CharData))
                 return false;
             return error.UnexpectedToken;
         },
-        .Float, .ComptimeFloat => {
+        .float, .comptime_float => {
             return std.fmt.parseFloat(T, element.children.items[0].CharData) catch |e| {
                 if (log_parse_traces) {
                     std.log.err(
@@ -121,7 +121,7 @@ fn parseInternal(comptime T: type, element: *xml.Element, options: ParseOptions)
                 return e;
             };
         },
-        .Int, .ComptimeInt => {
+        .int, .comptime_int => {
             // 2021-10-05T16:39:45.000Z
             return std.fmt.parseInt(T, element.children.items[0].CharData, 10) catch |e| {
                 if (element.children.items[0].CharData[element.children.items[0].CharData.len - 1] == 'Z') {
@@ -146,7 +146,7 @@ fn parseInternal(comptime T: type, element: *xml.Element, options: ParseOptions)
                 return e;
             };
         },
-        .Optional => |optional_info| {
+        .optional => |optional_info| {
             if (element.children.items.len == 0) {
                 // This is almost certainly incomplete. Empty strings? xsi:nil?
                 return null;
@@ -156,7 +156,7 @@ fn parseInternal(comptime T: type, element: *xml.Element, options: ParseOptions)
                 return try parseInternal(optional_info.child, element, options);
             }
         },
-        .Enum => |enum_info| {
+        .@"enum" => |enum_info| {
             _ = enum_info;
             // const numeric: ?enum_info.tag_type = std.fmt.parseInt(enum_info.tag_type, element.children.items[0].CharData, 10) catch null;
             // if (numeric) |num| {
@@ -166,7 +166,7 @@ fn parseInternal(comptime T: type, element: *xml.Element, options: ParseOptions)
             // return std.meta.stringToEnum(T, element.CharData);
             // }
         },
-        .Union => |union_info| {
+        .@"union" => |union_info| {
             if (union_info.tag_type) |_| {
                 // try each of the union fields until we find one that matches
                 // inline for (union_info.fields) |u_field| {
@@ -189,7 +189,7 @@ fn parseInternal(comptime T: type, element: *xml.Element, options: ParseOptions)
             }
             @compileError("Unable to parse into untagged union '" ++ @typeName(T) ++ "'");
         },
-        .Struct => |struct_info| {
+        .@"struct" => |struct_info| {
             var r: T = undefined;
             var fields_seen = [_]bool{false} ** struct_info.fields.len;
             var fields_set: u64 = 0;
@@ -244,7 +244,7 @@ fn parseInternal(comptime T: type, element: *xml.Element, options: ParseOptions)
                     fields_set = fields_set + 1;
                     found_value = true;
                 }
-                if (@typeInfo(field.type) == .Optional) {
+                if (@typeInfo(field.type) == .optional) {
                     // Test "compiler assertion failure 2"
                     // Zig compiler bug circa 0.9.0. Using "and !found_value"
                     // in the if statement above will trigger assertion failure
@@ -269,7 +269,7 @@ fn parseInternal(comptime T: type, element: *xml.Element, options: ParseOptions)
                 return error.FieldElementMismatch; // see fields_seen for details
             return r;
         },
-        .Array => //|array_info| {
+        .array => //|array_info| {
             return error.ArrayNotImplemented,
         // switch (token) {
         //     .ArrayBegin => {
@@ -304,7 +304,7 @@ fn parseInternal(comptime T: type, element: *xml.Element, options: ParseOptions)
         //         else => return error.UnexpectedToken,
        //     }
        // },
-        .Pointer => |ptr_info| {
+        .pointer => |ptr_info| {
             const allocator = options.allocator orelse return error.AllocatorRequired;
             switch (ptr_info.size) {
                 .One => {