Compare commits


No commits in common. "357c583e9b7b3d77bd60a9c57ca3f0c01115cd62" and "f009bb5c370826604729886ee21f62f289444efd" have entirely different histories.

9 changed files with 57 additions and 253 deletions

View File

@ -2,7 +2,8 @@
[![Build Status](https://drone.lerch.org/api/badges/lobo/aws-sdk-for-zig/status.svg?ref=refs/heads/master)](https://drone.lerch.org/api/badges/lobo/aws-sdk-for-zig/)
This SDK currently supports all AWS services except S3. See TODO list below.
This SDK currently supports all AWS services. restXml protocol support (4
services including S3) is still new, with lots of gaps. See TODO list below.
Current executable size for the demo is 1.7M (90k of which is the AWS PEM file,
and approximately 600K for XML services) after compiling with -Drelease-safe and
@ -42,15 +43,14 @@ for posterity, and supports x86_64 linux. The old branch is deprecated.
## Limitations
There are many nuances of AWS V4 signature calculation. S3 is not supported
because it uses many of these edge cases. Also endpoint calculation is special
for S3. WebIdentityToken is not yet implemented.
There are many nuances of AWS V4 signature calculation. Parts of S3 are not supported
because it uses many of these edge cases. WebIdentityToken is not yet
implemented.
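
The signature edge cases referred to here are concrete: per the AWS SigV4 documentation, S3 canonical URIs are not path-normalized, are URI-encoded only once (most other services encode twice), and every S3 request must carry an x-amz-content-sha256 header. A minimal sketch of the kind of per-service switches a signer needs (hypothetical names, not this SDK's signing API):

const std = @import("std");

// Hypothetical signer knobs; names are illustrative and not part of this SDK.
const SigningQuirks = struct {
    normalize_uri_path: bool = true, // S3: false
    double_uri_encode: bool = true, // S3: false
    add_content_sha256_header: bool = false, // S3: required (may be UNSIGNED-PAYLOAD)
};

fn quirksFor(service: []const u8) SigningQuirks {
    if (std.mem.eql(u8, service, "s3")) return .{
        .normalize_uri_path = false,
        .double_uri_encode = false,
        .add_content_sha256_header = true,
    };
    return .{}; // defaults cover the other services
}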
TODO List:
* Implement initial S3 support. This involves:
* Implementation of AWS SigV4 signature calculation for S3, which is unique
* Implementation of S3 endpoint calculation, which is also unique to this service
* Complete development of [AWS restXml protocol](https://awslabs.github.io/smithy/1.0/spec/aws/aws-restxml-protocol.html).
Includes S3. Total service count 4.
* Bump to zig 0.9.1. iguanaTLS, used in zFetch is still [working out 0.9.1 issues](https://github.com/alexnask/iguanaTLS/pull/29)
* Implement sigv4a signing
* Implement jitter/exponential backoff

View File

@ -202,7 +202,7 @@ fn run(allocator: std.mem.Allocator, argv: []const []const u8, cwd: ?[]const u8,
try writer.print("{s}\"{s}\"", .{ prefix, arg });
prefix = " ";
}
// std.log.debug("[RUN] {s}", .{msg.items});
std.log.info("[RUN] {s}", .{msg.items});
}
const result = try std.ChildProcess.exec(.{

View File

@ -67,13 +67,11 @@ pub fn build(b: *Builder) !void {
var test_step = try tst.addTestStep(b, mode, exe.packages.items);
test_step.dependOn(&version.step);
var codegen: ?*std.build.Step = null;
if (target.getOs().tag == .linux) {
// TODO: Support > linux with RunStep
// std.build.RunStep.create(null,null).cwd(std.fs.path.resolve(b.build_root, "codegen")).addArgs(...)
codegen = b.step("gen", "Generate zig service code from smithy models");
const cg = codegen.?;
cg.dependOn(&b.addSystemCommand(&.{ "/bin/sh", "-c", "cd codegen && zig build" }).step);
const codegen = b.step("gen", "Generate zig service code from smithy models");
codegen.dependOn(&b.addSystemCommand(&.{ "/bin/sh", "-c", "cd codegen && zig build" }).step);
// This can probably be triggered instead by GitRepoStep cloning the repo
// with models
@ -81,19 +79,20 @@ pub fn build(b: *Builder) !void {
// service manifest we know it needs to be regenerated. So this step
// will remove the service manifest if codegen has been touched, thereby
// triggering the re-gen
cg.dependOn(&b.addSystemCommand(&.{
codegen.dependOn(&b.addSystemCommand(&.{
"/bin/sh", "-c",
\\ [ ! -f src/models/service_manifest.zig ] || \
\\ [ $(find codegen -type f -newer src/models/service_manifest.zig -print -quit |wc -c) = '0' ] || \
\\ [ src/models/service_manifest.zig -nt codegen/codegen ] || \
\\ rm src/models/service_manifest.zig
}).step);
cg.dependOn(&b.addSystemCommand(&.{
codegen.dependOn(&b.addSystemCommand(&.{
"/bin/sh", "-c",
\\ mkdir -p src/models/ && \
\\ [ -f src/models/service_manifest.zig ] || \
\\ ( cd codegen/models && ../codegen *.json && mv *.zig ../../src/models )
}).step);
exe.step.dependOn(cg);
b.getInstallStep().dependOn(codegen);
test_step.dependOn(codegen);
}
exe.install();
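
The two shell steps above implement a make-style freshness rule: delete src/models/service_manifest.zig whenever anything under codegen is newer than it, then run the code generator only if the manifest is missing. A minimal Zig sketch of that freshness test for a single source path (illustrative only, not part of build.zig):

const std = @import("std");

// Is src newer than artifact? A missing artifact always means "regenerate".
// This mirrors the shell -nt / find -newer checks in the codegen step above.
fn newerThan(dir: std.fs.Dir, src: []const u8, artifact: []const u8) !bool {
    const src_file = try dir.openFile(src, .{});
    defer src_file.close();
    const artifact_file = dir.openFile(artifact, .{}) catch |err| switch (err) {
        error.FileNotFound => return true,
        else => return err,
    };
    defer artifact_file.close();
    return (try src_file.stat()).mtime > (try artifact_file.stat()).mtime;
}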

View File

@ -257,10 +257,9 @@ fn constantName(allocator: std.mem.Allocator, id: []const u8) ![]const u8 {
// This one might be a bug in snake, but it's the only example so HPDL
if (std.mem.eql(u8, id, "SESv2")) return try std.fmt.allocPrint(allocator, "ses_v2", .{});
if (std.mem.eql(u8, id, "CloudFront")) return try std.fmt.allocPrint(allocator, "cloudfront", .{});
// IoT is an acronym, but snake wouldn't know that. Interestingly, not all
// IoT services capitalize it that way.
if (std.mem.eql(u8, id, "IoTSiteWise")) return try std.fmt.allocPrint(allocator, "iot_sitewise", .{});
if (std.mem.eql(u8, id, "IoTSiteWise")) return try std.fmt.allocPrint(allocator, "iot_site_wise", .{}); //sitewise?
if (std.mem.eql(u8, id, "IoTFleetHub")) return try std.fmt.allocPrint(allocator, "iot_fleet_hub", .{});
if (std.mem.eql(u8, id, "IoTSecureTunneling")) return try std.fmt.allocPrint(allocator, "iot_secure_tunneling", .{});
if (std.mem.eql(u8, id, "IoTThingsGraph")) return try std.fmt.allocPrint(allocator, "iot_things_graph", .{});
@ -580,7 +579,6 @@ fn generateComplexTypeFor(shape_id: []const u8, members: []smithy.TypeMember, ty
_ = try writer.write(" {\n");
var child_state = state;
child_state.indent_level += 1;
var payload: ?[]const u8 = null;
for (members) |member| {
// This is our mapping
const snake_case_member = try snake.fromPascalCase(state.allocator, member.name);
@ -601,14 +599,6 @@ fn generateComplexTypeFor(shape_id: []const u8, members: []smithy.TypeMember, ty
},
.http_query => http_query_mappings.appendAssumeCapacity(.{ .snake = try state.allocator.dupe(u8, snake_case_member), .original = trait.http_query }),
.http_header => http_header_mappings.appendAssumeCapacity(.{ .snake = try state.allocator.dupe(u8, snake_case_member), .original = trait.http_header }),
.http_payload => {
// Don't assert as that will be optimized for Release* builds
// We'll continue here and treat the above as a warning
if (payload) |first| {
std.log.err("Found multiple httpPayloads in violation of smithy spec! Ignoring '{s}' and using '{s}'", .{ first, snake_case_member });
}
payload = try state.allocator.dupe(u8, snake_case_member);
},
else => {},
}
}
@ -649,12 +639,6 @@ fn generateComplexTypeFor(shape_id: []const u8, members: []smithy.TypeMember, ty
// return @field(mappings, field_name);
// }
//
if (payload) |load| {
try writer.writeByte('\n');
try outputIndent(child_state, writer);
try writer.print("pub const http_payload: []const u8 = \"{s}\";", .{load});
}
try writer.writeByte('\n');
try outputIndent(child_state, writer);
_ = try writer.write("pub fn fieldNameFor(_: @This(), comptime field_name: []const u8) []const u8 {\n");

View File

@ -95,7 +95,6 @@ pub const TraitType = enum {
http_header,
http_label,
http_query,
http_payload,
json_name,
xml_name,
required,
@ -129,7 +128,6 @@ pub const Trait = union(TraitType) {
http_header: []const u8,
http_label: []const u8,
http_query: []const u8,
http_payload: struct {},
required: struct {},
documentation: []const u8,
pattern: []const u8,
@ -575,8 +573,6 @@ fn getTrait(trait_type: []const u8, value: std.json.Value) SmithyParseError!?Tra
return Trait{ .http_query = value.String };
if (std.mem.eql(u8, trait_type, "smithy.api#httpHeader"))
return Trait{ .http_header = value.String };
if (std.mem.eql(u8, trait_type, "smithy.api#httpPayload"))
return Trait{ .http_payload = .{} };
// TODO: Maybe care about these traits?
if (std.mem.eql(u8, trait_type, "smithy.api#title"))
@ -604,6 +600,7 @@ fn getTrait(trait_type: []const u8, value: std.json.Value) SmithyParseError!?Tra
\\smithy.api#httpError
\\smithy.api#httpChecksumRequired
\\smithy.api#httpLabel
\\smithy.api#httpPayload
\\smithy.api#httpPrefixHeaders
\\smithy.api#httpQueryParams
\\smithy.api#httpResponseCode

View File

@ -268,13 +268,6 @@ pub fn Request(comptime action: anytype) type {
else
ServerResponse(action);
const NullType: type = u0; // This is a small hack, yes...
const SRawResponse = if (Self.service_meta.aws_protocol != .query and
std.meta.fields(SResponse).len == 1)
std.meta.fields(SResponse)[0].field_type
else
NullType;
const parser_options = json.ParseOptions{
.allocator = options.client.allocator,
.allow_camel_case_conversion = true, // new option
@ -295,37 +288,7 @@ pub fn Request(comptime action: anytype) type {
var stream = json.TokenStream.init(response.body);
const start = std.mem.indexOf(u8, response.body, "\"") orelse 0; // Should never be 0
if (start == 0) log.warn("Response body missing json key?!", .{});
var end = std.mem.indexOf(u8, response.body[start + 1 ..], "\"") orelse 0;
if (end == 0) log.warn("Response body only has one double quote?!", .{});
end = end + start + 1;
const key = response.body[start + 1 .. end];
log.debug("First json key: {s}", .{key});
const foundNormalJsonResponse = std.mem.eql(u8, key, action.action_name ++ "Response");
const parsed_response_ptr = blk: {
if (SRawResponse == NullType or foundNormalJsonResponse)
break :blk &(json.parse(SResponse, &stream, parser_options) catch |e| {
log.err(
\\Call successful, but unexpected response from service.
\\This could be the result of a bug or a stale set of code generated
\\service models.
\\
\\Model Type: {s}
\\
\\Response from server:
\\
\\{s}
\\
, .{ SResponse, response.body });
return e;
});
log.debug("Appears server has provided a raw response", .{});
const ptr = try options.client.allocator.create(SResponse);
@field(ptr.*, std.meta.fields(SResponse)[0].name) =
json.parse(SRawResponse, &stream, parser_options) catch |e| {
const parsed_response = json.parse(SResponse, &stream, parser_options) catch |e| {
log.err(
\\Call successful, but unexpected response from service.
\\This could be the result of a bug or a stale set of code generated
@ -340,15 +303,6 @@ pub fn Request(comptime action: anytype) type {
, .{ SResponse, response.body });
return e;
};
break :blk ptr;
};
// This feels like it should result in a use after free, but it
// seems to be working?
defer if (!(SRawResponse == NullType or foundNormalJsonResponse))
options.client.allocator.destroy(parsed_response_ptr);
const parsed_response = parsed_response_ptr.*;
// TODO: Figure out this hack
// the code setting the response about 10 lines down will trigger
@ -418,16 +372,7 @@ pub fn Request(comptime action: anytype) type {
//
// Big thing is that requestid, which we'll need to fetch "manually"
const xml_options = xml_shaper.ParseOptions{ .allocator = options.client.allocator };
var body: []const u8 = result.body;
var free_body = false;
if (std.mem.lastIndexOf(u8, result.body[result.body.len - 20 ..], "Response>") == null) {
free_body = true;
// chop the "<?xml version="1.0"?>" from the front
const start = if (std.mem.indexOf(u8, result.body, "?>")) |i| i else 0;
body = try std.fmt.allocPrint(options.client.allocator, "<ActionResponse>{s}</ActionResponse>", .{body[start..]});
}
defer if (free_body) options.client.allocator.free(body);
const parsed = try xml_shaper.parse(action.Response, body, xml_options);
const parsed = try xml_shaper.parse(action.Response, result.body, xml_options);
errdefer parsed.deinit();
var free_rid = false;
// This needs to get into FullResponseType somehow: defer parsed.deinit();
@ -445,13 +390,10 @@ pub fn Request(comptime action: anytype) type {
// so we'll use that
var host_id: ?[]const u8 = null;
for (result.headers) |header| {
if (std.ascii.eqlIgnoreCase(header.name, "x-amzn-requestid")) { // CloudFront
if (std.ascii.eqlIgnoreCase(header.name, "x-amz-request-id")) {
rid = header.value;
}
if (std.ascii.eqlIgnoreCase(header.name, "x-amz-request-id")) { // S3
rid = header.value;
}
if (std.ascii.eqlIgnoreCase(header.name, "x-amz-id-2")) { // S3
if (std.ascii.eqlIgnoreCase(header.name, "x-amz-id-2")) {
host_id = header.value;
}
}
@ -585,23 +527,19 @@ fn buildPath(allocator: std.mem.Allocator, raw_uri: []const u8, comptime ActionR
var buffer = try std.ArrayList(u8).initCapacity(allocator, raw_uri.len);
// const writer = buffer.writer();
defer buffer.deinit();
var in_label = false;
var in_var = false;
var start: usize = 0;
for (raw_uri) |c, inx| {
switch (c) {
'{' => {
in_label = true;
in_var = true;
start = inx + 1;
},
'}' => {
in_label = false;
// The label may be "greedy" (uses a '+' at the end), but
// it's not clear if that affects this processing
var end = inx;
if (raw_uri[inx - 1] == '+') end -= 1;
const replacement_label = raw_uri[start..end];
in_var = false;
const replacement_var = raw_uri[start..inx];
inline for (std.meta.fields(ActionRequest)) |field| {
if (std.mem.eql(u8, request.fieldNameFor(field.name), replacement_label)) {
if (std.mem.eql(u8, request.fieldNameFor(field.name), replacement_var)) {
var replacement_buffer = try std.ArrayList(u8).initCapacity(allocator, raw_uri.len);
defer replacement_buffer.deinit();
var encoded_buffer = try std.ArrayList(u8).initCapacity(allocator, raw_uri.len);
@ -619,7 +557,7 @@ fn buildPath(allocator: std.mem.Allocator, raw_uri: []const u8, comptime ActionR
}
}
},
else => if (!in_label) {
else => if (!in_var) {
try buffer.append(c);
} else {},
}
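
buildPath replaces {Name} segments (optionally greedy, {Name+}) in the raw URI with URI-encoded request field values resolved through fieldNameFor. A standalone sketch of just the brace scanning against a plain string map, with no encoding or comptime lookup (illustrative only):

const std = @import("std");

// Replace "{var}" segments in a raw URI with values from a string map.
// The real buildPath resolves names at comptime via request.fieldNameFor()
// and URI-encodes the value; this only demonstrates the scanning.
fn substituteVars(allocator: std.mem.Allocator, raw_uri: []const u8, vars: std.StringHashMap([]const u8)) ![]const u8 {
    var buffer = try std.ArrayList(u8).initCapacity(allocator, raw_uri.len);
    defer buffer.deinit();
    var in_var = false;
    var start: usize = 0;
    for (raw_uri) |c, inx| {
        switch (c) {
            '{' => {
                in_var = true;
                start = inx + 1;
            },
            '}' => {
                in_var = false;
                var end = inx;
                if (raw_uri[inx - 1] == '+') end -= 1; // greedy label, e.g. "{Key+}"
                const name = raw_uri[start..end];
                // Unknown names are simply dropped in this sketch
                if (vars.get(name)) |value| try buffer.appendSlice(value);
            },
            else => if (!in_var) {
                try buffer.append(c);
            } else {},
        }
    }
    return buffer.toOwnedSlice();
}

test "substituteVars replaces a brace variable" {
    const allocator = std.testing.allocator;
    var vars = std.StringHashMap([]const u8).init(allocator);
    defer vars.deinit();
    try vars.put("Id", "ABC123");
    const path = try substituteVars(allocator, "/2020-05-31/key-group/{Id}", vars);
    defer allocator.free(path);
    try std.testing.expectEqualStrings("/2020-05-31/key-group/ABC123", path);
}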

View File

@ -60,7 +60,6 @@ const EndPoint = struct {
fn deinit(self: EndPoint) void {
self.allocator.free(self.uri);
self.allocator.free(self.host);
}
};
pub const AwsHttp = struct {
@ -95,31 +94,14 @@ pub const AwsHttp = struct {
/// service called, and will set up the signing options. The return
/// value is simply a raw HttpResult
pub fn callApi(self: Self, service: []const u8, request: HttpRequest, options: Options) !HttpResult {
// This function or regionSubDomain needs altering for virtual host
// addressing (for S3). Botocore, and I suspect other SDKs, have
// hardcoded exceptions for S3:
// https://github.com/boto/botocore/blob/f2b0dbb800b8dc2a3541334d5ca1190faf900150/botocore/utils.py#L2160-L2181
// Boto assumes virtual host addressing unless the endpoint url is configured
//
// NOTE: There are 4 rest_xml services. They are:
// * CloudFront
// * Route53
// * S3
// * S3 control
//
// All 4 are non-standard. Route53 and CloudFront are global endpoints
// S3 uses virtual host addressing (except when it doesn't), and
// S3 control uses <account-id>.s3-control.<region>.amazonaws.com
//
// So this regionSubDomain call needs to handle generic customization
const endpoint = try endpointForRequest(self.allocator, service, options.region, options.dualstack);
const endpoint = try regionSubDomain(self.allocator, service, options.region, options.dualstack);
defer endpoint.deinit();
log.debug("Calling endpoint {s}", .{endpoint.uri});
// TODO: Should we allow customization here?
const creds = try credentials.getCredentials(self.allocator, .{});
defer creds.deinit();
const signing_config: signing.Config = .{
.region = getRegion(service, options.region),
.region = options.region,
.service = options.sigv4_service_name orelse service,
.credentials = creds,
};
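
The virtual host addressing mentioned in the comment above means the bucket name moves from the path into the hostname; Boto defaults to that form unless an endpoint URL is configured. A minimal sketch of the two URL shapes (hypothetical helper, not this SDK's endpoint code):

const std = @import("std");

// Path-style vs virtual-host-style S3 URLs for the same bucket and region.
fn s3Url(allocator: std.mem.Allocator, bucket: []const u8, region: []const u8, virtual_host: bool) ![]const u8 {
    if (virtual_host)
        return std.fmt.allocPrint(allocator, "https://{s}.s3.{s}.amazonaws.com/", .{ bucket, region });
    return std.fmt.allocPrint(allocator, "https://s3.{s}.amazonaws.com/{s}", .{ region, bucket });
}

test "virtual host addressing moves the bucket into the hostname" {
    const allocator = std.testing.allocator;
    const url = try s3Url(allocator, "my-bucket", "us-west-2", true);
    defer allocator.free(url);
    try std.testing.expectEqualStrings("https://my-bucket.s3.us-west-2.amazonaws.com/", url);
}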
@ -179,11 +161,6 @@ pub const AwsHttp = struct {
const url = try std.fmt.allocPrint(self.allocator, "{s}{s}{s}", .{ endpoint.uri, request.path, request.query });
defer self.allocator.free(url);
log.debug("Request url: {s}", .{url});
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
// PLEASE READ!! IF YOU ARE LOOKING AT THIS LINE OF CODE DUE TO A
// SEGFAULT IN INIT, IT IS PROBABLY BECAUSE THE HOST DOES NOT EXIST
// https://github.com/ziglang/zig/issues/11358
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
var req = try zfetch.Request.init(self.allocator, url, self.trust_chain);
defer req.deinit();
@ -191,7 +168,7 @@ pub const AwsHttp = struct {
try req.do(method, headers, if (request_cp.body.len == 0) null else request_cp.body);
// TODO: Timeout - is this now above us?
log.debug("Request Complete. Response code {d}: {s}", .{ req.status.code, req.status.reason });
log.debug("request_complete. Response code {d}: {s}", .{ req.status.code, req.status.reason });
log.debug("Response headers:", .{});
var resp_headers = try std.ArrayList(Header).initCapacity(self.allocator, req.headers.list.items.len);
defer resp_headers.deinit();
@ -227,11 +204,6 @@ pub const AwsHttp = struct {
}
};
fn getRegion(service: []const u8, region: []const u8) []const u8 {
if (std.mem.eql(u8, service, "cloudfront")) return "us-east-1";
return region;
}
fn addHeaders(allocator: std.mem.Allocator, headers: *std.ArrayList(base.Header), host: []const u8, body: []const u8, content_type: []const u8, additional_headers: []Header) !?[]const u8 {
try headers.append(.{ .name = "Accept", .value = "application/json" });
try headers.append(.{ .name = "Host", .value = host });
@ -253,24 +225,15 @@ fn getEnvironmentVariable(allocator: std.mem.Allocator, key: []const u8) !?[]con
};
}
fn endpointForRequest(allocator: std.mem.Allocator, service: []const u8, region: []const u8, use_dual_stack: bool) !EndPoint {
fn regionSubDomain(allocator: std.mem.Allocator, service: []const u8, region: []const u8, useDualStack: bool) !EndPoint {
const environment_override = try getEnvironmentVariable(allocator, "AWS_ENDPOINT_URL");
if (environment_override) |override| {
const uri = try allocator.dupeZ(u8, override);
return endPointFromUri(allocator, uri);
}
if (std.mem.eql(u8, service, "cloudfront")) {
return EndPoint{
.uri = try allocator.dupe(u8, "https://cloudfront.amazonaws.com"),
.host = try allocator.dupe(u8, "cloudfront.amazonaws.com"),
.scheme = "https",
.port = 443,
.allocator = allocator,
};
}
// Fallback to us-east-1 if global endpoint does not exist.
const realregion = if (std.mem.eql(u8, region, "aws-global")) "us-east-1" else region;
const dualstack = if (use_dual_stack) ".dualstack" else "";
const dualstack = if (useDualStack) ".dualstack" else "";
const domain = switch (std.hash_map.hashString(region)) {
US_ISO_EAST_1_HASH => "c2s.ic.gov",
@ -280,7 +243,7 @@ fn endpointForRequest(allocator: std.mem.Allocator, service: []const u8, region:
};
const uri = try std.fmt.allocPrintZ(allocator, "https://{s}{s}.{s}.{s}", .{ service, dualstack, realregion, domain });
const host = try allocator.dupe(u8, uri["https://".len..]);
const host = uri["https://".len..];
log.debug("host: {s}, scheme: {s}, port: {}", .{ host, "https", 443 });
return EndPoint{
.uri = uri,
@ -328,7 +291,7 @@ fn endPointFromUri(allocator: std.mem.Allocator, uri: []const u8) !EndPoint {
if (host_end == 0) {
host_end = uri.len;
}
host = try allocator.dupe(u8, uri[host_start..host_end]);
host = uri[host_start..host_end];
log.debug("host: {s}, scheme: {s}, port: {}", .{ host, scheme, port });
return EndPoint{
@ -339,25 +302,3 @@ fn endPointFromUri(allocator: std.mem.Allocator, uri: []const u8) !EndPoint {
.port = port,
};
}
test "endpointForRequest standard operation" {
const allocator = std.testing.allocator;
const service = "dynamodb";
const region = "us-west-2";
const use_dual_stack = false;
const endpoint = try endpointForRequest(allocator, service, region, use_dual_stack);
defer endpoint.deinit();
try std.testing.expectEqualStrings("https://dynamodb.us-west-2.amazonaws.com", endpoint.uri);
}
test "endpointForRequest for cloudfront" {
const allocator = std.testing.allocator;
const service = "cloudfront";
const region = "us-west-2";
const use_dual_stack = false;
const endpoint = try endpointForRequest(allocator, service, region, use_dual_stack);
defer endpoint.deinit();
try std.testing.expectEqualStrings("https://cloudfront.amazonaws.com", endpoint.uri);
}

View File

@ -49,7 +49,6 @@ const Tests = enum {
rest_json_1_query_with_input,
rest_json_1_work_with_lambda,
rest_xml_no_input,
rest_xml_anything_but_s3,
};
pub fn main() anyerror!void {
@ -90,7 +89,7 @@ pub fn main() anyerror!void {
};
defer client.deinit();
const services = aws.Services(.{ .sts, .ec2, .dynamo_db, .ecs, .lambda, .sqs, .s3, .cloudfront }){};
const services = aws.Services(.{ .sts, .ec2, .dynamo_db, .ecs, .lambda, .sqs, .s3 }){};
for (tests.items) |t| {
std.log.info("===== Start Test: {s} =====", .{@tagName(t)});
@ -222,14 +221,6 @@ pub fn main() anyerror!void {
std.log.info("request id: {s}", .{result.response_metadata.request_id});
std.log.info("bucket count: {d}", .{result.response.buckets.?.len});
},
.rest_xml_anything_but_s3 => {
const result = try client.call(services.cloudfront.list_key_groups.Request{}, options);
defer result.deinit();
std.log.info("request id: {s}", .{result.response_metadata.request_id});
const list = result.response.key_group_list.?;
std.log.info("key group list max: {d}", .{list.max_items});
std.log.info("key group quantity: {d}", .{list.quantity});
},
}
std.log.info("===== End Test: {s} =====\n", .{@tagName(t)});
}

View File

@ -145,8 +145,7 @@ fn parseInternal(comptime T: type, element: *xml.Element, options: ParseOptions)
if (element.children.items.len == 0) {
// This is almost certainly incomplete. Empty strings? xsi:nil?
return null;
}
if (element.children.items.len > 0) {
} else {
// return try parseInternal(optional_info.child, element.elements().next().?, options);
return try parseInternal(optional_info.child, element, options);
}
@ -181,8 +180,9 @@ fn parseInternal(comptime T: type, element: *xml.Element, options: ParseOptions)
// }
// }
return error.NoUnionMembersMatched;
}
} else {
@compileError("Unable to parse into untagged union '" ++ @typeName(T) ++ "'");
}
},
.Struct => |struct_info| {
var r: T = undefined;
@ -239,17 +239,12 @@ fn parseInternal(comptime T: type, element: *xml.Element, options: ParseOptions)
fields_set = fields_set + 1;
found_value = true;
}
if (@typeInfo(field.field_type) == .Optional) {
// Test "compiler assertion failure 2"
// Zig compiler bug circa 0.9.0. Using "and !found_value"
// in the if statement above will trigger assertion failure
if (!found_value) {
if (@typeInfo(field.field_type) == .Optional and !found_value) {
// @compileLog("Optional: Field name ", field.name, ", type ", field.field_type);
@field(r, field.name) = null;
fields_set = fields_set + 1;
found_value = true;
}
}
// Using this else clause breaks zig, so we'll use a boolean instead
if (!found_value) {
log.err("Could not find a value for field {s}. Looking for {s} in element {s}", .{ field.name, name, element.tag });
@ -479,8 +474,7 @@ test "can coerce 8601 date to integer" {
defer parsed_data.deinit();
try testing.expectEqual(@as(i64, 1633451985), parsed_data.parsed_value.foo_bar.?);
}
// This is the simplest test so far that breaks zig (circa 0.9.0)
// See "Using this else clause breaks zig, so we'll use a boolean instead"
// This is the simplest test so far that breaks zig
test "can parse a boolean type (two fields)" {
const allocator = std.testing.allocator;
const data =
@ -648,43 +642,3 @@ test "can parse something serious" {
try testing.expectEqualStrings("eu-north-1", parsed_data.parsed_value.regions.?[0].region_name.?);
try testing.expectEqualStrings("ec2.eu-north-1.amazonaws.com", parsed_data.parsed_value.regions.?[0].endpoint.?);
}
test "compiler assertion failure 2" {
// std.testing.log_level = .debug;
// log.debug("", .{});
// Actually, we only care here that the code compiles
const allocator = std.testing.allocator;
const Response: type = struct {
key_group_list: ?struct {
quantity: i64, // Making this optional will make the code compile
items: ?[]struct {
key_group: []const u8,
} = null,
pub fn fieldNameFor(_: @This(), comptime field_name: []const u8) []const u8 {
const mappings = .{
.quantity = "Quantity",
.items = "Items",
};
return @field(mappings, field_name);
}
} = null,
pub fn fieldNameFor(_: @This(), comptime field_name: []const u8) []const u8 {
const mappings = .{
.key_group_list = "KeyGroupList",
};
return @field(mappings, field_name);
}
};
const data =
\\<?xml version="1.0" encoding="UTF-8"?>
\\<AnythingAtAll xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
\\ <KeyGroupList>
\\ <Quantity>42</Quantity>
\\ </KeyGroupList>
\\</AnythingAtAll>
;
const parsed_data = try parse(Response, data, .{ .allocator = allocator });
defer parsed_data.deinit();
try testing.expect(parsed_data.parsed_value.key_group_list.?.quantity == 42);
}