Compare commits


No commits in common. "8e4dafbdee0e12e7aeddba0e1a4bbb7e9e35b877" and "feedbabba5d672f5160a570fd6259dd6dd76e871" have entirely different histories.

6 changed files with 43 additions and 209 deletions


@@ -100,7 +100,7 @@ TODO List:
* Move to compiler on tagged release (hopefully 0.8.1)
(new 2021-05-29. I will proceed in this order unless I get other requests)
* ✓ Implement [AWS query protocol](https://awslabs.github.io/smithy/1.0/spec/aws/aws-query-protocol.html). This is the protocol in use by sts.getcalleridentity. Total service count 18
* Implement [AWS Json 1.0 protocol](https://awslabs.github.io/smithy/1.0/spec/aws/aws-json-1_0-protocol.html). Includes dynamodb. Total service count 18
* Implement [AWS Json 1.1 protocol](https://awslabs.github.io/smithy/1.0/spec/aws/aws-json-1_1-protocol.html). Includes ecs. Total service count 105
* Implement [AWS restXml protocol](https://awslabs.github.io/smithy/1.0/spec/aws/aws-restxml-protocol.html). Includes S3. Total service count 4. This may be blocked due to the same issue as EC2.
* Implement [AWS EC2 query protocol](https://awslabs.github.io/smithy/1.0/spec/aws/aws-ec2-query-protocol.html). Includes EC2. Total service count 1. This is currently blocked, probably on self-hosted compiler coming in zig 0.9.0 (January 2022) due to compiler bug discovered. More details and llvm ir log can be found in the [XML branch](https://git.lerch.org/lobo/aws-sdk-for-zig/src/branch/xml).
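As a point of reference for the list above, a consumer call over the already-implemented query protocol looks roughly like the sketch below, pieced together from the API shapes in the test driver at the end of this diff. The get_caller_identity operation name, the import path, and the Options literal are assumptions; only sts.getcalleridentity is named above.

```zig
const std = @import("std");
const aws = @import("aws.zig"); // import path is an assumption

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();

    var client = aws.Aws.init(&gpa.allocator);
    defer client.deinit();

    // Comptime service selection, mirroring the test driver below.
    const services = aws.Services(.{.sts}){};

    // GetCallerIdentity rides the AWS query protocol (checked off above).
    // The snake_case operation name is assumed from the list_tables /
    // describe_instances names used later in this diff, and the anonymous
    // Options literal assumes the remaining Options fields have defaults.
    const resp = try client.call(services.sts.get_caller_identity.Request{}, .{ .region = "us-west-2" });
    defer resp.deinit();

    std.log.info("request id: {s}", .{resp.response_metadata.request_id});
}
```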


@@ -176,7 +176,7 @@ fn generateOperation(allocator: *std.mem.Allocator, operation: smithy.ShapeInfo,
_ = try writer.write(",\n");
_ = try writer.write(" Response: type = ");
if (operation.shape.operation.output) |member| {
- try generateTypeFor(allocator, member, shapes, writer, " ", false, &type_stack, true);
+ try generateTypeFor(allocator, member, shapes, writer, " ", true, &type_stack, true);
} else _ = try writer.write("struct {}"); // we want to maintain consistency with other ops
_ = try writer.write(",\n");


@@ -56,72 +56,25 @@ pub const Aws = struct {
// It seems as though there are 3 major branches of the 6 protocols.
// 1. query/ec2_query, which are identical until you get to complex
- // structures. EC2 query does not allow us to request json though,
- // so we need to handle xml returns from this.
+ // structures. TBD if the shortcut we're taking for query to make
+ // it return json will work on EC2, but my guess is yes.
// 2. *json*: These three appear identical for input (possible difference
// for empty body serialization), but differ in error handling.
// We're not doing a lot of error handling here, though.
// 3. rest_xml: This is a one-off for S3, never used since
switch (service_meta.aws_protocol) {
- .query => return self.callQuery(request, service_meta, action, options),
- // .query, .ec2_query => return self.callQuery(request, service_meta, action, options),
- .rest_json_1, .json_1_0, .json_1_1 => return self.callJson(request, service_meta, action, options),
- .ec2_query, .rest_xml => @compileError("XML responses may be blocked on a zig compiler bug scheduled to be fixed in 0.9.0"),
+ .query, .ec2_query => return self.callQuery(request, service_meta, action, options),
+ .rest_json_1, .json_1_0, .json_1_1 => @compileError("REST Json, Json 1.0/1.1 protocol not yet supported"),
+ .rest_xml => @compileError("REST XML protocol not yet supported"),
}
}
- /// Calls using one of the json protocols (rest_json_1, json_1_0, json_1_1
- fn callJson(self: Self, comptime request: anytype, comptime service_meta: anytype, action: anytype, options: Options) !FullResponse(request) {
- // Target might be a problem. The smithy docs differ fairly significantly
- // from the REST API examples. Here I'm following the REST API examples
- // as they have not yet led me astray. Whether they're consistent
- // across other services is another matter...
- var version = try self.allocator.alloc(u8, service_meta.version.len);
- defer self.allocator.free(version);
- const replacements = std.mem.replace(u8, service_meta.version, "-", "", version);
- // Resize the version, otherwise the junk at the end will mess with allocPrint
- version = try self.allocator.resize(version, version.len - replacements);
- const target =
- try std.fmt.allocPrint(self.allocator, "{s}_{s}.{s}", .{
- service_meta.sdk_id,
- version,
- action.action_name,
- });
- defer self.allocator.free(target);
- var buffer = std.ArrayList(u8).init(self.allocator);
- defer buffer.deinit();
- // The transformer needs to allocate stuff out of band, but we
- // can guarantee we don't need the memory after this call completes,
- // so we'll use an arena allocator to whack everything.
- // TODO: Determine if sending in null values is ok, or if we need another
- // tweak to the stringify function to exclude
- var nameAllocator = std.heap.ArenaAllocator.init(self.allocator);
- defer nameAllocator.deinit();
- try json.stringify(request, .{ .whitespace = .{}, .allocator = &nameAllocator.allocator, .nameTransform = pascalTransformer }, buffer.writer());
- var content_type: []const u8 = undefined;
- switch (service_meta.aws_protocol) {
- .rest_json_1 => content_type = "application/json",
- .json_1_0 => content_type = "application/x-amz-json-1.0",
- .json_1_1 => content_type = "application/x-amz-json-1.1",
- else => unreachable,
- }
- return try self.callAws(request, service_meta, .{
- .query = "",
- .body = buffer.items,
- .content_type = content_type,
- .headers = &[_]awshttp.Header{.{ .name = "X-Amz-Target", .value = target }},
- }, options);
- }
// Call using query protocol. This is documented as an XML protocol, but
// throwing a JSON accept header seems to work. EC2Query is very simliar to
// Query, so we'll handle both here. Realistically we probably don't effectively
// handle lists and maps properly anyway yet, so we'll go for it and see
// where it breaks. PRs and/or failing test cases appreciated.
- fn callQuery(self: Self, comptime request: anytype, comptime service_meta: anytype, action: anytype, options: Options) !FullResponse(request) {
+ fn callQuery(self: Self, comptime request: anytype, service_meta: anytype, action: anytype, options: Options) !FullResponse(request) {
var buffer = std.ArrayList(u8).init(self.allocator);
defer buffer.deinit();
const writer = buffer.writer();
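The callJson removed above derives the X-Amz-Target header by stripping dashes from the service model version and joining it with the SDK id and the action name. Pulled out on its own, that transformation looks roughly like this (the DynamoDB values in the comment are illustrative, not taken from this diff):

```zig
const std = @import("std");

// Mirrors the target construction in the removed callJson:
// "{sdk_id}_{version with dashes removed}.{action_name}"
// e.g. sdk_id "DynamoDB", version "2012-08-10", action "ListTables"
// would produce "DynamoDB_20120810.ListTables".
fn jsonTarget(allocator: *std.mem.Allocator, sdk_id: []const u8, version: []const u8, action_name: []const u8) ![]u8 {
    const clean = try allocator.alloc(u8, version.len);
    defer allocator.free(clean);
    // Each "-" is replaced with nothing, so the useful output shrinks by
    // one byte per replacement.
    const removed = std.mem.replace(u8, version, "-", "", clean);
    return std.fmt.allocPrint(allocator, "{s}_{s}.{s}", .{
        sdk_id,
        clean[0 .. version.len - removed],
        action_name,
    });
}
```

The original resizes the version allocation in place; slicing to the shortened length, as above, is the same idea without the resize call.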
@@ -150,28 +103,31 @@ pub const Aws = struct {
else // EC2
try std.fmt.allocPrint(self.allocator, "{s}", .{buffer.items});
defer self.allocator.free(body);
- return try self.callAws(request, service_meta, .{
- .query = query,
- .body = body,
- .content_type = "application/x-www-form-urlencoded",
- }, options);
- }
- fn callAws(self: Self, comptime request: anytype, comptime service_meta: anytype, aws_request: awshttp.HttpRequest, options: Options) !FullResponse(request) {
const FullR = FullResponse(request);
const response = try self.aws_http.callApi(
service_meta.endpoint_prefix,
- aws_request,
+ .{
+ .body = body,
+ .query = query,
+ },
.{
.region = options.region,
.dualstack = options.dualstack,
.sigv4_service_name = service_meta.sigv4_name,
},
);
- // TODO: Can response handling be reused?
defer response.deinit();
- // try self.reportTraffic("", aws_request, response, log.debug);
if (response.response_code != 200) {
- try self.reportTraffic("Call Failed", aws_request, response, log.err);
+ log.err("call failed! return status: {d}", .{response.response_code});
+ log.err("Request Query:\n |{s}\n", .{query});
+ log.err("Request Body:\n |{s}\n", .{body});
+ log.err("Response Headers:\n", .{});
+ for (response.headers) |h|
+ log.err("\t{s}:{s}\n", .{ h.name, h.value });
+ log.err("Response Body:\n |{s}", .{response.body});
return error.HttpFailure;
}
// EC2 ignores our accept type, but technically query protocol only
@@ -182,10 +138,6 @@ pub const Aws = struct {
if (std.mem.eql(u8, "Content-Type", h.name)) {
if (std.mem.startsWith(u8, h.value, "application/json")) {
isJson = true;
- } else if (std.mem.startsWith(u8, h.value, "application/x-amz-json-1.0")) {
- isJson = true;
- } else if (std.mem.startsWith(u8, h.value, "application/x-amz-json-1.1")) {
- isJson = true;
} else if (std.mem.startsWith(u8, h.value, "text/xml")) {
isJson = false;
} else {
@@ -208,53 +160,19 @@ pub const Aws = struct {
.allow_unknown_fields = true, // new option. Cannot yet handle non-struct fields though
.allow_missing_fields = false, // new option. Cannot yet handle non-struct fields though
};
- // const SResponse = ServerResponse(request);
- const SResponse = if (service_meta.aws_protocol != .query and service_meta.aws_protocol != .ec2_query)
- Response(request)
- else
- ServerResponse(request);
+ const SResponse = ServerResponse(request);
const parsed_response = json.parse(SResponse, &stream, parser_options) catch |e| {
log.err(
\\Call successful, but unexpected response from service.
\\This could be the result of a bug or a stale set of code generated
- \\service models.
- \\
- \\Model Type: {s}
- \\
- \\Response from server:
+ \\service models. Response from server:
\\
\\{s}
\\
- , .{ SResponse, response.body });
+ , .{response.body});
return e;
};
- if (service_meta.aws_protocol != .query and service_meta.aws_protocol != .ec2_query) {
- var request_id: []u8 = undefined;
- var found = false;
- for (response.headers) |h| {
- if (std.ascii.eqlIgnoreCase(h.name, "X-Amzn-RequestId")) {
- found = true;
- request_id = try std.fmt.allocPrint(self.allocator, "{s}", .{h.value}); // will be freed in FullR.deinit()
- }
- }
- if (!found) {
- try self.reportTraffic("Request ID not found", aws_request, response, log.err);
- return error.RequestIdNotFound;
- }
- return FullR{
- .response = parsed_response,
- .response_metadata = .{
- .request_id = request_id,
- },
- .parser_options = parser_options,
- .raw_parsed = .{ .raw = parsed_response },
- };
- }
// Grab the first (and only) object from the server. Server shape expected to be:
// { ActionResponse: {ActionResult: {...}, ResponseMetadata: {...} } }
// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
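To make the comment above concrete, a query-protocol response (when the service honours the JSON accept header) wraps the action result and request metadata roughly as below. The STS field names and values are illustrative assumptions; only ResponseMetadata.RequestId is confirmed by the code in this diff.

```zig
// Rough shape of what the server returns and what ServerResponse(request)
// is generated to parse; all values are placeholders.
const example_body =
    \\{
    \\  "GetCallerIdentityResponse": {
    \\    "GetCallerIdentityResult": {
    \\      "Account": "123456789012",
    \\      "Arn": "arn:aws:iam::123456789012:user/example",
    \\      "UserId": "AIDAEXAMPLE"
    \\    },
    \\    "ResponseMetadata": {
    \\      "RequestId": "00000000-0000-0000-0000-000000000000"
    \\    }
    \\  }
    \\}
;
```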
@@ -271,36 +189,9 @@ pub const Aws = struct {
.request_id = real_response.ResponseMetadata.RequestId,
},
.parser_options = parser_options,
- .raw_parsed = .{ .server = parsed_response },
+ .raw_parsed = parsed_response,
};
}
- fn reportTraffic(self: Self, info: []const u8, request: awshttp.HttpRequest, response: awshttp.HttpResult, comptime reporter: fn (comptime []const u8, anytype) void) !void {
- var msg = std.ArrayList(u8).init(self.allocator);
- defer msg.deinit();
- const writer = msg.writer();
- try writer.print("{s}\n\n", .{info});
- try writer.print("Return status: {d}\n\n", .{response.response_code});
- if (request.query.len > 0) try writer.print("Request Query:\n \t{s}\n", .{request.query});
- _ = try writer.write("Unique Request Headers:\n");
- if (request.headers.len > 0) {
- for (request.headers) |h|
- try writer.print("\t{s}: {s}\n", .{ h.name, h.value });
- }
- try writer.print("\tContent-Type: {s}\n\n", .{request.content_type});
- _ = try writer.write("Request Body:\n");
- try writer.print("-------------\n{s}\n", .{request.body});
- _ = try writer.write("-------------\n");
- _ = try writer.write("Response Headers:\n");
- for (response.headers) |h|
- try writer.print("\t{s}: {s}\n", .{ h.name, h.value });
- _ = try writer.write("Response Body:\n");
- try writer.print("--------------\n{s}\n", .{response.body});
- _ = try writer.write("--------------\n");
- reporter("{s}\n", .{msg.items});
- }
};
fn ServerResponse(comptime request: anytype) type {
@@ -359,20 +250,11 @@ fn FullResponse(comptime request: anytype) type {
request_id: []u8,
},
parser_options: json.ParseOptions,
- raw_parsed: union(enum) {
- server: ServerResponse(request),
- raw: Response(request),
- },
- // raw_parsed: ServerResponse(request),
+ raw_parsed: ServerResponse(request),
const Self = @This();
pub fn deinit(self: Self) void {
- switch (self.raw_parsed) {
- .server => json.parseFree(ServerResponse(request), self.raw_parsed.server, self.parser_options),
- .raw => json.parseFree(Response(request), self.raw_parsed.raw, self.parser_options),
- }
- self.parser_options.allocator.?.free(self.response_metadata.request_id);
+ json.parseFree(ServerResponse(request), self.raw_parsed, self.parser_options);
}
};
}
@@ -383,9 +265,6 @@ fn queryFieldTransformer(field_name: []const u8, encoding_options: url.EncodingO
return try case.snakeToPascal(encoding_options.allocator.?, field_name);
}
- fn pascalTransformer(field_name: []const u8, options: json.StringifyOptions) anyerror![]const u8 {
- return try case.snakeToPascal(options.allocator.?, field_name);
- }
// Use for debugging json responses of specific requests
// test "dummy request" {
// const allocator = std.testing.allocator;


@@ -74,15 +74,14 @@ const SigningOptions = struct {
service: []const u8,
};
- pub const HttpRequest = struct {
+ const HttpRequest = struct {
path: []const u8 = "/",
query: []const u8 = "",
body: []const u8 = "",
method: []const u8 = "POST",
- content_type: []const u8 = "application/json", // Can we get away with this?
- headers: []Header = &[_]Header{},
+ // headers: []Header = .{},
};
- pub const HttpResult = struct {
+ const HttpResult = struct {
response_code: u16, // actually 3 digits can fit in u10
body: []const u8,
headers: []Header,
@@ -100,7 +99,7 @@ pub const HttpResult = struct {
}
};
- pub const Header = struct {
+ const Header = struct {
name: []const u8,
value: []const u8,
};
@@ -321,7 +320,7 @@ pub const AwsHttp = struct {
var tls_connection_options: ?*c.aws_tls_connection_options = null;
const host = try self.allocator.dupeZ(u8, endpoint.host);
defer self.allocator.free(host);
- try self.addHeaders(http_request.?, host, request.body, request.content_type, request.headers);
+ try self.addHeaders(http_request.?, host, request.body);
if (std.mem.eql(u8, endpoint.scheme, "https")) {
// TODO: Figure out why this needs to be inline vs function call
// tls_connection_options = try self.setupTls(host);
@@ -632,7 +631,7 @@ pub const AwsHttp = struct {
async_result.sync.store(false, .SeqCst);
}
- fn addHeaders(self: Self, request: *c.aws_http_message, host: []const u8, body: []const u8, content_type: []const u8, additional_headers: []Header) !void {
+ fn addHeaders(self: Self, request: *c.aws_http_message, host: []const u8, body: []const u8) !void {
const accept_header = c.aws_http_header{
.name = c.aws_byte_cursor_from_c_str("Accept"),
.value = c.aws_byte_cursor_from_c_str("application/json"),
@@ -663,37 +662,22 @@ pub const AwsHttp = struct {
// const accept_encoding_header = c.aws_http_header{
// .name = c.aws_byte_cursor_from_c_str("Accept-Encoding"),
// .value = c.aws_byte_cursor_from_c_str("identity"),
- // .compression = 0, //.AWS_HTTP_HEADER_COMPRESSION_USE_CACHE,
+ // .compression = .AWS_HTTP_HEADER_COMPRESSION_USE_CACHE,
// };
// if (c.aws_http_message_add_header(request, accept_encoding_header) != c.AWS_OP_SUCCESS)
// return AwsError.AddHeaderError;
// AWS *does* seem to care about Content-Type. I don't think this header
// will hold for all APIs
- const c_type = try std.fmt.allocPrintZ(self.allocator, "{s}", .{content_type});
- defer self.allocator.free(c_type);
+ // TODO: Work out Content-type
const content_type_header = c.aws_http_header{
.name = c.aws_byte_cursor_from_c_str("Content-Type"),
- .value = c.aws_byte_cursor_from_c_str(c_type),
+ .value = c.aws_byte_cursor_from_c_str("application/x-www-form-urlencoded"),
.compression = 0, // .AWS_HTTP_HEADER_COMPRESSION_USE_CACHE,
};
if (c.aws_http_message_add_header(request, content_type_header) != c.AWS_OP_SUCCESS)
return AwsError.AddHeaderError;
- for (additional_headers) |h| {
- const name = try std.fmt.allocPrintZ(self.allocator, "{s}", .{h.name});
- defer self.allocator.free(name);
- const value = try std.fmt.allocPrintZ(self.allocator, "{s}", .{h.value});
- defer self.allocator.free(value);
- const c_header = c.aws_http_header{
- .name = c.aws_byte_cursor_from_c_str(name),
- .value = c.aws_byte_cursor_from_c_str(value),
- .compression = 0, // .AWS_HTTP_HEADER_COMPRESSION_USE_CACHE,
- };
- if (c.aws_http_message_add_header(request, c_header) != c.AWS_OP_SUCCESS)
- return AwsError.AddHeaderError;
- }
if (body.len > 0) {
const len = try std.fmt.allocPrintZ(self.allocator, "{d}", .{body.len});
// This defer seems to work ok, but I'm a bit concerned about why
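Combined with the Accept header added earlier in addHeaders, a query-protocol request built on the head side of this diff ends up looking roughly like the sketch below before SigV4 signing. The host, Action, and Version values are illustrative assumptions, not taken from this diff.

```zig
// Illustrative wire format only (SigV4 later adds Authorization, X-Amz-Date, etc.).
const example_request =
    \\POST / HTTP/1.1
    \\Host: sts.us-west-2.amazonaws.com
    \\Accept: application/json
    \\Content-Type: application/x-www-form-urlencoded
    \\Content-Length: 43
    \\
    \\Action=GetCallerIdentity&Version=2011-06-15
;
```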


@@ -2656,15 +2656,6 @@ pub const StringifyOptions = struct {
string: StringOptions = StringOptions{ .String = .{} },
- nameTransform: fn ([]const u8, StringifyOptions) anyerror![]const u8 = nullTransform,
- /// Not used by stringify - might be needed for your name transformer
- allocator: ?*std.mem.Allocator = null,
- fn nullTransform(name: []const u8, _: StringifyOptions) ![]const u8 {
- return name;
- }
/// Should []u8 be serialised as a string? or an array?
pub const StringOptions = union(enum) {
Array,
@@ -2708,7 +2699,7 @@ pub fn stringify(
value: anytype,
options: StringifyOptions,
out_stream: anytype,
- ) !void {
+ ) @TypeOf(out_stream).Error!void {
const T = @TypeOf(value);
switch (@typeInfo(T)) {
.Float, .ComptimeFloat => {
@@ -2777,10 +2768,7 @@ pub fn stringify(
try out_stream.writeByte('\n');
try child_whitespace.outputIndent(out_stream);
}
- const name = child_options.nameTransform(Field.name, options) catch {
- return error.NameTransformationError;
- };
- try stringify(name, options, out_stream);
+ try stringify(Field.name, options, out_stream);
try out_stream.writeByte(':');
if (child_options.whitespace) |child_whitespace| {
if (child_whitespace.separator) {
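The options removed above are the hook that let the AWS layer rename struct fields while serializing; the aws.zig portion of this diff removes the matching pascalTransformer. A sketch of how the hook was wired together on the base side, assuming the case module's snakeToPascal helper behaves as its name suggests (import paths are assumptions):

```zig
const std = @import("std");
const json = @import("json.zig"); // this file, with the nameTransform option present
const case = @import("case.zig"); // assumed path for the snakeToPascal helper

// Matches the pascalTransformer removed from aws.zig: "table_names" -> "TableNames".
fn pascalTransformer(field_name: []const u8, options: json.StringifyOptions) anyerror![]const u8 {
    return try case.snakeToPascal(options.allocator.?, field_name);
}

fn writeRequestJson(allocator: *std.mem.Allocator, request: anytype, writer: anytype) !void {
    // Arena mirrors the removed callJson: transformed names are freed in one shot.
    var arena = std.heap.ArenaAllocator.init(allocator);
    defer arena.deinit();
    try json.stringify(request, .{
        .whitespace = .{},
        .allocator = &arena.allocator,
        .nameTransform = pascalTransformer,
    }, writer);
}
```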


@@ -27,8 +27,6 @@ const Tests = enum {
query_no_input,
query_with_input,
ec2_query_no_input,
- json_1_0_query_with_input,
- json_1_0_query_no_input,
};
pub fn main() anyerror!void {
@@ -67,7 +65,7 @@ pub fn main() anyerror!void {
var client = aws.Aws.init(allocator);
defer client.deinit();
- const services = aws.Services(.{ .sts, .ec2, .dynamo_db }){};
+ const services = aws.Services(.{ .sts, .ec2 }){};
for (tests.items) |t| {
std.log.info("===== Start Test: {s} =====", .{@tagName(t)});
@@ -86,27 +84,12 @@ pub fn main() anyerror!void {
.duration_seconds = 900,
}, options);
defer access.deinit();
- std.log.info("access key: {s}", .{access.response.credentials.?.access_key_id});
- },
- .json_1_0_query_with_input => {
- // TODO: Find test without sensitive info
- const tables = try client.call(services.dynamo_db.list_tables.Request{
- .limit = 1,
- }, options);
- defer tables.deinit();
- std.log.info("request id: {s}", .{tables.response_metadata.request_id});
- std.log.info("account has tables: {b}", .{tables.response.table_names.?.len > 0});
- },
- .json_1_0_query_no_input => {
- const limits = try client.call(services.dynamo_db.describe_limits.Request{}, options);
- defer limits.deinit();
- std.log.info("account read capacity limit: {d}", .{limits.response.account_max_read_capacity_units});
+ std.log.info("access key: {s}", .{access.response.credentials.access_key_id});
},
.ec2_query_no_input => {
- std.log.err("EC2 Test disabled due to compiler bug", .{});
- // const instances = try client.call(services.ec2.describe_instances.Request{}, options);
- // defer instances.deinit();
- // std.log.info("reservation count: {d}", .{instances.response.reservations.len});
+ const instances = try client.call(services.ec2.describe_instances.Request{}, options);
+ defer instances.deinit();
+ std.log.info("reservation count: {d}", .{instances.response.reservations.len});
},
}
std.log.info("===== End Test: {s} =====\n", .{@tagName(t)});