initial support for json 1.0

Emil Lerch 2021-08-12 14:24:24 -07:00
parent feedbabba5
commit 00b2ab3d27
Signed by: lobo
GPG Key ID: A7B62D657EF764F8
5 changed files with 208 additions and 42 deletions

View File

@@ -176,7 +176,7 @@ fn generateOperation(allocator: *std.mem.Allocator, operation: smithy.ShapeInfo,
     _ = try writer.write(",\n");
     _ = try writer.write(" Response: type = ");
     if (operation.shape.operation.output) |member| {
-        try generateTypeFor(allocator, member, shapes, writer, " ", true, &type_stack, true);
+        try generateTypeFor(allocator, member, shapes, writer, " ", false, &type_stack, true);
     } else _ = try writer.write("struct {}"); // we want to maintain consistency with other ops
     _ = try writer.write(",\n");

View File

@@ -56,25 +56,72 @@ pub const Aws = struct {
         // It seems as though there are 3 major branches of the 6 protocols.
         // 1. query/ec2_query, which are identical until you get to complex
-        //    structures. TBD if the shortcut we're taking for query to make
-        //    it return json will work on EC2, but my guess is yes.
+        //    structures. EC2 query does not allow us to request json though,
+        //    so we need to handle xml returns from this.
         // 2. *json*: These three appear identical for input (possible difference
         //    for empty body serialization), but differ in error handling.
         //    We're not doing a lot of error handling here, though.
         // 3. rest_xml: This is a one-off for S3, never used since
         switch (service_meta.aws_protocol) {
-            .query, .ec2_query => return self.callQuery(request, service_meta, action, options),
-            .rest_json_1, .json_1_0, .json_1_1 => @compileError("REST Json, Json 1.0/1.1 protocol not yet supported"),
-            .rest_xml => @compileError("REST XML protocol not yet supported"),
+            .query => return self.callQuery(request, service_meta, action, options),
+            // .query, .ec2_query => return self.callQuery(request, service_meta, action, options),
+            .rest_json_1, .json_1_0, .json_1_1 => return self.callJson(request, service_meta, action, options),
+            .ec2_query, .rest_xml => @compileError("XML responses may be blocked on a zig compiler bug scheduled to be fixed in 0.9.0"),
         }
     }
+    /// Calls using one of the json protocols (rest_json_1, json_1_0, json_1_1
+    fn callJson(self: Self, comptime request: anytype, comptime service_meta: anytype, action: anytype, options: Options) !FullResponse(request) {
+        // Target might be a problem. The smithy docs differ fairly significantly
+        // from the REST API examples. Here I'm following the REST API examples
+        // as they have not yet led me astray. Whether they're consistent
+        // across other services is another matter...
+        var version = try self.allocator.alloc(u8, service_meta.version.len);
+        defer self.allocator.free(version);
+        const replacements = std.mem.replace(u8, service_meta.version, "-", "", version);
+        // Resize the version, otherwise the junk at the end will mess with allocPrint
+        version = try self.allocator.resize(version, version.len - replacements);
+        const target =
+            try std.fmt.allocPrint(self.allocator, "{s}_{s}.{s}", .{
+            service_meta.sdk_id,
+            version,
+            action.action_name,
+        });
+        defer self.allocator.free(target);
+        var buffer = std.ArrayList(u8).init(self.allocator);
+        defer buffer.deinit();
+        // The transformer needs to allocate stuff out of band, but we
+        // can guarantee we don't need the memory after this call completes,
+        // so we'll use an arena allocator to whack everything.
+        // TODO: Determine if sending in null values is ok, or if we need another
+        //       tweak to the stringify function to exclude
+        var nameAllocator = std.heap.ArenaAllocator.init(self.allocator);
+        defer nameAllocator.deinit();
+        try json.stringify(request, .{ .whitespace = .{}, .allocator = &nameAllocator.allocator, .nameTransform = pascalTransformer }, buffer.writer());
+        var content_type: []const u8 = undefined;
+        switch (service_meta.aws_protocol) {
+            .rest_json_1 => content_type = "application/json",
+            .json_1_0 => content_type = "application/x-amz-json-1.0",
+            .json_1_1 => content_type = "application/x-amz-json-1.1",
+            else => unreachable,
+        }
+        return try self.callAws(request, service_meta, .{
+            .query = "",
+            .body = buffer.items,
+            .content_type = content_type,
+            .headers = &[_]awshttp.Header{.{ .name = "X-Amz-Target", .value = target }},
+        }, options);
+    }
     // Call using query protocol. This is documented as an XML protocol, but
     // throwing a JSON accept header seems to work. EC2Query is very simliar to
     // Query, so we'll handle both here. Realistically we probably don't effectively
     // handle lists and maps properly anyway yet, so we'll go for it and see
     // where it breaks. PRs and/or failing test cases appreciated.
-    fn callQuery(self: Self, comptime request: anytype, service_meta: anytype, action: anytype, options: Options) !FullResponse(request) {
+    fn callQuery(self: Self, comptime request: anytype, comptime service_meta: anytype, action: anytype, options: Options) !FullResponse(request) {
         var buffer = std.ArrayList(u8).init(self.allocator);
         defer buffer.deinit();
         const writer = buffer.writer();
@@ -103,31 +150,28 @@ pub const Aws = struct {
         else // EC2
             try std.fmt.allocPrint(self.allocator, "{s}", .{buffer.items});
         defer self.allocator.free(body);
+        return try self.callAws(request, service_meta, .{
+            .query = query,
+            .body = body,
+            .content_type = "application/x-www-form-urlencoded",
+        }, options);
+    }
+    fn callAws(self: Self, comptime request: anytype, comptime service_meta: anytype, aws_request: awshttp.HttpRequest, options: Options) !FullResponse(request) {
         const FullR = FullResponse(request);
         const response = try self.aws_http.callApi(
             service_meta.endpoint_prefix,
-            .{
-                .body = body,
-                .query = query,
-            },
+            aws_request,
             .{
                 .region = options.region,
                 .dualstack = options.dualstack,
                 .sigv4_service_name = service_meta.sigv4_name,
             },
         );
+        // TODO: Can response handling be reused?
         defer response.deinit();
+        // try self.reportTraffic("", aws_request, response, log.debug);
         if (response.response_code != 200) {
-            log.err("call failed! return status: {d}", .{response.response_code});
-            log.err("Request Query:\n |{s}\n", .{query});
-            log.err("Request Body:\n |{s}\n", .{body});
-            log.err("Response Headers:\n", .{});
-            for (response.headers) |h|
-                log.err("\t{s}:{s}\n", .{ h.name, h.value });
-            log.err("Response Body:\n |{s}", .{response.body});
+            try self.reportTraffic("Call Failed", aws_request, response, log.err);
             return error.HttpFailure;
         }
         // EC2 ignores our accept type, but technically query protocol only
@@ -138,6 +182,10 @@ pub const Aws = struct {
             if (std.mem.eql(u8, "Content-Type", h.name)) {
                 if (std.mem.startsWith(u8, h.value, "application/json")) {
                     isJson = true;
+                } else if (std.mem.startsWith(u8, h.value, "application/x-amz-json-1.0")) {
+                    isJson = true;
+                } else if (std.mem.startsWith(u8, h.value, "application/x-amz-json-1.1")) {
+                    isJson = true;
                 } else if (std.mem.startsWith(u8, h.value, "text/xml")) {
                     isJson = false;
                 } else {
@@ -160,19 +208,53 @@ pub const Aws = struct {
             .allow_unknown_fields = true, // new option. Cannot yet handle non-struct fields though
             .allow_missing_fields = false, // new option. Cannot yet handle non-struct fields though
         };
-        const SResponse = ServerResponse(request);
+        // const SResponse = ServerResponse(request);
+        const SResponse = if (service_meta.aws_protocol != .query and service_meta.aws_protocol != .ec2_query)
+            Response(request)
+        else
+            ServerResponse(request);
         const parsed_response = json.parse(SResponse, &stream, parser_options) catch |e| {
             log.err(
                 \\Call successful, but unexpected response from service.
                 \\This could be the result of a bug or a stale set of code generated
-                \\service models. Response from server:
+                \\service models.
+                \\
+                \\Model Type: {s}
+                \\
+                \\Response from server:
                 \\
                 \\{s}
                 \\
-            , .{response.body});
+            , .{ SResponse, response.body });
             return e;
         };
+        if (service_meta.aws_protocol != .query and service_meta.aws_protocol != .ec2_query) {
+            var request_id: []u8 = undefined;
+            var found = false;
+            for (response.headers) |h| {
+                if (std.ascii.eqlIgnoreCase(h.name, "X-Amzn-RequestId")) {
+                    found = true;
+                    request_id = try std.fmt.allocPrint(self.allocator, "{s}", .{h.value}); // will be freed in FullR.deinit()
+                }
+            }
+            if (!found) {
+                try self.reportTraffic("Request ID not found", aws_request, response, log.err);
+                return error.RequestIdNotFound;
+            }
+            return FullR{
+                .response = parsed_response,
+                .response_metadata = .{
+                    .request_id = request_id,
+                },
+                .parser_options = parser_options,
+                .raw_parsed = .{ .raw = parsed_response },
+            };
+        }
         // Grab the first (and only) object from the server. Server shape expected to be:
         // { ActionResponse: {ActionResult: {...}, ResponseMetadata: {...} } }
         //                   ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -189,9 +271,36 @@ pub const Aws = struct {
                 .request_id = real_response.ResponseMetadata.RequestId,
             },
             .parser_options = parser_options,
-            .raw_parsed = parsed_response,
+            .raw_parsed = .{ .server = parsed_response },
         };
     }
+    fn reportTraffic(self: Self, info: []const u8, request: awshttp.HttpRequest, response: awshttp.HttpResult, comptime reporter: fn (comptime []const u8, anytype) void) !void {
+        var msg = std.ArrayList(u8).init(self.allocator);
+        defer msg.deinit();
+        const writer = msg.writer();
+        try writer.print("{s}\n\n", .{info});
+        try writer.print("Return status: {d}\n\n", .{response.response_code});
+        if (request.query.len > 0) try writer.print("Request Query:\n \t{s}\n", .{request.query});
+        _ = try writer.write("Unique Request Headers:\n");
+        if (request.headers.len > 0) {
+            for (request.headers) |h|
+                try writer.print("\t{s}: {s}\n", .{ h.name, h.value });
+        }
+        try writer.print("\tContent-Type: {s}\n\n", .{request.content_type});
+        _ = try writer.write("Request Body:\n");
+        try writer.print("-------------\n{s}\n", .{request.body});
+        _ = try writer.write("-------------\n");
+        _ = try writer.write("Response Headers:\n");
+        for (response.headers) |h|
+            try writer.print("\t{s}: {s}\n", .{ h.name, h.value });
+        _ = try writer.write("Response Body:\n");
+        try writer.print("--------------\n{s}\n", .{response.body});
+        _ = try writer.write("--------------\n");
+        reporter("{s}\n", .{msg.items});
+    }
 };
 fn ServerResponse(comptime request: anytype) type {
@@ -250,11 +359,20 @@ fn FullResponse(comptime request: anytype) type {
             request_id: []u8,
         },
         parser_options: json.ParseOptions,
-        raw_parsed: ServerResponse(request),
+        raw_parsed: union(enum) {
+            server: ServerResponse(request),
+            raw: Response(request),
+        },
+        // raw_parsed: ServerResponse(request),
         const Self = @This();
         pub fn deinit(self: Self) void {
-            json.parseFree(ServerResponse(request), self.raw_parsed, self.parser_options);
+            switch (self.raw_parsed) {
+                .server => json.parseFree(ServerResponse(request), self.raw_parsed.server, self.parser_options),
+                .raw => json.parseFree(Response(request), self.raw_parsed.raw, self.parser_options),
+            }
+            self.parser_options.allocator.?.free(self.response_metadata.request_id);
         }
     };
 }
@@ -265,6 +383,9 @@ fn queryFieldTransformer(field_name: []const u8, encoding_options: url.EncodingO
     return try case.snakeToPascal(encoding_options.allocator.?, field_name);
 }
+fn pascalTransformer(field_name: []const u8, options: json.StringifyOptions) anyerror![]const u8 {
+    return try case.snakeToPascal(options.allocator.?, field_name);
+}
 // Use for debugging json responses of specific requests
 // test "dummy request" {
 //     const allocator = std.testing.allocator;
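
Editor's note: for orientation, here is a minimal standalone sketch (not part of this commit) of the X-Amz-Target value that callJson builds, mirroring its dash-stripping and allocPrint calls. The DynamoDB values (sdk_id "DynamoDB", model version "2012-08-10") and the ListTables action are assumed for illustration; the expected result is "DynamoDB_20120810.ListTables".

// Illustration only -- mirrors the target construction in callJson above.
// The sdk_id/version/action values are assumed, not taken from this commit.
const std = @import("std");

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    const allocator = &gpa.allocator;

    const version = "2012-08-10"; // stands in for service_meta.version
    var stripped = try allocator.alloc(u8, version.len);
    defer allocator.free(stripped);
    const removed = std.mem.replace(u8, version, "-", "", stripped);

    const target = try std.fmt.allocPrint(allocator, "{s}_{s}.{s}", .{
        "DynamoDB", // stands in for service_meta.sdk_id
        stripped[0 .. stripped.len - removed],
        "ListTables", // stands in for action.action_name
    });
    defer allocator.free(target);
    std.debug.print("X-Amz-Target: {s}\n", .{target}); // DynamoDB_20120810.ListTables
}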

View File

@@ -74,14 +74,15 @@ const SigningOptions = struct {
     service: []const u8,
 };
-const HttpRequest = struct {
+pub const HttpRequest = struct {
     path: []const u8 = "/",
     query: []const u8 = "",
     body: []const u8 = "",
     method: []const u8 = "POST",
-    // headers: []Header = .{},
+    content_type: []const u8 = "application/json", // Can we get away with this?
+    headers: []Header = &[_]Header{},
 };
-const HttpResult = struct {
+pub const HttpResult = struct {
     response_code: u16, // actually 3 digits can fit in u10
     body: []const u8,
     headers: []Header,
@@ -99,7 +100,7 @@ const HttpResult = struct {
     }
 };
-const Header = struct {
+pub const Header = struct {
     name: []const u8,
     value: []const u8,
 };
@@ -320,7 +321,7 @@ pub const AwsHttp = struct {
         var tls_connection_options: ?*c.aws_tls_connection_options = null;
         const host = try self.allocator.dupeZ(u8, endpoint.host);
         defer self.allocator.free(host);
-        try self.addHeaders(http_request.?, host, request.body);
+        try self.addHeaders(http_request.?, host, request.body, request.content_type, request.headers);
         if (std.mem.eql(u8, endpoint.scheme, "https")) {
             // TODO: Figure out why this needs to be inline vs function call
             // tls_connection_options = try self.setupTls(host);
@@ -631,7 +632,7 @@ pub const AwsHttp = struct {
         async_result.sync.store(false, .SeqCst);
     }
-    fn addHeaders(self: Self, request: *c.aws_http_message, host: []const u8, body: []const u8) !void {
+    fn addHeaders(self: Self, request: *c.aws_http_message, host: []const u8, body: []const u8, content_type: []const u8, additional_headers: []Header) !void {
         const accept_header = c.aws_http_header{
             .name = c.aws_byte_cursor_from_c_str("Accept"),
             .value = c.aws_byte_cursor_from_c_str("application/json"),
@@ -662,22 +663,37 @@ pub const AwsHttp = struct {
         // const accept_encoding_header = c.aws_http_header{
         //     .name = c.aws_byte_cursor_from_c_str("Accept-Encoding"),
         //     .value = c.aws_byte_cursor_from_c_str("identity"),
-        //     .compression = .AWS_HTTP_HEADER_COMPRESSION_USE_CACHE,
+        //     .compression = 0, //.AWS_HTTP_HEADER_COMPRESSION_USE_CACHE,
         // };
         // if (c.aws_http_message_add_header(request, accept_encoding_header) != c.AWS_OP_SUCCESS)
         //     return AwsError.AddHeaderError;
         // AWS *does* seem to care about Content-Type. I don't think this header
         // will hold for all APIs
-        // TODO: Work out Content-type
+        const c_type = try std.fmt.allocPrintZ(self.allocator, "{s}", .{content_type});
+        defer self.allocator.free(c_type);
         const content_type_header = c.aws_http_header{
             .name = c.aws_byte_cursor_from_c_str("Content-Type"),
-            .value = c.aws_byte_cursor_from_c_str("application/x-www-form-urlencoded"),
+            .value = c.aws_byte_cursor_from_c_str(c_type),
             .compression = 0, // .AWS_HTTP_HEADER_COMPRESSION_USE_CACHE,
         };
         if (c.aws_http_message_add_header(request, content_type_header) != c.AWS_OP_SUCCESS)
             return AwsError.AddHeaderError;
+        for (additional_headers) |h| {
+            const name = try std.fmt.allocPrintZ(self.allocator, "{s}", .{h.name});
+            defer self.allocator.free(name);
+            const value = try std.fmt.allocPrintZ(self.allocator, "{s}", .{h.value});
+            defer self.allocator.free(value);
+            const c_header = c.aws_http_header{
+                .name = c.aws_byte_cursor_from_c_str(name),
+                .value = c.aws_byte_cursor_from_c_str(value),
+                .compression = 0, // .AWS_HTTP_HEADER_COMPRESSION_USE_CACHE,
+            };
+            if (c.aws_http_message_add_header(request, c_header) != c.AWS_OP_SUCCESS)
+                return AwsError.AddHeaderError;
+        }
         if (body.len > 0) {
             const len = try std.fmt.allocPrintZ(self.allocator, "{d}", .{body.len});
             // This defer seems to work ok, but I'm a bit concerned about why
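
Editor's note: as a quick reference (not from the commit itself), the widened HttpRequest now carries a per-request content type and extra headers, which is how callJson feeds the X-Amz-Target header through to addHeaders. A small sketch of populating those fields, assuming the module is imported as awshttp (as aws.zig does); the target value is made up.

// Sketch only: exercising the new content_type/headers fields.
var example_headers = [_]awshttp.Header{
    .{ .name = "X-Amz-Target", .value = "DynamoDB_20120810.DescribeLimits" }, // illustrative value
};
const example_request = awshttp.HttpRequest{
    .query = "",
    .body = "{}",
    .content_type = "application/x-amz-json-1.0",
    .headers = &example_headers,
};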

View File

@@ -2656,6 +2656,15 @@ pub const StringifyOptions = struct {
     string: StringOptions = StringOptions{ .String = .{} },
+    nameTransform: fn ([]const u8, StringifyOptions) anyerror![]const u8 = nullTransform,
+    /// Not used by stringify - might be needed for your name transformer
+    allocator: ?*std.mem.Allocator = null,
+    fn nullTransform(name: []const u8, _: StringifyOptions) ![]const u8 {
+        return name;
+    }
     /// Should []u8 be serialised as a string? or an array?
     pub const StringOptions = union(enum) {
         Array,
@@ -2699,7 +2708,7 @@ pub fn stringify(
     value: anytype,
     options: StringifyOptions,
     out_stream: anytype,
-) @TypeOf(out_stream).Error!void {
+) !void {
     const T = @TypeOf(value);
     switch (@typeInfo(T)) {
         .Float, .ComptimeFloat => {
@@ -2768,7 +2777,10 @@ pub fn stringify(
                     try out_stream.writeByte('\n');
                     try child_whitespace.outputIndent(out_stream);
                 }
-                try stringify(Field.name, options, out_stream);
+                const name = child_options.nameTransform(Field.name, options) catch {
+                    return error.NameTransformationError;
+                };
+                try stringify(name, options, out_stream);
                 try out_stream.writeByte(':');
                 if (child_options.whitespace) |child_whitespace| {
                     if (child_whitespace.separator) {
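
Editor's note: a minimal sketch (not in the commit) of how the new nameTransform and allocator options work together. upperFirst is a hypothetical transformer used purely for illustration; the SDK itself plugs in pascalTransformer, which wraps case.snakeToPascal.

// Sketch only. Assumes this vendored module is imported as "json";
// upperFirst and the field name are illustrative.
const std = @import("std");
const json = @import("json.zig");

fn upperFirst(name: []const u8, options: json.StringifyOptions) anyerror![]const u8 {
    const copy = try options.allocator.?.dupe(u8, name);
    if (copy.len > 0) copy[0] = std.ascii.toUpper(copy[0]);
    return copy;
}

pub fn main() !void {
    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena.deinit(); // also frees anything the transformer allocated
    var buffer = std.ArrayList(u8).init(&arena.allocator);

    try json.stringify(.{ .limit = 1 }, .{
        .allocator = &arena.allocator,
        .nameTransform = upperFirst,
    }, buffer.writer());
    std.debug.print("{s}\n", .{buffer.items}); // {"Limit":1}
}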

View File

@@ -27,6 +27,8 @@ const Tests = enum {
     query_no_input,
     query_with_input,
     ec2_query_no_input,
+    json_1_0_query_with_input,
+    json_1_0_query_no_input,
 };
 pub fn main() anyerror!void {
@@ -65,7 +67,7 @@ pub fn main() anyerror!void {
     var client = aws.Aws.init(allocator);
     defer client.deinit();
-    const services = aws.Services(.{ .sts, .ec2 }){};
+    const services = aws.Services(.{ .sts, .ec2, .dynamo_db }){};
     for (tests.items) |t| {
         std.log.info("===== Start Test: {s} =====", .{@tagName(t)});
@@ -84,12 +86,27 @@ pub fn main() anyerror!void {
                     .duration_seconds = 900,
                 }, options);
                 defer access.deinit();
-                std.log.info("access key: {s}", .{access.response.credentials.access_key_id});
+                std.log.info("access key: {s}", .{access.response.credentials.?.access_key_id});
+            },
+            .json_1_0_query_with_input => {
+                // TODO: Find test without sensitive info
+                const tables = try client.call(services.dynamo_db.list_tables.Request{
+                    .limit = 1,
+                }, options);
+                defer tables.deinit();
+                std.log.info("request id: {s}", .{tables.response_metadata.request_id});
+                std.log.info("account has tables: {b}", .{tables.response.table_names.?.len > 0});
+            },
+            .json_1_0_query_no_input => {
+                const limits = try client.call(services.dynamo_db.describe_limits.Request{}, options);
+                defer limits.deinit();
+                std.log.info("account read capacity limit: {d}", .{limits.response.account_max_read_capacity_units});
             },
             .ec2_query_no_input => {
-                const instances = try client.call(services.ec2.describe_instances.Request{}, options);
-                defer instances.deinit();
-                std.log.info("reservation count: {d}", .{instances.response.reservations.len});
+                std.log.err("EC2 Test disabled due to compiler bug", .{});
+                // const instances = try client.call(services.ec2.describe_instances.Request{}, options);
+                // defer instances.deinit();
+                // std.log.info("reservation count: {d}", .{instances.response.reservations.len});
             },
         }
         std.log.info("===== End Test: {s} =====\n", .{@tagName(t)});