Compare commits

...

5 Commits

SHA1 | Message | Date
ab1060b145 | s3 listallbuckets/putobject working (CI: continuous-integration/drone/push build failing) | 2022-05-31 18:47:59 -07:00
b9aaffb08d | add s3 exception for virtual host addressing | 2022-05-29 14:12:01 -07:00
c531164cfa | cleanup and refactoring. Addressed TODO related to comptime eval | 2022-05-29 12:49:02 -07:00
03f7228662 | fix rest_xml_no_input integration test | 2022-05-29 11:16:55 -07:00
08c2ed0c07 | move json detection to separate function | 2022-05-29 11:13:05 -07:00
4 changed files with 527 additions and 209 deletions
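The headline change in this set is S3 virtual-host addressing: when a request path carries both a bucket and a key, the bucket is promoted into the hostname and only the remainder stays in the path. A minimal sketch of that mapping, written to match the new test expectations further down (the names here are illustrative, not code from the diff):

const std = @import("std");

test "illustrative: S3 path-style to virtual-host mapping" {
    // Path-style, as the model/API surface sees it:
    //   https://s3.us-east-2.amazonaws.com/bucket/key
    // Virtual-host style, as actually sent on the wire:
    //   https://bucket.s3.us-east-2.amazonaws.com/key
    const path = "/bucket/key";
    const second_slash = std.mem.indexOfScalar(u8, path[1..], '/').? + 1;
    const bucket = path[1..second_slash]; // "bucket"
    const rest_of_path = path[second_slash..]; // "/key"
    try std.testing.expectEqualStrings("bucket", bucket);
    try std.testing.expectEqualStrings("/key", rest_of_path);
}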

View File

@@ -102,10 +102,9 @@ pub fn Request(comptime action: anytype) type {
.method = Action.http_config.method,
.content_type = "application/json",
.path = Action.http_config.uri,
.headers = try headersFor(options.client.allocator, request),
};
if (Self.service_meta.aws_protocol == .rest_xml) {
aws_request.content_type = "application/xml";
}
defer freeHeadersFor(options.client.allocator, request, aws_request.headers);
log.debug("Rest method: '{s}'", .{aws_request.method});
log.debug("Rest success code: '{d}'", .{Action.http_config.success_code});
@@ -113,7 +112,25 @@ pub fn Request(comptime action: anytype) type {
aws_request.path = try buildPath(options.client.allocator, Action.http_config.uri, ActionRequest, request);
defer options.client.allocator.free(aws_request.path);
log.debug("Rest processed uri: '{s}'", .{aws_request.path});
// TODO: Make sure this doesn't get escaped here for S3
aws_request.query = try buildQuery(options.client.allocator, request);
if (aws_request.query.len == 0) {
if (std.mem.indexOf(u8, aws_request.path, "?")) |inx| {
log.debug("Detected query in path. Adjusting", .{});
// Sometimes (looking at you, s3), the uri in the model
// has a query string shoved into it. If that's the case,
// we need to parse and straighten this all out
const orig_path = aws_request.path; // save as we'll need to dealloc
const orig_query = aws_request.query; // save as we'll need to dealloc
// We need to chop the query off because apparently the other one whacks the
// query string. TODO: RTFM on zig to figure out why
aws_request.query = try options.client.allocator.dupe(u8, aws_request.path[inx..]);
aws_request.path = try options.client.allocator.dupe(u8, aws_request.path[0..inx]);
log.debug("inx: {d}\n\tnew path: {s}\n\tnew query: {s}", .{ inx, aws_request.path, aws_request.query });
options.client.allocator.free(orig_path);
options.client.allocator.free(orig_query);
}
}
log.debug("Rest query: '{s}'", .{aws_request.query});
defer options.client.allocator.free(aws_request.query);
// We don't know if we need a body...guessing here, this should cover most
@@ -126,12 +143,20 @@ pub fn Request(comptime action: anytype) type {
try json.stringify(request, .{ .whitespace = .{} }, buffer.writer());
}
}
aws_request.body = buffer.items;
if (Self.service_meta.aws_protocol == .rest_xml) {
if (std.mem.eql(u8, "PUT", aws_request.method) or std.mem.eql(u8, "POST", aws_request.method)) {
return error.NotImplemented;
if (@hasDecl(ActionRequest, "http_payload")) {
// We will assign the body to the value of the field denoted by
// the http_payload declaration on the request type.
// Hopefully these will always be ?[]const u8, otherwise
// we should see a compile error on this line
aws_request.body = @field(request, ActionRequest.http_payload).?;
} else {
return error.NotImplemented;
}
}
}
aws_request.body = buffer.items;
return try Self.callAws(aws_request, .{
.success_http_code = Action.http_config.success_code,
@@ -237,44 +262,63 @@ pub fn Request(comptime action: anytype) type {
try reportTraffic(options.client.allocator, "Call Failed", aws_request, response, log.err);
return error.HttpFailure;
}
// EC2 ignores our accept type, but technically query protocol only
// returns XML as well. So, we'll ignore the protocol here and just
// look at the return type
var isJson: bool = undefined;
for (response.headers) |h| {
if (std.ascii.eqlIgnoreCase("Content-Type", h.name)) {
if (std.mem.startsWith(u8, h.value, "application/json")) {
isJson = true;
} else if (std.mem.startsWith(u8, h.value, "application/x-amz-json-1.0")) {
isJson = true;
} else if (std.mem.startsWith(u8, h.value, "application/x-amz-json-1.1")) {
isJson = true;
} else if (std.mem.startsWith(u8, h.value, "text/xml")) {
isJson = false;
} else if (std.mem.startsWith(u8, h.value, "application/xml")) {
isJson = false;
} else {
log.err("Unexpected content type: {s}", .{h.value});
return error.UnexpectedContentType;
var fullResponse = try getFullResponseFromBody(aws_request, response, options);
// Fill in any fields that require a header. Note doing it post-facto
// assumes all response header fields are optional, which may be incorrect
if (@hasDecl(action.Response, "http_header")) {
inline for (std.meta.fields(@TypeOf(action.Response.http_header))) |f| {
const header_name = @field(action.Response.http_header, f.name);
for (response.headers) |h| {
if (std.ascii.eqlIgnoreCase(h.name, header_name)) {
log.debug("Response header {s} configured for field. Setting {s} = {s}", .{ h.name, f.name, h.value });
const field_type = @TypeOf(@field(fullResponse.response, f.name));
// TODO: Fix this. We need to make this much more robust
// The deal is we have to do the dupe though
// Also, this is a memory leak atm
if (field_type == ?[]const u8) {
@field(fullResponse.response, f.name) = try options.client.allocator.dupe(u8, (try coerceFromString(field_type, h.value)).?);
} else {
@field(fullResponse.response, f.name) = try coerceFromString(field_type, h.value);
}
break;
}
}
break;
}
}
return fullResponse;
}
if (!isJson) return try xmlReturn(options, response);
fn getFullResponseFromBody(aws_request: awshttp.HttpRequest, response: awshttp.HttpResult, options: Options) !FullResponseType {
// First, we need to determine if we care about a response at all
// If the expected result has no fields, there's no sense in
// doing any more work. Let's bail early
var expected_body_field_len = std.meta.fields(action.Response).len;
if (@hasDecl(action.Response, "http_header"))
expected_body_field_len -= std.meta.fields(@TypeOf(action.Response.http_header)).len;
const SResponse = if (Self.service_meta.aws_protocol != .query)
action.Response
else
ServerResponse(action);
const NullType: type = u0; // This is a small hack, yes...
const SRawResponse = if (Self.service_meta.aws_protocol != .query and
std.meta.fields(SResponse).len == 1)
std.meta.fields(SResponse)[0].field_type
else
NullType;
// We don't care about the body if there are no fields we expect there...
if (std.meta.fields(action.Response).len == 0 or expected_body_field_len == 0) {
// ^^ This should be redundant, but is necessary. I suspect it's a compiler quirk
//
// Do we care if an unexpected body comes in?
return FullResponseType{
.response = .{},
.response_metadata = .{
.request_id = try requestIdFromHeaders(aws_request, response, options),
},
.parser_options = .{ .json = .{} },
.raw_parsed = .{ .raw = .{} },
.allocator = options.client.allocator,
};
}
const isJson = try isJsonResponse(response.headers);
if (!isJson) return try xmlReturn(aws_request, options, response);
return try jsonReturn(aws_request, options, response);
}
fn jsonReturn(aws_request: awshttp.HttpRequest, options: Options, response: awshttp.HttpResult) !FullResponseType {
const parser_options = json.ParseOptions{
.allocator = options.client.allocator,
.allow_camel_case_conversion = true, // new option
@@ -282,87 +326,47 @@ pub fn Request(comptime action: anytype) type {
.allow_unknown_fields = true, // new option. Cannot yet handle non-struct fields though
.allow_missing_fields = false, // new option. Cannot yet handle non-struct fields though
};
if (std.meta.fields(SResponse).len == 0) // We don't care about the body if there are no fields
// Do we care if an unexpected body comes in?
// Get our possible response types. There are 3:
//
// 1. A result wrapped with metadata like request ID. This is ServerResponse(action)
// 2. A "Normal" result, which starts with { "MyActionResponse": {...} }
// 3. A "Raw" result, which is simply {...} without decoration
const response_types = jsonResponseTypesForAction();
// Parse the server data. Function will determine which of the three
// responses we have, and do the right thing
const parsed_data = try parseJsonData(response_types, response.body, options, parser_options);
defer parsed_data.deinit();
const parsed_response = parsed_data.parsed_response_ptr.*;
if (response_types.NormalResponse == ServerResponse(action)) {
// This should only apply to query results, but we're in comptime
// type land, so the only thing that matters is whether our
// response is a ServerResponse
//
// Grab the first (and only) object from the data. Server shape expected to be:
// { ActionResponse: {ActionResult: {...}, ResponseMetadata: {...} } }
// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
// Next line of code pulls this portion
//
//
// And the response property below will pull whatever is the ActionResult object
// We can grab index [0] as structs are guaranteed by zig to be returned in the order
// declared, and we're declaring in that order in ServerResponse().
const real_response = @field(parsed_response, @typeInfo(response_types.NormalResponse).Struct.fields[0].name);
return FullResponseType{
.response = .{},
.response = @field(real_response, @typeInfo(@TypeOf(real_response)).Struct.fields[0].name),
.response_metadata = .{
.request_id = try requestIdFromHeaders(aws_request, response, options),
.request_id = try options.client.allocator.dupe(u8, real_response.ResponseMetadata.RequestId),
},
.parser_options = .{ .json = parser_options },
.raw_parsed = .{ .raw = .{} },
.raw_parsed = .{ .server = parsed_response },
.allocator = options.client.allocator,
};
var stream = json.TokenStream.init(response.body);
const start = std.mem.indexOf(u8, response.body, "\"") orelse 0; // Should never be 0
if (start == 0) log.warn("Response body missing json key?!", .{});
var end = std.mem.indexOf(u8, response.body[start + 1 ..], "\"") orelse 0;
if (end == 0) log.warn("Response body only has one double quote?!", .{});
end = end + start + 1;
const key = response.body[start + 1 .. end];
log.debug("First json key: {s}", .{key});
const foundNormalJsonResponse = std.mem.eql(u8, key, action.action_name ++ "Response");
const parsed_response_ptr = blk: {
if (SRawResponse == NullType or foundNormalJsonResponse)
break :blk &(json.parse(SResponse, &stream, parser_options) catch |e| {
log.err(
\\Call successful, but unexpected response from service.
\\This could be the result of a bug or a stale set of code generated
\\service models.
\\
\\Model Type: {s}
\\
\\Response from server:
\\
\\{s}
\\
, .{ SResponse, response.body });
return e;
});
log.debug("Appears server has provided a raw response", .{});
const ptr = try options.client.allocator.create(SResponse);
@field(ptr.*, std.meta.fields(SResponse)[0].name) =
json.parse(SRawResponse, &stream, parser_options) catch |e| {
log.err(
\\Call successful, but unexpected response from service.
\\This could be the result of a bug or a stale set of code generated
\\service models.
\\
\\Model Type: {s}
\\
\\Response from server:
\\
\\{s}
\\
, .{ SResponse, response.body });
return e;
};
break :blk ptr;
};
// This feels like it should result in a use after free, but it
// seems to be working?
defer if (!(SRawResponse == NullType or foundNormalJsonResponse))
options.client.allocator.destroy(parsed_response_ptr);
const parsed_response = parsed_response_ptr.*;
// TODO: Figure out this hack
// the code setting the response about 10 lines down will trigger
// an error because the first field may not be a struct when
// XML processing is happening above, which we only know at runtime.
//
// We could simply force .ec2_query and .rest_xml above rather than
// isJson, but it would be nice to automatically support json if
// these services start returning that like we'd like them to.
//
// Otherwise, the compiler gets down here thinking this will be
// processed. If it is, then we have a problem when the field name
// may not be a struct.
if (Self.service_meta.aws_protocol != .query or Self.service_meta.aws_protocol == .ec2_query) {
} else {
// Conditions 2 or 3 (no wrapping)
return FullResponseType{
.response = parsed_response,
.response_metadata = .{
@@ -370,30 +374,12 @@ pub fn Request(comptime action: anytype) type {
},
.parser_options = .{ .json = parser_options },
.raw_parsed = .{ .raw = parsed_response },
.allocator = options.client.allocator,
};
}
// Grab the first (and only) object from the server. Server shape expected to be:
// { ActionResponse: {ActionResult: {...}, ResponseMetadata: {...} } }
// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
// Next line of code pulls this portion
//
//
// And the response property below will pull whatever is the ActionResult object
// We can grab index [0] as structs are guaranteed by zig to be returned in the order
// declared, and we're declaring in that order in ServerResponse().
const real_response = @field(parsed_response, @typeInfo(SResponse).Struct.fields[0].name);
return FullResponseType{
.response = @field(real_response, @typeInfo(@TypeOf(real_response)).Struct.fields[0].name),
.response_metadata = .{
.request_id = try options.client.allocator.dupe(u8, real_response.ResponseMetadata.RequestId),
},
.parser_options = .{ .json = parser_options },
.raw_parsed = .{ .server = parsed_response },
};
}
fn xmlReturn(options: Options, result: awshttp.HttpResult) !FullResponseType {
fn xmlReturn(request: awshttp.HttpRequest, options: Options, result: awshttp.HttpResult) !FullResponseType {
// Server shape be all like:
//
// <?xml version="1.0" encoding="UTF-8"?>
@@ -420,7 +406,9 @@ pub fn Request(comptime action: anytype) type {
const xml_options = xml_shaper.ParseOptions{ .allocator = options.client.allocator };
var body: []const u8 = result.body;
var free_body = false;
if (std.mem.lastIndexOf(u8, result.body[result.body.len - 20 ..], "Response>") == null) {
if (std.mem.lastIndexOf(u8, result.body[result.body.len - 20 ..], "Response>") == null and
std.mem.lastIndexOf(u8, result.body[result.body.len - 20 ..], "Result>") == null)
{
free_body = true;
// chop the "<?xml version="1.0"?>" from the front
const start = if (std.mem.indexOf(u8, result.body, "?>")) |i| i else 0;
@@ -429,42 +417,13 @@ pub fn Request(comptime action: anytype) type {
defer if (free_body) options.client.allocator.free(body);
const parsed = try xml_shaper.parse(action.Response, body, xml_options);
errdefer parsed.deinit();
var free_rid = false;
// This needs to get into FullResponseType somehow: defer parsed.deinit();
const request_id = blk: {
if (parsed.document.root.getCharData("requestId")) |elem|
break :blk elem;
var rid: ?[]const u8 = null;
// This "thing" is called:
// * Host ID
// * Extended Request ID
// * Request ID 2
//
// I suspect it identifies the S3 frontend server and they are
// trying to obscure that fact. But several SDKs go with host id,
// so we'll use that
var host_id: ?[]const u8 = null;
for (result.headers) |header| {
if (std.ascii.eqlIgnoreCase(header.name, "x-amzn-requestid")) { // CloudFront
rid = header.value;
}
if (std.ascii.eqlIgnoreCase(header.name, "x-amz-request-id")) { // S3
rid = header.value;
}
if (std.ascii.eqlIgnoreCase(header.name, "x-amz-id-2")) { // S3
host_id = header.value;
}
}
if (rid) |r| {
if (host_id) |h| {
free_rid = true;
break :blk try std.fmt.allocPrint(options.client.allocator, "{s}, host_id: {s}", .{ r, h });
}
break :blk r;
}
return error.RequestIdNotFound;
break :blk try options.client.allocator.dupe(u8, elem);
break :blk try requestIdFromHeaders(request, result, options);
};
defer if (free_rid) options.client.allocator.free(request_id);
defer options.client.allocator.free(request_id);
return FullResponseType{
.response = parsed.parsed_value,
@@ -473,26 +432,264 @@ pub fn Request(comptime action: anytype) type {
},
.parser_options = .{ .xml = xml_options },
.raw_parsed = .{ .xml = parsed },
.allocator = options.client.allocator,
};
}
const ServerResponseTypes = struct {
NormalResponse: type,
RawResponse: type,
isRawPossible: bool,
};
fn jsonResponseTypesForAction() ServerResponseTypes {
// The shape of the data coming back from the server will
// vary quite a bit based on the exact protocol being used,
// age of the service, etc. Before we parse the data, we need
// to understand what we're expecting. Because types are handled
// at comptime, we are restricted in how we handle them. They must
// be constants, so first we'll set up an unreasonable "NullType"
// we can use in our conditionals below
const NullType: type = u0;
// Next, we'll provide a "SResponse", or Server Response, for a
// "normal" return that modern AWS services provide, that includes
// meta information and a result inside it. This could be the
// response as described in our models, or it could be a wrapped
// response that's only applicable to aws_query smithy protocol
// services
const SResponse = if (Self.service_meta.aws_protocol != .query)
action.Response
else
ServerResponse(action);
// Now, we want to also establish a "SRawResponse", or a raw
// response. Some older services (like CloudFront) respect
// that we desire application/json data even though they're
// considered "rest_xml" protocol. However, they don't wrap
// anything, so we actually want to parse the only field in
// the response structure. In this case we have to manually
// create the type, parse, then set the field. For example:
//
// Response: type = struct {
// key_group_list: ?struct {...
//
// Normal responses would start parsing on the Response type,
// but raw responses need to create an instance of the response
// type, and parse "key_group_list" directly before attaching.
//
// Because we cannot change types at runtime, we need to create
// both a SResponse and SRawResponse type in anticipation of either
// scenario, then parse as appropriate later
const SRawResponse = if (Self.service_meta.aws_protocol != .query and
std.meta.fields(action.Response).len == 1)
std.meta.fields(action.Response)[0].field_type
else
NullType;
return .{
.NormalResponse = SResponse,
.RawResponse = SRawResponse,
.isRawPossible = SRawResponse != NullType,
};
}
fn ParsedJsonData(comptime T: type) type {
return struct {
raw_response_parsed: bool,
parsed_response_ptr: *T,
allocator: std.mem.Allocator,
const MySelf = @This();
pub fn deinit(self: MySelf) void {
// This feels like it should result in a use after free, but it
// seems to be working?
if (self.raw_response_parsed)
self.allocator.destroy(self.parsed_response_ptr);
}
};
}
fn parseJsonData(comptime response_types: ServerResponseTypes, data: []const u8, options: Options, parser_options: json.ParseOptions) !ParsedJsonData(response_types.NormalResponse) {
// Now it's time to start looking at the actual data. Job 1 will
// be to figure out if this is a raw response or wrapped
// Extract the first json key
const key = firstJsonKey(data);
const found_normal_json_response = std.mem.eql(u8, key, action.action_name ++ "Response") or
std.mem.eql(u8, key, action.action_name ++ "Result");
var raw_response_parsed = false;
var stream = json.TokenStream.init(data);
const parsed_response_ptr = blk: {
if (!response_types.isRawPossible or found_normal_json_response)
break :blk &(json.parse(response_types.NormalResponse, &stream, parser_options) catch |e| {
log.err(
\\Call successful, but unexpected response from service.
\\This could be the result of a bug or a stale set of code generated
\\service models.
\\
\\Model Type: {s}
\\
\\Response from server:
\\
\\{s}
\\
, .{ action.Response, data });
return e;
});
log.debug("Appears server has provided a raw response", .{});
raw_response_parsed = true;
const ptr = try options.client.allocator.create(response_types.NormalResponse);
@field(ptr.*, std.meta.fields(action.Response)[0].name) =
json.parse(response_types.RawResponse, &stream, parser_options) catch |e| {
log.err(
\\Call successful, but unexpected response from service.
\\This could be the result of a bug or a stale set of code generated
\\service models.
\\
\\Model Type: {s}
\\
\\Response from server:
\\
\\{s}
\\
, .{ action.Response, data });
return e;
};
break :blk ptr;
};
return ParsedJsonData(response_types.NormalResponse){
.raw_response_parsed = raw_response_parsed,
.parsed_response_ptr = parsed_response_ptr,
.allocator = options.client.allocator,
};
}
};
}
/// Get request ID from headers. Caller responsible for freeing memory
fn requestIdFromHeaders(request: awshttp.HttpRequest, response: awshttp.HttpResult, options: Options) ![]u8 {
var request_id: []u8 = undefined;
var found = false;
for (response.headers) |h| {
if (std.ascii.eqlIgnoreCase(h.name, "X-Amzn-RequestId")) {
found = true;
request_id = try std.fmt.allocPrint(options.client.allocator, "{s}", .{h.value}); // will be freed in FullR.deinit()
fn coerceFromString(comptime T: type, val: []const u8) !T {
if (@typeInfo(T) == .Optional) return try coerceFromString(@typeInfo(T).Optional.child, val);
// TODO: This is terrible...fix it
switch (T) {
bool => return std.ascii.eqlIgnoreCase(val, "true"),
i64 => return try std.fmt.parseInt(T, val, 10),
else => return val,
}
}
fn generalAllocPrint(allocator: std.mem.Allocator, val: anytype) !?[]const u8 {
switch (@typeInfo(@TypeOf(val))) {
.Optional => if (val) |v| return generalAllocPrint(allocator, v) else return null,
.Array, .Pointer => return try std.fmt.allocPrint(allocator, "{s}", .{val}),
else => return try std.fmt.allocPrint(allocator, "{any}", .{val}),
}
}
fn headersFor(allocator: std.mem.Allocator, request: anytype) ![]awshttp.Header {
log.debug("Checking for headers to include for type {s}", .{@TypeOf(request)});
if (!@hasDecl(@TypeOf(request), "http_header")) return &[_]awshttp.Header{};
const http_header = @TypeOf(request).http_header;
const fields = std.meta.fields(@TypeOf(http_header));
log.debug("Found {d} possible custom headers", .{fields.len});
// It would be awesome to have a fixed array, but we can't because
// it depends on a runtime value based on whether these variables are null
var headers = try std.ArrayList(awshttp.Header).initCapacity(allocator, fields.len);
inline for (fields) |f| {
// Header name = value of field
// Header value = value of the field of the request based on field name
const val = @field(request, f.name);
const final_val: ?[]const u8 = try generalAllocPrint(allocator, val);
if (final_val) |v| {
headers.appendAssumeCapacity(.{
.name = @field(http_header, f.name),
.value = v,
});
}
}
if (!found) {
try reportTraffic(options.client.allocator, "Request ID not found", request, response, log.err);
return error.RequestIdNotFound;
return headers.toOwnedSlice();
}
fn freeHeadersFor(allocator: std.mem.Allocator, request: anytype, headers: []awshttp.Header) void {
if (!@hasDecl(@TypeOf(request), "http_header")) return;
const http_header = @TypeOf(request).http_header;
const fields = std.meta.fields(@TypeOf(http_header));
inline for (fields) |f| {
const header_name = @field(http_header, f.name);
for (headers) |h| {
if (std.mem.eql(u8, h.name, header_name)) {
allocator.free(h.value);
break;
}
}
}
return request_id;
allocator.free(headers);
}
fn firstJsonKey(data: []const u8) []const u8 {
const start = std.mem.indexOf(u8, data, "\"") orelse 0; // Should never be 0
if (start == 0) log.warn("Response body missing json key?!", .{});
var end = std.mem.indexOf(u8, data[start + 1 ..], "\"") orelse 0;
if (end == 0) log.warn("Response body only has one double quote?!", .{});
end = end + start + 1;
const key = data[start + 1 .. end];
log.debug("First json key: {s}", .{key});
return key;
}
fn isJsonResponse(headers: []awshttp.Header) !bool {
// EC2 ignores our accept type, but technically query protocol only
// returns XML as well. So, we'll ignore the protocol here and just
// look at the return type
var isJson: ?bool = null;
for (headers) |h| {
if (std.ascii.eqlIgnoreCase("Content-Type", h.name)) {
if (std.mem.startsWith(u8, h.value, "application/json")) {
isJson = true;
} else if (std.mem.startsWith(u8, h.value, "application/x-amz-json-1.0")) {
isJson = true;
} else if (std.mem.startsWith(u8, h.value, "application/x-amz-json-1.1")) {
isJson = true;
} else if (std.mem.startsWith(u8, h.value, "text/xml")) {
isJson = false;
} else if (std.mem.startsWith(u8, h.value, "application/xml")) {
isJson = false;
} else {
log.err("Unexpected content type: {s}", .{h.value});
return error.UnexpectedContentType;
}
break;
}
}
if (isJson == null) return error.ContentTypeNotFound;
return isJson.?;
}
/// Get request ID from headers. Caller responsible for freeing memory
fn requestIdFromHeaders(request: awshttp.HttpRequest, response: awshttp.HttpResult, options: Options) ![]u8 {
var rid: ?[]const u8 = null;
// This "thing" is called:
// * Host ID
// * Extended Request ID
// * Request ID 2
//
// I suspect it identifies the S3 frontend server and they are
// trying to obscure that fact. But several SDKs go with host id,
// so we'll use that
var host_id: ?[]const u8 = null;
for (response.headers) |header| {
if (std.ascii.eqlIgnoreCase(header.name, "x-amzn-requestid")) // CloudFront
rid = header.value;
if (std.ascii.eqlIgnoreCase(header.name, "x-amz-request-id")) // S3
rid = header.value;
if (std.ascii.eqlIgnoreCase(header.name, "x-amz-id-2")) // S3
host_id = header.value;
}
if (rid) |r| {
if (host_id) |h|
return try std.fmt.allocPrint(options.client.allocator, "{s}, host_id: {s}", .{ r, h });
return try options.client.allocator.dupe(u8, r);
}
try reportTraffic(options.client.allocator, "Request ID not found", request, response, log.err);
return error.RequestIdNotFound;
}
fn ServerResponse(comptime action: anytype) type {
const T = action.Response;
@@ -557,6 +754,7 @@ fn FullResponse(comptime action: anytype) type {
raw: action.Response,
xml: xml_shaper.Parsed(action.Response),
},
allocator: std.mem.Allocator,
const Self = @This();
pub fn deinit(self: Self) void {
@@ -568,12 +766,21 @@ fn FullResponse(comptime action: anytype) type {
.xml => |xml| xml.deinit(),
}
var allocator: std.mem.Allocator = undefined;
switch (self.parser_options) {
.json => |j| allocator = j.allocator.?,
.xml => |x| allocator = x.allocator.?,
self.allocator.free(self.response_metadata.request_id);
const Response = @TypeOf(self.response);
if (@hasDecl(Response, "http_header")) {
inline for (std.meta.fields(@TypeOf(Response.http_header))) |f| {
const field_type = @TypeOf(@field(self.response, f.name));
// TODO: Fix this. We need to make this much more robust
// The deal is we have to do the dupe though
// Also, this is a memory leak atm
if (field_type == ?[]const u8) {
if (@field(self.response, f.name) != null) {
self.allocator.free(@field(self.response, f.name).?);
}
}
}
}
allocator.free(self.response_metadata.request_id);
}
};
}
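
The client code above keys off two optional declarations on the generated request and response types: http_payload names the request field that becomes the raw HTTP body for rest_xml PUT/POST calls, and http_header maps struct fields to HTTP header names in both directions. The generated shapes themselves are not part of this diff; a hypothetical pair that would satisfy the @hasDecl checks could look like this (all names are assumptions for illustration):

const PutObjectRequest = struct {
    bucket: []const u8,
    key: []const u8,
    content_type: ?[]const u8 = null,
    body: ?[]const u8 = null,

    // Which field supplies the raw HTTP body (read via @field(request, http_payload))
    pub const http_payload: []const u8 = "body";
    // Request fields that should be sent as HTTP headers rather than body content
    pub const http_header = .{
        .content_type = "Content-Type",
    };
};

const PutObjectResponse = struct {
    e_tag: ?[]const u8 = null,

    // Response fields filled from response headers rather than the parsed body
    pub const http_header = .{
        .e_tag = "ETag",
    };
};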

View File

@@ -56,11 +56,13 @@ const EndPoint = struct {
host: []const u8,
scheme: []const u8,
port: u16,
path: []const u8,
allocator: std.mem.Allocator,
fn deinit(self: EndPoint) void {
self.allocator.free(self.uri);
self.allocator.free(self.host);
self.allocator.free(self.path);
}
};
pub const AwsHttp = struct {
@@ -112,7 +114,7 @@ pub const AwsHttp = struct {
// S3 control uses <account-id>.s3-control.<region>.amazonaws.com
//
// So this regionSubDomain call needs to handle generic customization
const endpoint = try endpointForRequest(self.allocator, service, options.region, options.dualstack);
const endpoint = try endpointForRequest(self.allocator, service, request, options);
defer endpoint.deinit();
log.debug("Calling endpoint {s}", .{endpoint.uri});
// TODO: Should we allow customization here?
@@ -145,12 +147,18 @@ pub const AwsHttp = struct {
pub fn makeRequest(self: Self, endpoint: EndPoint, request: HttpRequest, signing_config: ?signing.Config) !HttpResult {
var request_cp = request;
log.debug("Path: {s}", .{request_cp.path});
log.debug("Request Path: {s}", .{request_cp.path});
log.debug("Endpoint Path (actually used): {s}", .{endpoint.path});
log.debug("Query: {s}", .{request_cp.query});
log.debug("Request additional header count: {d}", .{request_cp.headers.len});
log.debug("Method: {s}", .{request_cp.method});
log.debug("body length: {d}", .{request_cp.body.len});
log.debug("Body\n====\n{s}\n====", .{request_cp.body});
// Endpoint calculation might be different from the request (e.g. S3 requests)
// We will use endpoint instead
request_cp.path = endpoint.path;
var request_headers = std.ArrayList(base.Header).init(self.allocator);
defer request_headers.deinit();
@@ -176,7 +184,7 @@ pub const AwsHttp = struct {
log.debug("\t{s}: {s}", .{ h.name, h.value });
}
const url = try std.fmt.allocPrint(self.allocator, "{s}{s}{s}", .{ endpoint.uri, request.path, request.query });
const url = try std.fmt.allocPrint(self.allocator, "{s}{s}{s}", .{ endpoint.uri, request_cp.path, request_cp.query });
defer self.allocator.free(url);
log.debug("Request url: {s}", .{url});
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
@@ -233,10 +241,18 @@ fn getRegion(service: []const u8, region: []const u8) []const u8 {
}
fn addHeaders(allocator: std.mem.Allocator, headers: *std.ArrayList(base.Header), host: []const u8, body: []const u8, content_type: []const u8, additional_headers: []Header) !?[]const u8 {
var has_content_type = false;
for (additional_headers) |h| {
if (std.ascii.eqlIgnoreCase(h.name, "Content-Type")) {
has_content_type = true;
break;
}
}
try headers.append(.{ .name = "Accept", .value = "application/json" });
try headers.append(.{ .name = "Host", .value = host });
try headers.append(.{ .name = "User-Agent", .value = "zig-aws 1.0, Powered by the AWS Common Runtime." });
try headers.append(.{ .name = "Content-Type", .value = content_type });
if (!has_content_type)
try headers.append(.{ .name = "Content-Type", .value = content_type });
try headers.appendSlice(additional_headers);
if (body.len > 0) {
const len = try std.fmt.allocPrint(allocator, "{d}", .{body.len});
@@ -253,32 +269,26 @@ fn getEnvironmentVariable(allocator: std.mem.Allocator, key: []const u8) !?[]con
};
}
fn endpointForRequest(allocator: std.mem.Allocator, service: []const u8, region: []const u8, use_dual_stack: bool) !EndPoint {
fn endpointForRequest(allocator: std.mem.Allocator, service: []const u8, request: HttpRequest, options: Options) !EndPoint {
const environment_override = try getEnvironmentVariable(allocator, "AWS_ENDPOINT_URL");
if (environment_override) |override| {
const uri = try allocator.dupeZ(u8, override);
return endPointFromUri(allocator, uri);
}
if (std.mem.eql(u8, service, "cloudfront")) {
return EndPoint{
.uri = try allocator.dupe(u8, "https://cloudfront.amazonaws.com"),
.host = try allocator.dupe(u8, "cloudfront.amazonaws.com"),
.scheme = "https",
.port = 443,
.allocator = allocator,
};
}
// Fallback to us-east-1 if global endpoint does not exist.
const realregion = if (std.mem.eql(u8, region, "aws-global")) "us-east-1" else region;
const dualstack = if (use_dual_stack) ".dualstack" else "";
const realregion = if (std.mem.eql(u8, options.region, "aws-global")) "us-east-1" else options.region;
const dualstack = if (options.dualstack) ".dualstack" else "";
const domain = switch (std.hash_map.hashString(region)) {
const domain = switch (std.hash_map.hashString(options.region)) {
US_ISO_EAST_1_HASH => "c2s.ic.gov",
CN_NORTH_1_HASH, CN_NORTHWEST_1_HASH => "amazonaws.com.cn",
US_ISOB_EAST_1_HASH => "sc2s.sgov.gov",
else => "amazonaws.com",
};
if (try endpointException(allocator, service, request, options, realregion, dualstack, domain)) |e|
return e;
const uri = try std.fmt.allocPrintZ(allocator, "https://{s}{s}.{s}.{s}", .{ service, dualstack, realregion, domain });
const host = try allocator.dupe(u8, uri["https://".len..]);
log.debug("host: {s}, scheme: {s}, port: {}", .{ host, "https", 443 });
@@ -288,9 +298,68 @@ fn endpointForRequest(allocator: std.mem.Allocator, service: []const u8, region:
.scheme = "https",
.port = 443,
.allocator = allocator,
.path = try allocator.dupe(u8, request.path),
};
}
fn endpointException(
allocator: std.mem.Allocator,
service: []const u8,
request: HttpRequest,
options: Options,
realregion: []const u8,
dualstack: []const u8,
domain: []const u8,
) !?EndPoint {
if (std.mem.eql(u8, service, "cloudfront")) {
return EndPoint{
.uri = try allocator.dupe(u8, "https://cloudfront.amazonaws.com"),
.host = try allocator.dupe(u8, "cloudfront.amazonaws.com"),
.scheme = "https",
.port = 443,
.allocator = allocator,
.path = try allocator.dupe(u8, request.path),
};
}
if (std.mem.eql(u8, service, "s3")) {
if (request.path.len == 1 or std.mem.indexOf(u8, request.path[1..], "/") == null)
return null;
// We need to adjust the host and the path to accommodate virtual
// host addressing. This only applies to bucket operations, but
// right now I'm hoping that bucket operations do not include a path
// component, so will be handled by the return null statement above.
const bucket_name = s3BucketFromPath(request.path);
const rest_of_path = request.path[bucket_name.len + 1 ..];
// TODO: Implement
_ = options;
const uri = try std.fmt.allocPrintZ(allocator, "https://{s}.{s}{s}.{s}.{s}", .{ bucket_name, service, dualstack, realregion, domain });
const host = try allocator.dupe(u8, uri["https://".len..]);
log.debug("S3 host: {s}, scheme: {s}, port: {}", .{ host, "https", 443 });
return EndPoint{
.uri = uri,
.host = host,
.scheme = "https",
.port = 443,
.allocator = allocator,
.path = try allocator.dupe(u8, rest_of_path),
};
}
return null;
}
fn s3BucketFromPath(path: []const u8) []const u8 {
var in_bucket = false;
var start: usize = 0;
for (path) |c, inx| {
if (c == '/') {
if (in_bucket) return path[start..inx];
start = inx + 1;
in_bucket = true;
}
}
unreachable;
}
/// creates an endpoint from a uri string.
///
/// allocator: Will be used only to construct the EndPoint struct
@@ -337,27 +406,68 @@ fn endPointFromUri(allocator: std.mem.Allocator, uri: []const u8) !EndPoint {
.scheme = scheme,
.allocator = allocator,
.port = port,
.path = try allocator.dupe(u8, "/"),
};
}
test "endpointForRequest standard operation" {
const request: HttpRequest = .{};
const options: Options = .{
.region = "us-west-2",
.dualstack = false,
.sigv4_service_name = null,
};
const allocator = std.testing.allocator;
const service = "dynamodb";
const region = "us-west-2";
const use_dual_stack = false;
const endpoint = try endpointForRequest(allocator, service, region, use_dual_stack);
const endpoint = try endpointForRequest(allocator, service, request, options);
defer endpoint.deinit();
try std.testing.expectEqualStrings("https://dynamodb.us-west-2.amazonaws.com", endpoint.uri);
}
test "endpointForRequest for cloudfront" {
const request = HttpRequest{};
const options = Options{
.region = "us-west-2",
.dualstack = false,
.sigv4_service_name = null,
};
const allocator = std.testing.allocator;
const service = "cloudfront";
const region = "us-west-2";
const use_dual_stack = false;
const endpoint = try endpointForRequest(allocator, service, region, use_dual_stack);
const endpoint = try endpointForRequest(allocator, service, request, options);
defer endpoint.deinit();
try std.testing.expectEqualStrings("https://cloudfront.amazonaws.com", endpoint.uri);
}
test "endpointForRequest for s3" {
const request = HttpRequest{};
const options = Options{
.region = "us-east-2",
.dualstack = false,
.sigv4_service_name = null,
};
const allocator = std.testing.allocator;
const service = "s3";
const endpoint = try endpointForRequest(allocator, service, request, options);
defer endpoint.deinit();
try std.testing.expectEqualStrings("https://s3.us-east-2.amazonaws.com", endpoint.uri);
}
test "endpointForRequest for s3 - specific bucket" {
const request = HttpRequest{
.path = "/bucket/key",
};
const options = Options{
.region = "us-east-2",
.dualstack = false,
.sigv4_service_name = null,
};
const allocator = std.testing.allocator;
const service = "s3";
const endpoint = try endpointForRequest(allocator, service, request, options);
defer endpoint.deinit();
try std.testing.expectEqualStrings("https://bucket.s3.us-east-2.amazonaws.com", endpoint.uri);
try std.testing.expectEqualStrings("/key", endpoint.path);
}
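
Note the guard at the top of the s3 branch in endpointException: rewriting only happens when the path has both a bucket and a key segment, so "/" (ListAllMyBuckets) and bucket-only paths keep the plain regional endpoint. A small illustration of the three cases the new code distinguishes (not part of the diff; the guard logic is copied here for clarity):

const std = @import("std");

fn usesVirtualHost(path: []const u8) bool {
    // Mirrors the early return in endpointException: "/" and "/bucket"
    // stay path-style; "/bucket/key" gets the bucket moved into the host.
    return !(path.len == 1 or std.mem.indexOf(u8, path[1..], "/") == null);
}

test "illustrative: which S3 paths are rewritten" {
    try std.testing.expect(!usesVirtualHost("/")); // ListAllMyBuckets
    try std.testing.expect(!usesVirtualHost("/mybucket")); // bucket-level operation
    try std.testing.expect(usesVirtualHost("/mybucket/some/key")); // object-level operation
}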

View File

@@ -357,7 +357,9 @@ fn createCanonicalRequest(allocator: std.mem.Allocator, request: base.Request, p
// TODO: This is all better as a writer - less allocations/copying
const canonical_method = canonicalRequestMethod(request.method);
const canonical_url = try canonicalUri(allocator, request.path, true); // TODO: set false for s3
// Let's not mess around here...s3 is the oddball
const double_encode = !std.mem.eql(u8, config.service, "s3");
const canonical_url = try canonicalUri(allocator, request.path, double_encode);
defer allocator.free(canonical_url);
log.debug("final uri: {s}", .{canonical_url});
const canonical_query = try canonicalQueryString(allocator, request.query);
@@ -408,8 +410,6 @@ fn canonicalUri(allocator: std.mem.Allocator, path: []const u8, double_encode: b
//
// For now, we will "Remove redundant and relative path components". This
// doesn't apply to S3 anyway, and we'll make it the callers's problem
if (!double_encode)
return SigningError.S3NotImplemented;
if (path.len == 0 or path[0] == '?' or path[0] == '#')
return try allocator.dupe(u8, "/");
log.debug("encoding path: {s}", .{path});
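
The signing change above concerns SigV4 canonical URI encoding: for most services each path segment is percent-encoded and the encoded result is encoded again, but S3 is the documented exception and signs the path exactly as sent. Roughly, for an object key containing a space (illustrative values only, not from the diff):

// Object key:                           my file.txt
// Path actually sent:                   /mybucket/my%20file.txt
// S3 canonical URI (single encode):     /mybucket/my%20file.txt
// Non-S3 canonical URI (double encode): /mybucket/my%2520file.txt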

View File

@@ -50,6 +50,7 @@ const Tests = enum {
rest_json_1_work_with_lambda,
rest_xml_no_input,
rest_xml_anything_but_s3,
rest_xml_work_with_s3,
};
pub fn main() anyerror!void {