Compare commits: e34a0d8ff4 ... a662f6f674

6 commits: a662f6f674, 247a236ad1, b428029cc1, 21b04317bd, 723c483544, d3a7edcadc
README.md (12 changed lines)
@@ -2,7 +2,7 @@
 [![Build Status](https://drone.lerch.org/api/badges/lobo/aws-sdk-for-zig/status.svg?ref=refs/heads/master)](https://drone.lerch.org/api/badges/lobo/aws-sdk-for-zig/)
 
-This SDK currently supports all AWS services except S3. See TODO list below.
+This SDK currently supports all AWS services. S3 has basic support
 
 Current executable size for the demo is 1.7M (90k of which is the AWS PEM file,
 and approximately 600K for XML services) after compiling with -Drelease-safe and
@@ -42,15 +42,13 @@ for posterity, and supports x86_64 linux. The old branch is deprecated.
 
 ## Limitations
 
-There are many nuances of AWS V4 signature calculation. S3 is not supported
-because it uses many of these edge cases. Also endpoint calculation is special
-for S3. WebIdentityToken is not yet implemented.
+There are many nuances of AWS V4 signature calculation, and not all edge cases
+of S3 are handled. WebIdentityToken is not yet implemented.
 
 TODO List:
 
-* Implement initial S3 support. This involves:
-  * Implementation of AWS SigV4 signature calculation for S3, which is unique
-  * Implementation of S3 endpoint calculation, which is also unique to this service
+* Implement more robust S3 support. Keys with slashes in the name are currently
+  causing a SignatureDoesNotMatch error
 * Bump to zig 0.9.1. iguanaTLS, used in zFetch is still [working out 0.9.1 issues](https://github.com/alexnask/iguanaTLS/pull/29)
 * Implement sigv4a signing
 * Implement jitter/exponential backoff

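The SignatureDoesNotMatch item in the TODO list above is the kind of failure that shows up when the canonical URI used for signing differs from the one S3 computes; the SigV4 rules keep the "/" separators of an S3 object key literal and percent-encode each path segment only once. A minimal sketch of that mismatch follows, using the commented-out test key from src/main.zig; the helper name is made up for illustration and is not part of this SDK.

const std = @import("std");

// Illustration only: over-encoding the "/" separators in an object key
// produces a canonical URI that differs from the one S3 signs against.
fn encodeKeySlashes(allocator: std.mem.Allocator, key: []const u8) ![]const u8 {
    var buf = std.ArrayList(u8).init(allocator);
    errdefer buf.deinit();
    for (key) |ch| {
        if (ch == '/') {
            try buf.appendSlice("%2F"); // separator encoded; S3 keeps it literal
        } else {
            try buf.append(ch);
        }
    }
    return buf.toOwnedSlice();
}

test "encoding the key separators changes the canonical URI" {
    const allocator = std.testing.allocator;
    const over_encoded = try encodeKeySlashes(allocator, "i/am/a/teapot/foo");
    defer allocator.free(over_encoded);
    // S3 hashes "i/am/a/teapot/foo"; hashing the encoded form below yields a
    // different string-to-sign and therefore a different signature.
    try std.testing.expectEqualStrings("i%2Fam%2Fa%2Fteapot%2Ffoo", over_encoded);
}
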
src/aws.zig (133 changed lines)
@@ -4,6 +4,7 @@ const awshttp = @import("aws_http.zig");
 const json = @import("json.zig");
 const url = @import("url.zig");
 const case = @import("case.zig");
+const date = @import("date.zig");
 const servicemodel = @import("servicemodel.zig");
 const xml_shaper = @import("xml_shaper.zig");
 
@@ -126,7 +127,7 @@ pub fn Request(comptime action: anytype) type {
                 // query string. TODO: RTFM on zig to figure out why
                 aws_request.query = try options.client.allocator.dupe(u8, aws_request.path[inx..]);
                 aws_request.path = try options.client.allocator.dupe(u8, aws_request.path[0..inx]);
-                log.debug("inx: {d}\n\tnew path: {s}\n\tnew query: {s}", .{ inx, aws_request.path, aws_request.query });
+                // log.debug("inx: {d}\n\tnew path: {s}\n\tnew query: {s}", .{ inx, aws_request.path, aws_request.query });
                 options.client.allocator.free(orig_path);
                 options.client.allocator.free(orig_query);
             }
@@ -263,31 +264,78 @@ pub fn Request(comptime action: anytype) type {
                 return error.HttpFailure;
             }
 
-            var fullResponse = try getFullResponseFromBody(aws_request, response, options);
+            var full_response = try getFullResponseFromBody(aws_request, response, options);
+            errdefer full_response.deinit();
 
             // Fill in any fields that require a header. Note doing it post-facto
             // assumes all response header fields are optional, which may be incorrect
             if (@hasDecl(action.Response, "http_header")) {
-                inline for (std.meta.fields(@TypeOf(action.Response.http_header))) |f| {
-                    const header_name = @field(action.Response.http_header, f.name);
-                    for (response.headers) |h| {
-                        if (std.ascii.eqlIgnoreCase(h.name, header_name)) {
-                            log.debug("Response header {s} configured for field. Setting {s} = {s}", .{ h.name, f.name, h.value });
-                            const field_type = @TypeOf(@field(fullResponse.response, f.name));
-                            // TODO: Fix this. We need to make this much more robust
-                            // The deal is we have to do the dupe though
-                            // Also, this is a memory leak atm
-                            if (field_type == ?[]const u8) {
-                                @field(fullResponse.response, f.name) = try options.client.allocator.dupe(u8, (try coerceFromString(field_type, h.value)).?);
-                            } else {
-                                @field(fullResponse.response, f.name) = try coerceFromString(field_type, h.value);
-                            }
+                log.debug("Checking headers based on type: {s}", .{@typeName(action.Response)});
+                const HeaderInfo = struct {
+                    name: []const u8,
+                    T: type,
+                    header_name: []const u8,
+                };
+                comptime var fields = [_]?HeaderInfo{null} ** std.meta.fields(@TypeOf(action.Response.http_header)).len;
+                inline for (std.meta.fields(@TypeOf(action.Response.http_header))) |f, inx| {
+                    fields[inx] = HeaderInfo{
+                        .name = f.name,
+                        .T = @TypeOf(@field(full_response.response, f.name)),
+                        .header_name = @field(action.Response.http_header, f.name),
+                    };
+                }
+                inline for (fields) |f| {
+                    for (response.headers) |header| {
+                        if (std.mem.eql(u8, header.name, f.?.header_name)) {
+                            log.debug("Response header {s} configured for field. Setting {s} = {s}", .{ header.name, f.?.name, header.value });
+                            // TODO: Revisit return for this function. At the moment, there
+                            // is something in the compiler that is causing the inline for
+                            // surrounding this to start repeating elements
+                            //
+                            // https://github.com/ziglang/zig/issues/10507
+                            //
+                            // This bug is also relevant to some of the many,
+                            // many different methods used to try to work around:
+                            // https://github.com/ziglang/zig/issues/10029
+                            //
+                            // Note: issues found on zig 0.9.0
+                            setHeaderValue(
+                                options.client.allocator,
+                                &full_response.response,
+                                f.?.name,
+                                f.?.T,
+                                header.value,
+                            ) catch |e| {
+                                log.err("Could not set header value: Response header {s}. Field {s}. Value {s}", .{ header.name, f.?.name, header.value });
+                                log.err("Error: {}", .{e});
+                                if (@errorReturnTrace()) |trace| {
+                                    std.debug.dumpStackTrace(trace.*);
+                                }
+                            };
+
                             break;
                         }
                     }
                 }
             }
-            return fullResponse;
+            return full_response;
         }
 
+        fn setHeaderValue(
+            allocator: std.mem.Allocator,
+            response: anytype,
+            comptime field_name: []const u8,
+            comptime field_type: type,
+            value: []const u8,
+        ) !void {
+            // TODO: Fix this. We need to make this much more robust
+            // The deal is we have to do the dupe though
+            // Also, this is a memory leak atm
+            if (field_type == ?[]const u8) {
+                @field(response, field_name) = try allocator.dupe(u8, value);
+            } else {
+                @field(response, field_name) = try coerceFromString(field_type, value);
+            }
+        }
+
         fn getFullResponseFromBody(aws_request: awshttp.HttpRequest, response: awshttp.HttpResult, options: Options) !FullResponseType {
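For orientation: the header-mapping loop above and the http_payload handling in the next hunk both key off comptime declarations on the response types. A hypothetical response shape is sketched below; the field and header names are made up for illustration, and the real definitions come from the generated service models (src/servicemodel.zig).

const std = @import("std");

// Hypothetical shape, illustration only.
const FakeGetObjectResponse = struct {
    body: ?[]const u8 = null,
    e_tag: ?[]const u8 = null,
    last_modified: ?i64 = null,

    // Which struct field receives the raw HTTP body.
    pub const http_payload: []const u8 = "body";
    // Maps struct fields to the HTTP response headers that populate them.
    pub const http_header = .{
        .e_tag = "ETag",
        .last_modified = "Last-Modified",
    };
};

test "http_header declaration maps fields to header names" {
    try std.testing.expect(@hasDecl(FakeGetObjectResponse, "http_header"));
    try std.testing.expectEqual(
        @as(usize, 2),
        std.meta.fields(@TypeOf(FakeGetObjectResponse.http_header)).len,
    );
}
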
@@ -297,6 +345,26 @@ pub fn Request(comptime action: anytype) type {
             var expected_body_field_len = std.meta.fields(action.Response).len;
             if (@hasDecl(action.Response, "http_header"))
                 expected_body_field_len -= std.meta.fields(@TypeOf(action.Response.http_header)).len;
+            if (@hasDecl(action.Response, "http_payload")) {
+                var rc = FullResponseType{
+                    .response = .{},
+                    .response_metadata = .{
+                        .request_id = try requestIdFromHeaders(aws_request, response, options),
+                    },
+                    .parser_options = .{ .json = .{} },
+                    .raw_parsed = .{ .raw = .{} },
+                    .allocator = options.client.allocator,
+                };
+                var body_field = @field(rc.response, action.Response.http_payload);
+                const BodyField = @TypeOf(body_field);
+                if (BodyField == []const u8 or BodyField == ?[]const u8) {
+                    expected_body_field_len = 0;
+                    // We can't use body_field for this set - only @field will work
+                    @field(rc.response, action.Response.http_payload) = try options.client.allocator.dupe(u8, response.body);
+                    return rc;
+                }
+                rc.deinit();
+            }
 
             // We don't care about the body if there are no fields we expect there...
             if (std.meta.fields(action.Response).len == 0 or expected_body_field_len == 0) {
@@ -568,15 +636,31 @@ pub fn Request(comptime action: anytype) type {
     };
 }
 
-fn coerceFromString(comptime T: type, val: []const u8) !T {
+fn coerceFromString(comptime T: type, val: []const u8) anyerror!T {
     if (@typeInfo(T) == .Optional) return try coerceFromString(@typeInfo(T).Optional.child, val);
     // TODO: This is terrible...fix it
     switch (T) {
         bool => return std.ascii.eqlIgnoreCase(val, "true"),
-        i64 => return try std.fmt.parseInt(T, val, 10),
+        i64 => return parseInt(T, val) catch |e| {
+            log.err("Invalid string representing i64: {s}", .{val});
+            return e;
+        },
         else => return val,
     }
 }
+fn parseInt(comptime T: type, val: []const u8) !T {
+    const rc = std.fmt.parseInt(T, val, 10);
+    if (!std.meta.isError(rc)) return rc;
+
+    if (T == i64) {
+        return date.parseEnglishToTimestamp(val) catch |e| {
+            log.err("Error coercing date string '{s}' to timestamp value", .{val});
+            return e;
+        };
+    }
+    log.err("Error parsing string '{s}' to integer", .{val});
+    return rc;
+}
 
 fn generalAllocPrint(allocator: std.mem.Allocator, val: anytype) !?[]const u8 {
     switch (@typeInfo(@TypeOf(val))) {
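The parseInt wrapper above exists because i64 response fields can be populated from HTTP headers such as Last-Modified (the GetObject call in src/main.zig logs last_modified as seconds since the epoch), and those arrive as strings like "Fri, 03 Jun 2022 18:12:36 GMT" rather than integers. std.fmt.parseInt rejects that input, so the wrapper falls back to date.parseEnglishToTimestamp; assuming a plain UTC reading of GMT, that example works out to 1654279956. A small self-contained check of the failure that triggers the fallback:

const std = @import("std");

test "an HTTP date string is not parseable as an integer" {
    // This error is what sends parseInt down the date.parseEnglishToTimestamp path.
    try std.testing.expectError(
        error.InvalidCharacter,
        std.fmt.parseInt(i64, "Fri, 03 Jun 2022 18:12:36 GMT", 10),
    );
}
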
@@ -781,6 +865,17 @@ fn FullResponse(comptime action: anytype) type {
                     }
                 }
             }
+            if (@hasDecl(Response, "http_payload")) {
+                var body_field = @field(self.response, Response.http_payload);
+                const BodyField = @TypeOf(body_field);
+                if (BodyField == []const u8) {
+                    self.allocator.free(body_field);
+                }
+                if (BodyField == ?[]const u8) {
+                    if (body_field) |f|
+                        self.allocator.free(f);
+                }
+            }
         }
     };
 }

src/date.zig (85 changed lines)
@@ -57,6 +57,86 @@ pub fn timestampToDateTime(timestamp: i64) DateTime {
     return DateTime{ .day = day, .month = month, .year = year, .hour = hours, .minute = minutes, .second = seconds };
 }
 
+pub fn parseEnglishToTimestamp(data: []const u8) !i64 {
+    return try dateTimeToTimestamp(try parseEnglishToDateTime(data));
+}
+
+const EnglishParsingState = enum { Start, Day, Month, Year, Hour, Minute, Second, End };
+/// Converts a string to a timestamp value. May not handle dates before the
+/// epoch. Dates should look like "Fri, 03 Jun 2022 18:12:36 GMT"
+pub fn parseEnglishToDateTime(data: []const u8) !DateTime {
+    // Fri, 03 Jun 2022 18:12:36 GMT
+    if (!std.mem.endsWith(u8, data, "GMT")) return error.InvalidFormat;
+
+    var start: usize = 0;
+    var state = EnglishParsingState.Start;
+    // Anything not explicitly set by our string would be 0
+    var rc = DateTime{ .year = 0, .month = 0, .day = 0, .hour = 0, .minute = 0, .second = 0 };
+    for (data) |ch, i| {
+        _ = i;
+        switch (ch) {
+            ',' => {},
+            ' ', ':' => {
+                // State transition
+
+                // We're going to coerce and this might not go well, but we
+                // want the compiler to create checks, so we'll turn on
+                // runtime safety for this block, forcing checks in ReleaseSafe
+                // ReleaseFast modes.
+                const next_state = try endEnglishState(state, &rc, data[start..i]);
+                state = next_state;
+                start = i + 1;
+            },
+            else => {}, // We need to be pretty trusting on this format...
+        }
+    }
+    return rc;
+}
+
+fn endEnglishState(current_state: EnglishParsingState, date: *DateTime, prev_data: []const u8) !EnglishParsingState {
+    var next_state: EnglishParsingState = undefined;
+    log.debug("endEnglishState. Current state '{s}', data: {s}", .{ current_state, prev_data });
+
+    // Using two switches is slightly less efficient, but more readable
+    switch (current_state) {
+        .End => return error.IllegalStateTransition,
+        .Start => next_state = .Day,
+        .Day => next_state = .Month,
+        .Month => next_state = .Year,
+        .Year => next_state = .Hour,
+        .Hour => next_state = .Minute,
+        .Minute => next_state = .Second,
+        .Second => next_state = .End,
+    }
+
+    switch (current_state) {
+        .Year => date.year = try std.fmt.parseUnsigned(u16, prev_data, 10),
+        .Month => date.month = try parseEnglishMonth(prev_data),
+        .Day => date.day = try std.fmt.parseUnsigned(u8, prev_data, 10),
+        .Hour => date.hour = try std.fmt.parseUnsigned(u8, prev_data, 10),
+        .Minute => date.minute = try std.fmt.parseUnsigned(u8, prev_data, 10),
+        .Second => date.second = try std.fmt.parseUnsigned(u8, prev_data, 10),
+        .Start => {},
+        .End => return error.InvalidState,
+    }
+    return next_state;
+}
+
+fn parseEnglishMonth(data: []const u8) !u8 {
+    if (std.ascii.startsWithIgnoreCase(data, "Jan")) return 1;
+    if (std.ascii.startsWithIgnoreCase(data, "Feb")) return 2;
+    if (std.ascii.startsWithIgnoreCase(data, "Mar")) return 3;
+    if (std.ascii.startsWithIgnoreCase(data, "Apr")) return 4;
+    if (std.ascii.startsWithIgnoreCase(data, "May")) return 5;
+    if (std.ascii.startsWithIgnoreCase(data, "Jun")) return 6;
+    if (std.ascii.startsWithIgnoreCase(data, "Jul")) return 7;
+    if (std.ascii.startsWithIgnoreCase(data, "Aug")) return 8;
+    if (std.ascii.startsWithIgnoreCase(data, "Sep")) return 9;
+    if (std.ascii.startsWithIgnoreCase(data, "Oct")) return 10;
+    if (std.ascii.startsWithIgnoreCase(data, "Nov")) return 11;
+    if (std.ascii.startsWithIgnoreCase(data, "Dec")) return 12;
+    return error.InvalidMonth;
+}
 pub fn parseIso8601ToTimestamp(data: []const u8) !i64 {
     return try dateTimeToTimestamp(try parseIso8601ToDateTime(data));
 }
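A worked pass through the parser above, using the example from its doc comment ("Fri, 03 Jun 2022 18:12:36 GMT"): every space or colon closes the current token and advances the state, so the tokens land as

Start: "Fri," (weekday, discarded) -> Day: "03" -> Month: "Jun" -> Year: "2022" -> Hour: "18" -> Minute: "12" -> Second: "36" -> End

and the trailing "GMT" is never consumed because no delimiter follows it, leaving DateTime{ .year = 2022, .month = 6, .day = 3, .hour = 18, .minute = 12, .second = 36 }, which is exactly what the test in the next hunk expects.
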
@@ -327,3 +407,8 @@ test "Convert ISO8601 string to timestamp" {
 test "Convert datetime to timestamp before 1970" {
     try std.testing.expectEqual(@as(i64, -449392815), try dateTimeToTimestamp(DateTime{ .year = 1955, .month = 10, .day = 05, .hour = 16, .minute = 39, .second = 45 }));
 }
+
+test "Convert whatever AWS is sending us to timestamp" {
+    const string_date = "Fri, 03 Jun 2022 18:12:36 GMT";
+    try std.testing.expectEqual(DateTime{ .year = 2022, .month = 06, .day = 03, .hour = 18, .minute = 12, .second = 36 }, try parseEnglishToDateTime(string_date));
+}

src/main.zig (86 changed lines)
@@ -12,19 +12,22 @@ pub fn log(
     args: anytype,
 ) void {
     // Ignore aws_signing messages
-    if (verbose < 2 and scope == .aws_signing and @enumToInt(level) >= @enumToInt(std.log.Level.debug))
+    if (verbose < 3 and scope == .aws_signing and @enumToInt(level) >= @enumToInt(std.log.Level.debug))
         return;
     // Ignore aws_credentials messages
-    if (verbose < 2 and scope == .aws_credentials and @enumToInt(level) >= @enumToInt(std.log.Level.debug))
+    if (verbose < 3 and scope == .aws_credentials and @enumToInt(level) >= @enumToInt(std.log.Level.debug))
        return;
    // Ignore xml_shaper messages
-    if (verbose < 2 and scope == .xml_shaper and @enumToInt(level) >= @enumToInt(std.log.Level.debug))
+    if (verbose < 3 and scope == .xml_shaper and @enumToInt(level) >= @enumToInt(std.log.Level.debug))
        return;
    // Ignore date messages
-    if (verbose < 2 and scope == .date and @enumToInt(level) >= @enumToInt(std.log.Level.debug))
+    if (verbose < 3 and scope == .date and @enumToInt(level) >= @enumToInt(std.log.Level.debug))
        return;
    // Ignore awshttp messages
-    if (verbose < 1 and scope == .awshttp and @enumToInt(level) >= @enumToInt(std.log.Level.debug))
+    if (verbose < 2 and scope == .awshttp and @enumToInt(level) >= @enumToInt(std.log.Level.debug))
        return;
+
+    if (verbose < 1 and scope == .aws and @enumToInt(level) >= @enumToInt(std.log.Level.debug))
+        return;
     const scope_prefix = "(" ++ @tagName(scope) ++ "): ";
     const prefix = "[" ++ @tagName(level) ++ "] " ++ scope_prefix;
@@ -50,7 +53,7 @@ const Tests = enum {
     rest_json_1_work_with_lambda,
     rest_xml_no_input,
     rest_xml_anything_but_s3,
-    // rest_xml_work_with_s3,
+    rest_xml_work_with_s3,
 };
 
 pub fn main() anyerror!void {
@@ -231,6 +234,77 @@ pub fn main() anyerror!void {
                 std.log.info("key group list max: {d}", .{list.max_items});
                 std.log.info("key group quantity: {d}", .{list.quantity});
             },
+            .rest_xml_work_with_s3 => {
+                // TODO: Fix signature calculation mismatch with slashes
+                // const key = "i/am/a/teapot/foo";
+                const key = "foo";
+
+                const bucket = blk: {
+                    const result = try client.call(services.s3.list_buckets.Request{}, options);
+                    defer result.deinit();
+                    const bucket = result.response.buckets.?[result.response.buckets.?.len - 1];
+                    std.log.info("ListBuckets request id: {s}", .{result.response_metadata.request_id});
+                    std.log.info("bucket name: {s}", .{bucket.name.?});
+                    break :blk try allocator.dupe(u8, bucket.name.?);
+                };
+                defer allocator.free(bucket);
+                const location = blk: {
+                    const result = try aws.Request(services.s3.get_bucket_location).call(.{
+                        .bucket = bucket,
+                    }, options);
+                    defer result.deinit();
+                    const location = result.response.location_constraint.?;
+                    std.log.info("GetBucketLocation request id: {s}", .{result.response_metadata.request_id});
+                    std.log.info("location: {s}", .{location});
+                    break :blk try allocator.dupe(u8, location);
+                };
+                defer allocator.free(location);
+                const s3opts = aws.Options{
+                    .region = location,
+                    .client = client,
+                };
+                {
+                    const result = try aws.Request(services.s3.put_object).call(.{
+                        .bucket = bucket,
+                        .key = key,
+                        .content_type = "text/plain",
+                        .body = "bar",
+                        .storage_class = "STANDARD",
+                    }, s3opts);
+                    std.log.info("PutObject Request id: {s}", .{result.response_metadata.request_id});
+                    std.log.info("PutObject etag: {s}", .{result.response.e_tag.?});
+                    defer result.deinit();
+                }
+                {
+                    // Note that boto appears to redirect by default, but java
+                    // does not. We will not
+                    const result = try aws.Request(services.s3.get_object).call(.{
+                        .bucket = bucket,
+                        .key = key,
+                    }, s3opts);
+                    std.log.info("GetObject Request id: {s}", .{result.response_metadata.request_id});
+                    std.log.info("GetObject Body: {s}", .{result.response.body});
+                    std.log.info("GetObject etag: {s}", .{result.response.e_tag.?});
+                    std.log.info("GetObject last modified (seconds since epoch): {d}", .{result.response.last_modified.?});
+                    defer result.deinit();
+                }
+                {
+                    const result = try aws.Request(services.s3.delete_object).call(.{
+                        .bucket = bucket,
+                        .key = key,
+                    }, s3opts);
+                    std.log.info("DeleteObject Request id: {s}", .{result.response_metadata.request_id});
+                    defer result.deinit();
+                }
+                {
+                    const result = try aws.Request(services.s3.list_objects).call(.{
+                        .bucket = bucket,
+                    }, s3opts);
+                    std.log.info("ListObject Request id: {s}", .{result.response_metadata.request_id});
+                    std.log.info("Object count: {d}", .{result.response.contents.?.len});
+                    defer result.deinit();
+                }
+            },
         }
         std.log.info("===== End Test: {s} =====\n", .{@tagName(t)});
     }