diff --git a/src/aws.zig b/src/aws.zig
index 66242b4..3ed9e92 100644
--- a/src/aws.zig
+++ b/src/aws.zig
@@ -31,7 +31,7 @@ pub const services = servicemodel.services;
pub const Services = servicemodel.Services;
pub const ClientOptions = struct {
- proxy: ?std.http.Client.HttpProxy = null,
+ proxy: ?std.http.Client.Proxy = null,
};
pub const Client = struct {
allocator: std.mem.Allocator,
@@ -226,7 +226,7 @@ pub fn Request(comptime request_action: anytype) type {
defer buffer.deinit();
const writer = buffer.writer();
try url.encode(options.client.allocator, request, writer, .{
- .field_name_transformer = &queryFieldTransformer,
+ .field_name_transformer = queryFieldTransformer,
});
const continuation = if (buffer.items.len > 0) "&" else "";
@@ -734,7 +734,7 @@ fn headersFor(allocator: std.mem.Allocator, request: anytype) ![]awshttp.Header
return headers.toOwnedSlice();
}
-fn freeHeadersFor(allocator: std.mem.Allocator, request: anytype, headers: []awshttp.Header) void {
+fn freeHeadersFor(allocator: std.mem.Allocator, request: anytype, headers: []const awshttp.Header) void {
if (!@hasDecl(@TypeOf(request), "http_header")) return;
const http_header = @TypeOf(request).http_header;
const fields = std.meta.fields(@TypeOf(http_header));
@@ -761,7 +761,7 @@ fn firstJsonKey(data: []const u8) []const u8 {
log.debug("First json key: {s}", .{key});
return key;
}
-fn isJsonResponse(headers: []awshttp.Header) !bool {
+fn isJsonResponse(headers: []const awshttp.Header) !bool {
// EC2 ignores our accept type, but technically query protocol only
// returns XML as well. So, we'll ignore the protocol here and just
// look at the return type
@@ -919,8 +919,7 @@ fn safeFree(allocator: std.mem.Allocator, obj: anytype) void {
else => {},
}
}
-fn queryFieldTransformer(allocator: std.mem.Allocator, field_name: []const u8, options: url.EncodingOptions) anyerror![]const u8 {
- _ = options;
+fn queryFieldTransformer(allocator: std.mem.Allocator, field_name: []const u8) anyerror![]const u8 {
return try case.snakeToPascal(allocator, field_name);
}
@@ -1363,16 +1362,17 @@ test {
}
const TestOptions = struct {
allocator: std.mem.Allocator,
+ arena: ?*std.heap.ArenaAllocator = null,
server_port: ?u16 = null,
server_remaining_requests: usize = 1,
server_response: []const u8 = "unset",
server_response_status: std.http.Status = .ok,
- server_response_headers: [][2][]const u8 = &[_][2][]const u8{},
+ server_response_headers: []const std.http.Header = &.{},
server_response_transfer_encoding: ?std.http.TransferEncoding = null,
request_body: []u8 = "",
request_method: std.http.Method = undefined,
request_target: []const u8 = undefined,
- request_headers: *std.http.Headers = undefined,
+ request_headers: []std.http.Header = undefined,
test_server_runtime_uri: ?[]u8 = null,
server_ready: bool = false,
requests_processed: usize = 0,
@@ -1380,7 +1380,7 @@ const TestOptions = struct {
const Self = @This();
fn expectHeader(self: *Self, name: []const u8, value: []const u8) !void {
- for (self.request_headers.list.items) |h|
+ for (self.request_headers) |h|
if (std.ascii.eqlIgnoreCase(name, h.name) and
std.mem.eql(u8, value, h.value)) return;
return error.HeaderOrValueNotFound;
@@ -1391,17 +1391,6 @@ const TestOptions = struct {
while (!self.server_ready)
std.time.sleep(100);
}
-
- fn deinit(self: Self) void {
- if (self.requests_processed > 0) {
- self.allocator.free(self.request_body);
- self.allocator.free(self.request_target);
- self.request_headers.deinit();
- self.allocator.destroy(self.request_headers);
- }
- if (self.test_server_runtime_uri) |_|
- self.allocator.free(self.test_server_runtime_uri.?);
- }
};
/// This starts a test server. We're not testing the server itself,
@@ -1409,16 +1398,19 @@ const TestOptions = struct {
/// whole thing so we can just deallocate everything at once at the end,
/// leaks be damned
fn threadMain(options: *TestOptions) !void {
- var server = std.http.Server.init(options.allocator, .{ .reuse_address = true });
- // defer server.deinit();
+ // https://github.com/ziglang/zig/blob/d2be725e4b14c33dbd39054e33d926913eee3cd4/lib/compiler/std-docs.zig#L22-L54
+
+ options.arena = try options.allocator.create(std.heap.ArenaAllocator);
+ options.arena.?.* = std.heap.ArenaAllocator.init(options.allocator);
+ const allocator = options.arena.?.allocator();
+ options.allocator = allocator;
const address = try std.net.Address.parseIp("127.0.0.1", 0);
- try server.listen(address);
- options.server_port = server.socket.listen_address.in.getPort();
-
+ var http_server = try address.listen(.{});
+ options.server_port = http_server.listen_address.in.getPort();
+ // TODO: remove
options.test_server_runtime_uri = try std.fmt.allocPrint(options.allocator, "http://127.0.0.1:{d}", .{options.server_port.?});
log.debug("server listening at {s}", .{options.test_server_runtime_uri.?});
- defer server.deinit();
log.info("starting server thread, tid {d}", .{std.Thread.getCurrentId()});
// var arena = std.heap.ArenaAllocator.init(options.allocator);
// defer arena.deinit();
@@ -1427,7 +1419,7 @@ fn threadMain(options: *TestOptions) !void {
// when it's time to shut down
while (options.server_remaining_requests > 0) {
options.server_remaining_requests -= 1;
- processRequest(options, &server) catch |e| {
+ processRequest(options, &http_server) catch |e| {
log.err("Unexpected error processing request: {any}", .{e});
if (@errorReturnTrace()) |trace| {
std.debug.dumpStackTrace(trace.*);
@@ -1436,76 +1428,63 @@ fn threadMain(options: *TestOptions) !void {
}
}
-fn processRequest(options: *TestOptions, server: *std.http.Server) !void {
+fn processRequest(options: *TestOptions, net_server: *std.net.Server) !void {
options.server_ready = true;
errdefer options.server_ready = false;
log.debug(
"tid {d} (server): server waiting to accept. requests remaining: {d}",
.{ std.Thread.getCurrentId(), options.server_remaining_requests + 1 },
);
- var res = try server.accept(.{ .allocator = options.allocator });
- options.server_ready = false;
- defer res.deinit();
- defer if (res.headers.owned and res.headers.list.items.len > 0) res.headers.deinit();
- defer _ = res.reset();
- try res.wait(); // wait for client to send a complete request head
+ var connection = try net_server.accept();
+ defer connection.stream.close();
+ var read_buffer: [1024 * 16]u8 = undefined;
+ var http_server = std.http.Server.init(connection, &read_buffer);
+ while (http_server.state == .ready) {
+ var request = http_server.receiveHead() catch |err| switch (err) {
+ error.HttpConnectionClosing => return,
+ else => {
+ std.log.err("closing http connection: {s}", .{@errorName(err)});
+ std.log.debug("Error occurred from this request: \n{s}", .{read_buffer[0..http_server.read_buffer_len]});
+ return;
+ },
+ };
+ try serveRequest(options, &request);
+ }
+}
- const errstr = "Internal Server Error\n";
- var errbuf: [errstr.len]u8 = undefined;
- @memcpy(&errbuf, errstr);
- var response_bytes: []const u8 = errbuf[0..];
+fn serveRequest(options: *TestOptions, request: *std.http.Server.Request) !void {
+ options.server_ready = false;
options.requests_processed += 1;
- if (res.request.content_length) |l|
- options.request_body = try res.reader().readAllAlloc(options.allocator, @as(usize, @intCast(l)))
- else
- options.request_body = try options.allocator.dupe(u8, "");
- options.request_method = res.request.method;
- options.request_target = try options.allocator.dupe(u8, res.request.target);
- options.request_headers = try options.allocator.create(std.http.Headers);
- options.request_headers.allocator = options.allocator;
- options.request_headers.list = .{};
- options.request_headers.index = .{};
- options.request_headers.owned = true;
- for (res.request.headers.list.items) |f|
- try options.request_headers.append(f.name, f.value);
+ options.request_body = try (try request.reader()).readAllAlloc(options.allocator, std.math.maxInt(usize));
+ options.request_method = request.head.method;
+ options.request_target = try options.allocator.dupe(u8, request.head.target);
+ var req_headers = std.ArrayList(std.http.Header).init(options.allocator);
+ defer req_headers.deinit();
+ var it = request.iterateHeaders();
+ while (it.next()) |f| {
+            const name = try options.allocator.dupe(u8, f.name);
+            const value = try options.allocator.dupe(u8, f.value);
+            try req_headers.append(.{ .name = name, .value = value });
+ }
+ options.request_headers = try req_headers.toOwnedSlice();
log.debug(
"tid {d} (server): {d} bytes read from request",
.{ std.Thread.getCurrentId(), options.request_body.len },
);
// try response.headers.append("content-type", "text/plain");
- response_bytes = serve(options, &res) catch |e| brk: {
- res.status = .internal_server_error;
- // TODO: more about this particular request
- log.err("Unexpected error from executor processing request: {any}", .{e});
- if (@errorReturnTrace()) |trace| {
- std.debug.dumpStackTrace(trace.*);
- }
- break :brk "Unexpected error generating request to lambda";
- };
- if (options.server_response_transfer_encoding == null)
- res.transfer_encoding = .{ .content_length = response_bytes.len }
- else
- res.transfer_encoding = .chunked;
+    try request.respond(options.server_response, .{
+        .status = options.server_response_status,
+        .extra_headers = options.server_response_headers,
+        .transfer_encoding = options.server_response_transfer_encoding });
- try res.do();
- _ = try res.writer().writeAll(response_bytes);
- try res.finish();
log.debug(
"tid {d} (server): sent response",
.{std.Thread.getCurrentId()},
);
}
-fn serve(options: *TestOptions, res: *std.http.Server.Response) ![]const u8 {
- res.status = options.server_response_status;
- for (options.server_response_headers) |h|
- try res.headers.append(h[0], h[1]);
- // try res.headers.append("content-length", try std.fmt.allocPrint(allocator, "{d}", .{server_response.len}));
- return options.server_response;
-}
-
////////////////////////////////////////////////////////////////////////
// These will replicate the tests that were in src/main.zig
// The server_response and server_response_headers come from logs of
@@ -1527,10 +1506,10 @@ const TestSetup = struct {
const signing_time =
date.dateTimeToTimestamp(date.parseIso8601ToDateTime("20230908T170252Z") catch @compileError("Cannot parse date")) catch @compileError("Cannot parse date");
- fn init(allocator: std.mem.Allocator, options: TestOptions) Self {
+ fn init(options: TestOptions) Self {
return .{
- .allocator = allocator,
.request_options = options,
+ .allocator = options.allocator,
};
}
@@ -1542,7 +1521,10 @@ const TestSetup = struct {
);
self.started = true;
try self.request_options.waitForReady();
+ // Not sure why we're getting sprayed here, but we have an arena allocator, and this
+ // is testing, so yolo
awshttp.endpoint_override = self.request_options.test_server_runtime_uri;
+ log.debug("endpoint override set to {?s}", .{awshttp.endpoint_override});
self.creds = aws_auth.Credentials.init(
self.allocator,
try self.allocator.dupe(u8, "ACCESS"),
@@ -1563,9 +1545,11 @@ const TestSetup = struct {
self.server_thread.join();
}
- fn deinit(self: Self) void {
- self.request_options.deinit();
-
+ fn deinit(self: *Self) void {
+ if (self.request_options.arena) |a| {
+ a.deinit();
+ self.allocator.destroy(a);
+ }
if (!self.started) return;
awshttp.endpoint_override = null;
// creds.deinit(); Creds will get deinited in the course of the call. We don't want to do it twice
@@ -1576,15 +1560,15 @@ const TestSetup = struct {
test "query_no_input: sts getCallerIdentity comptime" {
const allocator = std.testing.allocator;
- var test_harness = TestSetup.init(allocator, .{
+ var test_harness = TestSetup.init(.{
.allocator = allocator,
.server_response =
\\{"GetCallerIdentityResponse":{"GetCallerIdentityResult":{"Account":"123456789012","Arn":"arn:aws:iam::123456789012:user/admin","UserId":"AIDAYAM4POHXHRVANDQBQ"},"ResponseMetadata":{"RequestId":"8f0d54da-1230-40f7-b4ac-95015c4b84cd"}}}
,
- .server_response_headers = @constCast(&[_][2][]const u8{
- .{ "Content-Type", "application/json" },
- .{ "x-amzn-RequestId", "8f0d54da-1230-40f7-b4ac-95015c4b84cd" },
- }),
+ .server_response_headers = &.{
+ .{ .name = "Content-Type", .value = "application/json" },
+ .{ .name = "x-amzn-RequestId", .value = "8f0d54da-1230-40f7-b4ac-95015c4b84cd" },
+ },
});
defer test_harness.deinit();
const options = try test_harness.start();
@@ -1611,7 +1595,7 @@ test "query_no_input: sts getCallerIdentity comptime" {
test "query_with_input: sts getAccessKeyInfo runtime" {
// sqs switched from query to json in aws sdk for go v2 commit f5a08768ef820ff5efd62a49ba50c61c9ca5dbcb
const allocator = std.testing.allocator;
- var test_harness = TestSetup.init(allocator, .{
+ var test_harness = TestSetup.init(.{
.allocator = allocator,
.server_response =
\\
@@ -1623,10 +1607,10 @@ test "query_with_input: sts getAccessKeyInfo runtime" {
\\
\\
,
- .server_response_headers = @constCast(&[_][2][]const u8{
- .{ "Content-Type", "text/xml" },
- .{ "x-amzn-RequestId", "ec85bf29-1ef0-459a-930e-6446dd14a286" },
- }),
+ .server_response_headers = &.{
+ .{ .name = "Content-Type", .value = "text/xml" },
+ .{ .name = "x-amzn-RequestId", .value = "ec85bf29-1ef0-459a-930e-6446dd14a286" },
+ },
});
defer test_harness.deinit();
const options = try test_harness.start();
@@ -1649,15 +1633,15 @@ test "query_with_input: sts getAccessKeyInfo runtime" {
}
test "json_1_0_query_with_input: dynamodb listTables runtime" {
const allocator = std.testing.allocator;
- var test_harness = TestSetup.init(allocator, .{
+ var test_harness = TestSetup.init(.{
.allocator = allocator,
.server_response =
\\{"LastEvaluatedTableName":"Customer","TableNames":["Customer"]}
,
- .server_response_headers = @constCast(&[_][2][]const u8{
- .{ "Content-Type", "application/json" },
- .{ "x-amzn-RequestId", "QBI72OUIN8U9M9AG6PCSADJL4JVV4KQNSO5AEMVJF66Q9ASUAAJG" },
- }),
+ .server_response_headers = &.{
+ .{ .name = "Content-Type", .value = "application/json" },
+ .{ .name = "x-amzn-RequestId", .value = "QBI72OUIN8U9M9AG6PCSADJL4JVV4KQNSO5AEMVJF66Q9ASUAAJG" },
+ },
});
defer test_harness.deinit();
const options = try test_harness.start();
@@ -1685,15 +1669,15 @@ test "json_1_0_query_with_input: dynamodb listTables runtime" {
test "json_1_0_query_no_input: dynamodb listTables runtime" {
const allocator = std.testing.allocator;
- var test_harness = TestSetup.init(allocator, .{
+ var test_harness = TestSetup.init(.{
.allocator = allocator,
.server_response =
\\{"AccountMaxReadCapacityUnits":80000,"AccountMaxWriteCapacityUnits":80000,"TableMaxReadCapacityUnits":40000,"TableMaxWriteCapacityUnits":40000}
,
- .server_response_headers = @constCast(&[_][2][]const u8{
- .{ "Content-Type", "application/json" },
- .{ "x-amzn-RequestId", "QBI72OUIN8U9M9AG6PCSADJL4JVV4KQNSO5AEMVJF66Q9ASUAAJG" },
- }),
+ .server_response_headers = &.{
+ .{ .name = "Content-Type", .value = "application/json" },
+ .{ .name = "x-amzn-RequestId", .value = "QBI72OUIN8U9M9AG6PCSADJL4JVV4KQNSO5AEMVJF66Q9ASUAAJG" },
+ },
});
defer test_harness.deinit();
const options = try test_harness.start();
@@ -1714,15 +1698,15 @@ test "json_1_0_query_no_input: dynamodb listTables runtime" {
}
test "json_1_1_query_with_input: ecs listClusters runtime" {
const allocator = std.testing.allocator;
- var test_harness = TestSetup.init(allocator, .{
+ var test_harness = TestSetup.init(.{
.allocator = allocator,
.server_response =
\\{"clusterArns":["arn:aws:ecs:us-west-2:550620852718:cluster/web-applicationehjaf-cluster"],"nextToken":"czE0Og=="}
,
- .server_response_headers = @constCast(&[_][2][]const u8{
- .{ "Content-Type", "application/json" },
- .{ "x-amzn-RequestId", "b2420066-ff67-4237-b782-721c4df60744" },
- }),
+ .server_response_headers = &.{
+ .{ .name = "Content-Type", .value = "application/json" },
+ .{ .name = "x-amzn-RequestId", .value = "b2420066-ff67-4237-b782-721c4df60744" },
+ },
});
defer test_harness.deinit();
const options = try test_harness.start();
@@ -1748,16 +1732,19 @@ test "json_1_1_query_with_input: ecs listClusters runtime" {
try std.testing.expectEqualStrings("arn:aws:ecs:us-west-2:550620852718:cluster/web-applicationehjaf-cluster", call.response.cluster_arns.?[0]);
}
test "json_1_1_query_no_input: ecs listClusters runtime" {
+ // const old = std.testing.log_level;
+ // defer std.testing.log_level = old;
+ // std.testing.log_level = .debug;
const allocator = std.testing.allocator;
- var test_harness = TestSetup.init(allocator, .{
+ var test_harness = TestSetup.init(.{
.allocator = allocator,
.server_response =
\\{"clusterArns":["arn:aws:ecs:us-west-2:550620852718:cluster/web-applicationehjaf-cluster"],"nextToken":"czE0Og=="}
,
- .server_response_headers = @constCast(&[_][2][]const u8{
- .{ "Content-Type", "application/json" },
- .{ "x-amzn-RequestId", "e65322b2-0065-45f2-ba37-f822bb5ce395" },
- }),
+ .server_response_headers = &.{
+ .{ .name = "Content-Type", .value = "application/json" },
+ .{ .name = "x-amzn-RequestId", .value = "e65322b2-0065-45f2-ba37-f822bb5ce395" },
+ },
});
defer test_harness.deinit();
const options = try test_harness.start();
@@ -1782,15 +1769,15 @@ test "json_1_1_query_no_input: ecs listClusters runtime" {
}
test "rest_json_1_query_with_input: lambda listFunctions runtime" {
const allocator = std.testing.allocator;
- var test_harness = TestSetup.init(allocator, .{
+ var test_harness = TestSetup.init(.{
.allocator = allocator,
.server_response =
\\{"Functions":[{"Description":"AWS CDK resource provider framework - onEvent (DevelopmentFrontendStack-g650u/com.amazonaws.cdk.custom-resources.amplify-asset-deployment-provider/amplify-asset-deployment-handler-provider)","TracingConfig":{"Mode":"PassThrough"},"VpcConfig":null,"SigningJobArn":null,"SnapStart":{"OptimizationStatus":"Off","ApplyOn":"None"},"RevisionId":"0c62fc74-a692-403d-9206-5fcbad406424","LastModified":"2023-03-01T18:13:15.704+0000","FileSystemConfigs":null,"FunctionName":"DevelopmentFrontendStack--amplifyassetdeploymentha-aZqB9IbZLIKU","Runtime":"nodejs14.x","Version":"$LATEST","PackageType":"Zip","LastUpdateStatus":null,"Layers":null,"FunctionArn":"arn:aws:lambda:us-west-2:550620852718:function:DevelopmentFrontendStack--amplifyassetdeploymentha-aZqB9IbZLIKU","KMSKeyArn":null,"MemorySize":128,"ImageConfigResponse":null,"LastUpdateStatusReason":null,"DeadLetterConfig":null,"Timeout":900,"Handler":"framework.onEvent","CodeSha256":"m4tt+M0l3p8bZvxIDj83dwGrwRW6atCfS/q8AiXCD3o=","Role":"arn:aws:iam::550620852718:role/DevelopmentFrontendStack-amplifyassetdeploymentha-1782JF7WAPXZ3","SigningProfileVersionArn":null,"MasterArn":null,"RuntimeVersionConfig":null,"CodeSize":4307,"State":null,"StateReason":null,"Environment":{"Variables":{"USER_ON_EVENT_FUNCTION_ARN":"arn:aws:lambda:us-west-2:550620852718:function:DevelopmentFrontendStack--amplifyassetdeploymenton-X9iZJSCSPYDH","WAITER_STATE_MACHINE_ARN":"arn:aws:states:us-west-2:550620852718:stateMachine:amplifyassetdeploymenthandlerproviderwaiterstatemachineB3C2FCBE-Ltggp5wBcHWO","USER_IS_COMPLETE_FUNCTION_ARN":"arn:aws:lambda:us-west-2:550620852718:function:DevelopmentFrontendStack--amplifyassetdeploymentis-jaHopLrSSARV"},"Error":null},"EphemeralStorage":{"Size":512},"StateReasonCode":null,"LastUpdateStatusReasonCode":null,"Architectures":["x86_64"]}],"NextMarker":"lslTXFcbLQKkb0vP9Kgh5hUL7C3VghELNGbWgZfxrRCk3eiDRMkct7D8EmptWfHSXssPdS7Bo66iQPTMpVOHZgANewpgGgFGGr4pVjd6VgLUO6qPe2EMAuNDBjUTxm8z6N28yhlUwEmKbrA
V/m0k5qVzizwoxFwvyruMbuMx9kADFACSslcabxXl3/jDI4rfFnIsUVdzTLBgPF1hzwrE1f3lcdkBvUp+QgY+Pn3w5QuJmwsp/di8COzFemY89GgOHbLNqsrBsgR/ee2eXoJp0ZkKM4EcBK3HokqBzefLfgR02PnfNOdXwqTlhkSPW0TKiKGIYu3Bw7lSNrLd+q3+wEr7ZakqOQf0BVo3FMRhMHlVYgwUJzwi3ActyH2q6fuqGG1sS0B8Oa/prUpe5fmp3VaA3WpazioeHtrKF78JwCi6/nfQsrj/8ZtXGQOxlwEgvT1CIUaF+CdHY3biezrK0tRZNpkCtHnkPtF9lq2U7+UiKXSW9yzxT8P2b0M/Qh4IVdnw4rncQK/doYriAeOdrs1wjMEJnHWq9lAaEyipoxYcVr/z5+yaC6Gwxdg45p9X1vIAaYMf6IZxyFuua43SYi0Ls+IBk4VvpR2io7T0dCxHAr3WAo3D2dm0y8OsbM59"}
,
- .server_response_headers = @constCast(&[_][2][]const u8{
- .{ "Content-Type", "application/json" },
- .{ "x-amzn-RequestId", "c4025199-226f-4a16-bb1f-48618e9d2ea6" },
- }),
+ .server_response_headers = &.{
+ .{ .name = "Content-Type", .value = "application/json" },
+ .{ .name = "x-amzn-RequestId", .value = "c4025199-226f-4a16-bb1f-48618e9d2ea6" },
+ },
});
defer test_harness.deinit();
const options = try test_harness.start();
@@ -1816,13 +1803,13 @@ test "rest_json_1_query_with_input: lambda listFunctions runtime" {
}
test "rest_json_1_query_no_input: lambda listFunctions runtime" {
const allocator = std.testing.allocator;
- var test_harness = TestSetup.init(allocator, .{
+ var test_harness = TestSetup.init(.{
.allocator = allocator,
.server_response = @embedFile("test_rest_json_1_query_no_input.response"),
- .server_response_headers = @constCast(&[_][2][]const u8{
- .{ "Content-Type", "application/json" },
- .{ "x-amzn-RequestId", "b2aad11f-36fc-4d0d-ae92-fe0167fb0f40" },
- }),
+ .server_response_headers = &.{
+ .{ .name = "Content-Type", .value = "application/json" },
+ .{ .name = "x-amzn-RequestId", .value = "b2aad11f-36fc-4d0d-ae92-fe0167fb0f40" },
+ },
});
defer test_harness.deinit();
const options = try test_harness.start();
@@ -1850,14 +1837,14 @@ test "rest_json_1_query_no_input: lambda listFunctions runtime" {
}
test "rest_json_1_work_with_lambda: lambda tagResource (only), to excercise zig issue 17015" {
const allocator = std.testing.allocator;
- var test_harness = TestSetup.init(allocator, .{
+ var test_harness = TestSetup.init(.{
.allocator = allocator,
.server_response = "",
.server_response_status = .no_content,
- .server_response_headers = @constCast(&[_][2][]const u8{
- .{ "Content-Type", "application/json" },
- .{ "x-amzn-RequestId", "a521e152-6e32-4e67-9fb3-abc94e34551b" },
- }),
+ .server_response_headers = &.{
+ .{ .name = "Content-Type", .value = "application/json" },
+ .{ .name = "x-amzn-RequestId", .value = "a521e152-6e32-4e67-9fb3-abc94e34551b" },
+ },
});
defer test_harness.deinit();
const options = try test_harness.start();
@@ -1886,13 +1873,13 @@ test "rest_json_1_work_with_lambda: lambda tagResource (only), to excercise zig
}
test "ec2_query_no_input: EC2 describe regions" {
const allocator = std.testing.allocator;
- var test_harness = TestSetup.init(allocator, .{
+ var test_harness = TestSetup.init(.{
.allocator = allocator,
.server_response = @embedFile("test_ec2_query_no_input.response"),
- .server_response_headers = @constCast(&[_][2][]const u8{
- .{ "Content-Type", "text/xml;charset=UTF-8" },
- .{ "x-amzn-RequestId", "4cdbdd69-800c-49b5-8474-ae4c17709782" },
- }),
+ .server_response_headers = &.{
+ .{ .name = "Content-Type", .value = "text/xml;charset=UTF-8" },
+ .{ .name = "x-amzn-RequestId", .value = "4cdbdd69-800c-49b5-8474-ae4c17709782" },
+ },
.server_response_transfer_encoding = .chunked,
});
defer test_harness.deinit();
@@ -1913,13 +1900,13 @@ test "ec2_query_no_input: EC2 describe regions" {
}
test "ec2_query_with_input: EC2 describe instances" {
const allocator = std.testing.allocator;
- var test_harness = TestSetup.init(allocator, .{
+ var test_harness = TestSetup.init(.{
.allocator = allocator,
.server_response = @embedFile("test_ec2_query_with_input.response"),
- .server_response_headers = @constCast(&[_][2][]const u8{
- .{ "Content-Type", "text/xml;charset=UTF-8" },
- .{ "x-amzn-RequestId", "150a14cc-785d-476f-a4c9-2aa4d03b14e2" },
- }),
+ .server_response_headers = &.{
+ .{ .name = "Content-Type", .value = "text/xml;charset=UTF-8" },
+ .{ .name = "x-amzn-RequestId", .value = "150a14cc-785d-476f-a4c9-2aa4d03b14e2" },
+ },
});
defer test_harness.deinit();
const options = try test_harness.start();
@@ -1943,15 +1930,15 @@ test "ec2_query_with_input: EC2 describe instances" {
}
test "rest_xml_no_input: S3 list buckets" {
const allocator = std.testing.allocator;
- var test_harness = TestSetup.init(allocator, .{
+ var test_harness = TestSetup.init(.{
.allocator = allocator,
.server_response =
\\3367189aa775bd98da38e55093705f2051443c1e775fc0971d6d77387a47c8d0emilerch+sub1550620852718-backup2020-06-17T16:26:51.000Zamplify-letmework-staging-185741-deployment2023-03-10T18:57:49.000Zaws-cloudtrail-logs-550620852718-224022a72021-06-21T18:32:44.000Zaws-sam-cli-managed-default-samclisourcebucket-1gy0z00mj47xe2021-10-05T16:38:07.000Zawsomeprojectstack-pipelineartifactsbucketaea9a05-1uzwo6c86ecr2021-10-05T22:55:09.000Zcdk-hnb659fds-assets-550620852718-us-west-22023-02-28T21:49:36.000Zcf-templates-12iy6putgdxtk-us-west-22020-06-26T02:31:59.000Zcodepipeline-us-west-2-467140836372021-09-14T18:43:07.000Zelasticbeanstalk-us-west-2-5506208527182022-04-15T16:22:42.000Zlobo-west2021-06-21T17:17:22.000Zlobo-west-22021-11-19T20:12:31.000Zlogging-backup-550620852718-us-east-22022-05-29T21:55:16.000Zmysfitszj3t6webstack-hostingbucketa91a61fe-1ep3ezkgwpxr02023-03-01T04:53:55.000Z
,
- .server_response_headers = @constCast(&[_][2][]const u8{
- .{ "Content-Type", "application/xml" },
- .{ "x-amzn-RequestId", "9PEYBAZ9J7TPRX43" },
- }),
+ .server_response_headers = &.{
+ .{ .name = "Content-Type", .value = "application/xml" },
+ .{ .name = "x-amzn-RequestId", .value = "9PEYBAZ9J7TPRX43" },
+ },
});
defer test_harness.deinit();
const options = try test_harness.start();
@@ -1974,15 +1961,15 @@ test "rest_xml_no_input: S3 list buckets" {
}
test "rest_xml_anything_but_s3: CloudFront list key groups" {
const allocator = std.testing.allocator;
- var test_harness = TestSetup.init(allocator, .{
+ var test_harness = TestSetup.init(.{
.allocator = allocator,
.server_response =
\\{"Items":null,"MaxItems":100,"NextMarker":null,"Quantity":0}
,
- .server_response_headers = @constCast(&[_][2][]const u8{
- .{ "Content-Type", "application/json" },
- .{ "x-amzn-RequestId", "d3382082-5291-47a9-876b-8df3accbb7ea" },
- }),
+ .server_response_headers = &.{
+ .{ .name = "Content-Type", .value = "application/json" },
+ .{ .name = "x-amzn-RequestId", .value = "d3382082-5291-47a9-876b-8df3accbb7ea" },
+ },
});
defer test_harness.deinit();
const options = try test_harness.start();
@@ -2000,16 +1987,16 @@ test "rest_xml_anything_but_s3: CloudFront list key groups" {
}
test "rest_xml_with_input: S3 put object" {
const allocator = std.testing.allocator;
- var test_harness = TestSetup.init(allocator, .{
+ var test_harness = TestSetup.init(.{
.allocator = allocator,
.server_response = "",
- .server_response_headers = @constCast(&[_][2][]const u8{
+ .server_response_headers = &.{
// .{ "Content-Type", "application/xml" },
- .{ "x-amzn-RequestId", "9PEYBAZ9J7TPRX43" },
- .{ "x-amz-id-2", "jdRDo30t7Ge9lf6F+4WYpg+YKui8z0mz2+rwinL38xDZzvloJqrmpCAiKG375OSvHA9OBykJS44=" },
- .{ "x-amz-server-side-encryption", "AES256" },
- .{ "ETag", "37b51d194a7513e45b56f6524f2d51f2" },
- }),
+ .{ .name = "x-amzn-RequestId", .value = "9PEYBAZ9J7TPRX43" },
+ .{ .name = "x-amz-id-2", .value = "jdRDo30t7Ge9lf6F+4WYpg+YKui8z0mz2+rwinL38xDZzvloJqrmpCAiKG375OSvHA9OBykJS44=" },
+ .{ .name = "x-amz-server-side-encryption", .value = "AES256" },
+ .{ .name = "ETag", .value = "37b51d194a7513e45b56f6524f2d51f2" },
+ },
});
defer test_harness.deinit();
const options = try test_harness.start();
@@ -2018,7 +2005,6 @@ test "rest_xml_with_input: S3 put object" {
.client = options.client,
.signing_time = TestSetup.signing_time,
};
- // std.testing.log_level = .debug;
const result = try Request(services.s3.put_object).call(.{
.bucket = "mysfitszj3t6webstack-hostingbucketa91a61fe-1ep3ezkgwpxr0",
.key = "i/am/a/teapot/foo",
@@ -2026,7 +2012,7 @@ test "rest_xml_with_input: S3 put object" {
.body = "bar",
.storage_class = "STANDARD",
}, s3opts);
- for (test_harness.request_options.request_headers.list.items) |header| {
+ for (test_harness.request_options.request_headers) |header| {
std.log.info("Request header: {s}: {s}", .{ header.name, header.value });
}
std.log.info("PutObject Request id: {s}", .{result.response_metadata.request_id});
diff --git a/src/aws_credentials.zig b/src/aws_credentials.zig
index 166fe88..96f0037 100644
--- a/src/aws_credentials.zig
+++ b/src/aws_credentials.zig
@@ -122,29 +122,22 @@ fn getContainerCredentials(allocator: std.mem.Allocator) !?auth.Credentials {
const container_uri = try std.fmt.allocPrint(allocator, "http://169.254.170.2{s}", .{container_relative_uri});
defer allocator.free(container_uri);
- var empty_headers = std.http.Headers.init(allocator);
- defer empty_headers.deinit();
var cl = std.http.Client{ .allocator = allocator };
defer cl.deinit(); // I don't belive connection pooling would help much here as it's non-ssl and local
- var req = try cl.request(.GET, try std.Uri.parse(container_uri), empty_headers, .{});
- defer req.deinit();
- try req.start();
- try req.wait();
- if (req.response.status != .ok and req.response.status != .not_found) {
- log.warn("Bad status code received from container credentials endpoint: {}", .{@intFromEnum(req.response.status)});
+ var resp_payload = std.ArrayList(u8).init(allocator);
+ defer resp_payload.deinit();
+ const req = try cl.fetch(.{
+ .location = .{ .url = container_uri },
+ .response_storage = .{ .dynamic = &resp_payload },
+ });
+ if (req.status != .ok and req.status != .not_found) {
+ log.warn("Bad status code received from container credentials endpoint: {}", .{@intFromEnum(req.status)});
return null;
}
- if (req.response.status == .not_found) return null;
- if (req.response.content_length == null or req.response.content_length.? == 0) return null;
+ if (req.status == .not_found) return null;
- var resp_payload = try std.ArrayList(u8).initCapacity(allocator, @intCast(req.response.content_length.?));
- defer resp_payload.deinit();
- try resp_payload.resize(@intCast(req.response.content_length.?));
- const response_data = try resp_payload.toOwnedSlice();
- defer allocator.free(response_data);
- _ = try req.readAll(response_data);
- log.debug("Read {d} bytes from container credentials endpoint", .{response_data.len});
- if (response_data.len == 0) return null;
+ log.debug("Read {d} bytes from container credentials endpoint", .{resp_payload.items.len});
+ if (resp_payload.items.len == 0) return null;
const CredsResponse = struct {
AccessKeyId: []const u8,
@@ -154,8 +147,8 @@ fn getContainerCredentials(allocator: std.mem.Allocator) !?auth.Credentials {
Token: []const u8,
};
const creds_response = blk: {
- const res = std.json.parseFromSlice(CredsResponse, allocator, response_data, .{}) catch |e| {
- log.err("Unexpected Json response from container credentials endpoint: {s}", .{response_data});
+ const res = std.json.parseFromSlice(CredsResponse, allocator, resp_payload.items, .{}) catch |e| {
+ log.err("Unexpected Json response from container credentials endpoint: {s}", .{resp_payload.items});
log.err("Error parsing json: {}", .{e});
if (@errorReturnTrace()) |trace| {
std.debug.dumpStackTrace(trace.*);
@@ -182,28 +175,27 @@ fn getImdsv2Credentials(allocator: std.mem.Allocator) !?auth.Credentials {
defer cl.deinit(); // I don't belive connection pooling would help much here as it's non-ssl and local
// Get token
{
- var headers = std.http.Headers.init(allocator);
- defer headers.deinit();
- try headers.append("X-aws-ec2-metadata-token-ttl-seconds", "21600");
- var req = try cl.request(.PUT, try std.Uri.parse("http://169.254.169.254/latest/api/token"), headers, .{});
- defer req.deinit();
- try req.start();
- try req.wait();
- if (req.response.status != .ok) {
- log.warn("Bad status code received from IMDS v2: {}", .{@intFromEnum(req.response.status)});
+ var resp_payload = std.ArrayList(u8).init(allocator);
+ defer resp_payload.deinit();
+ const req = try cl.fetch(.{
+ .method = .PUT,
+ .location = .{ .url = "http://169.254.169.254/latest/api/token" },
+ .extra_headers = &[_]std.http.Header{
+ .{ .name = "X-aws-ec2-metadata-token-ttl-seconds", .value = "21600" },
+ },
+ .response_storage = .{ .dynamic = &resp_payload },
+ });
+ if (req.status != .ok) {
+ log.warn("Bad status code received from IMDS v2: {}", .{@intFromEnum(req.status)});
return null;
}
- if (req.response.content_length == null or req.response.content_length == 0) {
+ if (resp_payload.items.len == 0) {
log.warn("Unexpected zero response from IMDS v2", .{});
return null;
}
- var resp_payload = try std.ArrayList(u8).initCapacity(allocator, @intCast(req.response.content_length.?));
- defer resp_payload.deinit();
- try resp_payload.resize(@intCast(req.response.content_length.?));
token = try resp_payload.toOwnedSlice();
errdefer if (token) |t| allocator.free(t);
- _ = try req.readAll(token.?);
}
std.debug.assert(token != null);
log.debug("Got token from IMDSv2: {s}", .{token.?});
@@ -224,28 +216,26 @@ fn getImdsRoleName(allocator: std.mem.Allocator, client: *std.http.Client, imds_
// "InstanceProfileArn" : "arn:aws:iam::550620852718:instance-profile/ec2-dev",
// "InstanceProfileId" : "AIPAYAM4POHXCFNKZ7HU2"
// }
- var headers = std.http.Headers.init(allocator);
- defer headers.deinit();
- try headers.append("X-aws-ec2-metadata-token", imds_token);
+ var resp_payload = std.ArrayList(u8).init(allocator);
+ defer resp_payload.deinit();
+ const req = try client.fetch(.{
+ .method = .GET,
+ .location = .{ .url = "http://169.254.169.254/latest/meta-data/iam/info" },
+ .extra_headers = &[_]std.http.Header{
+ .{ .name = "X-aws-ec2-metadata-token", .value = imds_token },
+ },
+ .response_storage = .{ .dynamic = &resp_payload },
+ });
- var req = try client.request(.GET, try std.Uri.parse("http://169.254.169.254/latest/meta-data/iam/info"), headers, .{});
- defer req.deinit();
-
- try req.start();
- try req.wait();
-
- if (req.response.status != .ok and req.response.status != .not_found) {
- log.warn("Bad status code received from IMDS iam endpoint: {}", .{@intFromEnum(req.response.status)});
+ if (req.status != .ok and req.status != .not_found) {
+ log.warn("Bad status code received from IMDS iam endpoint: {}", .{@intFromEnum(req.status)});
return null;
}
- if (req.response.status == .not_found) return null;
- if (req.response.content_length == null or req.response.content_length.? == 0) {
+ if (req.status == .not_found) return null;
+ if (resp_payload.items.len == 0) {
log.warn("Unexpected empty response from IMDS endpoint post token", .{});
return null;
}
- const resp = try allocator.alloc(u8, @intCast(req.response.content_length.?));
- defer allocator.free(resp);
- _ = try req.readAll(resp);
const ImdsResponse = struct {
Code: []const u8,
@@ -253,8 +243,8 @@ fn getImdsRoleName(allocator: std.mem.Allocator, client: *std.http.Client, imds_
InstanceProfileArn: []const u8,
InstanceProfileId: []const u8,
};
- const imds_response = std.json.parseFromSlice(ImdsResponse, allocator, resp, .{}) catch |e| {
- log.err("Unexpected Json response from IMDS endpoint: {s}", .{resp});
+ const imds_response = std.json.parseFromSlice(ImdsResponse, allocator, resp_payload.items, .{}) catch |e| {
+ log.err("Unexpected Json response from IMDS endpoint: {s}", .{resp_payload.items});
log.err("Error parsing json: {}", .{e});
if (@errorReturnTrace()) |trace| {
std.debug.dumpStackTrace(trace.*);
@@ -274,31 +264,28 @@ fn getImdsRoleName(allocator: std.mem.Allocator, client: *std.http.Client, imds_
/// Note - this internal function assumes zfetch is initialized prior to use
fn getImdsCredentials(allocator: std.mem.Allocator, client: *std.http.Client, role_name: []const u8, imds_token: []u8) !?auth.Credentials {
- var headers = std.http.Headers.init(allocator);
- defer headers.deinit();
- try headers.append("X-aws-ec2-metadata-token", imds_token);
-
const url = try std.fmt.allocPrint(allocator, "http://169.254.169.254/latest/meta-data/iam/security-credentials/{s}/", .{role_name});
defer allocator.free(url);
+ var resp_payload = std.ArrayList(u8).init(allocator);
+ defer resp_payload.deinit();
+ const req = try client.fetch(.{
+ .method = .GET,
+ .location = .{ .url = url },
+ .extra_headers = &[_]std.http.Header{
+ .{ .name = "X-aws-ec2-metadata-token", .value = imds_token },
+ },
+ .response_storage = .{ .dynamic = &resp_payload },
+ });
- var req = try client.request(.GET, try std.Uri.parse(url), headers, .{});
- defer req.deinit();
-
- try req.start();
- try req.wait();
-
- if (req.response.status != .ok and req.response.status != .not_found) {
- log.warn("Bad status code received from IMDS role endpoint: {}", .{@intFromEnum(req.response.status)});
+ if (req.status != .ok and req.status != .not_found) {
+ log.warn("Bad status code received from IMDS role endpoint: {}", .{@intFromEnum(req.status)});
return null;
}
- if (req.response.status == .not_found) return null;
- if (req.response.content_length == null or req.response.content_length.? == 0) {
+ if (req.status == .not_found) return null;
+ if (resp_payload.items.len == 0) {
log.warn("Unexpected empty response from IMDS role endpoint", .{});
return null;
}
- const resp = try allocator.alloc(u8, @intCast(req.response.content_length.?));
- defer allocator.free(resp);
- _ = try req.readAll(resp);
// log.debug("Read {d} bytes from imds v2 credentials endpoint", .{read});
const ImdsResponse = struct {
@@ -310,8 +297,8 @@ fn getImdsCredentials(allocator: std.mem.Allocator, client: *std.http.Client, ro
Token: []const u8,
Expiration: []const u8,
};
- const imds_response = std.json.parseFromSlice(ImdsResponse, allocator, resp, .{}) catch |e| {
- log.err("Unexpected Json response from IMDS endpoint: {s}", .{resp});
+ const imds_response = std.json.parseFromSlice(ImdsResponse, allocator, resp_payload.items, .{}) catch |e| {
+ log.err("Unexpected Json response from IMDS endpoint: {s}", .{resp_payload.items});
log.err("Error parsing json: {}", .{e});
if (@errorReturnTrace()) |trace| {
std.debug.dumpStackTrace(trace.*);
diff --git a/src/aws_http.zig b/src/aws_http.zig
index 31e84eb..c3a4369 100644
--- a/src/aws_http.zig
+++ b/src/aws_http.zig
@@ -44,7 +44,7 @@ pub const Options = struct {
signing_time: ?i64 = null,
};
-pub const Header = base.Header;
+pub const Header = std.http.Header;
pub const HttpRequest = base.Request;
pub const HttpResult = base.Result;
@@ -64,11 +64,11 @@ const EndPoint = struct {
};
pub const AwsHttp = struct {
allocator: std.mem.Allocator,
- proxy: ?std.http.Client.HttpProxy,
+ proxy: ?std.http.Client.Proxy,
const Self = @This();
- pub fn init(allocator: std.mem.Allocator, proxy: ?std.http.Client.HttpProxy) Self {
+ pub fn init(allocator: std.mem.Allocator, proxy: ?std.http.Client.Proxy) Self {
return Self{
.allocator = allocator,
.proxy = proxy,
@@ -149,7 +149,7 @@ pub const AwsHttp = struct {
// We will use endpoint instead
request_cp.path = endpoint.path;
- var request_headers = std.ArrayList(base.Header).init(self.allocator);
+ var request_headers = std.ArrayList(std.http.Header).init(self.allocator);
defer request_headers.deinit();
const len = try addHeaders(self.allocator, &request_headers, endpoint.host, request_cp.body, request_cp.content_type, request_cp.headers);
@@ -163,108 +163,75 @@ pub const AwsHttp = struct {
}
}
- var headers = std.http.Headers.init(self.allocator);
+ var headers = std.ArrayList(std.http.Header).init(self.allocator);
defer headers.deinit();
for (request_cp.headers) |header|
- try headers.append(header.name, header.value);
+ try headers.append(.{ .name = header.name, .value = header.value });
log.debug("All Request Headers:", .{});
- for (headers.list.items) |h| {
+ for (headers.items) |h| {
log.debug("\t{s}: {s}", .{ h.name, h.value });
}
const url = try std.fmt.allocPrint(self.allocator, "{s}{s}{s}", .{ endpoint.uri, request_cp.path, request_cp.query });
defer self.allocator.free(url);
log.debug("Request url: {s}", .{url});
- var cl = std.http.Client{ .allocator = self.allocator, .proxy = self.proxy };
+ // TODO: Fix this proxy stuff. This is all a kludge just to compile, but std.http.Client has it all built in now
+ var cl = std.http.Client{ .allocator = self.allocator, .https_proxy = if (self.proxy) |*p| @constCast(p) else null };
defer cl.deinit(); // TODO: Connection pooling
- //
- // var req = try zfetch.Request.init(self.allocator, url, self.trust_chain);
- // defer req.deinit();
const method = std.meta.stringToEnum(std.http.Method, request_cp.method).?;
- // std.Uri has a format function here that is used by start() (below)
- // to escape the string we're about to send. But we don't want that...
- // we need the control, because the signing above relies on the url above.
- // We can't seem to have our cake and eat it too, because we need escaped
- // ':' characters, but if we escape them, we'll get them double encoded.
- // If we don't escape them, they won't get encoded at all. I believe the
- // only answer may be to copy the Request.start function from the
- // standard library and tweak the print statements such that they don't
- // escape (but do still handle full uri (in proxy) vs path only (normal)
+ var server_header_buffer: [16 * 1024]u8 = undefined;
+ var resp_payload = std.ArrayList(u8).init(self.allocator);
+ defer resp_payload.deinit();
+ const req = try cl.fetch(.{
+ .server_header_buffer = &server_header_buffer,
+ .method = method,
+ .payload = if (request_cp.body.len > 0) request_cp.body else null,
+ .response_storage = .{ .dynamic = &resp_payload },
+ .raw_uri = true,
+ .location = .{ .url = url },
+ .extra_headers = headers.items,
+ });
+ // TODO: Need to test for payloads > 2^14. I believe one of our tests does this, but not sure
+ // if (request_cp.body.len > 0) {
+ // // Workaround for https://github.com/ziglang/zig/issues/15626
+ // const max_bytes: usize = 1 << 14;
+ // var inx: usize = 0;
+ // while (request_cp.body.len > inx) {
+ // try req.writeAll(request_cp.body[inx..@min(request_cp.body.len, inx + max_bytes)]);
+ // inx += max_bytes;
+ // }
//
- // Bug report filed here:
- // https://github.com/ziglang/zig/issues/17015
- //
- // https://github.com/ziglang/zig/blob/0.11.0/lib/std/http/Client.zig#L538-L636
- //
- // Look at lines 551 and 553:
- // https://github.com/ziglang/zig/blob/0.11.0/lib/std/http/Client.zig#L551
- //
- // This ends up executing the format function here:
- // https://github.com/ziglang/zig/blob/0.11.0/lib/std/http/Client.zig#L551
- //
- // Which is basically the what we want, without the escaping on lines
- // 249, 254, and 260:
- // https://github.com/ziglang/zig/blob/0.11.0/lib/std/Uri.zig#L249
- //
- // const unescaped_url = try std.Uri.unescapeString(self.allocator, url);
- // defer self.allocator.free(unescaped_url);
- var req = try cl.request(method, try std.Uri.parse(url), headers, .{});
- defer req.deinit();
- if (request_cp.body.len > 0)
- req.transfer_encoding = .{ .content_length = request_cp.body.len };
- try @import("http_client_17015_issue.zig").start(&req);
- // try req.start();
- if (request_cp.body.len > 0) {
- // Workaround for https://github.com/ziglang/zig/issues/15626
- const max_bytes: usize = 1 << 14;
- var inx: usize = 0;
- while (request_cp.body.len > inx) {
- try req.writeAll(request_cp.body[inx..@min(request_cp.body.len, inx + max_bytes)]);
- inx += max_bytes;
- }
-
- try req.finish();
- }
- try req.wait();
+ // try req.finish();
+ // }
+ // try req.wait();
// TODO: Timeout - is this now above us?
log.debug(
"Request Complete. Response code {d}: {?s}",
- .{ @intFromEnum(req.response.status), req.response.status.phrase() },
+ .{ @intFromEnum(req.status), req.status.phrase() },
);
log.debug("Response headers:", .{});
- var resp_headers = try std.ArrayList(Header).initCapacity(
+ var resp_headers = std.ArrayList(Header).init(
self.allocator,
- req.response.headers.list.items.len,
);
defer resp_headers.deinit();
- var content_length: usize = 0;
- for (req.response.headers.list.items) |h| {
+ var it = std.http.HeaderIterator.init(server_header_buffer[0..]);
+ while (it.next()) |h| { // even though we don't expect to fill the buffer,
+ // we don't get a length, but looking at the stdlib source
+ // it should be ok to call next on the undefined memory
log.debug(" {s}: {s}", .{ h.name, h.value });
- resp_headers.appendAssumeCapacity(.{
+ try resp_headers.append(.{
.name = try (self.allocator.dupe(u8, h.name)),
.value = try (self.allocator.dupe(u8, h.value)),
});
- if (content_length == 0 and std.ascii.eqlIgnoreCase("content-length", h.name))
- content_length = std.fmt.parseInt(usize, h.value, 10) catch 0;
}
- var response_data: []u8 =
- if (req.response.transfer_encoding) |_| // the only value here is "chunked"
- try req.reader().readAllAlloc(self.allocator, std.math.maxInt(usize))
- else blk: {
- // content length
- const tmp_data = try self.allocator.alloc(u8, content_length);
- errdefer self.allocator.free(tmp_data);
- _ = try req.readAll(tmp_data);
- break :blk tmp_data;
- };
- log.debug("raw response body:\n{s}", .{response_data});
+ log.debug("raw response body:\n{s}", .{resp_payload.items});
const rc = HttpResult{
- .response_code = @intFromEnum(req.response.status),
- .body = response_data,
+ .response_code = @intFromEnum(req.status),
+ .body = try resp_payload.toOwnedSlice(),
.headers = try resp_headers.toOwnedSlice(),
.allocator = self.allocator,
};
@@ -277,7 +244,16 @@ fn getRegion(service: []const u8, region: []const u8) []const u8 {
return region;
}
-fn addHeaders(allocator: std.mem.Allocator, headers: *std.ArrayList(base.Header), host: []const u8, body: []const u8, content_type: []const u8, additional_headers: []Header) !?[]const u8 {
+fn addHeaders(allocator: std.mem.Allocator, headers: *std.ArrayList(std.http.Header), host: []const u8, body: []const u8, content_type: []const u8, additional_headers: []const Header) !?[]const u8 {
+ // We don't need allocator and body because they were to add a
+ // Content-Length header. But that is being added by the client send()
+ // function, so we don't want it on the request twice. But I also feel
+ // pretty strongly that send() should be providing us control, because
+ // I think if we don't add it here, it won't get signed, and we would
+ // really prefer it to be signed. So, we will wait and watch for this
+ // situation to change in stdlib
+ _ = allocator;
+ _ = body;
var has_content_type = false;
for (additional_headers) |h| {
if (std.ascii.eqlIgnoreCase(h.name, "Content-Type")) {
@@ -291,11 +267,6 @@ fn addHeaders(allocator: std.mem.Allocator, headers: *std.ArrayList(base.Header)
if (!has_content_type)
try headers.append(.{ .name = "Content-Type", .value = content_type });
try headers.appendSlice(additional_headers);
- if (body.len > 0) {
- const len = try std.fmt.allocPrint(allocator, "{d}", .{body.len});
- try headers.append(.{ .name = "Content-Length", .value = len });
- return len;
- }
return null;
}
diff --git a/src/aws_http_base.zig b/src/aws_http_base.zig
index 5b05cf5..eb05a59 100644
--- a/src/aws_http_base.zig
+++ b/src/aws_http_base.zig
@@ -7,12 +7,12 @@ pub const Request = struct {
body: []const u8 = "",
method: []const u8 = "POST",
content_type: []const u8 = "application/json", // Can we get away with this?
- headers: []Header = &[_]Header{},
+ headers: []const std.http.Header = &.{},
};
pub const Result = struct {
response_code: u16, // actually 3 digits can fit in u10
body: []const u8,
- headers: []Header,
+ headers: []const std.http.Header,
allocator: std.mem.Allocator,
pub fn deinit(self: Result) void {
@@ -26,8 +26,3 @@ pub const Result = struct {
return;
}
};
-
-pub const Header = struct {
- name: []const u8,
- value: []const u8,
-};
diff --git a/src/aws_signing.zig b/src/aws_signing.zig
index e7bfe95..96b25ad 100644
--- a/src/aws_signing.zig
+++ b/src/aws_signing.zig
@@ -169,19 +169,19 @@ pub fn signRequest(allocator: std.mem.Allocator, request: base.Request, config:
additional_header_count += 1;
if (config.signed_body_header == .none)
additional_header_count -= 1;
- const newheaders = try allocator.alloc(base.Header, rc.headers.len + additional_header_count);
+ const newheaders = try allocator.alloc(std.http.Header, rc.headers.len + additional_header_count);
errdefer allocator.free(newheaders);
const oldheaders = rc.headers;
if (config.credentials.session_token) |t| {
- newheaders[newheaders.len - additional_header_count] = base.Header{
+ newheaders[newheaders.len - additional_header_count] = std.http.Header{
.name = "X-Amz-Security-Token",
.value = try allocator.dupe(u8, t),
};
additional_header_count -= 1;
}
errdefer freeSignedRequest(allocator, &rc, config);
- std.mem.copy(base.Header, newheaders, oldheaders);
- newheaders[newheaders.len - additional_header_count] = base.Header{
+ @memcpy(newheaders[0..oldheaders.len], oldheaders);
+ newheaders[newheaders.len - additional_header_count] = std.http.Header{
.name = "X-Amz-Date",
.value = signing_iso8601,
};
@@ -200,7 +200,7 @@ pub fn signRequest(allocator: std.mem.Allocator, request: base.Request, config:
// may not add this header
// This will be freed in freeSignedRequest
// defer allocator.free(payload_hash);
- newheaders[newheaders.len - additional_header_count] = base.Header{
+ newheaders[newheaders.len - additional_header_count] = std.http.Header{
.name = "x-amz-content-sha256",
.value = payload_hash,
};
@@ -259,7 +259,7 @@ pub fn signRequest(allocator: std.mem.Allocator, request: base.Request, config:
const signature = try hmac(allocator, signing_key, string_to_sign);
defer allocator.free(signature);
- newheaders[newheaders.len - 1] = base.Header{
+ newheaders[newheaders.len - 1] = std.http.Header{
.name = "Authorization",
.value = try std.fmt.allocPrint(
allocator,
@@ -299,27 +299,51 @@ pub fn freeSignedRequest(allocator: std.mem.Allocator, request: *base.Request, c
pub const credentialsFn = *const fn ([]const u8) ?Credentials;
-pub fn verifyServerRequest(allocator: std.mem.Allocator, request: std.http.Server.Request, request_body_reader: anytype, credentials_fn: credentialsFn) !bool {
- const unverified_request = UnverifiedRequest{
- .headers = request.headers,
- .target = request.target,
- .method = request.method,
- };
+pub fn verifyServerRequest(allocator: std.mem.Allocator, request: *std.http.Server.Request, request_body_reader: anytype, credentials_fn: credentialsFn) !bool {
+ var unverified_request = try UnverifiedRequest.init(allocator, request);
+ defer unverified_request.deinit();
return verify(allocator, unverified_request, request_body_reader, credentials_fn);
}
pub const UnverifiedRequest = struct {
- headers: std.http.Headers,
+ headers: []std.http.Header,
target: []const u8,
method: std.http.Method,
+ allocator: std.mem.Allocator,
+
+ pub fn init(allocator: std.mem.Allocator, request: *std.http.Server.Request) !UnverifiedRequest {
+ var al = std.ArrayList(std.http.Header).init(allocator);
+ defer al.deinit();
+ var it = request.iterateHeaders();
+ while (it.next()) |h| try al.append(h);
+ return .{
+ .target = request.head.target,
+ .method = request.head.method,
+ .headers = try al.toOwnedSlice(),
+ .allocator = allocator,
+ };
+ }
+
+ pub fn getFirstHeaderValue(self: UnverifiedRequest, name: []const u8) ?[]const u8 {
+ for (self.headers) |*h| {
+ if (std.ascii.eqlIgnoreCase(name, h.name))
+ return h.value; // I don't think this is the whole story here, but should suffice for now
+ // We need to return the value before the first ';' IIRC
+ }
+ return null;
+ }
+
+ pub fn deinit(self: *UnverifiedRequest) void {
+ self.allocator.free(self.headers);
+ }
};
pub fn verify(allocator: std.mem.Allocator, request: UnverifiedRequest, request_body_reader: anytype, credentials_fn: credentialsFn) !bool {
var arena = std.heap.ArenaAllocator.init(allocator);
defer arena.deinit();
- var aa = arena.allocator();
+ const aa = arena.allocator();
// Authorization: AWS4-HMAC-SHA256 Credential=ACCESS/20230908/us-west-2/s3/aws4_request, SignedHeaders=accept;content-length;content-type;host;x-amz-content-sha256;x-amz-date;x-amz-storage-class, Signature=fcc43ce73a34c9bd1ddf17e8a435f46a859812822f944f9eeb2aabcd64b03523
- const auth_header_or_null = request.headers.getFirstValue("Authorization");
+ const auth_header_or_null = request.getFirstHeaderValue("Authorization");
const auth_header = if (auth_header_or_null) |a| a else return error.AuthorizationHeaderMissing;
if (!std.mem.startsWith(u8, auth_header, "AWS4-HMAC-SHA256")) return error.UnsupportedAuthorizationType;
var credential: ?[]const u8 = null;
@@ -373,8 +397,8 @@ fn verifyParsedAuthorization(
const credentials = credentials_fn(access_key) orelse return error.CredentialsNotFound;
// TODO: https://stackoverflow.com/questions/29276609/aws-authentication-requires-a-valid-date-or-x-amz-date-header-curl
// For now I want to see this test pass
- const normalized_iso_date = request.headers.getFirstValue("x-amz-date") orelse
- request.headers.getFirstValue("Date").?;
+ const normalized_iso_date = request.getFirstHeaderValue("x-amz-date") orelse
+ request.getFirstHeaderValue("Date").?;
log.debug("Got date: {s}", .{normalized_iso_date});
_ = credential_iterator.next().?; // skip the date...I don't think we need this
const region = credential_iterator.next().?;
@@ -392,7 +416,7 @@ fn verifyParsedAuthorization(
.signing_time = try date.dateTimeToTimestamp(try date.parseIso8601ToDateTime(normalized_iso_date)),
};
- var headers = try allocator.alloc(base.Header, std.mem.count(u8, signed_headers, ";") + 1);
+ var headers = try allocator.alloc(std.http.Header, std.mem.count(u8, signed_headers, ";") + 1);
defer allocator.free(headers);
var signed_headers_iterator = std.mem.splitSequence(u8, signed_headers, ";");
var inx: usize = 0;
@@ -409,7 +433,7 @@ fn verifyParsedAuthorization(
if (is_forbidden) continue;
headers[inx] = .{
.name = signed_header,
- .value = request.headers.getFirstValue(signed_header).?,
+ .value = request.getFirstHeaderValue(signed_header).?,
};
inx += 1;
}
@@ -418,7 +442,7 @@ fn verifyParsedAuthorization(
.path = target_iterator.first(),
.headers = headers[0..inx],
.method = @tagName(request.method),
- .content_type = request.headers.getFirstValue("content-type").?,
+ .content_type = request.getFirstHeaderValue("content-type").?,
};
signed_request.query = request.target[signed_request.path.len..]; // TODO: should this be +1? query here would include '?'
signed_request.body = try request_body_reader.readAllAlloc(allocator, std.math.maxInt(usize));
@@ -780,7 +804,7 @@ const CanonicalHeaders = struct {
str: []const u8,
signed_headers: []const u8,
};
-fn canonicalHeaders(allocator: std.mem.Allocator, headers: []base.Header, service: []const u8) !CanonicalHeaders {
+fn canonicalHeaders(allocator: std.mem.Allocator, headers: []const std.http.Header, service: []const u8) !CanonicalHeaders {
//
// Doc example. Original:
//
@@ -796,7 +820,7 @@ fn canonicalHeaders(allocator: std.mem.Allocator, headers: []base.Header, servic
// my-header1:a b c\n
// my-header2:"a b c"\n
// x-amz-date:20150830T123600Z\n
- var dest = try std.ArrayList(base.Header).initCapacity(allocator, headers.len);
+ var dest = try std.ArrayList(std.http.Header).initCapacity(allocator, headers.len);
defer {
for (dest.items) |h| {
allocator.free(h.name);
@@ -835,7 +859,7 @@ fn canonicalHeaders(allocator: std.mem.Allocator, headers: []base.Header, servic
try dest.append(.{ .name = n, .value = v });
}
- std.sort.pdq(base.Header, dest.items, {}, lessThan);
+ std.sort.pdq(std.http.Header, dest.items, {}, lessThan);
var dest_str = try std.ArrayList(u8).initCapacity(allocator, total_len);
defer dest_str.deinit();
@@ -883,7 +907,7 @@ fn canonicalHeaderValue(allocator: std.mem.Allocator, value: []const u8) ![]cons
_ = allocator.resize(rc, rc_inx);
return rc[0..rc_inx];
}
-fn lessThan(context: void, lhs: base.Header, rhs: base.Header) bool {
+fn lessThan(context: void, lhs: std.http.Header, rhs: std.http.Header) bool {
_ = context;
return std.ascii.lessThanIgnoreCase(lhs.name, rhs.name);
}
@@ -935,7 +959,7 @@ test "canonical query" {
}
test "canonical headers" {
const allocator = std.testing.allocator;
- var headers = try std.ArrayList(base.Header).initCapacity(allocator, 5);
+ var headers = try std.ArrayList(std.http.Header).initCapacity(allocator, 5);
defer headers.deinit();
try headers.append(.{ .name = "Host", .value = "iam.amazonaws.com" });
try headers.append(.{ .name = "Content-Type", .value = "application/x-www-form-urlencoded; charset=utf-8" });
@@ -960,7 +984,7 @@ test "canonical headers" {
test "canonical request" {
const allocator = std.testing.allocator;
- var headers = try std.ArrayList(base.Header).initCapacity(allocator, 5);
+ var headers = try std.ArrayList(std.http.Header).initCapacity(allocator, 5);
defer headers.deinit();
try headers.append(.{ .name = "User-agent", .value = "c sdk v1.0" });
// In contrast to AWS CRT (aws-c-auth), we add the date as part of the
@@ -1020,7 +1044,7 @@ test "can sign" {
// [debug] (awshttp): Content-Length: 43
const allocator = std.testing.allocator;
- var headers = try std.ArrayList(base.Header).initCapacity(allocator, 5);
+ var headers = try std.ArrayList(std.http.Header).initCapacity(allocator, 5);
defer headers.deinit();
try headers.append(.{ .name = "Content-Type", .value = "application/x-www-form-urlencoded; charset=utf-8" });
try headers.append(.{ .name = "Content-Length", .value = "13" });
@@ -1077,34 +1101,39 @@ test "can verify server request" {
test_credential = Credentials.init(allocator, access_key, secret_key, null);
defer test_credential.?.deinit();
- var headers = std.http.Headers.init(allocator);
- defer headers.deinit();
- try headers.append("Connection", "keep-alive");
- try headers.append("Accept-Encoding", "gzip, deflate, zstd");
- try headers.append("TE", "gzip, deflate, trailers");
- try headers.append("Accept", "application/json");
- try headers.append("Host", "127.0.0.1");
- try headers.append("User-Agent", "zig-aws 1.0");
- try headers.append("Content-Type", "text/plain");
- try headers.append("x-amz-storage-class", "STANDARD");
- try headers.append("Content-Length", "3");
- try headers.append("X-Amz-Date", "20230908T170252Z");
- try headers.append("x-amz-content-sha256", "fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9");
- try headers.append("Authorization", "AWS4-HMAC-SHA256 Credential=ACCESS/20230908/us-west-2/s3/aws4_request, SignedHeaders=accept;content-length;content-type;host;x-amz-content-sha256;x-amz-date;x-amz-storage-class, Signature=fcc43ce73a34c9bd1ddf17e8a435f46a859812822f944f9eeb2aabcd64b03523");
-
- var buf = "bar".*;
- var fis = std.io.fixedBufferStream(&buf);
- const request = std.http.Server.Request{
- .method = std.http.Method.PUT,
- .target = "/mysfitszj3t6webstack-hostingbucketa91a61fe-1ep3ezkgwpxr0/i/am/a/teapot/foo?x-id=PutObject",
- .version = .@"HTTP/1.1",
- .content_length = 3,
- .headers = headers,
- .parser = std.http.protocol.HeadersParser.initDynamic(std.math.maxInt(usize)),
+ const req =
+ "PUT /mysfitszj3t6webstack-hostingbucketa91a61fe-1ep3ezkgwpxr0/i/am/a/teapot/foo?x-id=PutObject HTTP/1.1\r\n" ++
+ "Connection: keep-alive\r\n" ++
+ "Accept-Encoding: gzip, deflate, zstd\r\n" ++
+ "TE: gzip, deflate, trailers\r\n" ++
+ "Accept: application/json\r\n" ++
+ "Host: 127.0.0.1\r\n" ++
+ "User-Agent: zig-aws 1.0\r\n" ++
+ "Content-Type: text/plain\r\n" ++
+ "x-amz-storage-class: STANDARD\r\n" ++
+ "Content-Length: 3\r\n" ++
+ "X-Amz-Date: 20230908T170252Z\r\n" ++
+ "x-amz-content-sha256: fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9\r\n" ++
+ "Authorization: AWS4-HMAC-SHA256 Credential=ACCESS/20230908/us-west-2/s3/aws4_request, SignedHeaders=accept;content-length;content-type;host;x-amz-content-sha256;x-amz-date;x-amz-storage-class, Signature=fcc43ce73a34c9bd1ddf17e8a435f46a859812822f944f9eeb2aabcd64b03523\r\n\r\nbar";
+ var read_buffer: [1024]u8 = undefined;
+ @memcpy(read_buffer[0..req.len], req);
+ var server: std.http.Server = .{
+ .connection = undefined,
+ .state = .ready,
+ .read_buffer = &read_buffer,
+ .read_buffer_len = req.len,
+ .next_request_start = 0,
+ };
+ var request: std.http.Server.Request = .{
+ .server = &server,
+ .head_end = req.len - 3,
+ .head = try std.http.Server.Request.Head.parse(read_buffer[0 .. req.len - 3]),
+ .reader_state = undefined,
};
// std.testing.log_level = .debug;
- try std.testing.expect(try verifyServerRequest(allocator, request, fis.reader(), struct {
+ var fbs = std.io.fixedBufferStream("bar");
+ try std.testing.expect(try verifyServerRequest(allocator, &request, fbs.reader(), struct {
cred: Credentials,
const Self = @This();
@@ -1122,34 +1151,51 @@ test "can verify server request without x-amz-content-sha256" {
test_credential = Credentials.init(allocator, access_key, secret_key, null);
defer test_credential.?.deinit();
- var headers = std.http.Headers.init(allocator);
- defer headers.deinit();
- try headers.append("Connection", "keep-alive");
- try headers.append("Accept-Encoding", "gzip, deflate, zstd");
- try headers.append("TE", "gzip, deflate, trailers");
- try headers.append("Accept", "application/json");
- try headers.append("X-Amz-Target", "DynamoDB_20120810.CreateTable");
- try headers.append("Host", "dynamodb.us-west-2.amazonaws.com");
- try headers.append("User-Agent", "zig-aws 1.0");
- try headers.append("Content-Type", "application/x-amz-json-1.0");
- try headers.append("Content-Length", "403");
- try headers.append("X-Amz-Date", "20240224T154944Z");
- try headers.append("x-amz-content-sha256", "fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9");
- try headers.append("Authorization", "AWS4-HMAC-SHA256 Credential=ACCESS/20240224/us-west-2/dynamodb/aws4_request, SignedHeaders=content-type;host;x-amz-date;x-amz-target, Signature=8fd23dc7dbcb36c4aa54207a7118f8b9fcd680da73a0590b498e9577ff68ec33");
+ const head =
+ "POST / HTTP/1.1\r\n" ++
+ "Connection: keep-alive\r\n" ++
+ "Accept-Encoding: gzip, deflate, zstd\r\n" ++
+ "TE: gzip, deflate, trailers\r\n" ++
+ "Accept: application/json\r\n" ++
+ "X-Amz-Target: DynamoDB_20120810.CreateTable\r\n" ++
+ "Host: dynamodb.us-west-2.amazonaws.com\r\n" ++
+ "User-Agent: zig-aws 1.0\r\n" ++
+ "Content-Type: application/x-amz-json-1.0\r\n" ++
+ "Content-Length: 403\r\n" ++
+ "X-Amz-Date: 20240224T154944Z\r\n" ++
+ "x-amz-content-sha256: fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9\r\n" ++
+ "Authorization: AWS4-HMAC-SHA256 Credential=ACCESS/20240224/us-west-2/dynamodb/aws4_request, SignedHeaders=content-type;host;x-amz-date;x-amz-target, Signature=8fd23dc7dbcb36c4aa54207a7118f8b9fcd680da73a0590b498e9577ff68ec33\r\n\r\n";
const body =
\\{"AttributeDefinitions": [{"AttributeName": "Artist", "AttributeType": "S"}, {"AttributeName": "SongTitle", "AttributeType": "S"}], "TableName": "MusicCollection", "KeySchema": [{"AttributeName": "Artist", "KeyType": "HASH"}, {"AttributeName": "SongTitle", "KeyType": "RANGE"}], "ProvisionedThroughput": {"ReadCapacityUnits": 5, "WriteCapacityUnits": 5}, "Tags": [{"Key": "Owner", "Value": "blueTeam"}]}
;
+ const req_data = head ++ body;
+ var read_buffer: [2048]u8 = undefined;
+ @memcpy(read_buffer[0..req_data.len], req_data);
+ var server: std.http.Server = .{
+ .connection = undefined,
+ .state = .ready,
+ .read_buffer = &read_buffer,
+ .read_buffer_len = req_data.len,
+ .next_request_start = 0,
+ };
+ var request: std.http.Server.Request = .{
+ .server = &server,
+ .head_end = head.len,
+ .head = try std.http.Server.Request.Head.parse(read_buffer[0..head.len]),
+ .reader_state = undefined,
+ };
{
- var h = try std.ArrayList(base.Header).initCapacity(allocator, headers.list.items.len);
+ var h = std.ArrayList(std.http.Header).init(allocator);
defer h.deinit();
const signed_headers = &[_][]const u8{ "content-type", "host", "x-amz-date", "x-amz-target" };
- for (headers.list.items) |source| {
+ var it = request.iterateHeaders();
+ while (it.next()) |source| {
var match = false;
for (signed_headers) |s| {
match = std.ascii.eqlIgnoreCase(s, source.name);
if (match) break;
}
- if (match) h.appendAssumeCapacity(.{ .name = source.name, .value = source.value });
+ if (match) try h.append(.{ .name = source.name, .value = source.value });
}
const req = base.Request{
.path = "/",
@@ -1187,16 +1233,8 @@ test "can verify server request without x-amz-content-sha256" {
{ // verification
var fis = std.io.fixedBufferStream(body[0..]);
- const request = std.http.Server.Request{
- .method = std.http.Method.POST,
- .target = "/",
- .version = .@"HTTP/1.1",
- .content_length = 403,
- .headers = headers,
- .parser = std.http.protocol.HeadersParser.initDynamic(std.math.maxInt(usize)),
- };
- try std.testing.expect(try verifyServerRequest(allocator, request, fis.reader(), struct {
+ try std.testing.expect(try verifyServerRequest(allocator, &request, fis.reader(), struct {
cred: Credentials,
const Self = @This();
diff --git a/src/http_client_17015_issue.zig b/src/http_client_17015_issue.zig
deleted file mode 100644
index 3427f74..0000000
--- a/src/http_client_17015_issue.zig
+++ /dev/null
@@ -1,155 +0,0 @@
-const std = @import("std");
-const Uri = std.Uri;
-
-///////////////////////////////////////////////////////////////////////////
-/// This function imported from:
-/// https://github.com/ziglang/zig/blob/0.11.0/lib/std/http/Client.zig#L538-L636
-///
-/// The first commit of this file will be unchanged from 0.11.0 to more
-/// clearly indicate changes moving forward. The plan is to change
-/// only the two w.print lines for req.uri 16 and 18 lines down from this comment
-///////////////////////////////////////////////////////////////////////////
-/// Send the request to the server.
-pub fn start(req: *std.http.Client.Request) std.http.Client.Request.StartError!void {
- var buffered = std.io.bufferedWriter(req.connection.?.data.writer());
- const w = buffered.writer();
-
- try w.writeAll(@tagName(req.method));
- try w.writeByte(' ');
-
- if (req.method == .CONNECT) {
- try w.writeAll(req.uri.host.?);
- try w.writeByte(':');
- try w.print("{}", .{req.uri.port.?});
- } else if (req.connection.?.data.proxied) {
- // proxied connections require the full uri
- try format(req.uri, "+/", .{}, w);
- } else {
- try format(req.uri, "/", .{}, w);
- }
-
- try w.writeByte(' ');
- try w.writeAll(@tagName(req.version));
- try w.writeAll("\r\n");
-
- if (!req.headers.contains("host")) {
- try w.writeAll("Host: ");
- try w.writeAll(req.uri.host.?);
- try w.writeAll("\r\n");
- }
-
- if (!req.headers.contains("user-agent")) {
- try w.writeAll("User-Agent: zig/");
- try w.writeAll(@import("builtin").zig_version_string);
- try w.writeAll(" (std.http)\r\n");
- }
-
- if (!req.headers.contains("connection")) {
- try w.writeAll("Connection: keep-alive\r\n");
- }
-
- if (!req.headers.contains("accept-encoding")) {
- try w.writeAll("Accept-Encoding: gzip, deflate, zstd\r\n");
- }
-
- if (!req.headers.contains("te")) {
- try w.writeAll("TE: gzip, deflate, trailers\r\n");
- }
-
- const has_transfer_encoding = req.headers.contains("transfer-encoding");
- const has_content_length = req.headers.contains("content-length");
-
- if (!has_transfer_encoding and !has_content_length) {
- switch (req.transfer_encoding) {
- .chunked => try w.writeAll("Transfer-Encoding: chunked\r\n"),
- .content_length => |content_length| try w.print("Content-Length: {d}\r\n", .{content_length}),
- .none => {},
- }
- } else {
- if (has_content_length) {
- const content_length = std.fmt.parseInt(u64, req.headers.getFirstValue("content-length").?, 10) catch return error.InvalidContentLength;
-
- req.transfer_encoding = .{ .content_length = content_length };
- } else if (has_transfer_encoding) {
- const transfer_encoding = req.headers.getFirstValue("transfer-encoding").?;
- if (std.mem.eql(u8, transfer_encoding, "chunked")) {
- req.transfer_encoding = .chunked;
- } else {
- return error.UnsupportedTransferEncoding;
- }
- } else {
- req.transfer_encoding = .none;
- }
- }
-
- try w.print("{}", .{req.headers});
-
- try w.writeAll("\r\n");
-
- try buffered.flush();
-}
-
-///////////////////////////////////////////////////////////////////////////
-/// This function imported from:
-/// https://github.com/ziglang/zig/blob/0.11.0/lib/std/Uri.zig#L209-L264
-///
-/// The first commit of this file will be unchanged from 0.11.0 to more
-/// clearly indicate changes moving forward. The plan is to change
-/// only the writeEscapedPath call 42 lines down from this comment
-///////////////////////////////////////////////////////////////////////////
-pub fn format(
- uri: Uri,
- comptime fmt: []const u8,
- options: std.fmt.FormatOptions,
- writer: anytype,
-) @TypeOf(writer).Error!void {
- _ = options;
-
- const needs_absolute = comptime std.mem.indexOf(u8, fmt, "+") != null;
- const needs_path = comptime std.mem.indexOf(u8, fmt, "/") != null or fmt.len == 0;
- const needs_fragment = comptime std.mem.indexOf(u8, fmt, "#") != null;
-
- if (needs_absolute) {
- try writer.writeAll(uri.scheme);
- try writer.writeAll(":");
- if (uri.host) |host| {
- try writer.writeAll("//");
-
- if (uri.user) |user| {
- try writer.writeAll(user);
- if (uri.password) |password| {
- try writer.writeAll(":");
- try writer.writeAll(password);
- }
- try writer.writeAll("@");
- }
-
- try writer.writeAll(host);
-
- if (uri.port) |port| {
- try writer.writeAll(":");
- try std.fmt.formatInt(port, 10, .lower, .{}, writer);
- }
- }
- }
-
- if (needs_path) {
- if (uri.path.len == 0) {
- try writer.writeAll("/");
- } else {
- try writer.writeAll(uri.path); // do not mess with our path
- }
-
- if (uri.query) |q| {
- try writer.writeAll("?");
- try Uri.writeEscapedQuery(writer, q);
- }
-
- if (needs_fragment) {
- if (uri.fragment) |f| {
- try writer.writeAll("#");
- try Uri.writeEscapedQuery(writer, f);
- }
- }
- }
-}
diff --git a/src/json.zig b/src/json.zig
index 7b280e6..598f3d3 100644
--- a/src/json.zig
+++ b/src/json.zig
@@ -1762,7 +1762,7 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
var r: T = undefined;
const source_slice = stringToken.slice(tokens.slice, tokens.i - 1);
switch (stringToken.escapes) {
- .None => mem.copy(u8, &r, source_slice),
+ .None => @memcpy(&r, source_slice),
.Some => try unescapeValidString(&r, source_slice),
}
return r;
@@ -2019,7 +2019,7 @@ test "parse into tagged union" {
}
{ // failing allocations should be bubbled up instantly without trying next member
- var fail_alloc = testing.FailingAllocator.init(testing.allocator, 0);
+ var fail_alloc = testing.FailingAllocator.init(testing.allocator, .{ .fail_index = 0 });
const options = ParseOptions{ .allocator = fail_alloc.allocator() };
const T = union(enum) {
// both fields here match the input
@@ -2067,7 +2067,7 @@ test "parse union bubbles up AllocatorRequired" {
}
test "parseFree descends into tagged union" {
- var fail_alloc = testing.FailingAllocator.init(testing.allocator, 1);
+ var fail_alloc = testing.FailingAllocator.init(testing.allocator, .{ .fail_index = 1 });
const options = ParseOptions{ .allocator = fail_alloc.allocator() };
const T = union(enum) {
int: i32,
@@ -2827,14 +2827,14 @@ pub fn stringify(
}
},
.Enum => {
- if (comptime std.meta.trait.hasFn("jsonStringify")(T)) {
+ if (comptime std.meta.hasFn(T, "jsonStringify")) {
return value.jsonStringify(options, out_stream);
}
@compileError("Unable to stringify enum '" ++ @typeName(T) ++ "'");
},
.Union => {
- if (comptime std.meta.trait.hasFn("jsonStringify")(T)) {
+ if (comptime std.meta.hasFn(T, "jsonStringify")) {
return value.jsonStringify(options, out_stream);
}
@@ -2850,7 +2850,7 @@ pub fn stringify(
}
},
.Struct => |S| {
- if (comptime std.meta.trait.hasFn("jsonStringify")(T)) {
+ if (comptime std.meta.hasFn(T, "jsonStringify")) {
return value.jsonStringify(options, out_stream);
}
@@ -2874,11 +2874,11 @@ pub fn stringify(
try child_whitespace.outputIndent(out_stream);
}
var field_written = false;
- if (comptime std.meta.trait.hasFn("jsonStringifyField")(T))
+ if (comptime std.meta.hasFn(T, "jsonStringifyField"))
field_written = try value.jsonStringifyField(Field.name, child_options, out_stream);
if (!field_written) {
- if (comptime std.meta.trait.hasFn("fieldNameFor")(T)) {
+ if (comptime std.meta.hasFn(T, "fieldNameFor")) {
const name = value.fieldNameFor(Field.name);
try stringify(name, options, out_stream);
} else {
diff --git a/src/main.zig b/src/main.zig
index 3aad5c9..755d771 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -38,8 +38,8 @@ pub fn log(
nosuspend stderr.print(prefix ++ format ++ "\n", args) catch return;
}
-pub const std_options = struct {
- pub const logFn = log;
+pub const std_options = std.Options{
+ .logFn = log,
};
const Tests = enum {
query_no_input,
@@ -71,7 +71,7 @@ pub fn main() anyerror!void {
defer bw.flush() catch unreachable;
const stdout = bw.writer();
var arg0: ?[]const u8 = null;
- var proxy: ?std.http.Client.HttpProxy = null;
+ var proxy: ?std.http.Client.Proxy = null;
while (args.next()) |arg| {
if (arg0 == null) arg0 = arg;
if (std.mem.eql(u8, "-h", arg) or std.mem.eql(u8, "--help", arg)) {
@@ -353,17 +353,22 @@ pub fn main() anyerror!void {
std.log.info("===== Tests complete =====", .{});
}
-fn proxyFromString(string: []const u8) !std.http.Client.HttpProxy {
- var rc = std.http.Client.HttpProxy{
+fn proxyFromString(string: []const u8) !std.http.Client.Proxy {
+ var rc = std.http.Client.Proxy{
.protocol = undefined,
.host = undefined,
+ .authorization = null,
+ .port = undefined,
+ .supports_connect = true, // TODO: Is this a good default?
};
var remaining: []const u8 = string;
if (std.mem.startsWith(u8, string, "http://")) {
remaining = remaining["http://".len..];
rc.protocol = .plain;
+ rc.port = 80;
} else if (std.mem.startsWith(u8, string, "https://")) {
remaining = remaining["https://".len..];
+ rc.port = 443;
rc.protocol = .tls;
} else return error.InvalidScheme;
var split_iterator = std.mem.split(u8, remaining, ":");
diff --git a/src/servicemodel.zig b/src/servicemodel.zig
index 79a4e4f..a3ff8f6 100644
--- a/src/servicemodel.zig
+++ b/src/servicemodel.zig
@@ -21,7 +21,7 @@ pub fn Services(comptime service_imports: anytype) type {
// finally, generate the type
return @Type(.{
.Struct = .{
- .layout = .Auto,
+ .layout = .Auto, // will be .auto in the future
.fields = &fields,
.decls = &[_]std.builtin.Type.Declaration{},
.is_tuple = false,
diff --git a/src/url.zig b/src/url.zig
index e31d97f..b2e1500 100644
--- a/src/url.zig
+++ b/src/url.zig
@@ -1,15 +1,14 @@
const std = @import("std");
-fn defaultTransformer(allocator: std.mem.Allocator, field_name: []const u8, options: EncodingOptions) anyerror![]const u8 {
- _ = options;
+fn defaultTransformer(allocator: std.mem.Allocator, field_name: []const u8) anyerror![]const u8 {
_ = allocator;
return field_name;
}
-pub const fieldNameTransformerFn = *const fn (std.mem.Allocator, []const u8, EncodingOptions) anyerror![]const u8;
+pub const fieldNameTransformerFn = *const fn (std.mem.Allocator, []const u8) anyerror![]const u8;
pub const EncodingOptions = struct {
- field_name_transformer: fieldNameTransformerFn = &defaultTransformer,
+ field_name_transformer: fieldNameTransformerFn = defaultTransformer,
};
pub fn encode(allocator: std.mem.Allocator, obj: anytype, writer: anytype, comptime options: EncodingOptions) !void {
@@ -26,7 +25,7 @@ fn encodeStruct(
) !bool {
var rc = first;
inline for (@typeInfo(@TypeOf(obj)).Struct.fields) |field| {
- const field_name = try options.field_name_transformer(allocator, field.name, options);
+ const field_name = try options.field_name_transformer(allocator, field.name);
defer if (options.field_name_transformer.* != defaultTransformer)
allocator.free(field_name);
// @compileLog(@typeInfo(field.field_type).Pointer);
diff --git a/src/xml_shaper.zig b/src/xml_shaper.zig
index f97d188..e4febc6 100644
--- a/src/xml_shaper.zig
+++ b/src/xml_shaper.zig
@@ -219,9 +219,9 @@ fn parseInternal(comptime T: type, element: *xml.Element, options: ParseOptions)
log.debug("Processing fields in struct: {s}", .{@typeName(T)});
inline for (struct_info.fields, 0..) |field, i| {
- var name = field.name;
+ var name: []const u8 = field.name;
var found_value = false;
- if (comptime std.meta.trait.hasFn("fieldNameFor")(T))
+ if (comptime std.meta.hasFn(T, "fieldNameFor"))
name = r.fieldNameFor(field.name);
log.debug("Field name: {s}, Element: {s}, Adjusted field name: {s}", .{ field.name, element.tag, name });
var iterator = element.findChildrenByTag(name);