zig build compiles using zig 0.15.1
Some checks failed
AWS-Zig Build / build-zig-amd64-host (push) Failing after 2m27s

This commit is contained in:
Emil Lerch 2025-08-22 18:00:25 -07:00
parent 5334cc3bfe
commit 8d399cb8a6
Signed by: lobo
GPG key ID: A7B62D657EF764F8
14 changed files with 336 additions and 297 deletions

View file

@ -17,10 +17,10 @@ pub const Timestamp = enum(zeit.Nanoseconds) {
}) catch std.debug.panic("Failed to parse timestamp to instant: {d}", .{value});
const fmt = "Mon, 02 Jan 2006 15:04:05 GMT";
var buf = std.mem.zeroes([fmt.len]u8);
var buf: [fmt.len]u8 = undefined;
var fbs = std.io.fixedBufferStream(&buf);
instant.time().gofmt(fbs.writer(), fmt) catch std.debug.panic("Failed to format instant: {d}", .{instant.timestamp});
var fbs = std.Io.Writer.fixed(&buf);
instant.time().gofmt(&fbs, fmt) catch std.debug.panic("Failed to format instant: {d}", .{instant.timestamp});
try jw.write(&buf);
}

View file

@ -1772,12 +1772,12 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
.slice => {
switch (token) {
.ArrayBegin => {
var arraylist = std.ArrayList(ptrInfo.child).init(allocator);
var arraylist = std.ArrayList(ptrInfo.child){};
errdefer {
while (arraylist.pop()) |v| {
parseFree(ptrInfo.child, v, options);
}
arraylist.deinit();
arraylist.deinit(allocator);
}
while (true) {
@ -1787,11 +1787,11 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
else => {},
}
try arraylist.ensureTotalCapacity(arraylist.items.len + 1);
try arraylist.ensureTotalCapacity(allocator, arraylist.items.len + 1);
const v = try parseInternal(ptrInfo.child, tok, tokens, options);
arraylist.appendAssumeCapacity(v);
}
return arraylist.toOwnedSlice();
return arraylist.toOwnedSlice(allocator);
},
.String => |stringToken| {
if (ptrInfo.child != u8) return error.UnexpectedToken;
@ -1817,12 +1817,12 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
if (key_type == null) return error.UnexpectedToken;
const value_type = typeForField(ptrInfo.child, "value");
if (value_type == null) return error.UnexpectedToken;
var arraylist = std.ArrayList(ptrInfo.child).init(allocator);
var arraylist = std.ArrayList(ptrInfo.child){};
errdefer {
while (arraylist.pop()) |v| {
parseFree(ptrInfo.child, v, options);
}
arraylist.deinit();
arraylist.deinit(allocator);
}
while (true) {
const key = (try tokens.next()) orelse return error.UnexpectedEndOfJson;
@ -1831,13 +1831,13 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
else => {},
}
try arraylist.ensureTotalCapacity(arraylist.items.len + 1);
try arraylist.ensureTotalCapacity(allocator, arraylist.items.len + 1);
const key_val = try parseInternal(key_type.?, key, tokens, options);
const val = (try tokens.next()) orelse return error.UnexpectedEndOfJson;
const val_val = try parseInternal(value_type.?, val, tokens, options);
arraylist.appendAssumeCapacity(.{ .key = key_val, .value = val_val });
}
return arraylist.toOwnedSlice();
return arraylist.toOwnedSlice(allocator);
},
else => return error.UnexpectedToken,
}

View file

@ -1,11 +1,11 @@
const builtin = @import("builtin");
const case = @import("case");
const std = @import("std");
const zeit = @import("zeit");
const awshttp = @import("aws_http.zig");
const json = @import("json");
const url = @import("url.zig");
const case = @import("case.zig");
const date = @import("date");
const servicemodel = @import("servicemodel.zig");
const xml_shaper = @import("xml_shaper.zig");
@ -196,8 +196,8 @@ pub fn Request(comptime request_action: anytype) type {
log.debug("Rest method: '{s}'", .{aws_request.method});
log.debug("Rest success code: '{d}'", .{Action.http_config.success_code});
log.debug("Rest raw uri: '{s}'", .{Action.http_config.uri});
var al = std.ArrayList([]const u8).init(options.client.allocator);
defer al.deinit();
var al = std.ArrayList([]const u8){};
defer al.deinit(options.client.allocator);
aws_request.path = try buildPath(
options.client.allocator,
Action.http_config.uri,
@ -230,14 +230,13 @@ pub fn Request(comptime request_action: anytype) type {
log.debug("Rest query: '{s}'", .{aws_request.query});
defer options.client.allocator.free(aws_request.query);
// We don't know if we need a body...guessing here, this should cover most
var buffer = std.ArrayList(u8).init(options.client.allocator);
var buffer = std.Io.Writer.Allocating.init(options.client.allocator);
defer buffer.deinit();
if (Self.service_meta.aws_protocol == .rest_json_1) {
if (std.mem.eql(u8, "PUT", aws_request.method) or std.mem.eql(u8, "POST", aws_request.method)) {
try std.json.stringify(request, .{ .whitespace = .indent_4 }, buffer.writer());
if (std.mem.eql(u8, "PUT", aws_request.method) or std.mem.eql(u8, "POST", aws_request.method))
try buffer.writer.print("{f}", .{std.json.fmt(request, .{ .whitespace = .indent_4 })});
}
}
aws_request.body = buffer.items;
aws_request.body = buffer.written();
var rest_xml_body: ?[]const u8 = null;
defer if (rest_xml_body) |b| options.client.allocator.free(b);
if (Self.service_meta.aws_protocol == .rest_xml) {
@ -315,9 +314,6 @@ pub fn Request(comptime request_action: anytype) type {
});
defer options.client.allocator.free(target);
var buffer = std.ArrayList(u8).init(options.client.allocator);
defer buffer.deinit();
// The transformer needs to allocate stuff out of band, but we
// can guarantee we don't need the memory after this call completes,
// so we'll use an arena allocator to whack everything.
@ -326,7 +322,13 @@ pub fn Request(comptime request_action: anytype) type {
// smithy spec, "A null value MAY be provided or omitted
// for a boxed member with no observable difference." But we're
// seeing a lot of differences here between spec and reality
try std.json.stringify(request, .{ .whitespace = .indent_4 }, buffer.writer());
const body = try std.fmt.allocPrint(
options.client.allocator,
"{f}",
.{std.json.fmt(request, .{ .whitespace = .indent_4 })},
);
defer options.client.allocator.free(body);
var content_type: []const u8 = undefined;
switch (Self.service_meta.aws_protocol) {
@ -336,7 +338,7 @@ pub fn Request(comptime request_action: anytype) type {
}
return try Self.callAws(.{
.query = "",
.body = buffer.items,
.body = body,
.content_type = content_type,
.headers = @constCast(&[_]awshttp.Header{.{ .name = "X-Amz-Target", .value = target }}),
}, options);
@ -348,13 +350,13 @@ pub fn Request(comptime request_action: anytype) type {
// handle lists and maps properly anyway yet, so we'll go for it and see
// where it breaks. PRs and/or failing test cases appreciated.
fn callQuery(request: ActionRequest, options: Options) !FullResponseType {
var buffer = std.ArrayList(u8).init(options.client.allocator);
defer buffer.deinit();
const writer = buffer.writer();
var aw: std.Io.Writer.Allocating = .init(options.client.allocator);
defer aw.deinit();
const writer = &aw.writer;
try url.encode(options.client.allocator, request, writer, .{
.field_name_transformer = queryFieldTransformer,
});
const continuation = if (buffer.items.len > 0) "&" else "";
const continuation = if (aw.written().len > 0) "&" else "";
const query = if (Self.service_meta.aws_protocol == .query)
""
@ -376,7 +378,7 @@ pub fn Request(comptime request_action: anytype) type {
action.action_name,
Self.service_meta.version.?, // Version required for the protocol, we should panic if it is not present
continuation,
buffer.items,
aw.written(),
});
defer options.client.allocator.free(body);
@ -889,9 +891,25 @@ fn parseInt(comptime T: type, val: []const u8) !T {
}
fn generalAllocPrint(allocator: Allocator, val: anytype) !?[]const u8 {
switch (@typeInfo(@TypeOf(val))) {
const T = @TypeOf(val);
switch (@typeInfo(T)) {
.optional => if (val) |v| return generalAllocPrint(allocator, v) else return null,
.array, .pointer => return try std.fmt.allocPrint(allocator, "{s}", .{val}),
.array, .pointer => switch (@typeInfo(T)) {
.array => return try std.fmt.allocPrint(allocator, "{s}", .{val}),
.pointer => |info| switch (info.size) {
.one => return try std.fmt.allocPrint(allocator, "{s}", .{val}),
.many => return try std.fmt.allocPrint(allocator, "{s}", .{val}),
.slice => {
log.warn(
"printing object of type [][]const u8...pretty sure this is wrong: {any}",
.{val},
);
return try std.fmt.allocPrint(allocator, "{any}", .{val});
},
.c => return try std.fmt.allocPrint(allocator, "{s}", .{val}),
},
else => {},
},
else => return try std.fmt.allocPrint(allocator, "{any}", .{val}),
}
}
@ -904,6 +922,7 @@ fn headersFor(allocator: Allocator, request: anytype) ![]awshttp.Header {
// It would be awesome to have a fixed array, but we can't because
// it depends on a runtime value based on whether these variables are null
var headers = try std.ArrayList(awshttp.Header).initCapacity(allocator, fields.len);
defer headers.deinit(allocator);
inline for (fields) |f| {
// Header name = value of field
// Header value = value of the field of the request based on field name
@ -916,7 +935,7 @@ fn headersFor(allocator: Allocator, request: anytype) ![]awshttp.Header {
});
}
}
return headers.toOwnedSlice();
return headers.toOwnedSlice(allocator);
}
fn freeHeadersFor(allocator: Allocator, request: anytype, headers: []const awshttp.Header) void {
@ -1027,14 +1046,14 @@ fn ServerResponse(comptime action: anytype) type {
.type = T,
.default_value_ptr = null,
.is_comptime = false,
.alignment = 0,
.alignment = std.meta.alignment(T),
},
.{
.name = "ResponseMetadata",
.type = ResponseMetadata,
.default_value_ptr = null,
.is_comptime = false,
.alignment = 0,
.alignment = std.meta.alignment(ResponseMetadata),
},
},
.decls = &[_]std.builtin.Type.Declaration{},
@ -1050,7 +1069,7 @@ fn ServerResponse(comptime action: anytype) type {
.type = Result,
.default_value_ptr = null,
.is_comptime = false,
.alignment = 0,
.alignment = std.meta.alignment(Result),
},
},
.decls = &[_]std.builtin.Type.Declaration{},
@ -1111,7 +1130,13 @@ fn safeFree(allocator: Allocator, obj: anytype) void {
}
}
fn queryFieldTransformer(allocator: Allocator, field_name: []const u8) anyerror![]const u8 {
return try case.snakeToPascal(allocator, field_name);
var reader = std.Io.Reader.fixed(field_name);
var aw = try std.Io.Writer.Allocating.initCapacity(allocator, 100);
defer aw.deinit();
const writer = &aw.writer;
try case.to(.pascal, &reader, writer);
return aw.toOwnedSlice();
// return try case.snakeToPascal(allocator, field_name);
}
fn buildPath(
@ -1123,8 +1148,7 @@ fn buildPath(
replaced_fields: *std.ArrayList([]const u8),
) ![]const u8 {
var buffer = try std.ArrayList(u8).initCapacity(allocator, raw_uri.len);
// const writer = buffer.writer();
defer buffer.deinit();
defer buffer.deinit(allocator);
var in_label = false;
var start: usize = 0;
for (raw_uri, 0..) |c, inx| {
@ -1142,40 +1166,42 @@ fn buildPath(
const replacement_label = raw_uri[start..end];
inline for (std.meta.fields(ActionRequest)) |field| {
if (std.mem.eql(u8, request.fieldNameFor(field.name), replacement_label)) {
try replaced_fields.append(replacement_label);
try replaced_fields.append(allocator, replacement_label);
var replacement_buffer = try std.ArrayList(u8).initCapacity(allocator, raw_uri.len);
defer replacement_buffer.deinit();
var encoded_buffer = try std.ArrayList(u8).initCapacity(allocator, raw_uri.len);
defer replacement_buffer.deinit(allocator);
var encoded_buffer = std.Io.Writer.Allocating.init(allocator);
defer encoded_buffer.deinit();
const replacement_writer = replacement_buffer.writer();
// std.mem.replacementSize
try std.json.stringify(
try (&encoded_buffer.writer).print(
"{f}",
.{std.json.fmt(
@field(request, field.name),
.{ .whitespace = .indent_4 },
replacement_writer,
)},
);
const trimmed_replacement_val = std.mem.trim(u8, replacement_buffer.items, "\"");
// NOTE: We have to encode here as it is a portion of the rest JSON protocol.
// This makes the encoding in the standard library wrong
try uriEncode(trimmed_replacement_val, encoded_buffer.writer(), encode_slash);
try buffer.appendSlice(encoded_buffer.items);
try uriEncode(trimmed_replacement_val, &encoded_buffer.writer, encode_slash);
try buffer.appendSlice(allocator, encoded_buffer.written());
}
}
},
else => if (!in_label) {
try buffer.append(c);
try buffer.append(allocator, c);
} else {},
}
}
return buffer.toOwnedSlice();
return buffer.toOwnedSlice(allocator);
}
fn uriEncode(input: []const u8, writer: anytype, encode_slash: bool) !void {
fn uriEncode(input: []const u8, writer: *std.Io.Writer, encode_slash: bool) !void {
for (input) |c|
try uriEncodeByte(c, writer, encode_slash);
}
fn uriEncodeByte(char: u8, writer: anytype, encode_slash: bool) !void {
fn uriEncodeByte(char: u8, writer: *std.Io.Writer, encode_slash: bool) !void {
switch (char) {
'!' => _ = try writer.write("%21"),
'#' => _ = try writer.write("%23"),
@ -1209,9 +1235,9 @@ fn buildQuery(allocator: Allocator, request: anytype) ![]const u8 {
// .function_version = "FunctionVersion",
// .marker = "Marker",
// };
var buffer = std.ArrayList(u8).init(allocator);
const writer = buffer.writer();
var buffer = std.Io.Writer.Allocating.init(allocator);
defer buffer.deinit();
const writer = &buffer.writer;
var prefix = "?";
if (@hasDecl(@TypeOf(request), "http_query")) {
const query_arguments = @field(@TypeOf(request), "http_query");
@ -1224,7 +1250,7 @@ fn buildQuery(allocator: Allocator, request: anytype) ![]const u8 {
return buffer.toOwnedSlice();
}
fn addQueryArg(comptime ValueType: type, prefix: []const u8, key: []const u8, value: anytype, writer: anytype) !bool {
fn addQueryArg(comptime ValueType: type, prefix: []const u8, key: []const u8, value: anytype, writer: *std.Io.Writer) !bool {
switch (@typeInfo(@TypeOf(value))) {
.optional => {
if (value) |v|
@ -1259,69 +1285,77 @@ fn addQueryArg(comptime ValueType: type, prefix: []const u8, key: []const u8, va
},
}
}
fn addBasicQueryArg(prefix: []const u8, key: []const u8, value: anytype, writer: anytype) !bool {
fn addBasicQueryArg(prefix: []const u8, key: []const u8, value: anytype, writer: *std.Io.Writer) !bool {
_ = try writer.write(prefix);
// TODO: url escaping
try uriEncode(key, writer, true);
_ = try writer.write("=");
var encoding_writer = uriEncodingWriter(writer);
var ignoring_writer = ignoringWriter(encoding_writer.writer(), '"');
try std.json.stringify(value, .{}, ignoring_writer.writer());
var encoding_writer = UriEncodingWriter.init(writer);
var ignoring_writer = IgnoringWriter.init(&encoding_writer.writer, '"');
try ignoring_writer.writer.print("{f}", .{std.json.fmt(value, .{})});
return true;
}
pub fn uriEncodingWriter(child_stream: anytype) UriEncodingWriter(@TypeOf(child_stream)) {
return .{ .child_stream = child_stream };
}
/// A Writer that ignores a character
pub fn UriEncodingWriter(comptime WriterType: type) type {
return struct {
child_stream: WriterType,
const UriEncodingWriter = struct {
child_writer: *std.Io.Writer,
writer: std.Io.Writer,
pub const Error = WriterType.Error;
pub const Writer = std.io.Writer(*Self, Error, write);
const Self = @This();
pub fn write(self: *Self, bytes: []const u8) Error!usize {
try uriEncode(bytes, self.child_stream, true);
return bytes.len; // We say that all bytes are "written", even if they're not, as caller may be retrying
}
pub fn writer(self: *Self) Writer {
return .{ .context = self };
}
pub fn init(child: *std.Io.Writer) UriEncodingWriter {
return .{
.child_writer = child,
.writer = .{
.buffer = &.{},
.vtable = &.{
.drain = drain,
},
},
};
}
}
pub fn ignoringWriter(child_stream: anytype, ignore: u8) IgnoringWriter(@TypeOf(child_stream)) {
return .{ .child_stream = child_stream, .ignore = ignore };
}
fn drain(w: *std.Io.Writer, data: []const []const u8, splat: usize) std.Io.Writer.Error!usize {
if (splat > 0) return error.WriteFailed; // no splat support
const self: *UriEncodingWriter = @fieldParentPtr("writer", w);
var total: usize = 0;
for (data) |bytes| {
try uriEncode(bytes, self.child_writer, true);
total += bytes.len;
}
return total; // We say that all bytes are "written", even if they're not, as caller may be retrying
}
};
/// A Writer that ignores a character
pub fn IgnoringWriter(comptime WriterType: type) type {
return struct {
child_stream: WriterType,
const IgnoringWriter = struct {
child_writer: *std.Io.Writer,
ignore: u8,
writer: std.Io.Writer,
pub const Error = WriterType.Error;
pub const Writer = std.io.Writer(*Self, Error, write);
const Self = @This();
pub fn write(self: *Self, bytes: []const u8) Error!usize {
for (bytes) |b| {
if (b != self.ignore)
try self.child_stream.writeByte(b);
}
return bytes.len; // We say that all bytes are "written", even if they're not, as caller may be retrying
}
pub fn writer(self: *Self) Writer {
return .{ .context = self };
}
pub fn init(child: *std.Io.Writer, ignore: u8) IgnoringWriter {
return .{
.child_writer = child,
.ignore = ignore,
.writer = .{
.buffer = &.{},
.vtable = &.{
.drain = drain,
},
},
};
}
}
fn drain(w: *std.Io.Writer, data: []const []const u8, splat: usize) std.Io.Writer.Error!usize {
if (splat > 0) return error.WriteFailed; // no splat support
const self: *IgnoringWriter = @fieldParentPtr("writer", w);
var total: usize = 0;
for (data) |bytes| {
for (bytes) |b|
if (b != self.ignore)
try self.child_writer.writeByte(b);
total += bytes.len;
}
return total; // We say that all bytes are "written", even if they're not, as caller may be retrying
}
};
fn reportTraffic(
allocator: Allocator,
@ -1330,9 +1364,9 @@ fn reportTraffic(
response: awshttp.HttpResult,
comptime reporter: fn (comptime []const u8, anytype) void,
) !void {
var msg = std.ArrayList(u8).init(allocator);
var msg = try std.Io.Writer.Allocating.initCapacity(allocator, 256);
defer msg.deinit();
const writer = msg.writer();
const writer = &msg.writer;
try writer.print("{s}\n\n", .{info});
try writer.print("Return status: {d}\n\n", .{response.response_code});
if (request.query.len > 0) try writer.print("Request Query:\n \t{s}\n", .{request.query});
@ -1354,7 +1388,7 @@ fn reportTraffic(
_ = try writer.write("Response Body:\n");
try writer.print("--------------\n{s}\n", .{response.body});
_ = try writer.write("--------------\n");
reporter("{s}\n", .{msg.items});
reporter("{s}\n", .{msg.written()});
}
////////////////////////////////////////////////////////////////////////

View file

@ -25,7 +25,7 @@ pub const Credentials = struct {
};
}
pub fn deinit(self: Self) void {
std.crypto.utils.secureZero(u8, self.secret_key);
std.crypto.secureZero(u8, self.secret_key);
self.allocator.free(self.secret_key);
self.allocator.free(self.access_key);
if (self.session_token) |t| self.allocator.free(t);

View file

@ -173,11 +173,12 @@ fn getContainerCredentials(allocator: std.mem.Allocator) !?auth.Credentials {
var cl = std.http.Client{ .allocator = allocator };
defer cl.deinit(); // I don't believe connection pooling would help much here as it's non-ssl and local
var resp_payload = std.ArrayList(u8).init(allocator);
defer resp_payload.deinit();
var aw: std.Io.Writer.Allocating = .init(allocator);
defer aw.deinit();
const response_payload = &aw.writer;
const req = try cl.fetch(.{
.location = .{ .url = container_uri },
.response_storage = .{ .dynamic = &resp_payload },
.response_writer = response_payload,
});
if (req.status != .ok and req.status != .not_found) {
log.warn("Bad status code received from container credentials endpoint: {}", .{@intFromEnum(req.status)});
@ -185,8 +186,8 @@ fn getContainerCredentials(allocator: std.mem.Allocator) !?auth.Credentials {
}
if (req.status == .not_found) return null;
log.debug("Read {d} bytes from container credentials endpoint", .{resp_payload.items.len});
if (resp_payload.items.len == 0) return null;
log.debug("Read {d} bytes from container credentials endpoint", .{aw.written().len});
if (aw.written().len == 0) return null;
const CredsResponse = struct {
AccessKeyId: []const u8,
@ -196,8 +197,8 @@ fn getContainerCredentials(allocator: std.mem.Allocator) !?auth.Credentials {
Token: []const u8,
};
const creds_response = blk: {
const res = std.json.parseFromSlice(CredsResponse, allocator, resp_payload.items, .{}) catch |e| {
log.err("Unexpected Json response from container credentials endpoint: {s}", .{resp_payload.items});
const res = std.json.parseFromSlice(CredsResponse, allocator, aw.written(), .{}) catch |e| {
log.err("Unexpected Json response from container credentials endpoint: {s}", .{aw.written()});
log.err("Error parsing json: {}", .{e});
if (@errorReturnTrace()) |trace| {
std.debug.dumpStackTrace(trace.*);
@ -224,26 +225,27 @@ fn getImdsv2Credentials(allocator: std.mem.Allocator) !?auth.Credentials {
defer cl.deinit(); // I don't believe connection pooling would help much here as it's non-ssl and local
// Get token
{
var resp_payload = std.ArrayList(u8).init(allocator);
defer resp_payload.deinit();
var aw: std.Io.Writer.Allocating = .init(allocator);
defer aw.deinit();
const response_payload = &aw.writer;
const req = try cl.fetch(.{
.method = .PUT,
.location = .{ .url = "http://169.254.169.254/latest/api/token" },
.extra_headers = &[_]std.http.Header{
.{ .name = "X-aws-ec2-metadata-token-ttl-seconds", .value = "21600" },
},
.response_storage = .{ .dynamic = &resp_payload },
.response_writer = response_payload,
});
if (req.status != .ok) {
log.warn("Bad status code received from IMDS v2: {}", .{@intFromEnum(req.status)});
return null;
}
if (resp_payload.items.len == 0) {
if (aw.written().len == 0) {
log.warn("Unexpected zero response from IMDS v2", .{});
return null;
}
token = try resp_payload.toOwnedSlice();
token = try aw.toOwnedSlice();
errdefer if (token) |t| allocator.free(t);
}
std.debug.assert(token != null);
@ -265,15 +267,16 @@ fn getImdsRoleName(allocator: std.mem.Allocator, client: *std.http.Client, imds_
// "InstanceProfileArn" : "arn:aws:iam::550620852718:instance-profile/ec2-dev",
// "InstanceProfileId" : "AIPAYAM4POHXCFNKZ7HU2"
// }
var resp_payload = std.ArrayList(u8).init(allocator);
defer resp_payload.deinit();
var aw: std.Io.Writer.Allocating = .init(allocator);
defer aw.deinit();
const response_payload = &aw.writer;
const req = try client.fetch(.{
.method = .GET,
.location = .{ .url = "http://169.254.169.254/latest/meta-data/iam/info" },
.extra_headers = &[_]std.http.Header{
.{ .name = "X-aws-ec2-metadata-token", .value = imds_token },
},
.response_storage = .{ .dynamic = &resp_payload },
.response_writer = response_payload,
});
if (req.status != .ok and req.status != .not_found) {
@ -281,7 +284,7 @@ fn getImdsRoleName(allocator: std.mem.Allocator, client: *std.http.Client, imds_
return null;
}
if (req.status == .not_found) return null;
if (resp_payload.items.len == 0) {
if (aw.written().len == 0) {
log.warn("Unexpected empty response from IMDS endpoint post token", .{});
return null;
}
@ -292,8 +295,8 @@ fn getImdsRoleName(allocator: std.mem.Allocator, client: *std.http.Client, imds_
InstanceProfileArn: []const u8,
InstanceProfileId: []const u8,
};
const imds_response = std.json.parseFromSlice(ImdsResponse, allocator, resp_payload.items, .{}) catch |e| {
log.err("Unexpected Json response from IMDS endpoint: {s}", .{resp_payload.items});
const imds_response = std.json.parseFromSlice(ImdsResponse, allocator, aw.written(), .{}) catch |e| {
log.err("Unexpected Json response from IMDS endpoint: {s}", .{aw.written()});
log.err("Error parsing json: {}", .{e});
if (@errorReturnTrace()) |trace| {
std.debug.dumpStackTrace(trace.*);
@ -315,15 +318,16 @@ fn getImdsRoleName(allocator: std.mem.Allocator, client: *std.http.Client, imds_
fn getImdsCredentials(allocator: std.mem.Allocator, client: *std.http.Client, role_name: []const u8, imds_token: []u8) !?auth.Credentials {
const url = try std.fmt.allocPrint(allocator, "http://169.254.169.254/latest/meta-data/iam/security-credentials/{s}/", .{role_name});
defer allocator.free(url);
var resp_payload = std.ArrayList(u8).init(allocator);
defer resp_payload.deinit();
var aw: std.Io.Writer.Allocating = .init(allocator);
defer aw.deinit();
const response_payload = &aw.writer;
const req = try client.fetch(.{
.method = .GET,
.location = .{ .url = url },
.extra_headers = &[_]std.http.Header{
.{ .name = "X-aws-ec2-metadata-token", .value = imds_token },
},
.response_storage = .{ .dynamic = &resp_payload },
.response_writer = response_payload,
});
if (req.status != .ok and req.status != .not_found) {
@ -331,7 +335,7 @@ fn getImdsCredentials(allocator: std.mem.Allocator, client: *std.http.Client, ro
return null;
}
if (req.status == .not_found) return null;
if (resp_payload.items.len == 0) {
if (aw.written().len == 0) {
log.warn("Unexpected empty response from IMDS role endpoint", .{});
return null;
}
@ -346,8 +350,8 @@ fn getImdsCredentials(allocator: std.mem.Allocator, client: *std.http.Client, ro
Token: []const u8,
Expiration: []const u8,
};
const imds_response = std.json.parseFromSlice(ImdsResponse, allocator, resp_payload.items, .{}) catch |e| {
log.err("Unexpected Json response from IMDS endpoint: {s}", .{resp_payload.items});
const imds_response = std.json.parseFromSlice(ImdsResponse, allocator, aw.written(), .{}) catch |e| {
log.err("Unexpected Json response from IMDS endpoint: {s}", .{aw.written()});
log.err("Error parsing json: {}", .{e});
if (@errorReturnTrace()) |trace| {
std.debug.dumpStackTrace(trace.*);

View file

@ -199,8 +199,8 @@ pub const AwsHttp = struct {
// We will use endpoint instead
request_cp.path = endpoint.path;
var request_headers = std.ArrayList(std.http.Header).init(self.allocator);
defer request_headers.deinit();
var request_headers = std.ArrayList(std.http.Header){};
defer request_headers.deinit(self.allocator);
const len = try addHeaders(self.allocator, &request_headers, endpoint.host, request_cp.body, request_cp.content_type, request_cp.headers);
defer if (len) |l| self.allocator.free(l);
@ -213,10 +213,10 @@ pub const AwsHttp = struct {
}
}
var headers = std.ArrayList(std.http.Header).init(self.allocator);
defer headers.deinit();
var headers = std.ArrayList(std.http.Header){};
defer headers.deinit(self.allocator);
for (request_cp.headers) |header|
try headers.append(.{ .name = header.name, .value = header.value });
try headers.append(self.allocator, .{ .name = header.name, .value = header.value });
log.debug("All Request Headers:", .{});
for (headers.items) |h| {
log.debug("\t{s}: {s}", .{ h.name, h.value });
@ -230,16 +230,10 @@ pub const AwsHttp = struct {
defer cl.deinit(); // TODO: Connection pooling
const method = std.meta.stringToEnum(std.http.Method, request_cp.method).?;
var server_header_buffer: [16 * 1024]u8 = undefined;
var resp_payload = std.ArrayList(u8).init(self.allocator);
defer resp_payload.deinit();
const req = try cl.fetch(.{
.server_header_buffer = &server_header_buffer,
.method = method,
.payload = if (request_cp.body.len > 0) request_cp.body else null,
.response_storage = .{ .dynamic = &resp_payload },
.raw_uri = true,
.location = .{ .url = url },
// Fetch API in 0.15.1 is insufficient as it does not provide
// server headers. We'll construct and send the request ourselves
var req = try cl.request(method, try std.Uri.parse(url), .{
// we need full control over most headers. I wish libraries would do a
// better job of having default headers as an opt-in...
.headers = .{
@ -266,33 +260,64 @@ pub const AwsHttp = struct {
// }
// try req.wait();
if (request_cp.body.len > 0) {
// This seems a bit silly, but we can't have a []const u8 here
// because when it sends, it's using a writer, and this becomes
// the buffer of the writer. It's conceivable that something
// in the chain then does actually modify the body of the request
// so we'll need to duplicate it here
const req_body = try self.allocator.dupe(u8, request_cp.body);
try req.sendBodyComplete(req_body);
} else try req.sendBodiless();
var response = try req.receiveHead(&.{});
// TODO: Timeout - is this now above us?
log.debug(
"Request Complete. Response code {d}: {?s}",
.{ @intFromEnum(req.status), req.status.phrase() },
.{ @intFromEnum(response.head.status), response.head.status.phrase() },
);
log.debug("Response headers:", .{});
var resp_headers = std.ArrayList(Header).init(
self.allocator,
);
defer resp_headers.deinit();
var it = std.http.HeaderIterator.init(server_header_buffer[0..]);
var resp_headers = std.ArrayList(Header){};
defer resp_headers.deinit(self.allocator);
var it = response.head.iterateHeaders();
while (it.next()) |h| { // even though we don't expect to fill the buffer,
// we don't get a length, but from the stdlib source it looks
// like it should be ok to call next on the undefined memory
log.debug(" {s}: {s}", .{ h.name, h.value });
try resp_headers.append(.{
try resp_headers.append(self.allocator, .{
.name = try (self.allocator.dupe(u8, h.name)),
.value = try (self.allocator.dupe(u8, h.value)),
});
}
// This is directly lifted from fetch, as there is no function in
// 0.15.1 client to negotiate decompression
const decompress_buffer: []u8 = switch (response.head.content_encoding) {
.identity => &.{},
.zstd => try self.allocator.alloc(u8, std.compress.zstd.default_window_len),
.deflate, .gzip => try self.allocator.alloc(u8, std.compress.flate.max_window_len),
.compress => return error.UnsupportedCompressionMethod,
};
defer self.allocator.free(decompress_buffer);
log.debug("raw response body:\n{s}", .{resp_payload.items});
var transfer_buffer: [64]u8 = undefined;
var decompress: std.http.Decompress = undefined;
const reader = response.readerDecompressing(&transfer_buffer, &decompress, decompress_buffer);
// Not sure on optimal size here, but should definitely be > 0
var aw = try std.Io.Writer.Allocating.initCapacity(self.allocator, 128);
defer aw.deinit();
const response_writer = &aw.writer;
_ = reader.streamRemaining(response_writer) catch |err| switch (err) {
error.ReadFailed => return response.bodyErr().?,
else => |e| return e,
};
log.debug("raw response body:\n{s}", .{aw.written()});
const rc = HttpResult{
.response_code = @intFromEnum(req.status),
.body = try resp_payload.toOwnedSlice(),
.headers = try resp_headers.toOwnedSlice(),
.response_code = @intFromEnum(response.head.status),
.body = try aw.toOwnedSlice(),
.headers = try resp_headers.toOwnedSlice(self.allocator),
.allocator = self.allocator,
};
return rc;
@ -305,15 +330,21 @@ fn getRegion(service: []const u8, region: []const u8) []const u8 {
return region;
}
fn addHeaders(allocator: std.mem.Allocator, headers: *std.ArrayList(std.http.Header), host: []const u8, body: []const u8, content_type: []const u8, additional_headers: []const Header) !?[]const u8 {
// We don't need allocator and body because they were to add a
// Content-Length header. But that is being added by the client send()
// function, so we don't want it on the request twice. But I also feel
// pretty strongly that send() should be providing us control, because
// I think if we don't add it here, it won't get signed, and we would
// really prefer it to be signed. So, we will wait and watch for this
// situation to change in stdlib
_ = allocator;
fn addHeaders(
allocator: std.mem.Allocator,
headers: *std.ArrayList(std.http.Header),
host: []const u8,
body: []const u8,
content_type: []const u8,
additional_headers: []const Header,
) !?[]const u8 {
// We don't need body because it was only used to add a Content-Length header. But
// that is being added by the client send() function, so we don't want it
// on the request twice. But I also feel pretty strongly that send() should
// be providing us control, because I think if we don't add it here, it
// won't get signed, and we would really prefer it to be signed. So, we
// will wait and watch for this situation to change in stdlib
_ = body;
var has_content_type = false;
for (additional_headers) |h| {
@ -322,12 +353,12 @@ fn addHeaders(allocator: std.mem.Allocator, headers: *std.ArrayList(std.http.Hea
break;
}
}
try headers.append(.{ .name = "Accept", .value = "application/json" });
try headers.append(.{ .name = "Host", .value = host });
try headers.append(.{ .name = "User-Agent", .value = "zig-aws 1.0" });
try headers.append(allocator, .{ .name = "Accept", .value = "application/json" });
try headers.append(allocator, .{ .name = "Host", .value = host });
try headers.append(allocator, .{ .name = "User-Agent", .value = "zig-aws 1.0" });
if (!has_content_type)
try headers.append(.{ .name = "Content-Type", .value = content_type });
try headers.appendSlice(additional_headers);
try headers.append(allocator, .{ .name = "Content-Type", .value = content_type });
try headers.appendSlice(allocator, additional_headers);
return null;
}

View file

@ -157,7 +157,7 @@ pub const SigningError = error{
XAmzExpiresHeaderInRequest,
/// Used if the request headers already includes x-amz-region-set
XAmzRegionSetHeaderInRequest,
} || std.fmt.AllocPrintError;
} || error{OutOfMemory};
const forbidden_headers = .{
.{ .name = "x-amz-content-sha256", .err = SigningError.XAmzContentSha256HeaderInRequest },
@ -312,12 +312,12 @@ pub fn signRequest(allocator: std.mem.Allocator, request: base.Request, config:
.name = "Authorization",
.value = try std.fmt.allocPrint(
allocator,
"AWS4-HMAC-SHA256 Credential={s}/{s}, SignedHeaders={s}, Signature={s}",
"AWS4-HMAC-SHA256 Credential={s}/{s}, SignedHeaders={s}, Signature={x}",
.{
config.credentials.access_key,
scope,
canonical_request.headers.signed_headers,
std.fmt.fmtSliceHexLower(signature),
signature,
},
),
};
@ -545,7 +545,7 @@ fn getSigningKey(allocator: std.mem.Allocator, signing_date: []const u8, config:
defer {
// secureZero avoids compiler optimizations that may say
// "WTF are you doing this thing? Looks like nothing to me. It's silly and we will remove it"
std.crypto.utils.secureZero(u8, secret); // zero our copy of secret
std.crypto.secureZero(u8, secret); // zero our copy of secret
allocator.free(secret);
}
// log.debug("secret: {s}", .{secret});
@ -673,7 +673,7 @@ fn canonicalUri(allocator: std.mem.Allocator, path: []const u8, double_encode: b
fn encodeParamPart(allocator: std.mem.Allocator, path: []const u8) ![]const u8 {
const unreserved_marks = "-_.!~*'()";
var encoded = try std.ArrayList(u8).initCapacity(allocator, path.len);
defer encoded.deinit();
defer encoded.deinit(allocator);
for (path) |c| {
var should_encode = true;
for (unreserved_marks) |r|
@ -685,16 +685,16 @@ fn encodeParamPart(allocator: std.mem.Allocator, path: []const u8) ![]const u8 {
should_encode = false;
if (!should_encode) {
try encoded.append(c);
try encoded.append(allocator, c);
continue;
}
// Whatever remains, encode it
try encoded.append('%');
const hex = try std.fmt.allocPrint(allocator, "{s}", .{std.fmt.fmtSliceHexUpper(&[_]u8{c})});
try encoded.append(allocator, '%');
const hex = try std.fmt.allocPrint(allocator, "{X}", .{&[_]u8{c}});
defer allocator.free(hex);
try encoded.appendSlice(hex);
try encoded.appendSlice(allocator, hex);
}
return encoded.toOwnedSlice();
return encoded.toOwnedSlice(allocator);
}
// URI encode every byte except the unreserved characters:
@ -715,7 +715,7 @@ fn encodeUri(allocator: std.mem.Allocator, path: []const u8) ![]u8 {
const reserved_characters = ";,/?:@&=+$#";
const unreserved_marks = "-_.!~*'()";
var encoded = try std.ArrayList(u8).initCapacity(allocator, path.len);
defer encoded.deinit();
defer encoded.deinit(allocator);
// if (std.mem.startsWith(u8, path, "/2017-03-31/tags/arn")) {
// try encoded.appendSlice("/2017-03-31/tags/arn%25253Aaws%25253Alambda%25253Aus-west-2%25253A550620852718%25253Afunction%25253Aawsome-lambda-LambdaStackawsomeLambda");
// return encoded.toOwnedSlice();
@ -738,16 +738,16 @@ fn encodeUri(allocator: std.mem.Allocator, path: []const u8) ![]u8 {
should_encode = false;
if (!should_encode) {
try encoded.append(c);
try encoded.append(allocator, c);
continue;
}
// Whatever remains, encode it
try encoded.append('%');
const hex = try std.fmt.allocPrint(allocator, "{s}", .{std.fmt.fmtSliceHexUpper(&[_]u8{c})});
try encoded.append(allocator, '%');
const hex = try std.fmt.allocPrint(allocator, "{X}", .{&[_]u8{c}});
defer allocator.free(hex);
try encoded.appendSlice(hex);
try encoded.appendSlice(allocator, hex);
}
return encoded.toOwnedSlice();
return encoded.toOwnedSlice(allocator);
}
fn canonicalQueryString(allocator: std.mem.Allocator, path: []const u8) ![]const u8 {
@ -800,25 +800,25 @@ fn canonicalQueryString(allocator: std.mem.Allocator, path: []const u8) ![]const
// Split this by component
var portions = std.mem.splitScalar(u8, query, '&');
var sort_me = std.ArrayList([]const u8).init(allocator);
defer sort_me.deinit();
var sort_me = std.ArrayList([]const u8){};
defer sort_me.deinit(allocator);
while (portions.next()) |item|
try sort_me.append(item);
try sort_me.append(allocator, item);
std.sort.pdq([]const u8, sort_me.items, {}, lessThanBinary);
var normalized = try std.ArrayList(u8).initCapacity(allocator, path.len);
defer normalized.deinit();
defer normalized.deinit(allocator);
var first = true;
for (sort_me.items) |i| {
if (!first) try normalized.append('&');
if (!first) try normalized.append(allocator, '&');
first = false;
const first_equals = std.mem.indexOf(u8, i, "=");
if (first_equals == null) {
// Rare. This is "foo="
const normed_item = try encodeUri(allocator, i);
defer allocator.free(normed_item);
try normalized.appendSlice(i); // This should be encoded
try normalized.append('=');
try normalized.appendSlice(allocator, i); // This should be encoded
try normalized.append(allocator, '=');
continue;
}
@ -831,12 +831,12 @@ fn canonicalQueryString(allocator: std.mem.Allocator, path: []const u8) ![]const
// Double-encode any = in the value. But not anything else?
const weird_equals_in_value_thing = try replace(allocator, value, "%3D", "%253D");
defer allocator.free(weird_equals_in_value_thing);
try normalized.appendSlice(key);
try normalized.append('=');
try normalized.appendSlice(weird_equals_in_value_thing);
try normalized.appendSlice(allocator, key);
try normalized.append(allocator, '=');
try normalized.appendSlice(allocator, weird_equals_in_value_thing);
}
return normalized.toOwnedSlice();
return normalized.toOwnedSlice(allocator);
}
fn replace(allocator: std.mem.Allocator, haystack: []const u8, needle: []const u8, replacement_value: []const u8) ![]const u8 {
@ -875,7 +875,7 @@ fn canonicalHeaders(allocator: std.mem.Allocator, headers: []const std.http.Head
allocator.free(h.name);
allocator.free(h.value);
}
dest.deinit();
dest.deinit(allocator);
}
var total_len: usize = 0;
var total_name_len: usize = 0;
@ -905,15 +905,15 @@ fn canonicalHeaders(allocator: std.mem.Allocator, headers: []const std.http.Head
defer allocator.free(value);
const n = try std.ascii.allocLowerString(allocator, h.name);
const v = try std.fmt.allocPrint(allocator, "{s}", .{value});
try dest.append(.{ .name = n, .value = v });
try dest.append(allocator, .{ .name = n, .value = v });
}
std.sort.pdq(std.http.Header, dest.items, {}, lessThan);
var dest_str = try std.ArrayList(u8).initCapacity(allocator, total_len);
defer dest_str.deinit();
defer dest_str.deinit(allocator);
var signed_headers = try std.ArrayList(u8).initCapacity(allocator, total_name_len);
defer signed_headers.deinit();
defer signed_headers.deinit(allocator);
var first = true;
for (dest.items) |h| {
dest_str.appendSliceAssumeCapacity(h.name);
@ -926,8 +926,8 @@ fn canonicalHeaders(allocator: std.mem.Allocator, headers: []const std.http.Head
signed_headers.appendSliceAssumeCapacity(h.name);
}
return CanonicalHeaders{
.str = try dest_str.toOwnedSlice(),
.signed_headers = try signed_headers.toOwnedSlice(),
.str = try dest_str.toOwnedSlice(allocator),
.signed_headers = try signed_headers.toOwnedSlice(allocator),
};
}
@ -972,7 +972,7 @@ fn hash(allocator: std.mem.Allocator, payload: []const u8, sig_type: SignatureTy
};
var out: [std.crypto.hash.sha2.Sha256.digest_length]u8 = undefined;
std.crypto.hash.sha2.Sha256.hash(to_hash, &out, .{});
return try std.fmt.allocPrint(allocator, "{s}", .{std.fmt.fmtSliceHexLower(&out)});
return try std.fmt.allocPrint(allocator, "{x}", .{out});
}
// SignedHeaders + '\n' +
// HexEncode(Hash(RequestPayload))

View file

@ -1,47 +0,0 @@
const std = @import("std");
const expectEqualStrings = std.testing.expectEqualStrings;
pub fn snakeToCamel(allocator: std.mem.Allocator, name: []const u8) ![]u8 {
var utf8_name = (std.unicode.Utf8View.init(name) catch unreachable).iterator();
var target_inx: usize = 0;
var previous_ascii: u8 = 0;
var rc = try allocator.alloc(u8, name.len);
while (utf8_name.nextCodepoint()) |cp| {
if (cp > 0xff) return error.UnicodeNotSupported;
const ascii_char: u8 = @truncate(cp);
if (ascii_char != '_') {
if (previous_ascii == '_' and ascii_char >= 'a' and ascii_char <= 'z') {
const uppercase_char = ascii_char - ('a' - 'A');
rc[target_inx] = uppercase_char;
} else {
rc[target_inx] = ascii_char;
}
target_inx = target_inx + 1;
}
previous_ascii = ascii_char;
}
// Do we care if the allocator refuses resize?
_ = allocator.resize(rc, target_inx);
return rc[0..target_inx];
}
pub fn snakeToPascal(allocator: std.mem.Allocator, name: []const u8) ![]u8 {
const rc = try snakeToCamel(allocator, name);
if (rc[0] >= 'a' and rc[0] <= 'z') {
const uppercase_char = rc[0] - ('a' - 'A');
rc[0] = uppercase_char;
}
return rc;
}
test "converts from snake to camelCase" {
const allocator = std.testing.allocator;
const camel = try snakeToCamel(allocator, "access_key_id");
defer allocator.free(camel);
try expectEqualStrings("accessKeyId", camel);
}
test "single word" {
const allocator = std.testing.allocator;
const camel = try snakeToCamel(allocator, "word");
defer allocator.free(camel);
try expectEqualStrings("word", camel);
}

View file

@ -34,7 +34,8 @@ pub fn log(
// Print the message to stderr, silently ignoring any errors
std.debug.lockStdErr();
defer std.debug.unlockStdErr();
const stderr = std.io.getStdErr().writer();
var stderr_writer = std.fs.File.stderr().writer(&.{});
const stderr = &stderr_writer.interface;
nosuspend stderr.print(prefix ++ format ++ "\n", args) catch return;
}
@ -62,14 +63,14 @@ pub fn main() anyerror!void {
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
defer _ = gpa.deinit();
const allocator = gpa.allocator();
var tests = std.ArrayList(Tests).init(allocator);
defer tests.deinit();
var tests = try std.ArrayList(Tests).initCapacity(allocator, @typeInfo(Tests).@"enum".fields.len);
defer tests.deinit(allocator);
var args = try std.process.argsWithAllocator(allocator);
defer args.deinit();
const stdout_raw = std.io.getStdOut().writer();
var bw = std.io.bufferedWriter(stdout_raw);
defer bw.flush() catch unreachable;
const stdout = bw.writer();
var stdout_buf: [4096]u8 = undefined;
const stdout_raw = std.fs.File.stdout().writer(&stdout_buf);
var stdout = stdout_raw.interface;
defer stdout.flush() catch @panic("could not flush stdout");
var arg0: ?[]const u8 = null;
var proxy: ?std.http.Client.Proxy = null;
while (args.next()) |arg| {
@ -99,14 +100,14 @@ pub fn main() anyerror!void {
}
inline for (@typeInfo(Tests).@"enum".fields) |f| {
if (std.mem.eql(u8, f.name, arg)) {
try tests.append(@field(Tests, f.name));
try tests.append(allocator, @field(Tests, f.name));
break;
}
}
}
if (tests.items.len == 0) {
inline for (@typeInfo(Tests).@"enum".fields) |f|
try tests.append(@field(Tests, f.name));
try tests.append(allocator, @field(Tests, f.name));
}
std.log.info("Start\n", .{});
@ -193,7 +194,7 @@ pub fn main() anyerror!void {
const arn = func.function_arn.?;
// This is a bit ugly. Maybe a helper function in the library would help?
var tags = try std.ArrayList(aws.services.lambda.TagKeyValue).initCapacity(allocator, 1);
defer tags.deinit();
defer tags.deinit(allocator);
tags.appendAssumeCapacity(.{ .key = "Foo", .value = "Bar" });
const req = services.lambda.tag_resource.Request{ .resource = arn, .tags = tags.items };
const addtag = try aws.Request(services.lambda.tag_resource).call(req, options);
@ -262,7 +263,7 @@ pub fn main() anyerror!void {
defer result.deinit();
std.log.info("request id: {s}", .{result.response_metadata.request_id});
const list = result.response.key_group_list.?;
std.log.info("key group list max: {?d}", .{list.max_items});
std.log.info("key group list max: {d}", .{list.max_items});
std.log.info("key group quantity: {d}", .{list.quantity});
},
.rest_xml_work_with_s3 => {

View file

@ -14,7 +14,7 @@ pub fn Services(comptime service_imports: anytype) type {
.type = @TypeOf(import_field),
.default_value_ptr = &import_field,
.is_comptime = false,
.alignment = 0,
.alignment = std.meta.alignment(@TypeOf(import_field)),
};
}

View file

@ -11,7 +11,7 @@ pub const EncodingOptions = struct {
field_name_transformer: fieldNameTransformerFn = defaultTransformer,
};
pub fn encode(allocator: std.mem.Allocator, obj: anytype, writer: anytype, comptime options: EncodingOptions) !void {
pub fn encode(allocator: std.mem.Allocator, obj: anytype, writer: *std.Io.Writer, comptime options: EncodingOptions) !void {
_ = try encodeInternal(allocator, "", "", true, obj, writer, options);
}
@ -20,7 +20,7 @@ fn encodeStruct(
parent: []const u8,
first: bool,
obj: anytype,
writer: anytype,
writer: *std.Io.Writer,
comptime options: EncodingOptions,
) !bool {
var rc = first;
@ -41,7 +41,7 @@ pub fn encodeInternal(
field_name: []const u8,
first: bool,
obj: anytype,
writer: anytype,
writer: *std.Io.Writer,
comptime options: EncodingOptions,
) !bool {
// @compileLog(@typeName(@TypeOf(obj)));
@ -56,10 +56,18 @@ pub fn encodeInternal(
} else {
if (!first) _ = try writer.write("&");
// @compileLog(@typeInfo(@TypeOf(obj)));
if (ti.child == []const u8 or ti.child == u8)
try writer.print("{s}{s}={s}", .{ parent, field_name, obj })
else
switch (ti.child) {
// TODO: not sure this first one is valid. How should [][]const u8 be serialized here?
[]const u8 => {
std.log.warn(
"encoding object of type [][]const u8...pretty sure this is wrong {s}{s}={any}",
.{ parent, field_name, obj },
);
try writer.print("{s}{s}={any}", .{ parent, field_name, obj });
},
u8 => try writer.print("{s}{s}={s}", .{ parent, field_name, obj }),
else => try writer.print("{s}{s}={any}", .{ parent, field_name, obj }),
}
rc = false;
},
.@"struct" => if (std.mem.eql(u8, "", field_name)) {

View file

@ -26,12 +26,14 @@ pub const Element = struct {
attributes: AttributeList,
children: ContentList,
next_sibling: ?*Element = null,
allocator: std.mem.Allocator,
fn init(tag: []const u8, alloc: Allocator) Element {
return .{
.tag = tag,
.attributes = AttributeList.init(alloc),
.children = ContentList.init(alloc),
.attributes = AttributeList{},
.children = ContentList{},
.allocator = alloc,
};
}
@ -454,7 +456,7 @@ fn tryParseElement(ctx: *ParseContext, alloc: Allocator, parent: ?*Element) !?*E
while (ctx.eatWs()) {
const attr = (try tryParseAttr(ctx, alloc)) orelse break;
try element.attributes.append(attr);
try element.attributes.append(element.allocator, attr);
}
if (ctx.eatStr("/>")) {
@ -471,7 +473,7 @@ fn tryParseElement(ctx: *ParseContext, alloc: Allocator, parent: ?*Element) !?*E
}
const content = try parseContent(ctx, alloc, element);
try element.children.append(content);
try element.children.append(element.allocator, content);
}
const closing_tag = try parseNameNoDupe(ctx);

View file

@ -53,7 +53,7 @@ pub const XmlSerializeError = error{
pub fn stringify(
value: anytype,
options: StringifyOptions,
writer: anytype,
writer: *std.Io.Writer,
) !void {
// Write XML declaration if requested
if (options.include_declaration)
@ -62,9 +62,9 @@ pub fn stringify(
// Start serialization with the root element
const root_name = options.root_name;
if (@typeInfo(@TypeOf(value)) != .optional or value == null)
try serializeValue(value, root_name, options, writer.any(), 0)
try serializeValue(value, root_name, options, writer, 0)
else
try serializeValue(value.?, root_name, options, writer.any(), 0);
try serializeValue(value.?, root_name, options, writer, 0);
}
/// Serializes a value to XML and returns an allocated string
@ -73,10 +73,10 @@ pub fn stringifyAlloc(
value: anytype,
options: StringifyOptions,
) ![]u8 {
var list = std.ArrayList(u8).init(allocator);
errdefer list.deinit();
var list = std.Io.Writer.Allocating.init(allocator);
defer list.deinit();
try stringify(value, options, list.writer());
try stringify(value, options, &list.writer);
return list.toOwnedSlice();
}
@ -85,7 +85,7 @@ fn serializeValue(
value: anytype,
element_name: ?[]const u8,
options: StringifyOptions,
writer: anytype,
writer: *std.Io.Writer,
depth: usize,
) !void {
const T = @TypeOf(value);
@ -274,7 +274,7 @@ fn serializeValue(
try writeClose(writer, element_name);
}
fn writeClose(writer: anytype, element_name: ?[]const u8) !void {
fn writeClose(writer: *std.Io.Writer, element_name: ?[]const u8) !void {
// Close element tag
if (element_name) |n| {
try writer.writeAll("</");
@ -284,7 +284,7 @@ fn writeClose(writer: anytype, element_name: ?[]const u8) !void {
}
/// Writes indentation based on depth and indent level
fn writeIndent(writer: anytype, depth: usize, whitespace: StringifyOptions.Whitespace) @TypeOf(writer).Error!void {
fn writeIndent(writer: *std.Io.Writer, depth: usize, whitespace: StringifyOptions.Whitespace) std.Io.Writer.Error!void {
var char: u8 = ' ';
const n_chars = switch (whitespace) {
.minified => return,
@ -298,16 +298,16 @@ fn writeIndent(writer: anytype, depth: usize, whitespace: StringifyOptions.White
break :blk depth;
},
};
try writer.writeByteNTimes(char, n_chars);
try writer.splatBytesAll(&.{char}, n_chars);
}
fn serializeString(
writer: anytype,
writer: *std.Io.Writer,
element_name: ?[]const u8,
value: []const u8,
options: StringifyOptions,
depth: usize,
) @TypeOf(writer).Error!void {
) error{ WriteFailed, OutOfMemory }!void {
if (options.emit_strings_as_arrays) {
// if (true) return error.seestackrun;
for (value) |c| {
@ -333,7 +333,7 @@ fn serializeString(
try escapeString(writer, value);
}
/// Escapes special characters in XML strings
fn escapeString(writer: anytype, value: []const u8) @TypeOf(writer).Error!void {
fn escapeString(writer: *std.Io.Writer, value: []const u8) std.Io.Writer.Error!void {
for (value) |c| {
switch (c) {
'&' => try writer.writeAll("&amp;"),

View file

@ -381,14 +381,17 @@ fn parseInternal(comptime T: type, element: *xml.Element, options: ParseOptions)
log.debug("type = {s}, style = {s}, ptr_info.child == {s}, element = {s}", .{ @typeName(T), @tagName(array_style), @typeName(ptr_info.child), element.tag });
var children = std.ArrayList(ptr_info.child).init(allocator);
defer children.deinit();
var children = std.ArrayList(ptr_info.child){};
defer children.deinit(allocator);
switch (array_style) {
.collection => {
var iterator = element.elements();
while (iterator.next()) |child_element| {
try children.append(try parseInternal(ptr_info.child, child_element, options));
try children.append(
allocator,
try parseInternal(ptr_info.child, child_element, options),
);
}
},
.repeated_root => {
@ -396,12 +399,15 @@ fn parseInternal(comptime T: type, element: *xml.Element, options: ParseOptions)
while (current) |el| : (current = el.next_sibling) {
if (!std.mem.eql(u8, el.tag, element.tag)) continue;
try children.append(try parseInternal(ptr_info.child, el, options));
try children.append(
allocator,
try parseInternal(ptr_info.child, el, options),
);
}
},
}
return children.toOwnedSlice();
return children.toOwnedSlice(allocator);
}
return try allocator.dupe(u8, element.children.items[0].CharData);
},