From 5109a6d9a77a3f4c60c8a8afab0174965c30759f Mon Sep 17 00:00:00 2001 From: Emil Lerch Date: Thu, 23 Dec 2021 08:51:48 -0800 Subject: [PATCH] upgrade to zig 0.9.0 (will be overridden) This is not signed as I do not have access to my key at the moment. This will be signed and overridden later, as will any subsequent commits. --- .drone.yml | 6 ++-- Dockerfile | 2 +- build.zig | 10 +------ codegen/src/main.zig | 20 +++++++------- codegen/src/snake.zig | 2 +- smithy/src/smithy.zig | 33 +++++++++++----------- src/aws.zig | 10 +++---- src/awshttp.zig | 64 +++++++++++++++++++++---------------------- src/case.zig | 4 +-- src/json.zig | 36 ++++++++++++------------ src/main.zig | 6 ++-- src/url.zig | 6 ++-- 12 files changed, 95 insertions(+), 104 deletions(-) diff --git a/.drone.yml b/.drone.yml index e97e607..be33fd4 100644 --- a/.drone.yml +++ b/.drone.yml @@ -13,10 +13,10 @@ steps: REGISTRY: from_secret: docker_registry commands: - - wget https://ziglang.org/builds/zig-linux-x86_64-0.9.0-dev.321+15a030ef3.tar.xz - - tar x -C /usr/local -f zig-linux-x86_64-0.9.0-dev.321+15a030ef3.tar.xz + - wget https://ziglang.org/download/0.9.0/zig-linux-x86_64-0.9.0.tar.xz + - tar x -C /usr/local -f zig-linux-x86_64-0.9.0.tar.xz - rm /usr/local/bin/zig - - ln -s /usr/local/zig-linux-x86_64-0.9.0-dev.321+15a030ef3/zig /usr/local/bin/zig + - ln -s /usr/local/zig-linux-x86_64-0.9.0/zig /usr/local/bin/zig - (cd codegen && zig build test) - zig build # implicitly does a codegen - zig build test diff --git a/Dockerfile b/Dockerfile index f5e7ea9..6abd536 100644 --- a/Dockerfile +++ b/Dockerfile @@ -105,6 +105,6 @@ RUN tar -czf aws-c-auth-clang.tgz /usr/local/* FROM alpine:3.13 as final COPY --from=auth /aws-c-auth-clang.tgz / -ADD https://ziglang.org/download/0.8.1/zig-linux-x86_64-0.8.1.tar.xz / +ADD https://ziglang.org/download/0.9.0/zig-linux-x86_64-0.9.0.tar.xz / RUN tar -xzf /aws-c-auth-clang.tgz && mkdir /src && tar -C /usr/local -xf zig-linux* && \ ln -s /usr/local/zig-linux*/zig /usr/local/bin/zig diff --git a/build.zig b/build.zig index 2408106..221585a 100644 --- a/build.zig +++ b/build.zig @@ -49,15 +49,7 @@ pub fn build(b: *Builder) !void { exe.setTarget(target); exe.setBuildMode(mode); - // This line works as of c5d412268 - // Earliest nightly is 05b5e49bc on 2021-06-12 - // https://ziglang.org/builds/zig-linux-x86_64-0.9.0-dev.113+05b5e49bc.tar.xz - // exe.override_dest_dir = .{ .Custom = ".." }; exe.override_dest_dir = .{ .custom = ".." }; - - // Static linkage flag was nonfunctional until 2b2efa24d0855 - // Did not notice this until 2021-06-28, and that nightly is: - // https://ziglang.org/builds/zig-linux-x86_64-0.9.0-dev.321+15a030ef3.tar.xz exe.linkage = .static; // TODO: Strip doesn't actually fully strip the executable. 
If we're on @@ -93,7 +85,7 @@ pub fn build(b: *Builder) !void { } // TODO: Support > linux - if (std.builtin.os.tag == .linux) { + if (builtin.os.tag == .linux) { const codegen = b.step("gen", "Generate zig service code from smithy models"); codegen.dependOn(&b.addSystemCommand(&.{ "/bin/sh", "-c", "cd codegen && zig build" }).step); // Since codegen binary is built every time, if it's newer than our diff --git a/codegen/src/main.zig b/codegen/src/main.zig index 1e66168..c42de07 100644 --- a/codegen/src/main.zig +++ b/codegen/src/main.zig @@ -6,7 +6,7 @@ const json_zig = @embedFile("json.zig"); pub fn main() anyerror!void { var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); defer arena.deinit(); - const allocator = &arena.allocator; + const allocator = arena.allocator(); const args = try std.process.argsAlloc(allocator); defer std.process.argsFree(allocator, args); @@ -38,7 +38,7 @@ fn processFile(arg: []const u8, stdout: anytype, manifest: anytype) !void { // day I'm not sure we want to track down leaks var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); defer arena.deinit(); - const allocator = &arena.allocator; + const allocator = arena.allocator(); var writer = &stdout; var file: std.fs.File = undefined; const filename = try std.fmt.allocPrint(allocator, "{s}.zig", .{arg}); @@ -51,7 +51,7 @@ fn processFile(arg: []const u8, stdout: anytype, manifest: anytype) !void { _ = try writer.write("const smithy = @import(\"smithy\");\n\n"); std.log.info("Processing file: {s}", .{arg}); const service_names = generateServicesForFilePath(allocator, ";", arg, writer) catch |err| { - std.log.crit("Error processing file: {s}", .{arg}); + std.log.err("Error processing file: {s}", .{arg}); return err; }; defer { @@ -64,7 +64,7 @@ fn processFile(arg: []const u8, stdout: anytype, manifest: anytype) !void { } } -fn generateServicesForFilePath(allocator: *std.mem.Allocator, comptime terminator: []const u8, path: []const u8, writer: anytype) ![][]const u8 { +fn generateServicesForFilePath(allocator: std.mem.Allocator, comptime terminator: []const u8, path: []const u8, writer: anytype) ![][]const u8 { const file = try std.fs.cwd().openFile(path, .{ .read = true, .write = false }); defer file.close(); return try generateServices(allocator, terminator, file, writer); @@ -135,7 +135,7 @@ fn countReferences(shape: smithy.ShapeInfo, shapes: std.StringHashMap(smithy.Sha } } -fn generateServices(allocator: *std.mem.Allocator, comptime _: []const u8, file: std.fs.File, writer: anytype) ![][]const u8 { +fn generateServices(allocator: std.mem.Allocator, comptime _: []const u8, file: std.fs.File, writer: anytype) ![][]const u8 { const json = try file.readToEndAlloc(allocator, 1024 * 1024 * 1024); defer allocator.free(json); const model = try smithy.parse(allocator, json); @@ -229,7 +229,7 @@ fn generateServices(allocator: *std.mem.Allocator, comptime _: []const u8, file: return constant_names.toOwnedSlice(); } -fn generateAdditionalTypes(allocator: *std.mem.Allocator, file_state: FileGenerationState, writer: anytype) !void { +fn generateAdditionalTypes(allocator: std.mem.Allocator, file_state: FileGenerationState, writer: anytype) !void { // More types may be added during processing while (file_state.additional_types_to_generate.popOrNull()) |t| { if (file_state.additional_types_generated.getEntry(t.name) != null) continue; @@ -250,7 +250,7 @@ fn generateAdditionalTypes(allocator: *std.mem.Allocator, file_state: FileGenera } } -fn constantName(allocator: *std.mem.Allocator, id: []const u8) 
![]const u8 { +fn constantName(allocator: std.mem.Allocator, id: []const u8) ![]const u8 { // There are some ids that don't follow consistent rules, so we'll // look for the exceptions and, if not found, revert to the snake case // algorithm @@ -281,7 +281,7 @@ const GenerationState = struct { type_stack: *std.ArrayList(*const smithy.ShapeInfo), file_state: FileGenerationState, // we will need some sort of "type decls needed" for recursive structures - allocator: *std.mem.Allocator, + allocator: std.mem.Allocator, indent_level: u64, }; @@ -289,7 +289,7 @@ fn outputIndent(state: GenerationState, writer: anytype) !void { const n_chars = 4 * state.indent_level; try writer.writeByteNTimes(' ', n_chars); } -fn generateOperation(allocator: *std.mem.Allocator, operation: smithy.ShapeInfo, file_state: FileGenerationState, writer: anytype) !void { +fn generateOperation(allocator: std.mem.Allocator, operation: smithy.ShapeInfo, file_state: FileGenerationState, writer: anytype) !void { const snake_case_name = try snake.fromPascalCase(allocator, operation.name); defer allocator.free(snake_case_name); @@ -707,7 +707,7 @@ fn writeOptional(traits: ?[]smithy.Trait, writer: anytype, value: ?[]const u8) ! _ = try writer.write(v); } else _ = try writer.write("?"); } -fn camelCase(allocator: *std.mem.Allocator, name: []const u8) ![]const u8 { +fn camelCase(allocator: std.mem.Allocator, name: []const u8) ![]const u8 { const first_letter = name[0] + ('a' - 'A'); return try std.fmt.allocPrint(allocator, "{c}{s}", .{ first_letter, name[1..] }); } diff --git a/codegen/src/snake.zig b/codegen/src/snake.zig index 2747793..fba9dfa 100644 --- a/codegen/src/snake.zig +++ b/codegen/src/snake.zig @@ -1,7 +1,7 @@ const std = @import("std"); const expectEqualStrings = std.testing.expectEqualStrings; -pub fn fromPascalCase(allocator: *std.mem.Allocator, name: []const u8) ![]u8 { +pub fn fromPascalCase(allocator: std.mem.Allocator, name: []const u8) ![]u8 { const rc = try allocator.alloc(u8, name.len * 2); // This is overkill, but is > the maximum length possibly needed errdefer allocator.free(rc); var utf8_name = (std.unicode.Utf8View.init(name) catch unreachable).iterator(); diff --git a/smithy/src/smithy.zig b/smithy/src/smithy.zig index 0d39bc5..76653ff 100644 --- a/smithy/src/smithy.zig +++ b/smithy/src/smithy.zig @@ -6,10 +6,10 @@ pub const Smithy = struct { version: []const u8, metadata: ModelMetadata, shapes: []ShapeInfo, - allocator: *std.mem.Allocator, + allocator: std.mem.Allocator, const Self = @This(); - pub fn init(allocator: *std.mem.Allocator, version: []const u8, metadata: ModelMetadata, shapeinfo: []ShapeInfo) Smithy { + pub fn init(allocator: std.mem.Allocator, version: []const u8, metadata: ModelMetadata, shapeinfo: []ShapeInfo) Smithy { return .{ .version = version, .metadata = metadata, @@ -233,7 +233,7 @@ pub const AwsProtocol = enum { ec2_query, }; -pub fn parse(allocator: *std.mem.Allocator, json_model: []const u8) !Smithy { +pub fn parse(allocator: std.mem.Allocator, json_model: []const u8) !Smithy { // construct a parser. We're not copying strings here, but that may // be a poor decision var parser = std.json.Parser.init(allocator, false); @@ -253,7 +253,7 @@ pub fn parse(allocator: *std.mem.Allocator, json_model: []const u8) !Smithy { // anytype: HashMap([]const u8, std.json.Value...) 
// list must be deinitialized by caller -fn shapes(allocator: *std.mem.Allocator, map: anytype) ![]ShapeInfo { +fn shapes(allocator: std.mem.Allocator, map: anytype) ![]ShapeInfo { var list = try std.ArrayList(ShapeInfo).initCapacity(allocator, map.count()); defer list.deinit(); var iterator = map.iterator(); @@ -329,7 +329,7 @@ fn shapes(allocator: *std.mem.Allocator, map: anytype) ![]ShapeInfo { return list.toOwnedSlice(); } -fn getShape(allocator: *std.mem.Allocator, shape: std.json.Value) SmithyParseError!Shape { +fn getShape(allocator: std.mem.Allocator, shape: std.json.Value) SmithyParseError!Shape { const shape_type = shape.Object.get("type").?.String; if (std.mem.eql(u8, shape_type, "service")) return Shape{ @@ -429,7 +429,7 @@ fn getShape(allocator: *std.mem.Allocator, shape: std.json.Value) SmithyParseErr return SmithyParseError.InvalidType; } -fn parseMembers(allocator: *std.mem.Allocator, shape: ?std.json.Value) SmithyParseError![]TypeMember { +fn parseMembers(allocator: std.mem.Allocator, shape: ?std.json.Value) SmithyParseError![]TypeMember { var rc: []TypeMember = &.{}; if (shape == null) return rc; @@ -449,7 +449,7 @@ fn parseMembers(allocator: *std.mem.Allocator, shape: ?std.json.Value) SmithyPar } // ArrayList of std.Json.Value -fn parseTargetList(allocator: *std.mem.Allocator, list: anytype) SmithyParseError![][]const u8 { +fn parseTargetList(allocator: std.mem.Allocator, list: anytype) SmithyParseError![][]const u8 { var array_list = std.ArrayList([]const u8).initCapacity(allocator, list.items.len) catch return SmithyParseError.OutOfMemory; defer array_list.deinit(); for (list.items) |i| { @@ -457,13 +457,13 @@ fn parseTargetList(allocator: *std.mem.Allocator, list: anytype) SmithyParseErro } return array_list.toOwnedSlice(); } -fn parseTraitsOnly(allocator: *std.mem.Allocator, shape: std.json.Value) SmithyParseError!TraitsOnly { +fn parseTraitsOnly(allocator: std.mem.Allocator, shape: std.json.Value) SmithyParseError!TraitsOnly { return TraitsOnly{ .traits = try parseTraits(allocator, shape.Object.get("traits")), }; } -fn parseTraits(allocator: *std.mem.Allocator, shape: ?std.json.Value) SmithyParseError![]Trait { +fn parseTraits(allocator: std.mem.Allocator, shape: ?std.json.Value) SmithyParseError![]Trait { var rc: []Trait = &.{}; if (shape == null) return rc; @@ -620,8 +620,7 @@ fn getTrait(trait_type: []const u8, value: std.json.Value) SmithyParseError!?Tra \\smithy.api#xmlName \\smithy.waiters#waitable ; - // var iterator = std.mem.split(u8, list, "\n"); // Uncomment for 0.8.1 - var iterator = std.mem.split(list, "\n"); + var iterator = std.mem.split(u8, list, "\n"); while (iterator.next()) |known_but_unimplemented| { if (std.mem.eql(u8, trait_type, known_but_unimplemented)) return null; @@ -677,7 +676,7 @@ fn parseId(id: []const u8) SmithyParseError!IdInfo { .member = member, }; } -fn read_file_to_string(allocator: *std.mem.Allocator, file_name: []const u8, max_bytes: usize) ![]const u8 { +fn read_file_to_string(allocator: std.mem.Allocator, file_name: []const u8, max_bytes: usize) ![]const u8 { const file = try std.fs.cwd().openFile(file_name, std.fs.File.OpenFlags{}); defer file.close(); return file.readToEndAlloc(allocator, max_bytes); @@ -688,13 +687,13 @@ const intrinsic_type_count: usize = 5; // 5 intrinsic types are added to every m fn getTestData(_: *std.mem.Allocator) []const u8 { if (test_data) |d| return d; var gpa = std.heap.GeneralPurposeAllocator(.{}){}; - test_data = read_file_to_string(&gpa.allocator, "test.json", 150000) catch @panic("could not 
read test.json"); + test_data = read_file_to_string(gpa.allocator, "test.json", 150000) catch @panic("could not read test.json"); return test_data.?; } test "read file" { var gpa = std.heap.GeneralPurposeAllocator(.{}){}; defer if (gpa.deinit()) @panic("leak"); - const allocator = &gpa.allocator; + const allocator = gpa.allocator; _ = getTestData(allocator); // test stuff } @@ -720,7 +719,7 @@ test "parse string" { var gpa = std.heap.GeneralPurposeAllocator(.{}){}; defer if (gpa.deinit()) @panic("leak"); - const allocator = &gpa.allocator; + const allocator = gpa.allocator; const model = try parse(allocator, test_string); defer model.deinit(); try expect(std.mem.eql(u8, model.version, "1.0")); @@ -754,7 +753,7 @@ test "parse shape with member" { var gpa = std.heap.GeneralPurposeAllocator(.{}){}; defer if (gpa.deinit()) @panic("leak"); - const allocator = &gpa.allocator; + const allocator = gpa.allocator; const model = try parse(allocator, test_string); defer model.deinit(); try expect(std.mem.eql(u8, model.version, "1.0")); @@ -768,7 +767,7 @@ test "parse shape with member" { test "parse file" { var gpa = std.heap.GeneralPurposeAllocator(.{}){}; defer if (gpa.deinit()) @panic("leak"); - const allocator = &gpa.allocator; + const allocator = gpa.allocator; const test_string = getTestData(allocator); const model = try parse(allocator, test_string); defer model.deinit(); diff --git a/src/aws.zig b/src/aws.zig index 7b987ba..117797f 100644 --- a/src/aws.zig +++ b/src/aws.zig @@ -26,12 +26,12 @@ pub const services = servicemodel.services; pub const Services = servicemodel.Services; pub const Client = struct { - allocator: *std.mem.Allocator, + allocator: std.mem.Allocator, aws_http: awshttp.AwsHttp, const Self = @This(); - pub fn init(allocator: *std.mem.Allocator) Self { + pub fn init(allocator: std.mem.Allocator) Self { return .{ .allocator = allocator, .aws_http = awshttp.AwsHttp.init(allocator), @@ -413,7 +413,7 @@ fn queryFieldTransformer(field_name: []const u8, encoding_options: url.EncodingO return try case.snakeToPascal(encoding_options.allocator.?, field_name); } -fn buildPath(allocator: *std.mem.Allocator, raw_uri: []const u8, comptime ActionRequest: type, request: anytype) ![]const u8 { +fn buildPath(allocator: std.mem.Allocator, raw_uri: []const u8, comptime ActionRequest: type, request: anytype) ![]const u8 { var buffer = try std.ArrayList(u8).initCapacity(allocator, raw_uri.len); // const writer = buffer.writer(); defer buffer.deinit(); @@ -487,7 +487,7 @@ fn uriEncodeByte(char: u8, writer: anytype) !void { } } -fn buildQuery(allocator: *std.mem.Allocator, request: anytype) ![]const u8 { +fn buildQuery(allocator: std.mem.Allocator, request: anytype) ![]const u8 { // query should look something like this: // pub const http_query = .{ // .master_region = "MasterRegion", @@ -615,7 +615,7 @@ pub fn IgnoringWriter(comptime WriterType: type) type { }; } -fn reportTraffic(allocator: *std.mem.Allocator, info: []const u8, request: awshttp.HttpRequest, response: awshttp.HttpResult, comptime reporter: fn (comptime []const u8, anytype) void) !void { +fn reportTraffic(allocator: std.mem.Allocator, info: []const u8, request: awshttp.HttpRequest, response: awshttp.HttpResult, comptime reporter: fn (comptime []const u8, anytype) void) !void { var msg = std.ArrayList(u8).init(allocator); defer msg.deinit(); const writer = msg.writer(); diff --git a/src/awshttp.zig b/src/awshttp.zig index 48dc02a..1408249 100644 --- a/src/awshttp.zig +++ b/src/awshttp.zig @@ -86,7 +86,7 @@ pub const HttpResult = 
struct { response_code: u16, // actually 3 digits can fit in u10 body: []const u8, headers: []Header, - allocator: *std.mem.Allocator, + allocator: std.mem.Allocator, pub fn deinit(self: HttpResult) void { self.allocator.free(self.body); @@ -110,14 +110,14 @@ const EndPoint = struct { host: []const u8, scheme: []const u8, port: u16, - allocator: *std.mem.Allocator, + allocator: std.mem.Allocator, fn deinit(self: EndPoint) void { self.allocator.free(self.uri); } }; -fn cInit(_: *std.mem.Allocator) void { +fn cInit(_: std.mem.Allocator) void { // TODO: what happens if we actually get an allocator? httplog.debug("auth init", .{}); c_allocator = c.aws_default_allocator(); @@ -179,7 +179,7 @@ fn cDeinit() void { // probably the wrong name } pub const AwsHttp = struct { - allocator: *std.mem.Allocator, + allocator: std.mem.Allocator, bootstrap: *c.aws_client_bootstrap, resolver: *c.aws_host_resolver, eventLoopGroup: *c.aws_event_loop_group, @@ -187,7 +187,7 @@ pub const AwsHttp = struct { const Self = @This(); - pub fn init(allocator: *std.mem.Allocator) Self { + pub fn init(allocator: std.mem.Allocator) Self { if (reference_count == 0) cInit(allocator); reference_count += 1; httplog.debug("auth ref count: {}", .{reference_count}); @@ -347,7 +347,7 @@ pub const AwsHttp = struct { c.aws_tls_ctx_options_init_default_client(tls_ctx_options.?, c_allocator); // h2;http/1.1 if (c.aws_tls_ctx_options_set_alpn_list(tls_ctx_options, "http/1.1") != c.AWS_OP_SUCCESS) { - httplog.alert("Failed to load alpn list with error {s}.", .{c.aws_error_debug_str(c.aws_last_error())}); + httplog.err("Failed to load alpn list with error {s}.", .{c.aws_error_debug_str(c.aws_last_error())}); return AwsError.AlpnError; } @@ -374,7 +374,7 @@ pub const AwsHttp = struct { var host_var = host; var host_cur = c.aws_byte_cursor_from_c_str(@ptrCast([*c]const u8, host_var)); if (c.aws_tls_connection_options_set_server_name(tls_connection_options, c_allocator, &host_cur) != c.AWS_OP_SUCCESS) { - httplog.alert("Failed to set servername with error {s}.", .{c.aws_error_debug_str(c.aws_last_error())}); + httplog.err("Failed to set servername with error {s}.", .{c.aws_error_debug_str(c.aws_last_error())}); return AwsError.TlsError; } } @@ -409,7 +409,7 @@ pub const AwsHttp = struct { .on_shutdown = connectionShutdownCallback, }; if (c.aws_http_client_connect(&http_client_options) != c.AWS_OP_SUCCESS) { - httplog.alert("HTTP client connect failed with {s}.", .{c.aws_error_debug_str(c.aws_last_error())}); + httplog.err("HTTP client connect failed with {s}.", .{c.aws_error_debug_str(c.aws_last_error())}); return AwsError.HttpClientConnectError; } // TODO: Timeout @@ -425,17 +425,17 @@ pub const AwsHttp = struct { .on_response_header_block_done = null, .on_response_body = incomingBodyCallback, .on_complete = requestCompleteCallback, - .user_data = @ptrCast(*c_void, &context), + .user_data = @ptrCast(*anyopaque, &context), .request = http_request, }; const stream = c.aws_http_connection_make_request(context.connection, &request_options); if (stream == null) { - httplog.alert("failed to create request.", .{}); + httplog.err("failed to create request.", .{}); return AwsError.RequestCreateError; } if (c.aws_http_stream_activate(stream) != c.AWS_OP_SUCCESS) { - httplog.alert("HTTP request failed with {s}.", .{c.aws_error_debug_str(c.aws_last_error())}); + httplog.err("HTTP request failed with {s}.", .{c.aws_error_debug_str(c.aws_last_error())}); return AwsError.HttpRequestError; } // TODO: Timeout @@ -599,7 +599,7 @@ pub const AwsHttp = struct 
{ var sign_result_request = AsyncResult(AwsAsyncCallbackResult(c.aws_http_message)){ .result = &signing_result }; if (c.aws_sign_request_aws(c_allocator, signable, fullCast([*c]const c.aws_signing_config_base, signing_config), signComplete, &sign_result_request) != c.AWS_OP_SUCCESS) { const error_code = c.aws_last_error(); - httplog.alert("Could not initiate signing request: {s}:{s}", .{ c.aws_error_name(error_code), c.aws_error_str(error_code) }); + httplog.err("Could not initiate signing request: {s}:{s}", .{ c.aws_error_name(error_code), c.aws_error_str(error_code) }); return AwsError.SigningInitiationError; } @@ -615,7 +615,7 @@ pub const AwsHttp = struct { /// It's my theory that the aws event loop has a trigger to corrupt the /// signing result after this call completes. So the technique of assigning /// now, using later will not work - fn signComplete(result: ?*c.aws_signing_result, error_code: c_int, user_data: ?*c_void) callconv(.C) void { + fn signComplete(result: ?*c.aws_signing_result, error_code: c_int, user_data: ?*anyopaque) callconv(.C) void { var async_result = userDataTo(AsyncResult(AwsAsyncCallbackResult(c.aws_http_message)), user_data); var http_request = async_result.result.result; async_result.sync.store(true, .SeqCst); @@ -625,11 +625,11 @@ pub const AwsHttp = struct { if (result != null) { if (c.aws_apply_signing_result_to_http_request(http_request, c_allocator, result) != c.AWS_OP_SUCCESS) { - httplog.alert("Could not apply signing request to http request: {s}", .{c.aws_error_debug_str(c.aws_last_error())}); + httplog.err("Could not apply signing request to http request: {s}", .{c.aws_error_debug_str(c.aws_last_error())}); } httplog.debug("signing result applied", .{}); } else { - httplog.alert("Did not receive signing result: {s}", .{c.aws_error_debug_str(c.aws_last_error())}); + httplog.err("Did not receive signing result: {s}", .{c.aws_error_debug_str(c.aws_last_error())}); } async_result.sync.store(false, .SeqCst); } @@ -710,11 +710,11 @@ pub const AwsHttp = struct { } } - fn connectionSetupCallback(connection: ?*c.aws_http_connection, error_code: c_int, user_data: ?*c_void) callconv(.C) void { + fn connectionSetupCallback(connection: ?*c.aws_http_connection, error_code: c_int, user_data: ?*anyopaque) callconv(.C) void { httplog.debug("connection setup callback start", .{}); var context = userDataTo(RequestContext, user_data); if (error_code != c.AWS_OP_SUCCESS) { - httplog.alert("Failed to setup connection: {s}.", .{c.aws_error_debug_str(c.aws_last_error())}); + httplog.err("Failed to setup connection: {s}.", .{c.aws_error_debug_str(c.aws_last_error())}); context.return_error = AwsError.SetupConnectionError; } context.connection = connection; @@ -722,13 +722,13 @@ pub const AwsHttp = struct { httplog.debug("connection setup callback end", .{}); } - fn connectionShutdownCallback(connection: ?*c.aws_http_connection, error_code: c_int, _: ?*c_void) callconv(.C) void { + fn connectionShutdownCallback(connection: ?*c.aws_http_connection, error_code: c_int, _: ?*anyopaque) callconv(.C) void { // ^^ error_code ^^ user_data httplog.debug("connection shutdown callback start ({*}). 
error_code: {d}", .{ connection, error_code }); httplog.debug("connection shutdown callback end", .{}); } - fn incomingHeadersCallback(stream: ?*c.aws_http_stream, _: c.aws_http_header_block, headers: [*c]const c.aws_http_header, num_headers: usize, user_data: ?*c_void) callconv(.C) c_int { + fn incomingHeadersCallback(stream: ?*c.aws_http_stream, _: c.aws_http_header_block, headers: [*c]const c.aws_http_header, num_headers: usize, user_data: ?*anyopaque) callconv(.C) c_int { var context = userDataTo(RequestContext, user_data); if (context.response_code == null) { @@ -737,7 +737,7 @@ pub const AwsHttp = struct { context.response_code = @intCast(u16, status); // RFC says this is a 3 digit number, so c_int is silly httplog.debug("response status code from callback: {d}", .{status}); } else { - httplog.alert("could not get status code", .{}); + httplog.err("could not get status code", .{}); context.return_error = AwsError.StatusCodeError; } } @@ -746,11 +746,11 @@ pub const AwsHttp = struct { const value = header.value.ptr[0..header.value.len]; httplog.debug("header from callback: {s}: {s}", .{ name, value }); context.addHeader(name, value) catch - httplog.alert("could not append header to request context", .{}); + httplog.err("could not append header to request context", .{}); } return c.AWS_OP_SUCCESS; } - fn incomingBodyCallback(_: ?*c.aws_http_stream, data: [*c]const c.aws_byte_cursor, user_data: ?*c_void) callconv(.C) c_int { + fn incomingBodyCallback(_: ?*c.aws_http_stream, data: [*c]const c.aws_byte_cursor, user_data: ?*anyopaque) callconv(.C) c_int { var context = userDataTo(RequestContext, user_data); httplog.debug("inbound body, len {d}", .{data.*.len}); @@ -758,10 +758,10 @@ pub const AwsHttp = struct { // Need this to be a slice because it does not necessarily have a \0 sentinal const body_chunk = array[0..data.*.len]; context.appendToBody(body_chunk) catch - httplog.alert("could not append to body!", .{}); + httplog.err("could not append to body!", .{}); return c.AWS_OP_SUCCESS; } - fn requestCompleteCallback(stream: ?*c.aws_http_stream, _: c_int, user_data: ?*c_void) callconv(.C) void { + fn requestCompleteCallback(stream: ?*c.aws_http_stream, _: c_int, user_data: ?*anyopaque) callconv(.C) void { // ^^ error_code var context = userDataTo(RequestContext, user_data); context.request_complete.store(true, .SeqCst); @@ -780,7 +780,7 @@ pub const AwsHttp = struct { waitOnCallback(c.aws_credentials, &callback_results); if (credential_result.error_code != c.AWS_ERROR_SUCCESS) { - httplog.alert("Could not acquire credentials: {s}:{s}", .{ c.aws_error_name(credential_result.error_code), c.aws_error_str(credential_result.error_code) }); + httplog.err("Could not acquire credentials: {s}:{s}", .{ c.aws_error_name(credential_result.error_code), c.aws_error_str(credential_result.error_code) }); return AwsError.CredentialsError; } return credential_result.result orelse unreachable; @@ -813,7 +813,7 @@ pub const AwsHttp = struct { } // Generic function that generates a type-specific funtion for callback use - fn awsAsyncCallback(comptime T: type, comptime message: []const u8) (fn (result: ?*T, error_code: c_int, user_data: ?*c_void) callconv(.C) void) { + fn awsAsyncCallback(comptime T: type, comptime message: []const u8) (fn (result: ?*T, error_code: c_int, user_data: ?*anyopaque) callconv(.C) void) { const inner = struct { fn func(userData: *AsyncResult(AwsAsyncCallbackResult(T)), apiData: ?*T) void { userData.result.result = apiData; @@ -824,15 +824,15 @@ pub const AwsHttp = struct { // used 
by awsAsyncCallbackResult to cast our generic userdata void * // into a type known to zig - fn userDataTo(comptime T: type, userData: ?*c_void) *T { + fn userDataTo(comptime T: type, userData: ?*anyopaque) *T { return @ptrCast(*T, @alignCast(@alignOf(T), userData)); } // generic callback ability. Takes a function for the actual assignment // If you need a standard assignment, use awsAsyncCallback instead - fn awsAsyncCallbackResult(comptime T: type, comptime message: []const u8, comptime resultAssignment: (fn (user: *AsyncResult(AwsAsyncCallbackResult(T)), apiData: ?*T) void)) (fn (result: ?*T, error_code: c_int, user_data: ?*c_void) callconv(.C) void) { + fn awsAsyncCallbackResult(comptime T: type, comptime message: []const u8, comptime resultAssignment: (fn (user: *AsyncResult(AwsAsyncCallbackResult(T)), apiData: ?*T) void)) (fn (result: ?*T, error_code: c_int, user_data: ?*anyopaque) callconv(.C) void) { const inner = struct { - fn innerfunc(result: ?*T, error_code: c_int, user_data: ?*c_void) callconv(.C) void { + fn innerfunc(result: ?*T, error_code: c_int, user_data: ?*anyopaque) callconv(.C) void { httplog.debug(message, .{}); var asyncResult = userDataTo(AsyncResult(AwsAsyncCallbackResult(T)), user_data); @@ -883,7 +883,7 @@ fn fullCast(comptime T: type, val: anytype) T { return @ptrCast(T, @alignCast(@alignOf(T), val)); } -fn regionSubDomain(allocator: *std.mem.Allocator, service: []const u8, region: []const u8, useDualStack: bool) !EndPoint { +fn regionSubDomain(allocator: std.mem.Allocator, service: []const u8, region: []const u8, useDualStack: bool) !EndPoint { const environment_override = std.os.getenv("AWS_ENDPOINT_URL"); if (environment_override) |override| { const uri = try allocator.dupeZ(u8, override); @@ -916,7 +916,7 @@ fn regionSubDomain(allocator: *std.mem.Allocator, service: []const u8, region: [ /// /// allocator: Will be used only to construct the EndPoint struct /// uri: string constructed in such a way that deallocation is needed -fn endPointFromUri(allocator: *std.mem.Allocator, uri: []const u8) !EndPoint { +fn endPointFromUri(allocator: std.mem.Allocator, uri: []const u8) !EndPoint { var scheme: []const u8 = ""; var host: []const u8 = ""; var port: u16 = 443; @@ -966,7 +966,7 @@ const RequestContext = struct { connection_complete: std.atomic.Atomic(bool) = std.atomic.Atomic(bool).init(false), request_complete: std.atomic.Atomic(bool) = std.atomic.Atomic(bool).init(false), return_error: ?AwsError = null, - allocator: *std.mem.Allocator, + allocator: std.mem.Allocator, body: ?[]const u8 = null, response_code: ?u16 = null, headers: ?std.ArrayList(Header) = null, diff --git a/src/case.zig b/src/case.zig index ae73171..0d53edf 100644 --- a/src/case.zig +++ b/src/case.zig @@ -1,7 +1,7 @@ const std = @import("std"); const expectEqualStrings = std.testing.expectEqualStrings; -pub fn snakeToCamel(allocator: *std.mem.Allocator, name: []const u8) ![]u8 { +pub fn snakeToCamel(allocator: std.mem.Allocator, name: []const u8) ![]u8 { var utf8_name = (std.unicode.Utf8View.init(name) catch unreachable).iterator(); var target_inx: u64 = 0; var previous_ascii: u8 = 0; @@ -24,7 +24,7 @@ pub fn snakeToCamel(allocator: *std.mem.Allocator, name: []const u8) ![]u8 { rc[target_inx] = 0; // add zero sentinel return rc[0..target_inx]; } -pub fn snakeToPascal(allocator: *std.mem.Allocator, name: []const u8) ![]u8 { +pub fn snakeToPascal(allocator: std.mem.Allocator, name: []const u8) ![]u8 { const rc = try snakeToCamel(allocator, name); if (rc[0] >= 'a' and rc[0] <= 'z') { const 
uppercase_char = rc[0] - ('a' - 'A'); diff --git a/src/json.zig b/src/json.zig index 10eaf32..9bddbab 100644 --- a/src/json.zig +++ b/src/json.zig @@ -1453,7 +1453,7 @@ fn parsedEqual(a: anytype, b: @TypeOf(a)) bool { } pub const ParseOptions = struct { - allocator: ?*Allocator = null, + allocator: ?Allocator = null, /// Behaviour when a duplicate field is encountered. duplicate_field_behavior: enum { @@ -1795,7 +1795,7 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options: else => {}, } - try arraylist.ensureCapacity(arraylist.items.len + 1); + try arraylist.ensureTotalCapacity(arraylist.items.len + 1); const v = try parseInternal(ptrInfo.child, tok, tokens, options); arraylist.appendAssumeCapacity(v); } @@ -1835,7 +1835,7 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options: else => {}, } - try arraylist.ensureCapacity(arraylist.items.len + 1); + try arraylist.ensureTotalCapacity(arraylist.items.len + 1); const key_val = try parseInternal(try typeForField(ptrInfo.child, "key"), key, tokens, options); const val = (try tokens.next()) orelse return error.UnexpectedEndOfJson; const val_val = try parseInternal(try typeForField(ptrInfo.child, "value"), val, tokens, options); @@ -2014,7 +2014,7 @@ test "parse into tagged union" { { // failing allocations should be bubbled up instantly without trying next member var fail_alloc = testing.FailingAllocator.init(testing.allocator, 0); - const options = ParseOptions{ .allocator = &fail_alloc.allocator }; + const options = ParseOptions{ .allocator = fail_alloc.allocator() }; const T = union(enum) { // both fields here match the input string: []const u8, @@ -2062,7 +2062,7 @@ test "parse union bubbles up AllocatorRequired" { test "parseFree descends into tagged union" { var fail_alloc = testing.FailingAllocator.init(testing.allocator, 1); - const options = ParseOptions{ .allocator = &fail_alloc.allocator }; + const options = ParseOptions{ .allocator = fail_alloc.allocator() }; const T = union(enum) { int: i32, float: f64, @@ -2217,7 +2217,7 @@ test "parse into struct with duplicate field" { /// A non-stream JSON parser which constructs a tree of Value's. pub const Parser = struct { - allocator: *Allocator, + allocator: Allocator, state: State, copy_strings: bool, // Stores parent nodes and un-combined Values. @@ -2230,7 +2230,7 @@ pub const Parser = struct { Simple, }; - pub fn init(allocator: *Allocator, copy_strings: bool) Parser { + pub fn init(allocator: Allocator, copy_strings: bool) Parser { return Parser{ .allocator = allocator, .state = .Simple, @@ -2255,7 +2255,7 @@ pub const Parser = struct { errdefer arena.deinit(); while (try s.next()) |token| { - try p.transition(&arena.allocator, input, s.i - 1, token); + try p.transition(arena.allocator(), input, s.i - 1, token); } debug.assert(p.stack.items.len == 1); @@ -2268,7 +2268,7 @@ pub const Parser = struct { // Even though p.allocator exists, we take an explicit allocator so that allocation state // can be cleaned up on error correctly during a `parse` on call. 
- fn transition(p: *Parser, allocator: *Allocator, input: []const u8, i: usize, token: Token) !void { + fn transition(p: *Parser, allocator: Allocator, input: []const u8, i: usize, token: Token) !void { switch (p.state) { .ObjectKey => switch (token) { .ObjectEnd => { @@ -2425,7 +2425,7 @@ pub const Parser = struct { } } - fn parseString(p: *Parser, allocator: *Allocator, s: std.meta.TagPayload(Token, Token.String), input: []const u8, i: usize) !Value { + fn parseString(p: *Parser, allocator: Allocator, s: std.meta.TagPayload(Token, Token.String), input: []const u8, i: usize) !Value { const slice = s.slice(input, i); switch (s.escapes) { .None => return Value{ .String = if (p.copy_strings) try allocator.dupe(u8, slice) else slice }, @@ -2623,7 +2623,7 @@ test "import more json tests" { // try testing.expect(mem.eql(u8, tree.root.Object.get("str").?.String, "hello")); // } -fn test_parse(arena_allocator: *std.mem.Allocator, json_str: []const u8) !Value { +fn test_parse(arena_allocator: std.mem.Allocator, json_str: []const u8) !Value { var p = Parser.init(arena_allocator, false); return (try p.parse(json_str)).root; } @@ -2631,13 +2631,13 @@ fn test_parse(arena_allocator: *std.mem.Allocator, json_str: []const u8) !Value test "parsing empty string gives appropriate error" { var arena_allocator = std.heap.ArenaAllocator.init(std.testing.allocator); defer arena_allocator.deinit(); - try testing.expectError(error.UnexpectedEndOfJson, test_parse(&arena_allocator.allocator, "")); + try testing.expectError(error.UnexpectedEndOfJson, test_parse(arena_allocator.allocator(), "")); } test "integer after float has proper type" { var arena_allocator = std.heap.ArenaAllocator.init(std.testing.allocator); defer arena_allocator.deinit(); - const json = try test_parse(&arena_allocator.allocator, + const json = try test_parse(arena_allocator.allocator(), \\{ \\ "float": 3.14, \\ "ints": [1, 2, 3] @@ -2664,7 +2664,7 @@ test "escaped characters" { \\} ; - const obj = (try test_parse(&arena_allocator.allocator, input)).Object; + const obj = (try test_parse(arena_allocator.allocator(), input)).Object; try testing.expectEqualSlices(u8, obj.get("backslash").?.String, "\\"); try testing.expectEqualSlices(u8, obj.get("forwardslash").?.String, "/"); @@ -2691,10 +2691,10 @@ test "string copy option" { var arena_allocator = std.heap.ArenaAllocator.init(std.testing.allocator); defer arena_allocator.deinit(); - const tree_nocopy = try Parser.init(&arena_allocator.allocator, false).parse(input); + const tree_nocopy = try Parser.init(arena_allocator.allocator(), false).parse(input); const obj_nocopy = tree_nocopy.root.Object; - const tree_copy = try Parser.init(&arena_allocator.allocator, true).parse(input); + const tree_copy = try Parser.init(arena_allocator.allocator(), true).parse(input); const obj_copy = tree_copy.root.Object; for ([_][]const u8{ "noescape", "simple", "unicode", "surrogatepair" }) |field_name| { @@ -3009,7 +3009,7 @@ fn teststringify(expected: []const u8, value: anytype, options: StringifyOptions fn write(self: *Self, bytes: []const u8) Error!usize { if (self.expected_remaining.len < bytes.len) { - std.debug.warn( + std.log.warn( \\====== expected this output: ========= \\{s} \\======== instead found this: ========= @@ -3022,7 +3022,7 @@ fn teststringify(expected: []const u8, value: anytype, options: StringifyOptions return error.TooMuchData; } if (!mem.eql(u8, self.expected_remaining[0..bytes.len], bytes)) { - std.debug.warn( + std.log.warn( \\====== expected this output: ========= \\{s} \\======== 
instead found this: ========= diff --git a/src/main.zig b/src/main.zig index 0081691..7b36b2d 100644 --- a/src/main.zig +++ b/src/main.zig @@ -17,8 +17,8 @@ pub fn log( const prefix = "[" ++ @tagName(level) ++ "] " ++ scope_prefix; // Print the message to stderr, silently ignoring any errors - const held = std.debug.getStderrMutex().acquire(); - defer held.release(); + std.debug.getStderrMutex().lock(); + defer std.debug.getStderrMutex().unlock(); const stderr = std.io.getStdErr().writer(); nosuspend stderr.print(prefix ++ format ++ "\n", args) catch return; } @@ -42,7 +42,7 @@ pub fn main() anyerror!void { .backing_allocator = c_allocator, }; defer _ = gpa.deinit(); - const allocator = &gpa.allocator; + const allocator = gpa.allocator(); var tests = std.ArrayList(Tests).init(allocator); defer tests.deinit(); var args = std.process.args(); diff --git a/src/url.zig b/src/url.zig index 57712d4..fcab0c1 100644 --- a/src/url.zig +++ b/src/url.zig @@ -7,7 +7,7 @@ fn defaultTransformer(field_name: []const u8, _: EncodingOptions) anyerror![]con pub const FieldNameTransformer = fn ([]const u8, EncodingOptions) anyerror![]const u8; pub const EncodingOptions = struct { - allocator: ?*std.mem.Allocator = null, + allocator: ?std.mem.Allocator = null, field_name_transformer: *const FieldNameTransformer = &defaultTransformer, }; @@ -97,7 +97,7 @@ fn testencode(expected: []const u8, value: anytype, options: EncodingOptions) !v fn write(self: *Self, bytes: []const u8) Error!usize { // std.debug.print("{s}\n", .{bytes}); if (self.expected_remaining.len < bytes.len) { - std.debug.warn( + std.log.warn( \\====== expected this output: ========= \\{s} \\======== instead found this: ========= @@ -110,7 +110,7 @@ fn testencode(expected: []const u8, value: anytype, options: EncodingOptions) !v return error.TooMuchData; } if (!std.mem.eql(u8, self.expected_remaining[0..bytes.len], bytes)) { - std.debug.warn( + std.log.warn( \\====== expected this output: ========= \\{s} \\======== instead found this: =========