Compare commits

...
Sign in to create a new pull request.

26 commits

Author SHA1 Message Date
dafc69726f
handle http headers with value lists
All checks were successful
AWS-Zig Build / build-zig-amd64-host (push) Successful in 9m11s
aws-zig nightly build / build-zig-nightly (push) Successful in 8m49s
2025-08-29 15:00:27 -07:00
f0f7b180c4
ignoring json.zig in zlint as it was imported from stdlib 2025-08-29 14:02:52 -07:00
0dcdba7887
add zls 0.15.0 to mise config 2025-08-29 13:54:41 -07:00
a487d6c2e7
update setup-zig on nightly to 2.0.5
All checks were successful
aws-zig nightly build / build-zig-nightly (push) Successful in 15m28s
AWS-Zig Build / build-zig-amd64-host (push) Successful in 7m43s
2025-08-25 17:32:42 -07:00
8f3ca1d9cd
fix remaining signature tests
Some checks failed
aws-zig nightly build / build-zig-nightly (push) Failing after 52s
AWS-Zig Build / build-zig-amd64-host (push) Successful in 7m55s
2025-08-25 17:25:08 -07:00
b1a096fa1e
update example to latest working lib
All checks were successful
AWS-Zig Build / build-zig-amd64-host (push) Successful in 7m42s
2025-08-25 14:25:56 -07:00
fd30c9f870
remove caveat in readme. we should be gtg 2025-08-25 14:16:30 -07:00
cfc8aee1a6
a lot of test code, plus a one line ".deinit()" fix
All checks were successful
AWS-Zig Build / build-zig-amd64-host (push) Successful in 7m39s
2025-08-25 14:14:15 -07:00
214c580db4
get failing live request under unit test 2025-08-25 13:07:46 -07:00
2fab8ac0b8
add note regarding 0.15.1 status
All checks were successful
AWS-Zig Build / build-zig-amd64-host (push) Successful in 7m43s
2025-08-25 11:08:48 -07:00
ffdd31d7b0
upgrade example to 0.15.1
All checks were successful
AWS-Zig Build / build-zig-amd64-host (push) Successful in 7m51s
2025-08-25 10:39:29 -07:00
e29829f2a0
fix failing s3 test
Some checks failed
AWS-Zig Build / build-zig-amd64-host (push) Failing after 6m25s
Not quite sure the problem I saw earlier in aws.zig, but the data
entering that switch prong is []const u8, so it is fine for what we are
doing
2025-08-25 10:00:50 -07:00
74704506d8
update tests for zig 0.15.1
Some checks failed
AWS-Zig Build / build-zig-amd64-host (push) Failing after 1m41s
This removes the need to spin up a web server for each test, instead,
mocking the necessary methods to do everything in line. This will make
the tests much more resilient, and with the remaining WriterGate changes
expected in zig 0.16, I suspect the mocking will be unnecessary in the
next release.

There are several test issues that remain:

* Two skipped tests in signature verification. This is the most
  concerning of the remaining issues
* Serialization of [][]const u8 was probably broken in zig 0.14.1, but
  the new version has surfaced this issue. Warning messages are being
  sent, and this needs to be tracked down
* One of the tests is failing as S3 storage tier extra header is not
  being offered. I'm not sure what in the upgrade might have changed
  this behavior, but this needs to be investigated
2025-08-24 15:56:36 -07:00
1e8756cc9a
fix runtime panics, set tests to skip for now 2025-08-23 13:34:36 -07:00
b126ec25e8
fix aws.zig tests 2025-08-23 10:56:21 -07:00
b2ce163b6f
fix aws.zig and url.zig tests, simplify url testing and skip 1 test for now 2025-08-23 10:45:35 -07:00
90c5efcace
clean up all the basic things 2025-08-23 09:44:34 -07:00
0a0933e38f
move test suite to its own file 2025-08-23 09:20:22 -07:00
1170ba99fc
fix most test compilation errors 2025-08-23 08:48:32 -07:00
8d399cb8a6
zig build compiles using zig 0.15.1
Some checks failed
AWS-Zig Build / build-zig-amd64-host (push) Failing after 2m27s
2025-08-22 18:00:34 -07:00
5334cc3bfe
writer needs to be a constant to the pointer of the writer field 2025-08-22 13:30:09 -07:00
53ac60c875
don't forget to flush! 2025-08-22 12:52:57 -07:00
5541742db3
update README to reflect new zig
Some checks failed
AWS-Zig Build / build-zig-amd64-host (push) Failing after 1m10s
2025-08-22 11:47:02 -07:00
b865285b24
update codegen to 0.15.1 2025-08-22 11:23:26 -07:00
9ed9c9b447
update dependencies 2025-08-22 11:22:58 -07:00
20d7d5766b
begin 0.15.1 upgrade - CI and dev tooling 2025-08-22 11:22:24 -07:00
32 changed files with 2189 additions and 1973 deletions

View file

@ -18,11 +18,9 @@ jobs:
- name: Check out repository code
uses: actions/checkout@v4
- name: Setup Zig
uses: https://github.com/mlugg/setup-zig@v2.0.1
with:
version: 0.14.0
- name: Restore Zig caches
uses: https://github.com/Hanaasagi/zig-action-cache@3954aae427f8b05914e08dfd79f15e1f2e435929
uses: https://github.com/mlugg/setup-zig@v2.0.5
# We will let setup-zig use minimum_zig_version from build.zig.zon
# setup-zig also sets up the zig cache appropriately
- name: Ulimit
run: ulimit -a
- name: Run smoke test

View file

@ -26,11 +26,9 @@ jobs:
with:
ref: zig-develop
- name: Setup Zig
uses: https://github.com/mlugg/setup-zig@v2.0.1
uses: https://github.com/mlugg/setup-zig@v2.0.5
with:
version: master
- name: Restore Zig caches
uses: https://github.com/Hanaasagi/zig-action-cache@3954aae427f8b05914e08dfd79f15e1f2e435929
- name: Run smoke test
run: zig build smoke-test --verbose
- name: Run full tests

View file

@ -3,7 +3,7 @@ on:
workflow_dispatch:
push:
branches:
- 'zig-0.13'
- 'zig-0.14.x'
env:
ACTIONS_RUNTIME_TOKEN: ${{ secrets.GITHUB_TOKEN }}
ACTIONS_RUNTIME_URL: ${{ env.GITHUB_SERVER_URL }}/api/actions_pipeline/
@ -18,13 +18,11 @@ jobs:
- name: Check out repository code
uses: actions/checkout@v4
with:
ref: zig-0.13
ref: zig-0.14.x
- name: Setup Zig
uses: https://github.com/mlugg/setup-zig@v2.0.1
with:
version: 0.13.0
- name: Restore Zig caches
uses: https://github.com/Hanaasagi/zig-action-cache@3954aae427f8b05914e08dfd79f15e1f2e435929
version: 0.14.0
- name: Run smoke test
run: zig build smoke-test --verbose
- name: Run full tests

View file

@ -1,5 +1,5 @@
[tools]
pre-commit = "latest"
"ubi:DonIsaac/zlint" = "latest"
zig = "0.14.1"
zls = "0.14.0"
zig = "0.15.1"
zls = "0.15.0"

View file

@ -15,19 +15,16 @@ repos:
- id: zig-build
- repo: local
hooks:
- id: zlint
- id: smoke-test
name: Run zig build smoke-test
entry: zig
args: ["build", "--verbose", "smoke-test"]
language: system
types: [file]
pass_filenames: false
# - repo: local
# hooks:
# - id: zlint
# name: Run zlint
# entry: zlint
# args: ["--deny-warnings", "--fix"]
# language: system
# types: [zig]
- id: zlint
name: Run zlint
entry: zlint
args: ["--deny-warnings", "--fix"]
language: system
types: [zig]

View file

@ -1,18 +1,17 @@
AWS SDK for Zig
===============
[Zig 0.14](https://ziglang.org/download/#release-0.14.0):
[Zig 0.15.1](https://ziglang.org/download/#release-0.15.1):
[![Build Status: Zig 0.14.0](https://git.lerch.org/lobo/aws-sdk-for-zig/actions/workflows/build.yaml/badge.svg)](https://git.lerch.org/lobo/aws-sdk-for-zig/actions?workflow=build.yaml&state=closed)
[![Build Status: Zig 0.15.1](https://git.lerch.org/lobo/aws-sdk-for-zig/actions/workflows/build.yaml/badge.svg)](https://git.lerch.org/lobo/aws-sdk-for-zig/actions?workflow=build.yaml&state=closed)
[Nightly Zig](https://ziglang.org/download/):
[![Build Status: Zig Nightly](https://git.lerch.org/lobo/aws-sdk-for-zig/actions/workflows/zig-nightly.yaml/badge.svg)](https://git.lerch.org/lobo/aws-sdk-for-zig/actions?workflow=zig-nightly.yaml&state=closed)
[Zig 0.13](https://ziglang.org/download/#release-0.13.0):
[![Build Status: Zig 0.13.0](https://git.lerch.org/lobo/aws-sdk-for-zig/actions/workflows/zig-previous.yaml/badge.svg)](https://git.lerch.org/lobo/aws-sdk-for-zig/actions?workflow=zig-previous.yaml&state=closed)
[Zig 0.14.1](https://ziglang.org/download/#release-0.14.1):
[![Build Status: Zig 0.14.x](https://git.lerch.org/lobo/aws-sdk-for-zig/actions/workflows/zig-previous.yaml/badge.svg)](https://git.lerch.org/lobo/aws-sdk-for-zig/actions?workflow=zig-previous.yaml&state=closed)
Current executable size for the demo is 980k after compiling with -Doptimize=ReleaseSmall
in x86_64-linux, and will vary based on services used. Tested targets:
@ -30,15 +29,15 @@ Tested targets are built, but not continuously tested, by CI.
Branches
--------
* **master**: This branch tracks the latest released zig version
* **zig-0.13**: This branch tracks the 0.13 released zig version.
Support for the previous version is best effort, generally
degrading over time. Fixes will generally appear in master, then
backported into the previous version.
* **zig-develop**: This branch tracks zig nightly, and is used mainly as a canary
for breaking changes that will need to be dealt with when
a new zig release appears. Expect significant delays in any
build failures (PRs always welcome!).
* **master**: This branch tracks the latest released zig version
* **zig-0.14.x**: This branch tracks the 0.14/0.14.1 released zig versions.
Support for these previous versions is best effort, generally
degrading over time. Fixes will generally appear in master, then
backported into the previous version.
Other branches/tags exist but are unsupported

View file

@ -11,19 +11,20 @@
"README.md",
"LICENSE",
},
.minimum_zig_version = "0.15.1",
.dependencies = .{
.smithy = .{
.url = "https://git.lerch.org/lobo/smithy/archive/fd9be1afbfcc60d52896c077d8e9c963bb667bf1.tar.gz",
.hash = "smithy-1.0.0-uAyBgZPSAgBHStx7nrj0u3sN66g8Ppnn3XFUEJhn00rP",
.url = "git+https://git.lerch.org/lobo/smithy.git#09c0a618877ebaf8e15fbfc505983876f4e063d5",
.hash = "smithy-1.0.0-uAyBgTnTAgBp2v6vypGcK5-YOCtxs2iEqR-4LfC5FTlS",
},
.models = .{
.url = "https://github.com/aws/aws-sdk-go-v2/archive/refs/tags/release-2025-05-05.tar.gz",
.hash = "N-V-__8AAKWdeiawujEcrfukQbb8lLAiQIRT0uG5gCcm4b7W",
},
.zeit = .{
.url = "git+https://github.com/rockorager/zeit#f86d568b89a5922f084dae524a1eaf709855cd5e",
.hash = "zeit-0.6.0-5I6bkzt5AgC1_BCuSzXkV0JHeF4Mhti1Z_jFC7E_nmD2",
.url = "git+https://github.com/rockorager/zeit?ref=zig-0.15#ed2ca60db118414bda2b12df2039e33bad3b0b88",
.hash = "zeit-0.6.0-5I6bk0J9AgCVa0nnyL0lNY9Xa9F68hHq-ZarhuXNV-Jb",
},
.date = .{
.path = "lib/date",
@ -32,8 +33,8 @@
.path = "lib/json",
},
.case = .{
.url = "git+https://github.com/travisstaloch/case.git#610caade88ca54d2745f115114b08e73e2c6fe02",
.hash = "N-V-__8AAIfIAAC_RzCtghVVBVdqUzB8AaaGIyvK2WWz38bC",
.url = "git+https://github.com/travisstaloch/case.git#f8003fe5f93b65f673d10d41323e347225e8cb87",
.hash = "case-0.0.1-chGYqx_EAADaGJjmoln5M1iMBDTrMdd8to5wdEVpfXm4",
},
},
}

View file

@ -1,11 +1,19 @@
.{
.name = "aws-zig-codegen",
.name = .codegen,
.version = "0.0.1",
.paths = .{
"build.zig",
"build.zig.zon",
"src",
"README.md",
"LICENSE",
},
.fingerprint = 0x41c2ec2d551fe279,
.dependencies = .{
.smithy = .{
.url = "https://git.lerch.org/lobo/smithy/archive/41b61745d25a65817209dd5dddbb5f9b66896a99.tar.gz",
.hash = "122087deb0ae309b2258d59b40d82fe5921fdfc35b420bb59033244851f7f276fa34",
.url = "git+https://git.lerch.org/lobo/smithy.git#09c0a618877ebaf8e15fbfc505983876f4e063d5",
.hash = "smithy-1.0.0-uAyBgTnTAgBp2v6vypGcK5-YOCtxs2iEqR-4LfC5FTlS",
},
},
}

View file

@ -12,7 +12,7 @@ allocator: std.mem.Allocator,
indent_level: u64,
pub fn appendToTypeStack(self: @This(), shape_info: *const smithy.ShapeInfo) !void {
try self.type_stack.append(shape_info);
try self.type_stack.append(self.allocator, shape_info);
}
pub fn popFromTypeStack(self: @This()) void {

View file

@ -107,8 +107,9 @@ pub fn computeDirectoryHash(
const arena = arena_instance.allocator();
// Collect all files, recursively, then sort.
var all_files = std.ArrayList(*HashedFile).init(gpa);
defer all_files.deinit();
// Normally we're looking at around 300 model files
var all_files = try std.ArrayList(*HashedFile).initCapacity(gpa, 300);
defer all_files.deinit(gpa);
var walker = try dir.walk(gpa);
defer walker.deinit();
@ -139,7 +140,7 @@ pub fn computeDirectoryHash(
wait_group.start();
try thread_pool.spawn(workerHashFile, .{ dir, hashed_file, &wait_group });
try all_files.append(hashed_file);
try all_files.append(gpa, hashed_file);
}
}
@ -155,7 +156,7 @@ pub fn computeDirectoryHash(
hasher.update(&hashed_file.hash);
}
if (any_failures) return error.DirectoryHashUnavailable;
if (options.needFileHashes) options.fileHashes = try all_files.toOwnedSlice();
if (options.needFileHashes) options.fileHashes = try all_files.toOwnedSlice(gpa);
return hasher.finalResult();
}
fn workerHashFile(dir: std.fs.Dir, hashed_file: *HashedFile, wg: *std.Thread.WaitGroup) void {

View file

@ -17,6 +17,9 @@ const ServiceShape = smt.ServiceShape;
const ListShape = smt.ListShape;
const MapShape = smt.MapShape;
// manifest file 21k currently, but unbounded
var manifest_buf: [1024 * 32]u8 = undefined;
pub fn main() anyerror!void {
const root_progress_node = std.Progress.start(.{});
defer root_progress_node.end();
@ -27,7 +30,8 @@ pub fn main() anyerror!void {
const args = try std.process.argsAlloc(allocator);
defer std.process.argsFree(allocator, args);
const stdout = std.io.getStdOut().writer();
var stdout_writer = std.fs.File.stdout().writer(&.{});
const stdout = &stdout_writer.interface;
var output_dir = std.fs.cwd();
defer if (output_dir.fd > 0) output_dir.close();
@ -48,11 +52,10 @@ pub fn main() anyerror!void {
models_dir = try std.fs.cwd().openDir(args[i + 1], .{ .iterate = true });
}
// TODO: We need a different way to handle this file...
const manifest_file_started = false;
var manifest_file: std.fs.File = undefined;
defer if (manifest_file_started) manifest_file.close();
var manifest: std.fs.File.Writer = undefined;
var manifest_file = try output_dir.createFile("service_manifest.zig", .{});
defer manifest_file.close();
var manifest = manifest_file.writer(&manifest_buf).interface;
defer manifest.flush() catch @panic("Could not flush service manifest");
var files_processed: usize = 0;
var skip_next = true;
for (args) |arg| {
@ -71,11 +74,7 @@ pub fn main() anyerror!void {
skip_next = true;
continue;
}
if (!manifest_file_started) {
manifest_file = try output_dir.createFile("service_manifest.zig", .{});
manifest = manifest_file.writer();
}
try processFile(arg, output_dir, manifest);
try processFile(arg, output_dir, &manifest);
files_processed += 1;
}
if (files_processed == 0) {
@ -94,7 +93,7 @@ pub fn main() anyerror!void {
}
if (args.len == 0)
_ = try generateServices(allocator, ";", std.io.getStdIn(), stdout);
_ = try generateServices(allocator, ";", std.fs.File.stdin(), stdout);
if (verbose) {
const output_path = try output_dir.realpathAlloc(allocator, ".");
@ -133,7 +132,8 @@ fn processDirectories(models_dir: std.fs.Dir, output_dir: std.fs.Dir, parent_pro
// Do this in a brain dead fashion from here, no optimization
const manifest_file = try output_dir.createFile("service_manifest.zig", .{});
defer manifest_file.close();
const manifest = manifest_file.writer();
var manifest = manifest_file.writer(&manifest_buf);
defer manifest.interface.flush() catch @panic("Error flushing service_manifest.zig");
var mi = models_dir.iterate();
const generating_models_progress = parent_progress.start("generating models", count);
@ -141,18 +141,15 @@ fn processDirectories(models_dir: std.fs.Dir, output_dir: std.fs.Dir, parent_pro
while (try mi.next()) |e| {
if ((e.kind == .file or e.kind == .sym_link) and std.mem.endsWith(u8, e.name, ".json")) {
try processFile(e.name, output_dir, manifest);
try processFile(e.name, output_dir, &manifest.interface);
generating_models_progress.completeOne();
}
}
// re-calculate so we can store the manifest
model_digest = calculated_manifest.model_dir_hash_digest;
_, calculated_manifest = try calculateDigests(models_dir, output_dir, &thread_pool);
try output_dir.writeFile(.{ .sub_path = "output_manifest.json", .data = try std.json.stringifyAlloc(
allocator,
calculated_manifest,
.{ .whitespace = .indent_2 },
) });
const data = try std.fmt.allocPrint(allocator, "{f}", .{std.json.fmt(calculated_manifest, .{ .whitespace = .indent_2 })});
try output_dir.writeFile(.{ .sub_path = "output_manifest.json", .data = data });
}
var model_digest: ?[Hasher.hex_multihash_len]u8 = null;
@ -200,7 +197,7 @@ fn calculateDigests(models_dir: std.fs.Dir, output_dir: std.fs.Dir, thread_pool:
},
};
}
fn processFile(file_name: []const u8, output_dir: std.fs.Dir, manifest: anytype) !void {
fn processFile(file_name: []const u8, output_dir: std.fs.Dir, manifest: *std.Io.Writer) !void {
// It's probably best to create our own allocator here so we can deinit at the end and
// toss all allocations related to the services in this file
// I can't guarantee we're not leaking something, and at the end of the
@ -209,11 +206,10 @@ fn processFile(file_name: []const u8, output_dir: std.fs.Dir, manifest: anytype)
defer arena.deinit();
const allocator = arena.allocator();
var output = try std.ArrayListUnmanaged(u8).initCapacity(allocator, 1024 * 1024 * 2);
defer output.deinit(allocator);
var output = try std.Io.Writer.Allocating.initCapacity(allocator, 1024 * 1024 * 2);
defer output.deinit();
var counting_writer = std.io.countingWriter(output.writer(allocator));
var writer = counting_writer.writer();
const writer = &output.writer;
_ = try writer.write("const std = @import(\"std\");\n");
_ = try writer.write("const smithy = @import(\"smithy\");\n");
@ -226,7 +222,12 @@ fn processFile(file_name: []const u8, output_dir: std.fs.Dir, manifest: anytype)
if (verbose) std.log.info("Processing file: {s}", .{file_name});
const service_names = generateServicesForFilePath(allocator, ";", file_name, writer) catch |err| {
const service_names = generateServicesForFilePath(
allocator,
";",
file_name,
writer,
) catch |err| {
std.log.err("Error processing file: {s}", .{file_name});
return err;
};
@ -249,7 +250,7 @@ fn processFile(file_name: []const u8, output_dir: std.fs.Dir, manifest: anytype)
output_file_name = new_output_file_name;
}
const unformatted: [:0]const u8 = try output.toOwnedSliceSentinel(allocator, 0);
const unformatted: [:0]const u8 = try output.toOwnedSliceSentinel(0);
const formatted = try zigFmt(allocator, unformatted);
// Dump our buffer out to disk
@ -266,14 +267,17 @@ fn zigFmt(allocator: std.mem.Allocator, buffer: [:0]const u8) ![]const u8 {
var tree = try std.zig.Ast.parse(allocator, buffer, .zig);
defer tree.deinit(allocator);
return try tree.render(allocator);
var aw = try std.Io.Writer.Allocating.initCapacity(allocator, buffer.len);
defer aw.deinit();
try tree.render(allocator, &aw.writer, .{});
return aw.toOwnedSlice();
}
fn generateServicesForFilePath(
allocator: std.mem.Allocator,
comptime terminator: []const u8,
path: []const u8,
writer: anytype,
writer: *std.Io.Writer,
) ![][]const u8 {
const file = try std.fs.cwd().openFile(path, .{});
defer file.close();
@ -288,28 +292,34 @@ fn addReference(id: []const u8, map: *std.StringHashMap(u64)) !void {
res.value_ptr.* = 1;
}
}
fn countAllReferences(shape_ids: [][]const u8, shapes: std.StringHashMap(smithy.ShapeInfo), shape_references: *std.StringHashMap(u64), stack: *std.ArrayList([]const u8)) anyerror!void {
fn countAllReferences(allocator: std.mem.Allocator, shape_ids: [][]const u8, shapes: std.StringHashMap(smithy.ShapeInfo), shape_references: *std.StringHashMap(u64), stack: *std.ArrayList([]const u8)) anyerror!void {
for (shape_ids) |id| {
const shape = shapes.get(id);
if (shape == null) {
std.log.err("Error - could not find shape with id {s}", .{id});
return error.ShapeNotFound;
}
try countReferences(shape.?, shapes, shape_references, stack);
try countReferences(allocator, shape.?, shapes, shape_references, stack);
}
}
fn countTypeMembersReferences(type_members: []smithy.TypeMember, shapes: std.StringHashMap(smithy.ShapeInfo), shape_references: *std.StringHashMap(u64), stack: *std.ArrayList([]const u8)) anyerror!void {
fn countTypeMembersReferences(allocator: std.mem.Allocator, type_members: []smithy.TypeMember, shapes: std.StringHashMap(smithy.ShapeInfo), shape_references: *std.StringHashMap(u64), stack: *std.ArrayList([]const u8)) anyerror!void {
for (type_members) |m| {
const target = shapes.get(m.target);
if (target == null) {
std.log.err("Error - could not find target {s}", .{m.target});
return error.TargetNotFound;
}
try countReferences(target.?, shapes, shape_references, stack);
try countReferences(allocator, target.?, shapes, shape_references, stack);
}
}
fn countReferences(shape: smithy.ShapeInfo, shapes: std.StringHashMap(smithy.ShapeInfo), shape_references: *std.StringHashMap(u64), stack: *std.ArrayList([]const u8)) anyerror!void {
fn countReferences(
allocator: std.mem.Allocator,
shape: smithy.ShapeInfo,
shapes: std.StringHashMap(smithy.ShapeInfo),
shape_references: *std.StringHashMap(u64),
stack: *std.ArrayList([]const u8),
) anyerror!void {
// Add ourselves as a reference, then we will continue down the tree
try addReference(shape.id, shape_references);
// Put ourselves on the stack. If we come back to ourselves, we want to end.
@ -317,7 +327,7 @@ fn countReferences(shape: smithy.ShapeInfo, shapes: std.StringHashMap(smithy.Sha
if (std.mem.eql(u8, shape.id, i))
return;
}
try stack.append(shape.id);
try stack.append(allocator, shape.id);
defer _ = stack.pop();
// Well, this is a fun read: https://awslabs.github.io/smithy/1.0/spec/core/model.html#recursive-shape-definitions
// Looks like recursion has special rules in the spec to accommodate Java.
@ -339,15 +349,15 @@ fn countReferences(shape: smithy.ShapeInfo, shapes: std.StringHashMap(smithy.Sha
.unit,
=> {},
.document, .member, .resource => {}, // less sure about these?
.list => |i| try countReferences(shapes.get(i.member_target).?, shapes, shape_references, stack),
.set => |i| try countReferences(shapes.get(i.member_target).?, shapes, shape_references, stack),
.list => |i| try countReferences(allocator, shapes.get(i.member_target).?, shapes, shape_references, stack),
.set => |i| try countReferences(allocator, shapes.get(i.member_target).?, shapes, shape_references, stack),
.map => |i| {
try countReferences(shapes.get(i.key).?, shapes, shape_references, stack);
try countReferences(shapes.get(i.value).?, shapes, shape_references, stack);
try countReferences(allocator, shapes.get(i.key).?, shapes, shape_references, stack);
try countReferences(allocator, shapes.get(i.value).?, shapes, shape_references, stack);
},
.structure => |m| try countTypeMembersReferences(m.members, shapes, shape_references, stack),
.uniontype => |m| try countTypeMembersReferences(m.members, shapes, shape_references, stack),
.service => |i| try countAllReferences(i.operations, shapes, shape_references, stack),
.structure => |m| try countTypeMembersReferences(allocator, m.members, shapes, shape_references, stack),
.uniontype => |m| try countTypeMembersReferences(allocator, m.members, shapes, shape_references, stack),
.service => |i| try countAllReferences(allocator, i.operations, shapes, shape_references, stack),
.operation => |op| {
if (op.input) |i| {
const val = shapes.get(i);
@ -355,7 +365,7 @@ fn countReferences(shape: smithy.ShapeInfo, shapes: std.StringHashMap(smithy.Sha
std.log.err("Error processing shape with id \"{s}\". Input shape \"{s}\" was not found", .{ shape.id, i });
return error.ShapeNotFound;
}
try countReferences(val.?, shapes, shape_references, stack);
try countReferences(allocator, val.?, shapes, shape_references, stack);
}
if (op.output) |i| {
const val = shapes.get(i);
@ -363,27 +373,31 @@ fn countReferences(shape: smithy.ShapeInfo, shapes: std.StringHashMap(smithy.Sha
std.log.err("Error processing shape with id \"{s}\". Output shape \"{s}\" was not found", .{ shape.id, i });
return error.ShapeNotFound;
}
try countReferences(val.?, shapes, shape_references, stack);
try countReferences(allocator, val.?, shapes, shape_references, stack);
}
if (op.errors) |i| try countAllReferences(i, shapes, shape_references, stack);
if (op.errors) |i| try countAllReferences(allocator, i, shapes, shape_references, stack);
},
.@"enum" => |m| try countTypeMembersReferences(m.members, shapes, shape_references, stack),
.@"enum" => |m| try countTypeMembersReferences(allocator, m.members, shapes, shape_references, stack),
}
}
fn generateServices(allocator: std.mem.Allocator, comptime _: []const u8, file: std.fs.File, writer: anytype) ![][]const u8 {
fn generateServices(
allocator: std.mem.Allocator,
comptime _: []const u8,
file: std.fs.File,
writer: *std.Io.Writer,
) ![][]const u8 {
const json = try file.readToEndAlloc(allocator, 1024 * 1024 * 1024);
defer allocator.free(json);
const model = try smithy.parse(allocator, json);
defer model.deinit();
var shapes = std.StringHashMap(smithy.ShapeInfo).init(allocator);
defer shapes.deinit();
var services = std.ArrayList(smithy.ShapeInfo).init(allocator);
defer services.deinit();
var services = try std.ArrayList(smithy.ShapeInfo).initCapacity(allocator, model.shapes.len);
for (model.shapes) |shape| {
try shapes.put(shape.id, shape);
switch (shape.shape) {
.service => try services.append(shape),
.service => services.appendAssumeCapacity(shape),
else => {},
}
}
@ -392,15 +406,15 @@ fn generateServices(allocator: std.mem.Allocator, comptime _: []const u8, file:
// a reference count in case there are recursive data structures
var shape_references = std.StringHashMap(u64).init(allocator);
defer shape_references.deinit();
var stack = std.ArrayList([]const u8).init(allocator);
defer stack.deinit();
var stack: std.ArrayList([]const u8) = .{};
defer stack.deinit(allocator);
for (services.items) |service|
try countReferences(service, shapes, &shape_references, &stack);
try countReferences(allocator, service, shapes, &shape_references, &stack);
var constant_names = std.ArrayList([]const u8).init(allocator);
defer constant_names.deinit();
var unresolved = std.ArrayList(smithy.ShapeInfo).init(allocator);
defer unresolved.deinit();
var constant_names = try std.ArrayList([]const u8).initCapacity(allocator, services.items.len);
defer constant_names.deinit(allocator);
var unresolved: std.ArrayList(smithy.ShapeInfo) = .{};
defer unresolved.deinit(allocator);
var generated = std.StringHashMap(void).init(allocator);
defer generated.deinit();
@ -445,7 +459,7 @@ fn generateServices(allocator: std.mem.Allocator, comptime _: []const u8, file:
// name of the field will be snake_case of whatever comes in from
// sdk_id. Not sure this will be simple...
const constant_name = try support.constantName(allocator, sdk_id, .snake);
try constant_names.append(constant_name);
constant_names.appendAssumeCapacity(constant_name);
try writer.print("const Self = @This();\n", .{});
if (version) |v|
try writer.print("pub const version: ?[]const u8 = \"{s}\";\n", .{v})
@ -481,16 +495,16 @@ fn generateServices(allocator: std.mem.Allocator, comptime _: []const u8, file:
try generateOperation(allocator, shapes.get(op).?, state, writer);
}
try generateAdditionalTypes(allocator, state, writer);
return constant_names.toOwnedSlice();
return constant_names.toOwnedSlice(allocator);
}
fn generateAdditionalTypes(allocator: std.mem.Allocator, file_state: FileGenerationState, writer: anytype) !void {
fn generateAdditionalTypes(allocator: std.mem.Allocator, file_state: FileGenerationState, writer: *std.Io.Writer) !void {
// More types may be added during processing
while (file_state.additional_types_to_generate.pop()) |t| {
if (file_state.additional_types_generated.getEntry(t.name) != null) continue;
// std.log.info("\t\t{s}", .{t.name});
var type_stack = std.ArrayList(*const smithy.ShapeInfo).init(allocator);
defer type_stack.deinit();
var type_stack: std.ArrayList(*const smithy.ShapeInfo) = .{};
defer type_stack.deinit(allocator);
const state = GenerationState{
.type_stack = &type_stack,
.file_state = file_state,
@ -510,9 +524,9 @@ fn generateAdditionalTypes(allocator: std.mem.Allocator, file_state: FileGenerat
}
}
fn outputIndent(state: GenerationState, writer: anytype) !void {
fn outputIndent(state: GenerationState, writer: *std.Io.Writer) !void {
const n_chars = 4 * state.indent_level;
try writer.writeByteNTimes(' ', n_chars);
try writer.splatBytesAll(" ", n_chars);
}
const StructType = enum {
@ -536,12 +550,12 @@ const operation_sub_types = [_]OperationSubTypeInfo{
},
};
fn generateOperation(allocator: std.mem.Allocator, operation: smithy.ShapeInfo, file_state: FileGenerationState, writer: anytype) !void {
fn generateOperation(allocator: std.mem.Allocator, operation: smithy.ShapeInfo, file_state: FileGenerationState, writer: *std.Io.Writer) !void {
const snake_case_name = try support.constantName(allocator, operation.name, .snake);
defer allocator.free(snake_case_name);
var type_stack = std.ArrayList(*const smithy.ShapeInfo).init(allocator);
defer type_stack.deinit();
var type_stack: std.ArrayList(*const smithy.ShapeInfo) = .{};
defer type_stack.deinit(allocator);
const state = GenerationState{
.type_stack = &type_stack,
.file_state = file_state,
@ -586,7 +600,12 @@ fn generateOperation(allocator: std.mem.Allocator, operation: smithy.ShapeInfo,
new_state.indent_level = 0;
std.debug.assert(new_state.type_stack.items.len == 0);
try serialization.json.generateToJsonFunction(shape_id, writer.any(), new_state, generate_type_options.keyCase(.pascal));
try serialization.json.generateToJsonFunction(
shape_id,
writer,
new_state,
generate_type_options.keyCase(.pascal),
);
try writer.writeAll("\n");
},
@ -638,7 +657,7 @@ fn generateOperation(allocator: std.mem.Allocator, operation: smithy.ShapeInfo,
_ = try writer.write("} = .{};\n");
}
fn generateMetadataFunction(operation_name: []const u8, state: GenerationState, writer: anytype, options: GenerateTypeOptions) !void {
fn generateMetadataFunction(operation_name: []const u8, state: GenerationState, writer: *std.Io.Writer, options: GenerateTypeOptions) !void {
// TODO: Shove these lines in here, and also the else portion
// pub fn metaInfo(self: @This()) struct { service: @TypeOf(sts), action: @TypeOf(sts.get_caller_identity) } {
// return .{ .service = sts, .action = sts.get_caller_identity };
@ -699,7 +718,7 @@ fn getTypeName(allocator: std.mem.Allocator, shape: smithy.ShapeInfo) ![]const u
}
}
fn reuseCommonType(shape: smithy.ShapeInfo, writer: anytype, state: GenerationState) !bool {
fn reuseCommonType(shape: smithy.ShapeInfo, writer: *std.Io.Writer, state: GenerationState) !bool {
// We want to return if we're at the top level of the stack. There are three
// reasons for this:
// 1. For operations, we have a request that includes a metadata function
@ -729,14 +748,14 @@ fn reuseCommonType(shape: smithy.ShapeInfo, writer: anytype, state: GenerationSt
rc = true;
_ = try writer.write(type_name); // This can't possibly be this easy...
if (state.file_state.additional_types_generated.getEntry(shape.name) == null)
try state.file_state.additional_types_to_generate.append(shape);
try state.file_state.additional_types_to_generate.append(state.allocator, shape);
}
}
return rc;
}
/// return type is anyerror!void as this is a recursive function, so the compiler cannot properly infer error types
fn generateTypeFor(shape_id: []const u8, writer: anytype, state: GenerationState, comptime options: GenerateTypeOptions) anyerror!bool {
fn generateTypeFor(shape_id: []const u8, writer: *std.Io.Writer, state: GenerationState, comptime options: GenerateTypeOptions) anyerror!bool {
const end_structure = options.end_structure;
var rc = false;
@ -808,7 +827,8 @@ fn generateTypeFor(shape_id: []const u8, writer: anytype, state: GenerationState
.float => |s| try generateSimpleTypeFor(s, "f32", writer),
.long => |s| try generateSimpleTypeFor(s, "i64", writer),
.map => |m| {
if (!try reuseCommonType(shape_info, std.io.null_writer, state)) {
var null_writer = std.Io.Writer.Discarding.init(&.{}).writer;
if (!try reuseCommonType(shape_info, &null_writer, state)) {
try generateMapTypeFor(m, writer, state, options);
rc = true;
} else {
@ -825,7 +845,7 @@ fn generateTypeFor(shape_id: []const u8, writer: anytype, state: GenerationState
return rc;
}
fn generateMapTypeFor(map: anytype, writer: anytype, state: GenerationState, comptime options: GenerateTypeOptions) anyerror!void {
fn generateMapTypeFor(map: anytype, writer: *std.Io.Writer, state: GenerationState, comptime options: GenerateTypeOptions) anyerror!void {
_ = try writer.write("struct {\n");
try writer.writeAll("pub const is_map_type = true;\n\n");
@ -848,12 +868,12 @@ fn generateMapTypeFor(map: anytype, writer: anytype, state: GenerationState, com
_ = try writer.write("}");
}
fn generateSimpleTypeFor(_: anytype, type_name: []const u8, writer: anytype) !void {
fn generateSimpleTypeFor(_: anytype, type_name: []const u8, writer: *std.Io.Writer) !void {
_ = try writer.write(type_name); // This had required stuff but the problem was elsewhere. Better to leave as function just in case
}
const Mapping = struct { snake: []const u8, original: []const u8 };
fn generateComplexTypeFor(shape_id: []const u8, members: []smithy.TypeMember, type_type_name: []const u8, writer: anytype, state: GenerationState, comptime options: GenerateTypeOptions) anyerror!void {
fn generateComplexTypeFor(shape_id: []const u8, members: []smithy.TypeMember, type_type_name: []const u8, writer: *std.Io.Writer, state: GenerationState, comptime options: GenerateTypeOptions) anyerror!void {
_ = shape_id;
var arena = std.heap.ArenaAllocator.init(state.allocator);
@ -861,7 +881,7 @@ fn generateComplexTypeFor(shape_id: []const u8, members: []smithy.TypeMember, ty
const allocator = arena.allocator();
var field_name_mappings = try std.ArrayList(Mapping).initCapacity(allocator, members.len);
defer field_name_mappings.deinit();
defer field_name_mappings.deinit(allocator);
// There is an httpQueryParams trait as well, but nobody is using it. API GW
// pretends to, but it's an empty map
//
@ -869,13 +889,13 @@ fn generateComplexTypeFor(shape_id: []const u8, members: []smithy.TypeMember, ty
//
// httpLabel is interesting - right now we just assume anything can be used - do we need to track this?
var http_query_mappings = try std.ArrayList(Mapping).initCapacity(allocator, members.len);
defer http_query_mappings.deinit();
defer http_query_mappings.deinit(allocator);
var http_header_mappings = try std.ArrayList(Mapping).initCapacity(allocator, members.len);
defer http_header_mappings.deinit();
defer http_header_mappings.deinit(allocator);
var map_fields = std.ArrayList([]const u8).init(allocator);
defer map_fields.deinit();
var map_fields = try std.ArrayList([]const u8).initCapacity(allocator, members.len);
defer map_fields.deinit(allocator);
// prolog. We'll rely on caller to get the spacing correct here
_ = try writer.write(type_type_name);
@ -930,7 +950,7 @@ fn generateComplexTypeFor(shape_id: []const u8, members: []smithy.TypeMember, ty
try writer.print("{s}: ", .{member_name});
try writeOptional(member.traits, writer, null);
if (try generateTypeFor(member.target, writer, child_state, options.endStructure(true)))
try map_fields.append(try std.fmt.allocPrint(allocator, "{s}", .{member_name}));
map_fields.appendAssumeCapacity(try std.fmt.allocPrint(allocator, "{s}", .{member_name}));
if (!std.mem.eql(u8, "union", type_type_name))
try writeOptional(member.traits, writer, " = null");
@ -978,7 +998,14 @@ fn generateComplexTypeFor(shape_id: []const u8, members: []smithy.TypeMember, ty
_ = try writer.write("}\n");
}
fn writeMappings(state: GenerationState, @"pub": []const u8, mapping_name: []const u8, mappings: anytype, force_output: bool, writer: anytype) !void {
fn writeMappings(
state: GenerationState,
@"pub": []const u8,
mapping_name: []const u8,
mappings: anytype,
force_output: bool,
writer: *std.Io.Writer,
) !void {
if (mappings.items.len == 0 and !force_output) return;
try outputIndent(state, writer);
if (mappings.items.len == 0) {
@ -998,7 +1025,7 @@ fn writeMappings(state: GenerationState, @"pub": []const u8, mapping_name: []con
_ = try writer.write("};\n");
}
fn writeOptional(traits: ?[]smithy.Trait, writer: anytype, value: ?[]const u8) !void {
fn writeOptional(traits: ?[]smithy.Trait, writer: *std.Io.Writer, value: ?[]const u8) !void {
if (traits) |ts| if (smt.hasTrait(.required, ts)) return;
try writer.writeAll(value orelse "?");
}

View file

@ -17,7 +17,7 @@ const JsonMember = struct {
shape_info: smithy.ShapeInfo,
};
pub fn generateToJsonFunction(shape_id: []const u8, writer: std.io.AnyWriter, state: GenerationState, comptime options: GenerateTypeOptions) !void {
pub fn generateToJsonFunction(shape_id: []const u8, writer: *std.Io.Writer, state: GenerationState, comptime options: GenerateTypeOptions) !void {
_ = options;
const allocator = state.allocator;
@ -117,15 +117,15 @@ fn getMemberValueJson(allocator: std.mem.Allocator, source: []const u8, member:
const member_value = try std.fmt.allocPrint(allocator, "@field({s}, \"{s}\")", .{ source, member.field_name });
defer allocator.free(member_value);
var output_block = std.ArrayListUnmanaged(u8){};
const writer = output_block.writer(allocator);
var output_block = std.Io.Writer.Allocating.init(allocator);
defer output_block.deinit();
try writeMemberValue(
writer,
&output_block.writer,
member_value,
);
return output_block.toOwnedSlice(allocator);
return output_block.toOwnedSlice();
}
fn getShapeJsonValueType(shape: Shape) []const u8 {
@ -139,7 +139,7 @@ fn getShapeJsonValueType(shape: Shape) []const u8 {
}
fn writeMemberValue(
writer: anytype,
writer: *std.Io.Writer,
member_value: []const u8,
) !void {
try writer.writeAll(member_value);
@ -153,7 +153,7 @@ const WriteMemberJsonParams = struct {
member: smithy.TypeMember,
};
fn writeStructureJson(params: WriteMemberJsonParams, writer: std.io.AnyWriter) !void {
fn writeStructureJson(params: WriteMemberJsonParams, writer: *std.Io.Writer) !void {
const shape_type = "structure";
const allocator = params.state.allocator;
const state = params.state;
@ -221,7 +221,7 @@ fn writeStructureJson(params: WriteMemberJsonParams, writer: std.io.AnyWriter) !
}
}
fn writeListJson(list: smithy_tools.ListShape, params: WriteMemberJsonParams, writer: std.io.AnyWriter) anyerror!void {
fn writeListJson(list: smithy_tools.ListShape, params: WriteMemberJsonParams, writer: *std.Io.Writer) anyerror!void {
const state = params.state;
const allocator = state.allocator;
@ -274,7 +274,7 @@ fn writeListJson(list: smithy_tools.ListShape, params: WriteMemberJsonParams, wr
}
}
fn writeMapJson(map: smithy_tools.MapShape, params: WriteMemberJsonParams, writer: std.io.AnyWriter) anyerror!void {
fn writeMapJson(map: smithy_tools.MapShape, params: WriteMemberJsonParams, writer: *std.Io.Writer) anyerror!void {
const state = params.state;
const name = params.field_name;
const value = params.field_value;
@ -351,11 +351,11 @@ fn writeMapJson(map: smithy_tools.MapShape, params: WriteMemberJsonParams, write
}
}
fn writeScalarJson(comment: []const u8, params: WriteMemberJsonParams, writer: std.io.AnyWriter) anyerror!void {
fn writeScalarJson(comment: []const u8, params: WriteMemberJsonParams, writer: *std.Io.Writer) anyerror!void {
try writer.print("try jw.write({s}); // {s}\n\n", .{ params.field_value, comment });
}
fn writeMemberJson(params: WriteMemberJsonParams, writer: std.io.AnyWriter) anyerror!void {
fn writeMemberJson(params: WriteMemberJsonParams, writer: *std.Io.Writer) anyerror!void {
const shape_id = params.shape_id;
const state = params.state;
const shape_info = try smithy_tools.getShapeInfo(shape_id, state.file_state.shapes);

View file

@ -15,15 +15,17 @@ pub fn build(b: *std.Build) void {
// set a preferred release mode, allowing the user to decide how to optimize.
const optimize = b.standardOptimizeOption(.{});
const exe = b.addExecutable(.{
.name = "tmp",
// In this case the main source file is merely a path, however, in more
// complicated build scripts, this could be a generated file.
const mod_exe = b.createModule(.{
.root_source_file = b.path("src/main.zig"),
.target = target,
.optimize = optimize,
});
const exe = b.addExecutable(.{
.name = "tmp",
.root_module = mod_exe,
});
const aws_dep = b.dependency("aws", .{
// These are the two arguments to the dependency. It expects a target and optimization level.
.target = target,
@ -59,13 +61,16 @@ pub fn build(b: *std.Build) void {
const run_step = b.step("run", "Run the app");
run_step.dependOn(&run_cmd.step);
// Creates a step for unit testing. This only builds the test executable
// but does not run it.
const unit_tests = b.addTest(.{
const mod_unit_tests = b.createModule(.{
.root_source_file = b.path("src/main.zig"),
.target = target,
.optimize = optimize,
});
// Creates a step for unit testing. This only builds the test executable
// but does not run it.
const unit_tests = b.addTest(.{
.root_module = mod_unit_tests,
});
const run_unit_tests = b.addRunArtifact(unit_tests);

View file

@ -6,8 +6,8 @@
.dependencies = .{
.aws = .{
.url = "https://git.lerch.org/api/packages/lobo/generic/aws-sdk-with-models/7a6086447c1249b0e5b5b5f3873d2f7932bea56d/7a6086447c1249b0e5b5b5f3873d2f7932bea56d-with-models.tar.gz",
.hash = "aws-0.0.1-SbsFcGN_CQCBjurpc2GEMw4c_qAkGu6KpuVnLBLY4L4q",
.url = "https://git.lerch.org/api/packages/lobo/generic/aws-sdk-with-models/cfc8aee1a6b54eac4a58893674361f1ad58e8595/cfc8aee1a6b54eac4a58893674361f1ad58e8595-with-models.tar.gz",
.hash = "aws-0.0.1-SbsFcK8HCgA-P7sjZP5z7J7ZfZLTkQ4osD0qgbyUgTzG",
},
},
}

View file

@ -15,10 +15,10 @@ pub fn main() anyerror!void {
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
defer _ = gpa.deinit();
const allocator = gpa.allocator();
const stdout_raw = std.io.getStdOut().writer();
var bw = std.io.bufferedWriter(stdout_raw);
defer bw.flush() catch unreachable;
const stdout = bw.writer();
var stdout_buffer: [1024]u8 = undefined;
var stdout_raw = std.fs.File.stdout().writer(&stdout_buffer);
const stdout = &stdout_raw.interface;
defer stdout.flush() catch unreachable;
// To use a proxy, uncomment the following with your own configuration
// const proxy = std.http.Proxy{

View file

@ -5,8 +5,8 @@
.minimum_zig_version = "0.14.0",
.dependencies = .{
.zeit = .{
.url = "git+https://github.com/rockorager/zeit#f86d568b89a5922f084dae524a1eaf709855cd5e",
.hash = "zeit-0.6.0-5I6bkzt5AgC1_BCuSzXkV0JHeF4Mhti1Z_jFC7E_nmD2",
.url = "git+https://github.com/rockorager/zeit?ref=zig-0.15#ed2ca60db118414bda2b12df2039e33bad3b0b88",
.hash = "zeit-0.6.0-5I6bk0J9AgCVa0nnyL0lNY9Xa9F68hHq-ZarhuXNV-Jb",
},
.json = .{
.path = "../json",

View file

@ -17,10 +17,10 @@ pub const Timestamp = enum(zeit.Nanoseconds) {
}) catch std.debug.panic("Failed to parse timestamp to instant: {d}", .{value});
const fmt = "Mon, 02 Jan 2006 15:04:05 GMT";
var buf = std.mem.zeroes([fmt.len]u8);
var buf: [fmt.len]u8 = undefined;
var fbs = std.io.fixedBufferStream(&buf);
instant.time().gofmt(fbs.writer(), fmt) catch std.debug.panic("Failed to format instant: {d}", .{instant.timestamp});
var fbs = std.Io.Writer.fixed(&buf);
instant.time().gofmt(&fbs, fmt) catch std.debug.panic("Failed to format instant: {d}", .{instant.timestamp});
try jw.write(&buf);
}

View file

@ -1772,12 +1772,12 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
.slice => {
switch (token) {
.ArrayBegin => {
var arraylist = std.ArrayList(ptrInfo.child).init(allocator);
var arraylist = std.ArrayList(ptrInfo.child){};
errdefer {
while (arraylist.pop()) |v| {
parseFree(ptrInfo.child, v, options);
}
arraylist.deinit();
arraylist.deinit(allocator);
}
while (true) {
@ -1787,11 +1787,11 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
else => {},
}
try arraylist.ensureTotalCapacity(arraylist.items.len + 1);
try arraylist.ensureTotalCapacity(allocator, arraylist.items.len + 1);
const v = try parseInternal(ptrInfo.child, tok, tokens, options);
arraylist.appendAssumeCapacity(v);
}
return arraylist.toOwnedSlice();
return arraylist.toOwnedSlice(allocator);
},
.String => |stringToken| {
if (ptrInfo.child != u8) return error.UnexpectedToken;
@ -1817,12 +1817,12 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
if (key_type == null) return error.UnexpectedToken;
const value_type = typeForField(ptrInfo.child, "value");
if (value_type == null) return error.UnexpectedToken;
var arraylist = std.ArrayList(ptrInfo.child).init(allocator);
var arraylist = std.ArrayList(ptrInfo.child){};
errdefer {
while (arraylist.pop()) |v| {
parseFree(ptrInfo.child, v, options);
}
arraylist.deinit();
arraylist.deinit(allocator);
}
while (true) {
const key = (try tokens.next()) orelse return error.UnexpectedEndOfJson;
@ -1831,13 +1831,13 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
else => {},
}
try arraylist.ensureTotalCapacity(arraylist.items.len + 1);
try arraylist.ensureTotalCapacity(allocator, arraylist.items.len + 1);
const key_val = try parseInternal(key_type.?, key, tokens, options);
const val = (try tokens.next()) orelse return error.UnexpectedEndOfJson;
const val_val = try parseInternal(value_type.?, val, tokens, options);
arraylist.appendAssumeCapacity(.{ .key = key_val, .value = val_val });
}
return arraylist.toOwnedSlice();
return arraylist.toOwnedSlice(allocator);
},
else => return error.UnexpectedToken,
}

File diff suppressed because it is too large Load diff

View file

@ -25,7 +25,7 @@ pub const Credentials = struct {
};
}
pub fn deinit(self: Self) void {
std.crypto.utils.secureZero(u8, self.secret_key);
std.crypto.secureZero(u8, self.secret_key);
self.allocator.free(self.secret_key);
self.allocator.free(self.access_key);
if (self.session_token) |t| self.allocator.free(t);

View file

@ -173,11 +173,12 @@ fn getContainerCredentials(allocator: std.mem.Allocator) !?auth.Credentials {
var cl = std.http.Client{ .allocator = allocator };
defer cl.deinit(); // I don't believe connection pooling would help much here as it's non-ssl and local
var resp_payload = std.ArrayList(u8).init(allocator);
defer resp_payload.deinit();
var aw: std.Io.Writer.Allocating = .init(allocator);
defer aw.deinit();
const response_payload = &aw.writer;
const req = try cl.fetch(.{
.location = .{ .url = container_uri },
.response_storage = .{ .dynamic = &resp_payload },
.response_writer = response_payload,
});
if (req.status != .ok and req.status != .not_found) {
log.warn("Bad status code received from container credentials endpoint: {}", .{@intFromEnum(req.status)});
@ -185,8 +186,8 @@ fn getContainerCredentials(allocator: std.mem.Allocator) !?auth.Credentials {
}
if (req.status == .not_found) return null;
log.debug("Read {d} bytes from container credentials endpoint", .{resp_payload.items.len});
if (resp_payload.items.len == 0) return null;
log.debug("Read {d} bytes from container credentials endpoint", .{aw.written().len});
if (aw.written().len == 0) return null;
const CredsResponse = struct {
AccessKeyId: []const u8,
@ -196,8 +197,8 @@ fn getContainerCredentials(allocator: std.mem.Allocator) !?auth.Credentials {
Token: []const u8,
};
const creds_response = blk: {
const res = std.json.parseFromSlice(CredsResponse, allocator, resp_payload.items, .{}) catch |e| {
log.err("Unexpected Json response from container credentials endpoint: {s}", .{resp_payload.items});
const res = std.json.parseFromSlice(CredsResponse, allocator, aw.written(), .{}) catch |e| {
log.err("Unexpected Json response from container credentials endpoint: {s}", .{aw.written()});
log.err("Error parsing json: {}", .{e});
if (@errorReturnTrace()) |trace| {
std.debug.dumpStackTrace(trace.*);
@ -224,26 +225,27 @@ fn getImdsv2Credentials(allocator: std.mem.Allocator) !?auth.Credentials {
defer cl.deinit(); // I don't believe connection pooling would help much here as it's non-ssl and local
// Get token
{
var resp_payload = std.ArrayList(u8).init(allocator);
defer resp_payload.deinit();
var aw: std.Io.Writer.Allocating = .init(allocator);
defer aw.deinit();
const response_payload = &aw.writer;
const req = try cl.fetch(.{
.method = .PUT,
.location = .{ .url = "http://169.254.169.254/latest/api/token" },
.extra_headers = &[_]std.http.Header{
.{ .name = "X-aws-ec2-metadata-token-ttl-seconds", .value = "21600" },
},
.response_storage = .{ .dynamic = &resp_payload },
.response_writer = response_payload,
});
if (req.status != .ok) {
log.warn("Bad status code received from IMDS v2: {}", .{@intFromEnum(req.status)});
return null;
}
if (resp_payload.items.len == 0) {
if (aw.written().len == 0) {
log.warn("Unexpected zero response from IMDS v2", .{});
return null;
}
token = try resp_payload.toOwnedSlice();
token = try aw.toOwnedSlice();
errdefer if (token) |t| allocator.free(t);
}
std.debug.assert(token != null);
@ -265,15 +267,16 @@ fn getImdsRoleName(allocator: std.mem.Allocator, client: *std.http.Client, imds_
// "InstanceProfileArn" : "arn:aws:iam::550620852718:instance-profile/ec2-dev",
// "InstanceProfileId" : "AIPAYAM4POHXCFNKZ7HU2"
// }
var resp_payload = std.ArrayList(u8).init(allocator);
defer resp_payload.deinit();
var aw: std.Io.Writer.Allocating = .init(allocator);
defer aw.deinit();
const response_payload = &aw.writer;
const req = try client.fetch(.{
.method = .GET,
.location = .{ .url = "http://169.254.169.254/latest/meta-data/iam/info" },
.extra_headers = &[_]std.http.Header{
.{ .name = "X-aws-ec2-metadata-token", .value = imds_token },
},
.response_storage = .{ .dynamic = &resp_payload },
.response_writer = response_payload,
});
if (req.status != .ok and req.status != .not_found) {
@ -281,7 +284,7 @@ fn getImdsRoleName(allocator: std.mem.Allocator, client: *std.http.Client, imds_
return null;
}
if (req.status == .not_found) return null;
if (resp_payload.items.len == 0) {
if (aw.written().len == 0) {
log.warn("Unexpected empty response from IMDS endpoint post token", .{});
return null;
}
@ -292,8 +295,8 @@ fn getImdsRoleName(allocator: std.mem.Allocator, client: *std.http.Client, imds_
InstanceProfileArn: []const u8,
InstanceProfileId: []const u8,
};
const imds_response = std.json.parseFromSlice(ImdsResponse, allocator, resp_payload.items, .{}) catch |e| {
log.err("Unexpected Json response from IMDS endpoint: {s}", .{resp_payload.items});
const imds_response = std.json.parseFromSlice(ImdsResponse, allocator, aw.written(), .{}) catch |e| {
log.err("Unexpected Json response from IMDS endpoint: {s}", .{aw.written()});
log.err("Error parsing json: {}", .{e});
if (@errorReturnTrace()) |trace| {
std.debug.dumpStackTrace(trace.*);
@ -315,15 +318,16 @@ fn getImdsRoleName(allocator: std.mem.Allocator, client: *std.http.Client, imds_
fn getImdsCredentials(allocator: std.mem.Allocator, client: *std.http.Client, role_name: []const u8, imds_token: []u8) !?auth.Credentials {
const url = try std.fmt.allocPrint(allocator, "http://169.254.169.254/latest/meta-data/iam/security-credentials/{s}/", .{role_name});
defer allocator.free(url);
var resp_payload = std.ArrayList(u8).init(allocator);
defer resp_payload.deinit();
var aw: std.Io.Writer.Allocating = .init(allocator);
defer aw.deinit();
const response_payload = &aw.writer;
const req = try client.fetch(.{
.method = .GET,
.location = .{ .url = url },
.extra_headers = &[_]std.http.Header{
.{ .name = "X-aws-ec2-metadata-token", .value = imds_token },
},
.response_storage = .{ .dynamic = &resp_payload },
.response_writer = response_payload,
});
if (req.status != .ok and req.status != .not_found) {
@ -331,7 +335,7 @@ fn getImdsCredentials(allocator: std.mem.Allocator, client: *std.http.Client, ro
return null;
}
if (req.status == .not_found) return null;
if (resp_payload.items.len == 0) {
if (aw.written().len == 0) {
log.warn("Unexpected empty response from IMDS role endpoint", .{});
return null;
}
@ -346,8 +350,8 @@ fn getImdsCredentials(allocator: std.mem.Allocator, client: *std.http.Client, ro
Token: []const u8,
Expiration: []const u8,
};
const imds_response = std.json.parseFromSlice(ImdsResponse, allocator, resp_payload.items, .{}) catch |e| {
log.err("Unexpected Json response from IMDS endpoint: {s}", .{resp_payload.items});
const imds_response = std.json.parseFromSlice(ImdsResponse, allocator, aw.written(), .{}) catch |e| {
log.err("Unexpected Json response from IMDS endpoint: {s}", .{aw.written()});
log.err("Error parsing json: {}", .{e});
if (@errorReturnTrace()) |trace| {
std.debug.dumpStackTrace(trace.*);

View file

@ -90,8 +90,37 @@ pub const Options = struct {
dualstack: bool = false,
sigv4_service_name: ?[]const u8 = null,
/// Used for testing to provide consistent signing. If null, will use current time
signing_time: ?i64 = null,
mock: ?Mock = null,
};
/// mocking methods for isolated testing
pub const Mock = struct {
/// Used to provide consistent signing
signing_time: ?i64,
/// context is designed to be a type-erased pointer (@intFromPtr)
context: usize = 0,
request_fn: *const fn (
usize,
std.http.Method,
std.Uri,
std.http.Client.RequestOptions,
) std.http.Client.RequestError!std.http.Client.Request,
send_body_complete: *const fn (usize, []u8) std.Io.Writer.Error!void,
receive_head: *const fn (usize) std.http.Client.Request.ReceiveHeadError!std.http.Client.Response,
reader_decompressing: *const fn (usize) *std.Io.Reader,
fn request(m: Mock, method: std.http.Method, uri: std.Uri, options: std.http.Client.RequestOptions) std.http.Client.RequestError!std.http.Client.Request {
return m.request_fn(m.context, method, uri, options);
}
fn sendBodyComplete(m: Mock, body: []u8) std.Io.Writer.Error!void {
return m.send_body_complete(m.context, body);
}
fn receiveHead(m: Mock) std.http.Client.Request.ReceiveHeadError!std.http.Client.Response {
return m.receive_head(m.context);
}
fn readerDecompressing(m: Mock) *std.Io.Reader {
return m.reader_decompressing(m.context);
}
};
pub const Header = std.http.Header;
@ -163,9 +192,9 @@ pub const AwsHttp = struct {
.region = getRegion(service, options.region),
.service = options.sigv4_service_name orelse service,
.credentials = creds,
.signing_time = options.signing_time,
.signing_time = if (options.mock) |m| m.signing_time else null,
};
return try self.makeRequest(endpoint, request, signing_config);
return try self.makeRequest(endpoint, request, signing_config, options);
}
/// makeRequest is a low level http/https function that can be used inside
@ -184,7 +213,13 @@ pub const AwsHttp = struct {
/// Content-Length: (length of body)
///
/// Return value is an HttpResult, which will need the caller to deinit().
pub fn makeRequest(self: Self, endpoint: EndPoint, request: HttpRequest, signing_config: ?signing.Config) !HttpResult {
pub fn makeRequest(
self: Self,
endpoint: EndPoint,
request: HttpRequest,
signing_config: ?signing.Config,
options: Options,
) !HttpResult {
var request_cp = request;
log.debug("Request Path: {s}", .{request_cp.path});
@ -199,8 +234,8 @@ pub const AwsHttp = struct {
// We will use endpoint instead
request_cp.path = endpoint.path;
var request_headers = std.ArrayList(std.http.Header).init(self.allocator);
defer request_headers.deinit();
var request_headers = std.ArrayList(std.http.Header){};
defer request_headers.deinit(self.allocator);
const len = try addHeaders(self.allocator, &request_headers, endpoint.host, request_cp.body, request_cp.content_type, request_cp.headers);
defer if (len) |l| self.allocator.free(l);
@ -213,10 +248,10 @@ pub const AwsHttp = struct {
}
}
var headers = std.ArrayList(std.http.Header).init(self.allocator);
defer headers.deinit();
var headers = std.ArrayList(std.http.Header){};
defer headers.deinit(self.allocator);
for (request_cp.headers) |header|
try headers.append(.{ .name = header.name, .value = header.value });
try headers.append(self.allocator, .{ .name = header.name, .value = header.value });
log.debug("All Request Headers:", .{});
for (headers.items) |h| {
log.debug("\t{s}: {s}", .{ h.name, h.value });
@ -228,18 +263,12 @@ pub const AwsHttp = struct {
// TODO: Fix this proxy stuff. This is all a kludge just to compile, but std.http.Client has it all built in now
var cl = std.http.Client{ .allocator = self.allocator, .https_proxy = if (self.proxy) |*p| @constCast(p) else null };
defer cl.deinit(); // TODO: Connection pooling
const method = std.meta.stringToEnum(std.http.Method, request_cp.method).?;
var server_header_buffer: [16 * 1024]u8 = undefined;
var resp_payload = std.ArrayList(u8).init(self.allocator);
defer resp_payload.deinit();
const req = try cl.fetch(.{
.server_header_buffer = &server_header_buffer,
.method = method,
.payload = if (request_cp.body.len > 0) request_cp.body else null,
.response_storage = .{ .dynamic = &resp_payload },
.raw_uri = true,
.location = .{ .url = url },
// Fetch API in 0.15.1 is insufficient as it does not provide
// server headers. We'll construct and send the request ourselves
const uri = try std.Uri.parse(url);
const req_options: std.http.Client.RequestOptions = .{
// we need full control over most headers. I wish libraries would do a
// better job of having default headers as an opt-in...
.headers = .{
@ -251,7 +280,13 @@ pub const AwsHttp = struct {
.content_type = .omit,
},
.extra_headers = headers.items,
});
};
var req = if (options.mock) |m|
try m.request(method, uri, req_options) // This will call the test harness
else
try cl.request(method, uri, req_options);
defer req.deinit();
// TODO: Need to test for payloads > 2^14. I believe one of our tests does this, but not sure
// if (request_cp.body.len > 0) {
// // Workaround for https://github.com/ziglang/zig/issues/15626
@ -266,33 +301,69 @@ pub const AwsHttp = struct {
// }
// try req.wait();
if (request_cp.body.len > 0) {
// This seems a bit silly, but we can't have a []const u8 here
// because when it sends, it's using a writer, and this becomes
// the buffer of the writer. It's conceivable that something
// in the chain then does actually modify the body of the request
// so we'll need to duplicate it here
const req_body = try self.allocator.dupe(u8, request_cp.body);
defer self.allocator.free(req_body); // docs for sendBodyComplete say it flushes, so no need to outlive this
if (options.mock) |m|
try m.sendBodyComplete(req_body)
else
try req.sendBodyComplete(req_body);
} else if (options.mock == null) try req.sendBodiless();
// if (options.mock == null) log.err("Request sent. Body len {d}, uri {f}", .{ request_cp.body.len, uri });
var response = if (options.mock) |m| try m.receiveHead() else try req.receiveHead(&.{});
// TODO: Timeout - is this now above us?
log.debug(
"Request Complete. Response code {d}: {?s}",
.{ @intFromEnum(req.status), req.status.phrase() },
.{ @intFromEnum(response.head.status), response.head.status.phrase() },
);
log.debug("Response headers:", .{});
var resp_headers = std.ArrayList(Header).init(
self.allocator,
);
defer resp_headers.deinit();
var it = std.http.HeaderIterator.init(server_header_buffer[0..]);
var resp_headers = std.ArrayList(Header){};
defer resp_headers.deinit(self.allocator);
var it = response.head.iterateHeaders();
while (it.next()) |h| { // even though we don't expect to fill the buffer,
// we don't get a length, but looks via stdlib source
// it should be ok to call next on the undefined memory
log.debug(" {s}: {s}", .{ h.name, h.value });
try resp_headers.append(.{
try resp_headers.append(self.allocator, .{
.name = try (self.allocator.dupe(u8, h.name)),
.value = try (self.allocator.dupe(u8, h.value)),
});
}
// This is directly lifted from fetch, as there is no function in
// 0.15.1 client to negotiate decompression
const decompress_buffer: []u8 = switch (response.head.content_encoding) {
.identity => &.{},
.zstd => try self.allocator.alloc(u8, std.compress.zstd.default_window_len),
.deflate, .gzip => try self.allocator.alloc(u8, std.compress.flate.max_window_len),
.compress => return error.UnsupportedCompressionMethod,
};
defer self.allocator.free(decompress_buffer);
log.debug("raw response body:\n{s}", .{resp_payload.items});
var transfer_buffer: [64]u8 = undefined;
var decompress: std.http.Decompress = undefined;
const reader = response.readerDecompressing(&transfer_buffer, &decompress, decompress_buffer);
// Not sure on optimal size here, but should definitely be > 0
var aw = try std.Io.Writer.Allocating.initCapacity(self.allocator, 128);
defer aw.deinit();
const response_writer = &aw.writer;
_ = reader.streamRemaining(response_writer) catch |err| switch (err) {
error.ReadFailed => return response.bodyErr().?,
else => |e| return e,
};
log.debug("raw response body:\n{s}", .{aw.written()});
const rc = HttpResult{
.response_code = @intFromEnum(req.status),
.body = try resp_payload.toOwnedSlice(),
.headers = try resp_headers.toOwnedSlice(),
.response_code = @intFromEnum(response.head.status),
.body = try aw.toOwnedSlice(),
.headers = try resp_headers.toOwnedSlice(self.allocator),
.allocator = self.allocator,
};
return rc;
@ -305,15 +376,21 @@ fn getRegion(service: []const u8, region: []const u8) []const u8 {
return region;
}
fn addHeaders(allocator: std.mem.Allocator, headers: *std.ArrayList(std.http.Header), host: []const u8, body: []const u8, content_type: []const u8, additional_headers: []const Header) !?[]const u8 {
// We don't need allocator and body because they were to add a
// Content-Length header. But that is being added by the client send()
// function, so we don't want it on the request twice. But I also feel
// pretty strongly that send() should be providing us control, because
// I think if we don't add it here, it won't get signed, and we would
// really prefer it to be signed. So, we will wait and watch for this
// situation to change in stdlib
_ = allocator;
fn addHeaders(
allocator: std.mem.Allocator,
headers: *std.ArrayList(std.http.Header),
host: []const u8,
body: []const u8,
content_type: []const u8,
additional_headers: []const Header,
) !?[]const u8 {
// We don't need body because they were to add a Content-Length header. But
// that is being added by the client send() function, so we don't want it
// on the request twice. But I also feel pretty strongly that send() should
// be providing us control, because I think if we don't add it here, it
// won't get signed, and we would really prefer it to be signed. So, we
// will wait and watch for this situation to change in stdlib
_ = body;
var has_content_type = false;
for (additional_headers) |h| {
@ -322,12 +399,12 @@ fn addHeaders(allocator: std.mem.Allocator, headers: *std.ArrayList(std.http.Hea
break;
}
}
try headers.append(.{ .name = "Accept", .value = "application/json" });
try headers.append(.{ .name = "Host", .value = host });
try headers.append(.{ .name = "User-Agent", .value = "zig-aws 1.0" });
try headers.append(allocator, .{ .name = "Accept", .value = "application/json" });
try headers.append(allocator, .{ .name = "Host", .value = host });
try headers.append(allocator, .{ .name = "User-Agent", .value = "zig-aws 1.0" });
if (!has_content_type)
try headers.append(.{ .name = "Content-Type", .value = content_type });
try headers.appendSlice(additional_headers);
try headers.append(allocator, .{ .name = "Content-Type", .value = content_type });
try headers.appendSlice(allocator, additional_headers);
return null;
}

View file

@ -157,7 +157,7 @@ pub const SigningError = error{
XAmzExpiresHeaderInRequest,
/// Used if the request headers already includes x-amz-region-set
XAmzRegionSetHeaderInRequest,
} || std.fmt.AllocPrintError;
} || error{OutOfMemory};
const forbidden_headers = .{
.{ .name = "x-amz-content-sha256", .err = SigningError.XAmzContentSha256HeaderInRequest },
@ -240,6 +240,10 @@ pub fn signRequest(allocator: std.mem.Allocator, request: base.Request, config:
// regardless of whether we're sticking the header on the request
std.debug.assert(config.signed_body_header == .none or
config.signed_body_header == .sha256);
log.debug(
"Request body len: {d}. First 5 bytes (max): {s}",
.{ request.body.len, request.body[0..@min(request.body.len, 5)] },
);
const payload_hash = try hash(allocator, request.body, .sha256);
if (config.signed_body_header == .sha256) {
// From the AWS nitro enclaves SDK, it appears that there is no reason
@ -312,12 +316,12 @@ pub fn signRequest(allocator: std.mem.Allocator, request: base.Request, config:
.name = "Authorization",
.value = try std.fmt.allocPrint(
allocator,
"AWS4-HMAC-SHA256 Credential={s}/{s}, SignedHeaders={s}, Signature={s}",
"AWS4-HMAC-SHA256 Credential={s}/{s}, SignedHeaders={s}, Signature={x}",
.{
config.credentials.access_key,
scope,
canonical_request.headers.signed_headers,
std.fmt.fmtSliceHexLower(signature),
signature,
},
),
};
@ -348,7 +352,7 @@ pub fn freeSignedRequest(allocator: std.mem.Allocator, request: *base.Request, c
pub const credentialsFn = *const fn ([]const u8) ?Credentials;
pub fn verifyServerRequest(allocator: std.mem.Allocator, request: *std.http.Server.Request, request_body_reader: anytype, credentials_fn: credentialsFn) !bool {
pub fn verifyServerRequest(allocator: std.mem.Allocator, request: *std.http.Server.Request, request_body_reader: *std.Io.Reader, credentials_fn: credentialsFn) !bool {
var unverified_request = try UnverifiedRequest.init(allocator, request);
defer unverified_request.deinit();
return verify(allocator, unverified_request, request_body_reader, credentials_fn);
@ -359,17 +363,19 @@ pub const UnverifiedRequest = struct {
target: []const u8,
method: std.http.Method,
allocator: std.mem.Allocator,
raw: *std.http.Server.Request,
pub fn init(allocator: std.mem.Allocator, request: *std.http.Server.Request) !UnverifiedRequest {
var al = std.ArrayList(std.http.Header).init(allocator);
defer al.deinit();
var al = std.ArrayList(std.http.Header){};
defer al.deinit(allocator);
var it = request.iterateHeaders();
while (it.next()) |h| try al.append(h);
while (it.next()) |h| try al.append(allocator, h);
return .{
.target = request.head.target,
.method = request.head.method,
.headers = try al.toOwnedSlice(),
.headers = try al.toOwnedSlice(allocator),
.allocator = allocator,
.raw = request,
};
}
@ -387,7 +393,7 @@ pub const UnverifiedRequest = struct {
}
};
pub fn verify(allocator: std.mem.Allocator, request: UnverifiedRequest, request_body_reader: anytype, credentials_fn: credentialsFn) !bool {
pub fn verify(allocator: std.mem.Allocator, request: UnverifiedRequest, request_body_reader: *std.Io.Reader, credentials_fn: credentialsFn) !bool {
var arena = std.heap.ArenaAllocator.init(allocator);
defer arena.deinit();
const aa = arena.allocator();
@ -420,10 +426,10 @@ pub fn verify(allocator: std.mem.Allocator, request: UnverifiedRequest, request_
return verifyParsedAuthorization(
aa,
request,
request_body_reader,
credential.?,
signed_headers.?,
signature.?,
request_body_reader,
credentials_fn,
);
}
@ -431,10 +437,10 @@ pub fn verify(allocator: std.mem.Allocator, request: UnverifiedRequest, request_
fn verifyParsedAuthorization(
allocator: std.mem.Allocator,
request: UnverifiedRequest,
request_body_reader: anytype,
credential: []const u8,
signed_headers: []const u8,
signature: []const u8,
request_body_reader: *std.Io.Reader,
credentials_fn: credentialsFn,
) !bool {
// AWS4-HMAC-SHA256
@ -494,7 +500,7 @@ fn verifyParsedAuthorization(
.content_type = request.getFirstHeaderValue("content-type").?,
};
signed_request.query = request.target[signed_request.path.len..]; // TODO: should this be +1? query here would include '?'
signed_request.body = try request_body_reader.readAllAlloc(allocator, std.math.maxInt(usize));
signed_request.body = try request_body_reader.allocRemaining(allocator, .unlimited);
defer allocator.free(signed_request.body);
signed_request = try signRequest(allocator, signed_request, config);
defer freeSignedRequest(allocator, &signed_request, config);
@ -545,7 +551,7 @@ fn getSigningKey(allocator: std.mem.Allocator, signing_date: []const u8, config:
defer {
// secureZero avoids compiler optimizations that may say
// "WTF are you doing this thing? Looks like nothing to me. It's silly and we will remove it"
std.crypto.utils.secureZero(u8, secret); // zero our copy of secret
std.crypto.secureZero(u8, secret); // zero our copy of secret
allocator.free(secret);
}
// log.debug("secret: {s}", .{secret});
@ -673,7 +679,7 @@ fn canonicalUri(allocator: std.mem.Allocator, path: []const u8, double_encode: b
fn encodeParamPart(allocator: std.mem.Allocator, path: []const u8) ![]const u8 {
const unreserved_marks = "-_.!~*'()";
var encoded = try std.ArrayList(u8).initCapacity(allocator, path.len);
defer encoded.deinit();
defer encoded.deinit(allocator);
for (path) |c| {
var should_encode = true;
for (unreserved_marks) |r|
@ -685,16 +691,16 @@ fn encodeParamPart(allocator: std.mem.Allocator, path: []const u8) ![]const u8 {
should_encode = false;
if (!should_encode) {
try encoded.append(c);
try encoded.append(allocator, c);
continue;
}
// Whatever remains, encode it
try encoded.append('%');
const hex = try std.fmt.allocPrint(allocator, "{s}", .{std.fmt.fmtSliceHexUpper(&[_]u8{c})});
try encoded.append(allocator, '%');
const hex = try std.fmt.allocPrint(allocator, "{X}", .{&[_]u8{c}});
defer allocator.free(hex);
try encoded.appendSlice(hex);
try encoded.appendSlice(allocator, hex);
}
return encoded.toOwnedSlice();
return encoded.toOwnedSlice(allocator);
}
// URI encode every byte except the unreserved characters:
@ -715,7 +721,7 @@ fn encodeUri(allocator: std.mem.Allocator, path: []const u8) ![]u8 {
const reserved_characters = ";,/?:@&=+$#";
const unreserved_marks = "-_.!~*'()";
var encoded = try std.ArrayList(u8).initCapacity(allocator, path.len);
defer encoded.deinit();
defer encoded.deinit(allocator);
// if (std.mem.startsWith(u8, path, "/2017-03-31/tags/arn")) {
// try encoded.appendSlice("/2017-03-31/tags/arn%25253Aaws%25253Alambda%25253Aus-west-2%25253A550620852718%25253Afunction%25253Aawsome-lambda-LambdaStackawsomeLambda");
// return encoded.toOwnedSlice();
@ -738,16 +744,16 @@ fn encodeUri(allocator: std.mem.Allocator, path: []const u8) ![]u8 {
should_encode = false;
if (!should_encode) {
try encoded.append(c);
try encoded.append(allocator, c);
continue;
}
// Whatever remains, encode it
try encoded.append('%');
const hex = try std.fmt.allocPrint(allocator, "{s}", .{std.fmt.fmtSliceHexUpper(&[_]u8{c})});
try encoded.append(allocator, '%');
const hex = try std.fmt.allocPrint(allocator, "{X}", .{&[_]u8{c}});
defer allocator.free(hex);
try encoded.appendSlice(hex);
try encoded.appendSlice(allocator, hex);
}
return encoded.toOwnedSlice();
return encoded.toOwnedSlice(allocator);
}
fn canonicalQueryString(allocator: std.mem.Allocator, path: []const u8) ![]const u8 {
@ -800,25 +806,25 @@ fn canonicalQueryString(allocator: std.mem.Allocator, path: []const u8) ![]const
// Split this by component
var portions = std.mem.splitScalar(u8, query, '&');
var sort_me = std.ArrayList([]const u8).init(allocator);
defer sort_me.deinit();
var sort_me = std.ArrayList([]const u8){};
defer sort_me.deinit(allocator);
while (portions.next()) |item|
try sort_me.append(item);
try sort_me.append(allocator, item);
std.sort.pdq([]const u8, sort_me.items, {}, lessThanBinary);
var normalized = try std.ArrayList(u8).initCapacity(allocator, path.len);
defer normalized.deinit();
defer normalized.deinit(allocator);
var first = true;
for (sort_me.items) |i| {
if (!first) try normalized.append('&');
if (!first) try normalized.append(allocator, '&');
first = false;
const first_equals = std.mem.indexOf(u8, i, "=");
if (first_equals == null) {
// Rare. This is "foo="
const normed_item = try encodeUri(allocator, i);
defer allocator.free(normed_item);
try normalized.appendSlice(i); // This should be encoded
try normalized.append('=');
try normalized.appendSlice(allocator, i); // This should be encoded
try normalized.append(allocator, '=');
continue;
}
@ -831,12 +837,12 @@ fn canonicalQueryString(allocator: std.mem.Allocator, path: []const u8) ![]const
// Double-encode any = in the value. But not anything else?
const weird_equals_in_value_thing = try replace(allocator, value, "%3D", "%253D");
defer allocator.free(weird_equals_in_value_thing);
try normalized.appendSlice(key);
try normalized.append('=');
try normalized.appendSlice(weird_equals_in_value_thing);
try normalized.appendSlice(allocator, key);
try normalized.append(allocator, '=');
try normalized.appendSlice(allocator, weird_equals_in_value_thing);
}
return normalized.toOwnedSlice();
return normalized.toOwnedSlice(allocator);
}
fn replace(allocator: std.mem.Allocator, haystack: []const u8, needle: []const u8, replacement_value: []const u8) ![]const u8 {
@ -875,7 +881,7 @@ fn canonicalHeaders(allocator: std.mem.Allocator, headers: []const std.http.Head
allocator.free(h.name);
allocator.free(h.value);
}
dest.deinit();
dest.deinit(allocator);
}
var total_len: usize = 0;
var total_name_len: usize = 0;
@ -905,15 +911,15 @@ fn canonicalHeaders(allocator: std.mem.Allocator, headers: []const std.http.Head
defer allocator.free(value);
const n = try std.ascii.allocLowerString(allocator, h.name);
const v = try std.fmt.allocPrint(allocator, "{s}", .{value});
try dest.append(.{ .name = n, .value = v });
try dest.append(allocator, .{ .name = n, .value = v });
}
std.sort.pdq(std.http.Header, dest.items, {}, lessThan);
var dest_str = try std.ArrayList(u8).initCapacity(allocator, total_len);
defer dest_str.deinit();
defer dest_str.deinit(allocator);
var signed_headers = try std.ArrayList(u8).initCapacity(allocator, total_name_len);
defer signed_headers.deinit();
defer signed_headers.deinit(allocator);
var first = true;
for (dest.items) |h| {
dest_str.appendSliceAssumeCapacity(h.name);
@ -926,8 +932,8 @@ fn canonicalHeaders(allocator: std.mem.Allocator, headers: []const std.http.Head
signed_headers.appendSliceAssumeCapacity(h.name);
}
return CanonicalHeaders{
.str = try dest_str.toOwnedSlice(),
.signed_headers = try signed_headers.toOwnedSlice(),
.str = try dest_str.toOwnedSlice(allocator),
.signed_headers = try signed_headers.toOwnedSlice(allocator),
};
}
@ -972,7 +978,7 @@ fn hash(allocator: std.mem.Allocator, payload: []const u8, sig_type: SignatureTy
};
var out: [std.crypto.hash.sha2.Sha256.digest_length]u8 = undefined;
std.crypto.hash.sha2.Sha256.hash(to_hash, &out, .{});
return try std.fmt.allocPrint(allocator, "{s}", .{std.fmt.fmtSliceHexLower(&out)});
return try std.fmt.allocPrint(allocator, "{x}", .{out});
}
// SignedHeaders + '\n' +
// HexEncode(Hash(RequestPayload))
@ -1010,13 +1016,13 @@ test "canonical query" {
test "canonical headers" {
const allocator = std.testing.allocator;
var headers = try std.ArrayList(std.http.Header).initCapacity(allocator, 5);
defer headers.deinit();
try headers.append(.{ .name = "Host", .value = "iam.amazonaws.com" });
try headers.append(.{ .name = "Content-Type", .value = "application/x-www-form-urlencoded; charset=utf-8" });
try headers.append(.{ .name = "User-Agent", .value = "This header should be skipped" });
try headers.append(.{ .name = "My-header1", .value = " a b c " });
try headers.append(.{ .name = "X-Amz-Date", .value = "20150830T123600Z" });
try headers.append(.{ .name = "My-header2", .value = " \"a b c\" " });
defer headers.deinit(allocator);
try headers.append(allocator, .{ .name = "Host", .value = "iam.amazonaws.com" });
try headers.append(allocator, .{ .name = "Content-Type", .value = "application/x-www-form-urlencoded; charset=utf-8" });
try headers.append(allocator, .{ .name = "User-Agent", .value = "This header should be skipped" });
try headers.append(allocator, .{ .name = "My-header1", .value = " a b c " });
try headers.append(allocator, .{ .name = "X-Amz-Date", .value = "20150830T123600Z" });
try headers.append(allocator, .{ .name = "My-header2", .value = " \"a b c\" " });
const expected =
\\content-type:application/x-www-form-urlencoded; charset=utf-8
\\host:iam.amazonaws.com
@ -1035,12 +1041,12 @@ test "canonical headers" {
test "canonical request" {
const allocator = std.testing.allocator;
var headers = try std.ArrayList(std.http.Header).initCapacity(allocator, 5);
defer headers.deinit();
try headers.append(.{ .name = "User-agent", .value = "c sdk v1.0" });
defer headers.deinit(allocator);
try headers.append(allocator, .{ .name = "User-agent", .value = "c sdk v1.0" });
// In contrast to AWS CRT (aws-c-auth), we add the date as part of the
// signing operation. They add it as part of the canonicalization
try headers.append(.{ .name = "X-Amz-Date", .value = "20150830T123600Z" });
try headers.append(.{ .name = "Host", .value = "example.amazonaws.com" });
try headers.append(allocator, .{ .name = "X-Amz-Date", .value = "20150830T123600Z" });
try headers.append(allocator, .{ .name = "Host", .value = "example.amazonaws.com" });
const req = base.Request{
.path = "/",
.method = "GET",
@ -1095,10 +1101,10 @@ test "can sign" {
const allocator = std.testing.allocator;
var headers = try std.ArrayList(std.http.Header).initCapacity(allocator, 5);
defer headers.deinit();
try headers.append(.{ .name = "Content-Type", .value = "application/x-www-form-urlencoded; charset=utf-8" });
try headers.append(.{ .name = "Content-Length", .value = "13" });
try headers.append(.{ .name = "Host", .value = "example.amazonaws.com" });
defer headers.deinit(allocator);
try headers.append(allocator, .{ .name = "Content-Type", .value = "application/x-www-form-urlencoded; charset=utf-8" });
try headers.append(allocator, .{ .name = "Content-Length", .value = "13" });
try headers.append(allocator, .{ .name = "Host", .value = "example.amazonaws.com" });
const req = base.Request{
.path = "/",
.query = "",
@ -1165,25 +1171,27 @@ test "can verify server request" {
"X-Amz-Date: 20230908T170252Z\r\n" ++
"x-amz-content-sha256: fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9\r\n" ++
"Authorization: AWS4-HMAC-SHA256 Credential=ACCESS/20230908/us-west-2/s3/aws4_request, SignedHeaders=accept;content-length;content-type;host;x-amz-content-sha256;x-amz-date;x-amz-storage-class, Signature=fcc43ce73a34c9bd1ddf17e8a435f46a859812822f944f9eeb2aabcd64b03523\r\n\r\nbar";
var read_buffer: [1024]u8 = undefined;
@memcpy(read_buffer[0..req.len], req);
var reader = std.Io.Reader.fixed(req);
var body_reader = std.Io.Reader.fixed("bar");
var server: std.http.Server = .{
.connection = undefined,
.state = .ready,
.read_buffer = &read_buffer,
.read_buffer_len = req.len,
.next_request_start = 0,
.out = undefined, // We're not sending a response here
.reader = .{
.in = &reader,
.interface = undefined,
.state = .received_head,
.max_head_len = req.len,
},
};
var request: std.http.Server.Request = .{
.server = &server,
.head_end = req.len - 3,
.head = try std.http.Server.Request.Head.parse(read_buffer[0 .. req.len - 3]),
.reader_state = undefined,
.head = try std.http.Server.Request.Head.parse(req),
.head_buffer = req,
};
// const old_level = std.testing.log_level;
// std.testing.log_level = .debug;
var fbs = std.io.fixedBufferStream("bar");
try std.testing.expect(try verifyServerRequest(allocator, &request, fbs.reader(), struct {
// defer std.testing.log_level = old_level;
try std.testing.expect(try verifyServerRequest(allocator, &request, &body_reader, struct {
cred: Credentials,
const Self = @This();
@ -1221,22 +1229,25 @@ test "can verify server request without x-amz-content-sha256" {
const req_data = head ++ body;
var read_buffer: [2048]u8 = undefined;
@memcpy(read_buffer[0..req_data.len], req_data);
var reader = std.Io.Reader.fixed(&read_buffer);
var body_reader = std.Io.Reader.fixed(body);
var server: std.http.Server = .{
.connection = undefined,
.state = .ready,
.read_buffer = &read_buffer,
.read_buffer_len = req_data.len,
.next_request_start = 0,
.out = undefined, // We're not sending a response here
.reader = .{
.interface = undefined,
.in = &reader,
.state = .received_head,
.max_head_len = 1024,
},
};
var request: std.http.Server.Request = .{
.server = &server,
.head_end = head.len,
.head = try std.http.Server.Request.Head.parse(read_buffer[0..head.len]),
.reader_state = undefined,
.head = try std.http.Server.Request.Head.parse(head),
.head_buffer = head,
};
{
var h = std.ArrayList(std.http.Header).init(allocator);
defer h.deinit();
var h = try std.ArrayList(std.http.Header).initCapacity(allocator, 4);
defer h.deinit(allocator);
const signed_headers = &[_][]const u8{ "content-type", "host", "x-amz-date", "x-amz-target" };
var it = request.iterateHeaders();
while (it.next()) |source| {
@ -1245,7 +1256,7 @@ test "can verify server request without x-amz-content-sha256" {
match = std.ascii.eqlIgnoreCase(s, source.name);
if (match) break;
}
if (match) try h.append(.{ .name = source.name, .value = source.value });
if (match) try h.append(allocator, .{ .name = source.name, .value = source.value });
}
const req = base.Request{
.path = "/",
@ -1282,9 +1293,7 @@ test "can verify server request without x-amz-content-sha256" {
}
{ // verification
var fis = std.io.fixedBufferStream(body[0..]);
try std.testing.expect(try verifyServerRequest(allocator, &request, fis.reader(), struct {
try std.testing.expect(try verifyServerRequest(allocator, &request, &body_reader, struct {
cred: Credentials,
const Self = @This();

1446
src/aws_test.zig Normal file

File diff suppressed because it is too large Load diff

View file

@ -1,47 +0,0 @@
const std = @import("std");
const expectEqualStrings = std.testing.expectEqualStrings;
pub fn snakeToCamel(allocator: std.mem.Allocator, name: []const u8) ![]u8 {
var utf8_name = (std.unicode.Utf8View.init(name) catch unreachable).iterator();
var target_inx: usize = 0;
var previous_ascii: u8 = 0;
var rc = try allocator.alloc(u8, name.len);
while (utf8_name.nextCodepoint()) |cp| {
if (cp > 0xff) return error.UnicodeNotSupported;
const ascii_char: u8 = @truncate(cp);
if (ascii_char != '_') {
if (previous_ascii == '_' and ascii_char >= 'a' and ascii_char <= 'z') {
const uppercase_char = ascii_char - ('a' - 'A');
rc[target_inx] = uppercase_char;
} else {
rc[target_inx] = ascii_char;
}
target_inx = target_inx + 1;
}
previous_ascii = ascii_char;
}
// Do we care if the allocator refuses resize?
_ = allocator.resize(rc, target_inx);
return rc[0..target_inx];
}
pub fn snakeToPascal(allocator: std.mem.Allocator, name: []const u8) ![]u8 {
const rc = try snakeToCamel(allocator, name);
if (rc[0] >= 'a' and rc[0] <= 'z') {
const uppercase_char = rc[0] - ('a' - 'A');
rc[0] = uppercase_char;
}
return rc;
}
test "converts from snake to camelCase" {
const allocator = std.testing.allocator;
const camel = try snakeToCamel(allocator, "access_key_id");
defer allocator.free(camel);
try expectEqualStrings("accessKeyId", camel);
}
test "single word" {
const allocator = std.testing.allocator;
const camel = try snakeToCamel(allocator, "word");
defer allocator.free(camel);
try expectEqualStrings("word", camel);
}

View file

@ -34,7 +34,8 @@ pub fn log(
// Print the message to stderr, silently ignoring any errors
std.debug.lockStdErr();
defer std.debug.unlockStdErr();
const stderr = std.io.getStdErr().writer();
var stderr_writer = std.fs.File.stderr().writer(&.{});
const stderr = &stderr_writer.interface;
nosuspend stderr.print(prefix ++ format ++ "\n", args) catch return;
}
@ -62,14 +63,14 @@ pub fn main() anyerror!void {
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
defer _ = gpa.deinit();
const allocator = gpa.allocator();
var tests = std.ArrayList(Tests).init(allocator);
defer tests.deinit();
var tests = try std.ArrayList(Tests).initCapacity(allocator, @typeInfo(Tests).@"enum".fields.len);
defer tests.deinit(allocator);
var args = try std.process.argsWithAllocator(allocator);
defer args.deinit();
const stdout_raw = std.io.getStdOut().writer();
var bw = std.io.bufferedWriter(stdout_raw);
defer bw.flush() catch unreachable;
const stdout = bw.writer();
var stdout_buf: [4096]u8 = undefined;
const stdout_raw = std.fs.File.stdout().writer(&stdout_buf);
var stdout = stdout_raw.interface;
defer stdout.flush() catch @panic("could not flush stdout");
var arg0: ?[]const u8 = null;
var proxy: ?std.http.Client.Proxy = null;
while (args.next()) |arg| {
@ -99,14 +100,14 @@ pub fn main() anyerror!void {
}
inline for (@typeInfo(Tests).@"enum".fields) |f| {
if (std.mem.eql(u8, f.name, arg)) {
try tests.append(@field(Tests, f.name));
try tests.append(allocator, @field(Tests, f.name));
break;
}
}
}
if (tests.items.len == 0) {
inline for (@typeInfo(Tests).@"enum".fields) |f|
try tests.append(@field(Tests, f.name));
try tests.append(allocator, @field(Tests, f.name));
}
std.log.info("Start\n", .{});
@ -193,7 +194,7 @@ pub fn main() anyerror!void {
const arn = func.function_arn.?;
// This is a bit ugly. Maybe a helper function in the library would help?
var tags = try std.ArrayList(aws.services.lambda.TagKeyValue).initCapacity(allocator, 1);
defer tags.deinit();
defer tags.deinit(allocator);
tags.appendAssumeCapacity(.{ .key = "Foo", .value = "Bar" });
const req = services.lambda.tag_resource.Request{ .resource = arn, .tags = tags.items };
const addtag = try aws.Request(services.lambda.tag_resource).call(req, options);
@ -262,7 +263,7 @@ pub fn main() anyerror!void {
defer result.deinit();
std.log.info("request id: {s}", .{result.response_metadata.request_id});
const list = result.response.key_group_list.?;
std.log.info("key group list max: {?d}", .{list.max_items});
std.log.info("key group list max: {d}", .{list.max_items});
std.log.info("key group quantity: {d}", .{list.quantity});
},
.rest_xml_work_with_s3 => {

View file

@ -14,7 +14,7 @@ pub fn Services(comptime service_imports: anytype) type {
.type = @TypeOf(import_field),
.default_value_ptr = &import_field,
.is_comptime = false,
.alignment = 0,
.alignment = std.meta.alignment(@TypeOf(import_field)),
};
}

View file

@ -11,7 +11,7 @@ pub const EncodingOptions = struct {
field_name_transformer: fieldNameTransformerFn = defaultTransformer,
};
pub fn encode(allocator: std.mem.Allocator, obj: anytype, writer: anytype, comptime options: EncodingOptions) !void {
pub fn encode(allocator: std.mem.Allocator, obj: anytype, writer: *std.Io.Writer, comptime options: EncodingOptions) !void {
_ = try encodeInternal(allocator, "", "", true, obj, writer, options);
}
@ -20,7 +20,7 @@ fn encodeStruct(
parent: []const u8,
first: bool,
obj: anytype,
writer: anytype,
writer: *std.Io.Writer,
comptime options: EncodingOptions,
) !bool {
var rc = first;
@ -41,7 +41,7 @@ pub fn encodeInternal(
field_name: []const u8,
first: bool,
obj: anytype,
writer: anytype,
writer: *std.Io.Writer,
comptime options: EncodingOptions,
) !bool {
// @compileLog(@typeName(@TypeOf(obj)));
@ -56,10 +56,19 @@ pub fn encodeInternal(
} else {
if (!first) _ = try writer.write("&");
// @compileLog(@typeInfo(@TypeOf(obj)));
if (ti.child == []const u8 or ti.child == u8)
try writer.print("{s}{s}={s}", .{ parent, field_name, obj })
else
try writer.print("{s}{s}={any}", .{ parent, field_name, obj });
switch (ti.child) {
// TODO: not sure this first one is valid. How should [][]const u8 be serialized here?
[]const u8 => {
// if (true) @panic("panic at the disco!");
std.log.warn(
"encoding object of type [][]const u8...pretty sure this is wrong {s}{s}={any}",
.{ parent, field_name, obj },
);
try writer.print("{s}{s}={any}", .{ parent, field_name, obj });
},
u8 => try writer.print("{s}{s}={s}", .{ parent, field_name, obj }),
else => try writer.print("{s}{s}={any}", .{ parent, field_name, obj }),
}
rc = false;
},
.@"struct" => if (std.mem.eql(u8, "", field_name)) {
@ -95,78 +104,29 @@ pub fn encodeInternal(
return rc;
}
fn testencode(allocator: std.mem.Allocator, expected: []const u8, value: anytype, comptime options: EncodingOptions) !void {
const ValidationWriter = struct {
const Self = @This();
pub const Writer = std.io.Writer(*Self, Error, write);
pub const Error = error{
TooMuchData,
DifferentData,
};
expected_remaining: []const u8,
fn init(exp: []const u8) Self {
return .{ .expected_remaining = exp };
}
pub fn writer(self: *Self) Writer {
return .{ .context = self };
}
fn write(self: *Self, bytes: []const u8) Error!usize {
// std.debug.print("{s}\n", .{bytes});
if (self.expected_remaining.len < bytes.len) {
std.log.warn(
\\====== expected this output: =========
\\{s}
\\======== instead found this: =========
\\{s}
\\======================================
, .{
self.expected_remaining,
bytes,
});
return error.TooMuchData;
}
if (!std.mem.eql(u8, self.expected_remaining[0..bytes.len], bytes)) {
std.log.warn(
\\====== expected this output: =========
\\{s}
\\======== instead found this: =========
\\{s}
\\======================================
, .{
self.expected_remaining[0..bytes.len],
bytes,
});
return error.DifferentData;
}
self.expected_remaining = self.expected_remaining[bytes.len..];
return bytes.len;
}
};
var vos = ValidationWriter.init(expected);
try encode(allocator, value, vos.writer(), options);
if (vos.expected_remaining.len > 0) return error.NotEnoughData;
}
test "can urlencode an object" {
try testencode(
const expected = "Action=GetCallerIdentity&Version=2021-01-01";
var aw = std.Io.Writer.Allocating.init(std.testing.allocator);
defer aw.deinit();
try encode(
std.testing.allocator,
"Action=GetCallerIdentity&Version=2021-01-01",
.{ .Action = "GetCallerIdentity", .Version = "2021-01-01" },
&aw.writer,
.{},
);
try std.testing.expectEqualStrings(expected, aw.written());
}
test "can urlencode an object with integer" {
try testencode(
const expected = "Action=GetCallerIdentity&Duration=32";
var aw = std.Io.Writer.Allocating.init(std.testing.allocator);
defer aw.deinit();
try encode(
std.testing.allocator,
"Action=GetCallerIdentity&Duration=32",
.{ .Action = "GetCallerIdentity", .Duration = 32 },
&aw.writer,
.{},
);
try std.testing.expectEqualStrings(expected, aw.written());
}
const UnsetValues = struct {
action: ?[]const u8 = null,
@ -175,30 +135,28 @@ const UnsetValues = struct {
val2: ?[]const u8 = null,
};
test "can urlencode an object with unset values" {
// var buffer = std.ArrayList(u8).init(std.testing.allocator);
// defer buffer.deinit();
// const writer = buffer.writer();
// try encode(
// std.testing.allocator,
// UnsetValues{ .action = "GetCallerIdentity", .duration = 32 },
// writer,
// .{},
// );
// std.debug.print("\n\nEncoded as '{s}'\n", .{buffer.items});
try testencode(
const expected = "action=GetCallerIdentity&duration=32";
var aw = std.Io.Writer.Allocating.init(std.testing.allocator);
defer aw.deinit();
try encode(
std.testing.allocator,
"action=GetCallerIdentity&duration=32",
UnsetValues{ .action = "GetCallerIdentity", .duration = 32 },
&aw.writer,
.{},
);
try std.testing.expectEqualStrings(expected, aw.written());
}
test "can urlencode a complex object" {
try testencode(
const expected = "Action=GetCallerIdentity&Version=2021-01-01&complex.innermember=foo";
var aw = std.Io.Writer.Allocating.init(std.testing.allocator);
defer aw.deinit();
try encode(
std.testing.allocator,
"Action=GetCallerIdentity&Version=2021-01-01&complex.innermember=foo",
.{ .Action = "GetCallerIdentity", .Version = "2021-01-01", .complex = .{ .innermember = "foo" } },
&aw.writer,
.{},
);
try std.testing.expectEqualStrings(expected, aw.written());
}
const Filter = struct {
@ -221,26 +179,28 @@ const Request: type = struct {
all_regions: ?bool = null,
};
test "can urlencode an EC2 Filter" {
// TODO: Fix this encoding...
testencode(
// TODO: This is a strange test, mainly to document current behavior
// EC2 filters are supposed to be something like
// Filter.Name=foo&Filter.Values=bar or, when there is more, something like
// Filter.1.Name=instance-type&Filter.1.Value.1=m1.small&Filter.1.Value.2=m1.large&Filter.2.Name=block-device-mapping.status&Filter.2.Value.1=attached
//
// This looks like a real PITA, so until it is actually needed, this is
// a placeholder test to track what actual encoding is happening. This
// changed between zig 0.14.x and 0.15.1, and I'm not entirely sure why
// yet, but because the remaining functionality is fine, we're going with
// this
const zig_14x_expected = "filters={ url.Filter{ .name = { 102, 111, 111 }, .values = { { ... } } } }";
_ = zig_14x_expected;
const expected = "filters={ .{ .name = { 102, 111, 111 }, .values = { { ... } } } }";
var aw = std.Io.Writer.Allocating.init(std.testing.allocator);
defer aw.deinit();
try encode(
std.testing.allocator,
"filters={ url.Filter{ .name = { 102, 111, 111 }, .values = { { ... } } } }",
Request{
.filters = @constCast(&[_]Filter{.{ .name = "foo", .values = @constCast(&[_][]const u8{"bar"}) }}),
},
&aw.writer,
.{},
) catch |err| {
var al = std.ArrayList(u8).init(std.testing.allocator);
defer al.deinit();
try encode(
std.testing.allocator,
Request{
.filters = @constCast(&[_]Filter{.{ .name = "foo", .values = @constCast(&[_][]const u8{"bar"}) }}),
},
al.writer(),
.{},
);
std.log.warn("Error found. Full encoding is '{s}'", .{al.items});
return err;
};
);
try std.testing.expectEqualStrings(expected, aw.written());
}

View file

@ -26,12 +26,14 @@ pub const Element = struct {
attributes: AttributeList,
children: ContentList,
next_sibling: ?*Element = null,
allocator: std.mem.Allocator,
fn init(tag: []const u8, alloc: Allocator) Element {
return .{
.tag = tag,
.attributes = AttributeList.init(alloc),
.children = ContentList.init(alloc),
.attributes = AttributeList{},
.children = ContentList{},
.allocator = alloc,
};
}
@ -454,7 +456,7 @@ fn tryParseElement(ctx: *ParseContext, alloc: Allocator, parent: ?*Element) !?*E
while (ctx.eatWs()) {
const attr = (try tryParseAttr(ctx, alloc)) orelse break;
try element.attributes.append(attr);
try element.attributes.append(element.allocator, attr);
}
if (ctx.eatStr("/>")) {
@ -471,7 +473,7 @@ fn tryParseElement(ctx: *ParseContext, alloc: Allocator, parent: ?*Element) !?*E
}
const content = try parseContent(ctx, alloc, element);
try element.children.append(content);
try element.children.append(element.allocator, content);
}
const closing_tag = try parseNameNoDupe(ctx);

View file

@ -53,7 +53,7 @@ pub const XmlSerializeError = error{
pub fn stringify(
value: anytype,
options: StringifyOptions,
writer: anytype,
writer: *std.Io.Writer,
) !void {
// Write XML declaration if requested
if (options.include_declaration)
@ -62,9 +62,9 @@ pub fn stringify(
// Start serialization with the root element
const root_name = options.root_name;
if (@typeInfo(@TypeOf(value)) != .optional or value == null)
try serializeValue(value, root_name, options, writer.any(), 0)
try serializeValue(value, root_name, options, writer, 0)
else
try serializeValue(value.?, root_name, options, writer.any(), 0);
try serializeValue(value.?, root_name, options, writer, 0);
}
/// Serializes a value to XML and returns an allocated string
@ -73,10 +73,10 @@ pub fn stringifyAlloc(
value: anytype,
options: StringifyOptions,
) ![]u8 {
var list = std.ArrayList(u8).init(allocator);
errdefer list.deinit();
var list = std.Io.Writer.Allocating.init(allocator);
defer list.deinit();
try stringify(value, options, list.writer());
try stringify(value, options, &list.writer);
return list.toOwnedSlice();
}
@ -85,7 +85,7 @@ fn serializeValue(
value: anytype,
element_name: ?[]const u8,
options: StringifyOptions,
writer: anytype,
writer: *std.Io.Writer,
depth: usize,
) !void {
const T = @TypeOf(value);
@ -274,7 +274,7 @@ fn serializeValue(
try writeClose(writer, element_name);
}
fn writeClose(writer: anytype, element_name: ?[]const u8) !void {
fn writeClose(writer: *std.Io.Writer, element_name: ?[]const u8) !void {
// Close element tag
if (element_name) |n| {
try writer.writeAll("</");
@ -284,7 +284,7 @@ fn writeClose(writer: anytype, element_name: ?[]const u8) !void {
}
/// Writes indentation based on depth and indent level
fn writeIndent(writer: anytype, depth: usize, whitespace: StringifyOptions.Whitespace) @TypeOf(writer).Error!void {
fn writeIndent(writer: *std.Io.Writer, depth: usize, whitespace: StringifyOptions.Whitespace) std.Io.Writer.Error!void {
var char: u8 = ' ';
const n_chars = switch (whitespace) {
.minified => return,
@ -298,16 +298,16 @@ fn writeIndent(writer: anytype, depth: usize, whitespace: StringifyOptions.White
break :blk depth;
},
};
try writer.writeByteNTimes(char, n_chars);
try writer.splatBytesAll(&.{char}, n_chars);
}
fn serializeString(
writer: anytype,
writer: *std.Io.Writer,
element_name: ?[]const u8,
value: []const u8,
options: StringifyOptions,
depth: usize,
) @TypeOf(writer).Error!void {
) error{ WriteFailed, OutOfMemory }!void {
if (options.emit_strings_as_arrays) {
// if (true) return error.seestackrun;
for (value) |c| {
@ -333,7 +333,7 @@ fn serializeString(
try escapeString(writer, value);
}
/// Escapes special characters in XML strings
fn escapeString(writer: anytype, value: []const u8) @TypeOf(writer).Error!void {
fn escapeString(writer: *std.Io.Writer, value: []const u8) std.Io.Writer.Error!void {
for (value) |c| {
switch (c) {
'&' => try writer.writeAll("&amp;"),
@ -413,7 +413,8 @@ test "stringify basic types" {
{
const result = try stringifyAlloc(allocator, 3.14, .{});
defer allocator.free(result);
try testing.expectEqualStrings("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<root>3.14e0</root>", result);
// zig 0.14.x outputs 3.14e0, but zig 0.15.1 outputs 3.14. Either *should* be acceptable
try testing.expectEqualStrings("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<root>3.14</root>", result);
}
// Test string

View file

@ -381,14 +381,17 @@ fn parseInternal(comptime T: type, element: *xml.Element, options: ParseOptions)
log.debug("type = {s}, style = {s}, ptr_info.child == {s}, element = {s}", .{ @typeName(T), @tagName(array_style), @typeName(ptr_info.child), element.tag });
var children = std.ArrayList(ptr_info.child).init(allocator);
defer children.deinit();
var children = std.ArrayList(ptr_info.child){};
defer children.deinit(allocator);
switch (array_style) {
.collection => {
var iterator = element.elements();
while (iterator.next()) |child_element| {
try children.append(try parseInternal(ptr_info.child, child_element, options));
try children.append(
allocator,
try parseInternal(ptr_info.child, child_element, options),
);
}
},
.repeated_root => {
@ -396,12 +399,15 @@ fn parseInternal(comptime T: type, element: *xml.Element, options: ParseOptions)
while (current) |el| : (current = el.next_sibling) {
if (!std.mem.eql(u8, el.tag, element.tag)) continue;
try children.append(try parseInternal(ptr_info.child, el, options));
try children.append(
allocator,
try parseInternal(ptr_info.child, el, options),
);
}
},
}
return children.toOwnedSlice();
return children.toOwnedSlice(allocator);
}
return try allocator.dupe(u8, element.children.items[0].CharData);
},

3
zlint.json Normal file
View file

@ -0,0 +1,3 @@
{
"ignore": ["lib/json/src/json.zig"]
}