Compare commits

...
Sign in to create a new pull request.

17 commits

Author SHA1 Message Date
0171cfc1f1
zls now has 0.16.0 support
All checks were successful
AWS-Zig Build / build-zig-amd64-host (push) Successful in 8m16s
2026-04-17 09:06:15 -07:00
7e6b360610
address "failed command" on codegen from within zig build verbose mode 2026-04-17 09:05:33 -07:00
242d4b5f44
update example 2026-04-16 17:28:44 -07:00
d697df6fd6
zlint does not support async properly, skip the file 2026-04-16 17:10:59 -07:00
9ac1052e01
remove legacy Makefile 2026-04-16 17:10:17 -07:00
2fddf4f122
update master branch to zig 0.16.0
Some checks failed
AWS-Zig Build / build-zig-amd64-host (push) Failing after 18m30s
2026-04-16 16:35:14 -07:00
abd422edb7
changes for zig 0.16.0
Some checks failed
aws-zig nightly build / build-zig-nightly (push) Failing after 23m34s
2026-04-16 16:22:48 -07:00
a3d0718e7b
Merge branch 'master' into zig-develop 2026-04-16 09:30:43 -07:00
1cff425ff6
update example dependency
All checks were successful
aws-zig nightly build / build-zig-nightly (push) Successful in 14m2s
2025-11-25 16:12:43 -08:00
9b870aa969
replace usages of @Type
Some checks failed
aws-zig nightly build / build-zig-nightly (push) Failing after 6m14s
2025-11-25 15:02:11 -08:00
b9a18d30b4
revert workarounds for zig issue 25811
All checks were successful
aws-zig nightly build / build-zig-nightly (push) Successful in 8m4s
2025-11-15 11:10:57 -08:00
f15887b550
temporary force nightly to home server
Some checks failed
aws-zig nightly build / build-zig-nightly (push) Failing after 25m28s
On the home server, git.lerch.org will resolve to
an A record (split-horizon DNS). This works around
https://github.com/ziglang/zig/issues/25811.
2025-11-06 13:09:25 -08:00
aec39b2103
update example for std.Io interface
Some checks failed
aws-zig nightly build / build-zig-nightly (push) Failing after 37s
2025-11-06 12:27:59 -08:00
d400e50a9c
temporarily switch to github (believe this is workaround for https://github.com/ziglang/zig/issues/25811)
Some checks failed
aws-zig nightly build / build-zig-nightly (push) Failing after 6m9s
2025-11-06 12:16:37 -08:00
3f5d9d9542
updates for PR 25592 (std.Io interface) and 25706 (remove Oracle Solaris)
Some checks failed
aws-zig nightly build / build-zig-nightly (push) Failing after 26s
2025-11-06 12:05:49 -08:00
ef74739b9b
update dependency in example
All checks were successful
aws-zig nightly build / build-zig-nightly (push) Successful in 6m15s
2025-10-02 08:38:36 -07:00
10a0e0ab99
adjust stack trace changes in zig 0.16.0-dev.565+f50c64797
Some checks failed
aws-zig nightly build / build-zig-nightly (push) Failing after 20m56s
2025-10-02 08:15:38 -07:00
28 changed files with 470 additions and 438 deletions

View file

@ -3,7 +3,7 @@ on:
workflow_dispatch:
push:
branches:
- 'zig-0.14.x'
- 'zig-0.15.x'
env:
ACTIONS_RUNTIME_TOKEN: ${{ secrets.GITHUB_TOKEN }}
ACTIONS_RUNTIME_URL: ${{ env.GITHUB_SERVER_URL }}/api/actions_pipeline/
@ -18,11 +18,9 @@ jobs:
- name: Check out repository code
uses: actions/checkout@v4
with:
ref: zig-0.14.x
ref: zig-0.15.x
- name: Setup Zig
uses: https://codeberg.org/mlugg/setup-zig@v2.2.1
with:
version: 0.14.0
- name: Run smoke test
run: zig build smoke-test --verbose
- name: Run full tests

1
.gitignore vendored
View file

@ -12,3 +12,4 @@ src/git_version.zig
zig-out
core
.zig-cache
zig-pkg/

View file

@ -1,5 +1,5 @@
[tools]
prek = "0.3.1"
"ubi:DonIsaac/zlint" = "0.7.9"
zig = "0.15.2"
zls = "0.15.1"
zig = "0.16.0"
zls = "0.16.0"

View file

@ -1,15 +0,0 @@
start-hand-test: src/main.zig src/aws.zig src/xml.zig
@zig build-exe -static -I/usr/local/include -Isrc/ -lc --strip \
--name start-hand-test src/main.zig src/bitfield-workaround.c \
/usr/local/lib64/libaws-c-*.a \
/usr/local/lib64/libs2n.a \
/usr/local/lib/libcrypto.a \
/usr/local/lib/libssl.a
elasticurl: curl.c
@zig build-exe -static -I/usr/local/include -Isrc/ -lc --strip \
--name elasticurl curl.c \
/usr/local/lib64/libaws-c-*.a \
/usr/local/lib64/libs2n.a \
/usr/local/lib/libcrypto.a \
/usr/local/lib/libssl.a

View file

@ -1,17 +1,17 @@
AWS SDK for Zig
===============
[Zig 0.15.1](https://ziglang.org/download/#release-0.15.1):
[Zig 0.16.0](https://ziglang.org/download/#release-0.16.0):
[![Build Status: Zig 0.15.1](https://git.lerch.org/lobo/aws-sdk-for-zig/actions/workflows/build.yaml/badge.svg)](https://git.lerch.org/lobo/aws-sdk-for-zig/actions?workflow=build.yaml&state=closed)
[![Build Status: Zig 0.16.0](https://git.lerch.org/lobo/aws-sdk-for-zig/actions/workflows/build.yaml/badge.svg)](https://git.lerch.org/lobo/aws-sdk-for-zig/actions?workflow=build.yaml&state=closed)
[Nightly Zig](https://ziglang.org/download/):
[![Build Status: Zig Nightly](https://git.lerch.org/lobo/aws-sdk-for-zig/actions/workflows/zig-nightly.yaml/badge.svg)](https://git.lerch.org/lobo/aws-sdk-for-zig/actions?workflow=zig-nightly.yaml&state=closed)
[Zig 0.14.1](https://ziglang.org/download/#release-0.14.1):
[Zig 0.15.2](https://ziglang.org/download/#release-0.15.2):
[![Build Status: Zig 0.14.x](https://git.lerch.org/lobo/aws-sdk-for-zig/actions/workflows/zig-previous.yaml/badge.svg)](https://git.lerch.org/lobo/aws-sdk-for-zig/actions?workflow=zig-previous.yaml&state=closed)
[![Build Status: Zig 0.15.2](https://git.lerch.org/lobo/aws-sdk-for-zig/actions/workflows/zig-previous.yaml/badge.svg)](https://git.lerch.org/lobo/aws-sdk-for-zig/actions?workflow=zig-previous.yaml&state=closed)
Current executable size for the demo is 980k after compiling with -Doptimize=ReleaseSmall
in x86_64-linux, and will vary based on services used. Tested targets:
@ -34,7 +34,7 @@ Branches
a new zig release appears. Expect significant delays in any
build failures (PRs always welcome!).
* **master**: This branch tracks the latest released zig version
* **zig-0.14.x**: This branch tracks the 0.14/0.14.1 released zig versions.
* **zig-0.15.x**: This branch tracks the 0.15.2 released zig version.
Support for these previous versions is best effort, generally
degrading over time. Fixes will generally appear in master, then
backported into the previous version.

View file

@ -47,7 +47,7 @@ pub fn build(b: *Builder) !void {
.target = target,
.optimize = optimize,
});
configure(mod_exe, dep_mods, true);
configure(mod_exe, dep_mods);
const exe = b.addExecutable(.{
.name = "demo",
@ -72,7 +72,7 @@ pub fn build(b: *Builder) !void {
.target = b.graph.host,
.optimize = if (b.verbose) .Debug else .ReleaseSafe,
});
configure(cg_mod, dep_mods, false);
configure(cg_mod, dep_mods);
const cg_exe = b.addExecutable(.{
.name = "codegen",
@ -133,7 +133,7 @@ pub fn build(b: *Builder) !void {
// consuming build.zig files to be able to use the SDK at build time for
// things like code deployments, e.g. https://git.lerch.org/lobo/lambda-zig
const has_pre_generated =
if (b.build_root.handle.access("src/models/service_manifest.zig", .{})) true else |_| false;
if (b.build_root.handle.access(b.graph.io, "src/models/service_manifest.zig", .{})) true else |_| false;
// Only depend on codegen if we don't have pre-generated models
if (!has_pre_generated)
@ -150,7 +150,7 @@ pub fn build(b: *Builder) !void {
.target = target,
.optimize = optimize,
});
configure(service_manifest_module, dep_mods, true);
configure(service_manifest_module, dep_mods);
mod_exe.addImport("service_manifest", service_manifest_module);
@ -161,13 +161,13 @@ pub fn build(b: *Builder) !void {
.optimize = optimize,
});
mod_aws.addImport("service_manifest", service_manifest_module);
configure(mod_aws, dep_mods, true);
configure(mod_aws, dep_mods);
// Expose module to others
const mod_aws_signing = b.addModule("aws-signing", .{
.root_source_file = b.path("src/aws_signing.zig"),
});
configure(mod_aws_signing, dep_mods, false);
configure(mod_aws_signing, dep_mods);
// Similar to creating the run step earlier, this exposes a `test` step to
// the `zig build --help` menu, providing a way for the user to request
@ -197,7 +197,7 @@ pub fn build(b: *Builder) !void {
.optimize = optimize,
});
mod_unit_tests.addImport("service_manifest", service_manifest_module);
configure(mod_unit_tests, dep_mods, true);
configure(mod_unit_tests, dep_mods);
// Creates a step for unit testing. This only builds the test executable
// but does not run it.
@ -250,12 +250,11 @@ pub fn build(b: *Builder) !void {
package.dependOn(&pkg_step.step);
}
fn configure(compile: *std.Build.Module, modules: std.StringHashMap(*std.Build.Module), include_time: bool) void {
fn configure(compile: *std.Build.Module, modules: std.StringHashMap(*std.Build.Module)) void {
compile.addImport("smithy", modules.get("smithy").?);
compile.addImport("date", modules.get("date").?);
compile.addImport("json", modules.get("json").?);
compile.addImport("case", modules.get("case").?);
if (include_time) compile.addImport("zeit", modules.get("zeit").?);
}
fn getDependencyModules(b: *std.Build, args: anytype) !std.StringHashMap(*std.Build.Module) {
@ -266,10 +265,6 @@ fn getDependencyModules(b: *std.Build, args: anytype) !std.StringHashMap(*std.Bu
const mod_smithy = dep_smithy.module("smithy");
try result.putNoClobber("smithy", mod_smithy);
const dep_zeit = b.dependency("zeit", args);
const mod_zeit = dep_zeit.module("zeit");
try result.putNoClobber("zeit", mod_zeit);
const dep_case = b.dependency("case", args);
const mod_case = dep_case.module("case");
try result.putNoClobber("case", mod_case);
@ -329,6 +324,7 @@ const PackageStep = struct {
_ = options;
const self: *PackageStep = @fieldParentPtr("step", step);
const b = step.owner;
const io = b.graph.io;
// Get the path to generated models
const models_path = self.cg_output_dir.getPath2(b, &self.step);
@ -336,17 +332,17 @@ const PackageStep = struct {
// Create output directory for packaging
const package_dir = b.pathJoin(&.{ "zig-out", "package" });
const models_dest_dir = b.pathJoin(&.{ package_dir, "src", "models" });
std.fs.cwd().makePath(models_dest_dir) catch |err| {
std.Io.Dir.cwd().createDirPath(io, models_dest_dir) catch |err| {
return step.fail("Failed to create package directory: {}", .{err});
};
// Copy all source files to package directory
for (package_files) |file_name|
copyFile(b, b.build_root.handle, file_name, package_dir) catch {};
copyFile(io, b, b.build_root.handle, file_name, package_dir) catch {};
// Copy directories
for (package_dirs) |dir_name|
copyDirRecursive(b, b.build_root.handle, dir_name, package_dir) catch |err| {
copyDirRecursive(io, b, b.build_root.handle, dir_name, package_dir) catch |err| {
return step.fail("Failed to copy directory '{s}': {}", .{ dir_name, err });
};
@ -358,24 +354,24 @@ const PackageStep = struct {
step.result_cached = false;
}
fn copyFile(b: *std.Build, src_dir: std.fs.Dir, file_path: []const u8, dest_prefix: []const u8) !void {
fn copyFile(io: std.Io, b: *std.Build, src_dir: std.Io.Dir, file_path: []const u8, dest_prefix: []const u8) !void {
const dest_path = b.pathJoin(&.{ dest_prefix, file_path });
// Ensure parent directory exists
if (std.fs.path.dirname(dest_path)) |parent|
std.fs.cwd().makePath(parent) catch {};
std.Io.Dir.cwd().createDirPath(io, parent) catch {};
src_dir.copyFile(file_path, std.fs.cwd(), dest_path, .{}) catch return;
src_dir.copyFile(file_path, std.Io.Dir.cwd(), dest_path, io, .{}) catch return;
}
fn copyDirRecursive(b: *std.Build, src_base: std.fs.Dir, dir_path: []const u8, dest_prefix: []const u8) !void {
var src_dir = src_base.openDir(dir_path, .{ .iterate = true }) catch return;
defer src_dir.close();
fn copyDirRecursive(io: std.Io, b: *std.Build, src_base: std.Io.Dir, dir_path: []const u8, dest_prefix: []const u8) !void {
var src_dir = src_base.openDir(io, dir_path, .{ .iterate = true }) catch return;
defer src_dir.close(io);
var walker = try src_dir.walk(b.allocator);
defer walker.deinit();
while (try walker.next()) |entry| {
while (try walker.next(io)) |entry| {
// Skip zig build artifact directories
if (std.mem.indexOf(u8, entry.path, "zig-out") != null or
std.mem.indexOf(u8, entry.path, ".zig-cache") != null or
@ -386,22 +382,22 @@ const PackageStep = struct {
const dest_path = b.pathJoin(&.{ dest_prefix, dir_path, entry.path });
switch (entry.kind) {
.directory => std.fs.cwd().makePath(dest_path) catch {},
.directory => std.Io.Dir.cwd().createDirPath(io, dest_path) catch {},
.file => {
// Ensure parent directory exists
if (std.fs.path.dirname(dest_path)) |parent| {
std.fs.cwd().makePath(parent) catch {};
std.Io.Dir.cwd().createDirPath(io, parent) catch {};
}
src_base.copyFile(src_path, std.fs.cwd(), dest_path, .{}) catch {};
src_base.copyFile(src_path, std.Io.Dir.cwd(), dest_path, io, .{}) catch {};
},
.sym_link => {
var link_buf: [std.fs.max_path_bytes]u8 = undefined;
const link_target = entry.dir.readLink(entry.basename, &link_buf) catch continue;
const link_target = entry.dir.readLink(io, entry.basename, &link_buf) catch continue;
// Ensure parent directory exists
if (std.fs.path.dirname(dest_path)) |parent| {
std.fs.cwd().makePath(parent) catch {};
std.Io.Dir.cwd().createDirPath(io, parent) catch {};
}
std.fs.cwd().symLink(link_target, dest_path, .{}) catch {};
std.Io.Dir.cwd().symLink(io, link_buf[0..link_target], dest_path, .{}) catch {};
},
else => {},
}
@ -409,16 +405,17 @@ const PackageStep = struct {
}
fn copyGeneratedModels(b: *std.Build, models_path: []const u8, models_dest_dir: []const u8) !void {
var models_dir = std.fs.cwd().openDir(models_path, .{ .iterate = true }) catch
const io = b.graph.io;
var models_dir = std.Io.Dir.cwd().openDir(io, models_path, .{ .iterate = true }) catch
return error.ModelsNotFound;
defer models_dir.close();
defer models_dir.close(io);
var iter = models_dir.iterate();
while (try iter.next()) |entry| {
while (try iter.next(io)) |entry| {
if (entry.kind != .file) continue;
const dest_path = b.pathJoin(&.{ models_dest_dir, entry.name });
models_dir.copyFile(entry.name, std.fs.cwd(), dest_path, .{}) catch continue;
models_dir.copyFile(entry.name, std.Io.Dir.cwd(), dest_path, io, .{}) catch continue;
}
}
};

View file

@ -11,7 +11,7 @@
"README.md",
"LICENSE",
},
.minimum_zig_version = "0.15.1",
.minimum_zig_version = "0.16.0",
.dependencies = .{
.smithy = .{
@ -22,10 +22,6 @@
.url = "https://github.com/aws/aws-sdk-go-v2/archive/refs/tags/release-2025-05-05.tar.gz",
.hash = "N-V-__8AAKWdeiawujEcrfukQbb8lLAiQIRT0uG5gCcm4b7W",
},
.zeit = .{
.url = "git+https://github.com/rockorager/zeit?ref=zig-0.15#ed2ca60db118414bda2b12df2039e33bad3b0b88",
.hash = "zeit-0.6.0-5I6bk0J9AgCVa0nnyL0lNY9Xa9F68hHq-ZarhuXNV-Jb",
},
.date = .{
.path = "lib/date",
},
@ -33,8 +29,8 @@
.path = "lib/json",
},
.case = .{
.url = "git+https://github.com/travisstaloch/case.git#f8003fe5f93b65f673d10d41323e347225e8cb87",
.hash = "case-0.0.1-chGYqx_EAADaGJjmoln5M1iMBDTrMdd8to5wdEVpfXm4",
.url = "git+https://github.com/elerch/case?ref=zig-0.16.0#82017a92e179031f21896d02262ae1e216459e4f",
.hash = "case-0.0.1-chGYq1fEAAAN4h3YRmkh9OTDvuUyjmoXr6PhYAUzjU0D",
},
},
}

View file

@ -39,7 +39,7 @@ pub fn indent(self: @This()) GenerationState {
pub fn deindent(self: @This()) GenerationState {
var new_state = self.clone();
new_state.indent_level = @max(0, new_state.indent_level - 1);
new_state.indent_level = if (new_state.indent_level == 0) 0 else new_state.indent_level - 1;
return new_state;
}

View file

@ -8,7 +8,7 @@ pub const HashedFile = struct {
hash: [Hash.digest_length]u8,
failure: Error!void,
const Error = std.fs.File.OpenError || std.fs.File.ReadError || std.fs.File.StatError;
const Error = std.Io.File.OpenError || std.Io.File.ReadStreamingError || std.Io.File.StatError;
fn lessThan(context: void, lhs: *const HashedFile, rhs: *const HashedFile) bool {
_ = context;
@ -76,13 +76,13 @@ pub fn hex64(x: u64) [16]u8 {
return result;
}
pub const walkerFn = *const fn (std.fs.Dir.Walker.Entry) bool;
pub const walkerFn = *const fn (std.Io.Dir.Walker.Entry) bool;
fn included(entry: std.fs.Dir.Walker.Entry) bool {
fn included(entry: std.Io.Dir.Walker.Entry) bool {
_ = entry;
return true;
}
fn excluded(entry: std.fs.Dir.Walker.Entry) bool {
fn excluded(entry: std.Io.Dir.Walker.Entry) bool {
_ = entry;
return false;
}
@ -94,33 +94,33 @@ pub const ComputeDirectoryOptions = struct {
};
pub fn computeDirectoryHash(
thread_pool: *std.Thread.Pool,
dir: std.fs.Dir,
allocator: std.mem.Allocator,
io: std.Io,
dir: std.Io.Dir,
options: *ComputeDirectoryOptions,
) ![Hash.digest_length]u8 {
const gpa = thread_pool.allocator;
// We'll use an arena allocator for the path name strings since they all
// need to be in memory for sorting.
var arena_instance = std.heap.ArenaAllocator.init(gpa);
var arena_instance = std.heap.ArenaAllocator.init(allocator);
defer arena_instance.deinit();
const arena = arena_instance.allocator();
// Collect all files, recursively, then sort.
// Normally we're looking at around 300 model files
var all_files = try std.ArrayList(*HashedFile).initCapacity(gpa, 300);
defer all_files.deinit(gpa);
var all_files = try std.ArrayList(*HashedFile).initCapacity(allocator, 300);
defer all_files.deinit(allocator);
var walker = try dir.walk(gpa);
var walker = try dir.walk(allocator);
defer walker.deinit();
{
// The final hash will be a hash of each file hashed independently. This
// allows hashing in parallel.
var wait_group: std.Thread.WaitGroup = .{};
defer wait_group.wait();
var g: std.Io.Group = .init;
errdefer g.cancel(io);
while (try walker.next()) |entry| {
while (try walker.next(io)) |entry| {
switch (entry.kind) {
.directory => continue,
.file => {},
@ -128,7 +128,7 @@ pub fn computeDirectoryHash(
}
if (options.isExcluded(entry) or !options.isIncluded(entry))
continue;
const alloc = if (options.needFileHashes) gpa else arena;
const alloc = if (options.needFileHashes) allocator else arena;
const hashed_file = try alloc.create(HashedFile);
const fs_path = try alloc.dupe(u8, entry.path);
hashed_file.* = .{
@ -137,11 +137,11 @@ pub fn computeDirectoryHash(
.hash = undefined, // to be populated by the worker
.failure = undefined, // to be populated by the worker
};
wait_group.start();
try thread_pool.spawn(workerHashFile, .{ dir, hashed_file, &wait_group });
g.async(io, workerHashFile, .{ io, dir, hashed_file, &g });
try all_files.append(gpa, hashed_file);
try all_files.append(allocator, hashed_file);
}
try g.await(io);
}
std.mem.sort(*HashedFile, all_files.items, {}, HashedFile.lessThan);
@ -156,23 +156,26 @@ pub fn computeDirectoryHash(
hasher.update(&hashed_file.hash);
}
if (any_failures) return error.DirectoryHashUnavailable;
if (options.needFileHashes) options.fileHashes = try all_files.toOwnedSlice(gpa);
if (options.needFileHashes) options.fileHashes = try all_files.toOwnedSlice(allocator);
return hasher.finalResult();
}
fn workerHashFile(dir: std.fs.Dir, hashed_file: *HashedFile, wg: *std.Thread.WaitGroup) void {
defer wg.finish();
hashed_file.failure = hashFileFallible(dir, hashed_file);
fn workerHashFile(io: std.Io, dir: std.Io.Dir, hashed_file: *HashedFile, wg: *std.Io.Group) void {
_ = wg; // assume here that 0.16.0 Io.Group no longer needs to be notified at the time of completion
hashed_file.failure = hashFileFallible(io, dir, hashed_file);
}
fn hashFileFallible(dir: std.fs.Dir, hashed_file: *HashedFile) HashedFile.Error!void {
fn hashFileFallible(io: std.Io, dir: std.Io.Dir, hashed_file: *HashedFile) HashedFile.Error!void {
var buf: [8000]u8 = undefined;
var file = try dir.openFile(hashed_file.fs_path, .{});
defer file.close();
var file = try dir.openFile(io, hashed_file.fs_path, .{});
defer file.close(io);
var hasher = Hash.init(.{});
hasher.update(hashed_file.normalized_path);
hasher.update(&.{ 0, @intFromBool(try isExecutable(file)) });
hasher.update(&.{ 0, @intFromBool(try isExecutable(io, file)) });
while (true) {
const bytes_read = try file.read(&buf);
const bytes_read = file.readStreaming(io, &.{&buf}) catch |err| switch (err) {
error.EndOfStream => break,
else => return err,
};
if (bytes_read == 0) break;
hasher.update(buf[0..bytes_read]);
}
@ -197,7 +200,7 @@ fn normalizePath(arena: std.mem.Allocator, fs_path: []const u8) ![]const u8 {
return normalized;
}
fn isExecutable(file: std.fs.File) !bool {
fn isExecutable(io: std.Io, file: std.Io.File) !bool {
if (builtin.os.tag == .windows) {
// TODO check the ACL on Windows.
// Until this is implemented, this could be a false negative on
@ -205,7 +208,7 @@ fn isExecutable(file: std.fs.File) !bool {
// when unpacking the tarball.
return false;
} else {
const stat = try file.stat();
return (stat.mode & std.posix.S.IXUSR) != 0;
const stat = try file.stat(io);
return stat.kind == .file and (stat.permissions.toMode() & std.posix.S.IXUSR != 0);
}
}

View file

@ -25,23 +25,21 @@ const next_version = std.SemanticVersion.parse(next_version_str) catch unreachab
const zig_version = @import("builtin").zig_version;
const is_next = zig_version.order(next_version) == .eq or zig_version.order(next_version) == .gt;
pub fn main() anyerror!void {
const root_progress_node = std.Progress.start(.{});
pub fn main(init: std.process.Init) anyerror!void {
const io = init.io;
const root_progress_node = std.Progress.start(io, .{});
defer root_progress_node.end();
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
const allocator = arena.allocator();
const allocator = init.arena.allocator();
const args = try std.process.argsAlloc(allocator);
defer std.process.argsFree(allocator, args);
var stdout_writer = std.fs.File.stdout().writer(&.{});
const args = try init.minimal.args.toSlice(allocator);
var stdout_writer = std.Io.File.stdout().writer(io, &.{});
const stdout = &stdout_writer.interface;
var output_dir = std.fs.cwd();
defer if (output_dir.fd > 0) output_dir.close();
var models_dir: ?std.fs.Dir = null;
defer if (models_dir) |*m| m.close();
var output_dir = std.Io.Dir.cwd();
defer if (output_dir.handle > 0) output_dir.close(io);
var models_dir: ?std.Io.Dir = null;
defer if (models_dir) |*m| m.close(io);
for (args, 0..) |arg, i| {
if (std.mem.eql(u8, "--help", arg) or
std.mem.eql(u8, "-h", arg))
@ -51,36 +49,40 @@ pub fn main() anyerror!void {
try stdout.print(" --output specifies an output directory, otherwise the current working directory will be used\n", .{});
std.process.exit(0);
}
if (std.mem.eql(u8, "--output", arg))
output_dir = try output_dir.makeOpenPath(args[i + 1], .{});
if (std.mem.eql(u8, "--output", arg)) {
try output_dir.createDirPath(io, args[i + 1]);
output_dir = try std.Io.Dir.cwd().openDir(io, args[i + 1], .{ .iterate = true });
}
if (std.mem.eql(u8, "--models", arg))
models_dir = try std.fs.cwd().openDir(args[i + 1], .{ .iterate = true });
models_dir = try std.Io.Dir.cwd().openDir(io, args[i + 1], .{ .iterate = true });
}
var manifest_file = try output_dir.createFile("service_manifest.zig", .{});
defer manifest_file.close();
var manifest = manifest_file.writer(&manifest_buf).interface;
defer manifest.flush() catch @panic("Could not flush service manifest");
var files_processed: usize = 0;
var skip_next = true;
for (args) |arg| {
if (skip_next) {
skip_next = false;
continue;
}
if (std.mem.eql(u8, "--verbose", arg)) {
verbose = true;
continue;
}
{
var manifest_file = try output_dir.createFile(io, "service_manifest.zig", .{});
defer manifest_file.close(io);
var manifest = manifest_file.writer(io, &manifest_buf).interface;
defer manifest.flush() catch @panic("Could not flush service manifest");
var skip_next = true;
for (args) |arg| {
if (skip_next) {
skip_next = false;
continue;
}
if (std.mem.eql(u8, "--verbose", arg)) {
verbose = true;
continue;
}
if (std.mem.eql(u8, "--models", arg) or
std.mem.eql(u8, "--output", arg))
{
skip_next = true;
continue;
if (std.mem.eql(u8, "--models", arg) or
std.mem.eql(u8, "--output", arg))
{
skip_next = true;
continue;
}
try processFile(io, arg, output_dir, &manifest);
files_processed += 1;
}
try processFile(arg, output_dir, &manifest);
files_processed += 1;
}
if (files_processed == 0) {
// no files specified, look for json files in models directory or cwd
@ -88,21 +90,29 @@ pub fn main() anyerror!void {
// can be made
if (models_dir) |m| {
var cwd = try std.fs.cwd().openDir(".", .{});
defer cwd.close();
defer cwd.setAsCwd() catch unreachable;
var cwd = try std.Io.Dir.cwd().openDir(io, ".", .{});
defer cwd.close(io);
defer std.process.setCurrentDir(io, cwd) catch unreachable;
try m.setAsCwd();
try processDirectories(m, output_dir, &root_progress_node);
try std.process.setCurrentDir(io, m);
try processDirectories(io, m, output_dir, &root_progress_node);
}
}
if (args.len == 0)
_ = try generateServices(allocator, ";", std.fs.File.stdin(), stdout);
_ = try generateServices(allocator, io, ";", std.Io.File.stdin(), stdout);
if (verbose) {
const output_path = try output_dir.realpathAlloc(allocator, ".");
const output_path = try output_dir.realPathFileAlloc(io, ".", allocator);
// Build system suppresses stdout, we have to send this to stderr
std.debug.print("Output path: {s}\n", .{output_path});
std.debug.print(
\\Note: if this is run from within zig build, output from verbose mode will
\\ trigger zig to say 'failed command'. This program has succeeded,
\\ and the message from the build system will not affect actual processing
\\ of the build. It is simply indicative of the build runner detecting
\\ output
, .{});
}
}
@ -110,25 +120,23 @@ const OutputManifest = struct {
model_dir_hash_digest: [Hasher.hex_multihash_len]u8,
output_dir_hash_digest: [Hasher.hex_multihash_len]u8,
};
fn processDirectories(models_dir: std.fs.Dir, output_dir: std.fs.Dir, parent_progress: *const std.Progress.Node) !void {
fn processDirectories(io: std.Io, models_dir: std.Io.Dir, output_dir: std.Io.Dir, parent_progress: *const std.Progress.Node) !void {
// Let's get ready to hash!!
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
const allocator = arena.allocator();
var thread_pool: std.Thread.Pool = undefined;
try thread_pool.init(.{ .allocator = allocator });
defer thread_pool.deinit();
const count, var calculated_manifest =
try calculateDigests(
allocator,
io,
models_dir,
output_dir,
&thread_pool,
);
const output_stored_manifest = if (is_next)
output_dir.readFileAlloc("output_manifest.json", allocator, .unlimited) catch null
output_dir.readFileAlloc(io, "output_manifest.json", allocator, .unlimited) catch null
else
output_dir.readFileAlloc(allocator, "output_manifest.json", std.math.maxInt(usize)) catch null;
output_dir.readFileAlloc(io, allocator, "output_manifest.json", std.math.maxInt(usize)) catch null;
if (output_stored_manifest) |o| {
// we have a stored manifest. Parse it and compare to our calculations
// we can leak as we're using an arena allocator
@ -143,43 +151,43 @@ fn processDirectories(models_dir: std.fs.Dir, output_dir: std.fs.Dir, parent_pro
}
}
// Do this in a brain dead fashion from here, no optimization
const manifest_file = try output_dir.createFile("service_manifest.zig", .{});
defer manifest_file.close();
var manifest = manifest_file.writer(&manifest_buf);
const manifest_file = try output_dir.createFile(io, "service_manifest.zig", .{});
defer manifest_file.close(io);
var manifest = manifest_file.writer(io, &manifest_buf);
defer manifest.interface.flush() catch @panic("Error flushing service_manifest.zig");
var mi = models_dir.iterate();
const generating_models_progress = parent_progress.start("generating models", count);
defer generating_models_progress.end();
while (try mi.next()) |e| {
while (try mi.next(io)) |e| {
if ((e.kind == .file or e.kind == .sym_link) and std.mem.endsWith(u8, e.name, ".json")) {
try processFile(e.name, output_dir, &manifest.interface);
try processFile(io, e.name, output_dir, &manifest.interface);
generating_models_progress.completeOne();
}
}
// re-calculate so we can store the manifest
model_digest = calculated_manifest.model_dir_hash_digest;
_, calculated_manifest = try calculateDigests(models_dir, output_dir, &thread_pool);
_, calculated_manifest = try calculateDigests(allocator, io, models_dir, output_dir);
const data = try std.fmt.allocPrint(allocator, "{f}", .{std.json.fmt(calculated_manifest, .{ .whitespace = .indent_2 })});
try output_dir.writeFile(.{ .sub_path = "output_manifest.json", .data = data });
try output_dir.writeFile(io, .{ .sub_path = "output_manifest.json", .data = data });
}
var model_digest: ?[Hasher.hex_multihash_len]u8 = null;
fn calculateDigests(models_dir: std.fs.Dir, output_dir: std.fs.Dir, thread_pool: *std.Thread.Pool) !struct { usize, OutputManifest } {
fn calculateDigests(allocator: std.mem.Allocator, io: std.Io, models_dir: std.Io.Dir, output_dir: std.Io.Dir) !struct { usize, OutputManifest } {
const Include = struct {
threadlocal var count: usize = 0;
pub fn include(entry: std.fs.Dir.Walker.Entry) bool {
pub fn include(entry: std.Io.Dir.Walker.Entry) bool {
const included = std.mem.endsWith(u8, entry.basename, ".json");
if (included) count += 1;
return included;
}
};
const model_hash = if (model_digest) |m| m[0..Hasher.digest_len].* else try Hasher.computeDirectoryHash(thread_pool, models_dir, @constCast(&Hasher.ComputeDirectoryOptions{
const model_hash = if (model_digest) |m| m[0..Hasher.digest_len].* else try Hasher.computeDirectoryHash(allocator, io, models_dir, @constCast(&Hasher.ComputeDirectoryOptions{
.isIncluded = Include.include,
.isExcluded = struct {
pub fn exclude(entry: std.fs.Dir.Walker.Entry) bool {
pub fn exclude(entry: std.Io.Dir.Walker.Entry) bool {
_ = entry;
return false;
}
@ -188,14 +196,18 @@ fn calculateDigests(models_dir: std.fs.Dir, output_dir: std.fs.Dir, thread_pool:
}));
if (verbose) std.log.info("Model directory hash: {s}", .{model_digest orelse Hasher.hexDigest(model_hash)});
const output_hash = try Hasher.computeDirectoryHash(thread_pool, try output_dir.openDir(".", .{ .iterate = true }), @constCast(&Hasher.ComputeDirectoryOptions{
const output_hash = try Hasher.computeDirectoryHash(allocator, io, try output_dir.openDir(
io,
".",
.{ .iterate = true },
), @constCast(&Hasher.ComputeDirectoryOptions{
.isIncluded = struct {
pub fn include(entry: std.fs.Dir.Walker.Entry) bool {
pub fn include(entry: std.Io.Dir.Walker.Entry) bool {
return std.mem.endsWith(u8, entry.basename, ".zig");
}
}.include,
.isExcluded = struct {
pub fn exclude(entry: std.fs.Dir.Walker.Entry) bool {
pub fn exclude(entry: std.Io.Dir.Walker.Entry) bool {
_ = entry;
return false;
}
@ -204,13 +216,14 @@ fn calculateDigests(models_dir: std.fs.Dir, output_dir: std.fs.Dir, thread_pool:
}));
if (verbose) std.log.info("Output directory hash: {s}", .{Hasher.hexDigest(output_hash)});
return .{
Include.count, .{
Include.count,
.{
.model_dir_hash_digest = model_digest orelse Hasher.hexDigest(model_hash),
.output_dir_hash_digest = Hasher.hexDigest(output_hash),
},
};
}
fn processFile(file_name: []const u8, output_dir: std.fs.Dir, manifest: *std.Io.Writer) !void {
fn processFile(io: std.Io, file_name: []const u8, output_dir: std.Io.Dir, manifest: *std.Io.Writer) !void {
// It's probably best to create our own allocator here so we can deint at the end and
// toss all allocations related to the services in this file
// I can't guarantee we're not leaking something, and at the end of the
@ -228,7 +241,6 @@ fn processFile(file_name: []const u8, output_dir: std.fs.Dir, manifest: *std.Io.
_ = try writer.write("const smithy = @import(\"smithy\");\n");
_ = try writer.write("const json = @import(\"json\");\n");
_ = try writer.write("const date = @import(\"date\");\n");
_ = try writer.write("const zeit = @import(\"zeit\");\n");
_ = try writer.write("\n");
_ = try writer.write("const serializeMap = json.serializeMap;\n");
_ = try writer.write("\n");
@ -237,6 +249,7 @@ fn processFile(file_name: []const u8, output_dir: std.fs.Dir, manifest: *std.Io.
const service_names = generateServicesForFilePath(
allocator,
io,
";",
file_name,
writer,
@ -267,9 +280,9 @@ fn processFile(file_name: []const u8, output_dir: std.fs.Dir, manifest: *std.Io.
const formatted = try zigFmt(allocator, unformatted);
// Dump our buffer out to disk
var file = try output_dir.createFile(output_file_name, .{ .truncate = true });
defer file.close();
try file.writeAll(formatted);
var file = try output_dir.createFile(io, output_file_name, .{ .truncate = true });
defer file.close(io);
try file.writeStreamingAll(io, formatted);
for (service_names) |name| {
try manifest.print("pub const {s} = @import(\"{s}\");\n", .{ name, std.fs.path.basename(output_file_name) });
@ -288,13 +301,14 @@ fn zigFmt(allocator: std.mem.Allocator, buffer: [:0]const u8) ![]const u8 {
fn generateServicesForFilePath(
allocator: std.mem.Allocator,
io: std.Io,
comptime terminator: []const u8,
path: []const u8,
writer: *std.Io.Writer,
) ![][]const u8 {
const file = try std.fs.cwd().openFile(path, .{});
defer file.close();
return try generateServices(allocator, terminator, file, writer);
const file = try std.Io.Dir.cwd().openFile(io, path, .{});
defer file.close(io);
return try generateServices(allocator, io, terminator, file, writer);
}
fn addReference(id: []const u8, map: *std.StringHashMap(u64)) !void {
@ -396,12 +410,13 @@ fn countReferences(
fn generateServices(
allocator: std.mem.Allocator,
io: std.Io,
comptime _: []const u8,
file: std.fs.File,
file: std.Io.File,
writer: *std.Io.Writer,
) ![][]const u8 {
var fbuf: [1024]u8 = undefined;
var freader = file.reader(&fbuf);
var freader = file.reader(io, &fbuf);
var reader = &freader.interface;
const json = try reader.allocRemaining(allocator, .limited(1024 * 1024 * 1024));
defer allocator.free(json);
@ -422,14 +437,14 @@ fn generateServices(
// a reference count in case there are recursive data structures
var shape_references = std.StringHashMap(u64).init(allocator);
defer shape_references.deinit();
var stack: std.ArrayList([]const u8) = .{};
var stack: std.ArrayList([]const u8) = .empty;
defer stack.deinit(allocator);
for (services.items) |service|
try countReferences(allocator, service, shapes, &shape_references, &stack);
var constant_names = try std.ArrayList([]const u8).initCapacity(allocator, services.items.len);
defer constant_names.deinit(allocator);
var unresolved: std.ArrayList(smithy.ShapeInfo) = .{};
var unresolved: std.ArrayList(smithy.ShapeInfo) = .empty;
defer unresolved.deinit(allocator);
var generated = std.StringHashMap(void).init(allocator);
defer generated.deinit();
@ -519,7 +534,7 @@ fn generateAdditionalTypes(allocator: std.mem.Allocator, file_state: FileGenerat
while (file_state.additional_types_to_generate.pop()) |t| {
if (file_state.additional_types_generated.getEntry(t.name) != null) continue;
// std.log.info("\t\t{s}", .{t.name});
var type_stack: std.ArrayList(*const smithy.ShapeInfo) = .{};
var type_stack: std.ArrayList(*const smithy.ShapeInfo) = .empty;
defer type_stack.deinit(allocator);
const state = GenerationState{
.type_stack = &type_stack,
@ -570,7 +585,7 @@ fn generateOperation(allocator: std.mem.Allocator, operation: smithy.ShapeInfo,
const snake_case_name = try support.constantName(allocator, operation.name, .snake);
defer allocator.free(snake_case_name);
var type_stack: std.ArrayList(*const smithy.ShapeInfo) = .{};
var type_stack: std.ArrayList(*const smithy.ShapeInfo) = .empty;
defer type_stack.deinit(allocator);
const state = GenerationState{
.type_stack = &type_stack,
@ -578,8 +593,7 @@ fn generateOperation(allocator: std.mem.Allocator, operation: smithy.ShapeInfo,
.allocator = allocator,
.indent_level = 1,
};
var child_state = state;
child_state.indent_level += 1;
const child_state = state.indent();
// indent should start at 4 spaces here
const operation_name = avoidReserved(snake_case_name);
@ -680,17 +694,17 @@ fn generateMetadataFunction(operation_name: []const u8, state: GenerationState,
// }
// We want to add a short "get my parents" function into the response
var child_state = state;
child_state.indent_level += 1;
child_state = child_state.indent();
try outputIndent(child_state, writer);
_ = try writer.write("pub fn metaInfo() struct { ");
try writer.print("service_metadata: @TypeOf(service_metadata), action: @TypeOf({s})", .{operation_name});
_ = try writer.write(" } {\n");
child_state.indent_level += 1;
child_state = child_state.indent();
try outputIndent(child_state, writer);
_ = try writer.write("return .{ .service_metadata = service_metadata, ");
try writer.print(".action = {s}", .{operation_name});
_ = try writer.write(" };\n");
child_state.indent_level -= 1;
child_state = child_state.deindent();
try outputIndent(child_state, writer);
_ = try writer.write("}\n");
try outputIndent(state, writer);
@ -866,8 +880,7 @@ fn generateMapTypeFor(map: anytype, writer: *std.Io.Writer, state: GenerationSta
try writer.writeAll("pub const is_map_type = true;\n\n");
var child_state = state;
child_state.indent_level += 1;
const child_state = state.indent();
_ = try writer.write("key: ");
_ = try generateTypeFor(map.key, writer, child_state, options.endStructure(true));
@ -916,8 +929,7 @@ fn generateComplexTypeFor(shape_id: []const u8, members: []smithy.TypeMember, ty
// prolog. We'll rely on caller to get the spacing correct here
_ = try writer.write(type_type_name);
_ = try writer.write(" {\n");
var child_state = state;
child_state.indent_level += 1;
const child_state = state.indent();
var payload: ?[]const u8 = null;
for (members) |member| {
// This is our mapping
@ -1004,8 +1016,7 @@ fn generateComplexTypeFor(shape_id: []const u8, members: []smithy.TypeMember, ty
try writer.writeByte('\n');
try outputIndent(child_state, writer);
_ = try writer.write("pub fn fieldNameFor(_: @This(), comptime field_name: []const u8) []const u8 {\n");
var grandchild_state = child_state;
grandchild_state.indent_level += 1;
const grandchild_state = child_state.indent();
// We need to force output here becaseu we're referencing the field in the return statement below
try writeMappings(grandchild_state, "", "mappings", field_name_mappings, true, writer);
try outputIndent(grandchild_state, writer);
@ -1031,8 +1042,7 @@ fn writeMappings(
}
try writer.print("{s}const {s} = .", .{ @"pub", mapping_name });
_ = try writer.write("{\n");
var child_state = state;
child_state.indent_level += 1;
const child_state = state.indent();
for (mappings.items) |mapping| {
try outputIndent(child_state, writer);
try writer.print(".{s} = \"{s}\",\n", .{ avoidReserved(mapping.snake), mapping.original });

View file

@ -87,7 +87,7 @@ fn getJsonMembers(allocator: Allocator, shape: Shape, state: GenerationState) !?
return null;
}
var json_members = std.ArrayListUnmanaged(JsonMember){};
var json_members: std.ArrayListUnmanaged(JsonMember) = .empty;
var iter = hash_map.iterator();
while (iter.next()) |kvp| {

View file

@ -6,8 +6,8 @@
.dependencies = .{
.aws = .{
.url = "git+https://git.lerch.org/lobo/aws-sdk-for-zig.git?ref=master#efdef66fdbb2500d33a79a0b8d1855dd1bb20d56",
.hash = "aws-0.0.1-SbsFcLgtCgAndtGhoOyzQfmFtUux4tadFZv0tC6TAnL8",
.url = "https://git.lerch.org/api/packages/lobo/generic/aws-sdk-with-models/2fddf4f122198ba64fbb2320e702b317b0b86837/2fddf4f122198ba64fbb2320e702b317b0b86837-with-models.tar.gz",
.hash = "aws-0.0.1-SbsFcLhoAwQ5TcclMwXhIljwW0Zz_Kcjd4yrIeQq5uHt",
},
},
}

View file

@ -11,12 +11,11 @@ pub const std_options: std.Options = .{
},
};
pub fn main() anyerror!void {
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
defer _ = gpa.deinit();
const allocator = gpa.allocator();
pub fn main(init: std.process.Init) anyerror!void {
const allocator = init.gpa;
const io = init.io;
var stdout_buffer: [1024]u8 = undefined;
var stdout_raw = std.fs.File.stdout().writer(&stdout_buffer);
var stdout_raw = std.Io.File.stdout().writer(io, &stdout_buffer);
const stdout = &stdout_raw.interface;
defer stdout.flush() catch unreachable;
@ -28,7 +27,7 @@ pub fn main() anyerror!void {
// };
//
// var client = aws.Client.init(allocator, .{ .proxy = proxy });
var client = aws.Client.init(allocator, .{});
var client = aws.Client.init(allocator, .{ .io = io, .map = init.environ_map });
defer client.deinit();
const options = aws.Options{

View file

@ -5,8 +5,8 @@
.minimum_zig_version = "0.14.0",
.dependencies = .{
.zeit = .{
.url = "git+https://github.com/rockorager/zeit?ref=zig-0.15#ed2ca60db118414bda2b12df2039e33bad3b0b88",
.hash = "zeit-0.6.0-5I6bk0J9AgCVa0nnyL0lNY9Xa9F68hHq-ZarhuXNV-Jb",
.url = "git+https://github.com/rockorager/zeit#2a79678e05e4e82cd4efd4fd6b754dcf029c3a64",
.hash = "zeit-0.6.0-5I6bk7q6AgBdMJxze3D4l9ylQhkviQ_BX9FigDt13MFn",
},
.json = .{
.path = "../json",

View file

@ -1,6 +1,7 @@
const std = @import("std");
const log = std.log.scoped(.date);
const zeit = @import("zeit");
const instantWithoutIo = @import("timestamp.zig").instantWithoutIo;
pub const DateTime = struct {
day: u8,
@ -37,12 +38,13 @@ pub const DateTime = struct {
}
pub fn instant(self: DateTime) !zeit.Instant {
return try zeit.instant(.{ .source = .{ .time = self.time() } });
return try instantWithoutIo(.{ .source = .{ .time = self.time() } });
}
};
pub fn timestampToDateTime(timestamp: zeit.Seconds) DateTime {
const ins = zeit.instant(.{ .source = .{ .unix_timestamp = timestamp } }) catch @panic("Failed to create instant from timestamp");
pub fn timestampToDateTime(timestamp: i64) DateTime {
// zeit.Seconds is i64, so this should be identical
const ins = instantWithoutIo(.{ .source = .{ .unix_timestamp = timestamp } }) catch @panic("Failed to create instant from timestamp");
return DateTime.fromInstant(ins);
}
@ -53,7 +55,7 @@ pub fn parseEnglishToTimestamp(data: []const u8) !i64 {
/// Converts a string to a timestamp value. May not handle dates before the
/// epoch. Dates should look like "Fri, 03 Jun 2022 18:12:36 GMT"
pub fn parseEnglishToDateTime(data: []const u8) !DateTime {
const ins = try zeit.instant(.{ .source = .{ .rfc1123 = data } });
const ins = try instantWithoutIo(.{ .source = .{ .rfc1123 = data } });
return DateTime.fromInstant(ins);
}
@ -64,7 +66,7 @@ pub fn parseIso8601ToTimestamp(data: []const u8) !i64 {
/// Converts a string to a timestamp value. May not handle dates before the
/// epoch
pub fn parseIso8601ToDateTime(data: []const u8) !DateTime {
const ins = try zeit.instant(.{ .source = .{ .iso8601 = data } });
const ins = try instantWithoutIo(.{ .source = .{ .iso8601 = data } });
return DateTime.fromInstant(ins);
}
@ -83,8 +85,10 @@ fn printDateTime(dt: DateTime) void {
});
}
pub fn printNowUtc() void {
printDateTime(timestampToDateTime(std.time.timestamp()));
pub fn printNowUtc(io: std.Io) void {
const now = std.Io.Clock.Timestamp.now(io, .awake);
const timestamp = @as(i64, @intCast(@divFloor(now.raw.nanoseconds, std.time.ns_per_s)));
printDateTime(timestampToDateTime(timestamp));
}
test "Convert timestamp to datetime" {

View file

@ -10,7 +10,7 @@ pub const Timestamp = enum(zeit.Nanoseconds) {
_,
pub fn jsonStringify(value: Timestamp, jw: anytype) !void {
const instant = zeit.instant(.{
const instant = instantWithoutIo(.{
.source = .{
.unix_nano = @intFromEnum(value),
},
@ -34,7 +34,7 @@ pub const Timestamp = enum(zeit.Nanoseconds) {
}
};
const ins = try zeit.instant(.{
const ins = try instantWithoutIo(.{
.source = switch (date_format) {
DateFormat.iso8601 => .{
.iso8601 = val,
@ -49,6 +49,36 @@ pub const Timestamp = enum(zeit.Nanoseconds) {
}
};
/// create a new Instant
pub fn instantWithoutIo(cfg: zeit.Instant.Config) !zeit.Instant {
const ts: zeit.Nanoseconds = switch (cfg.source) {
.now => return error.UseZeitInstantWithIoForNowInstants,
.unix_timestamp => |unix| @as(i128, unix) * std.time.ns_per_s,
.unix_nano => |nano| nano,
.time => |time| time.instant().timestamp,
.iso8601,
.rfc3339,
=> |iso| blk: {
const t = try zeit.Time.fromISO8601(iso);
break :blk t.instant().timestamp;
},
.rfc2822,
.rfc5322,
=> |eml| blk: {
const t = try zeit.Time.fromRFC5322(eml);
break :blk t.instant().timestamp;
},
.rfc1123 => |http_date| blk: {
const t = try zeit.Time.fromRFC1123(http_date);
break :blk t.instant().timestamp;
},
};
return .{
.timestamp = ts,
.timezone = cfg.timezone,
};
}
test Timestamp {
const in_date = "Wed, 23 Apr 2025 11:23:45 GMT";

View file

@ -1346,7 +1346,7 @@ test "json.validate" {
const Allocator = std.mem.Allocator;
const ArenaAllocator = std.heap.ArenaAllocator;
const ArrayList = std.ArrayList;
const StringArrayHashMap = std.StringArrayHashMap;
const StringArrayHashMap = std.array_hash_map.String;
pub const ValueTree = struct {
arena: ArenaAllocator,
@ -1580,11 +1580,11 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
if (!numberToken.is_integer) {
// probably is in scientific notation
const n = try std.fmt.parseFloat(f128, numberToken.slice(tokens.slice, tokens.i - 1));
return try std.meta.intToEnum(T, @as(i128, @intFromFloat(n)));
return std.enums.fromInt(T, @as(i128, @intFromFloat(n))) orelse error.InvalidEnumTag;
}
const n = try std.fmt.parseInt(enumInfo.tag_type, numberToken.slice(tokens.slice, tokens.i - 1), 10);
return try std.meta.intToEnum(T, n);
return std.enums.fromInt(T, n) orelse error.InvalidEnumTag;
},
.String => |stringToken| {
const source_slice = stringToken.slice(tokens.slice, tokens.i - 1);
@ -1772,7 +1772,7 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
.slice => {
switch (token) {
.ArrayBegin => {
var arraylist = std.ArrayList(ptrInfo.child){};
var arraylist = std.ArrayList(ptrInfo.child).empty;
errdefer {
while (arraylist.pop()) |v| {
parseFree(ptrInfo.child, v, options);
@ -1817,7 +1817,7 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
if (key_type == null) return error.UnexpectedToken;
const value_type = typeForField(ptrInfo.child, "value");
if (value_type == null) return error.UnexpectedToken;
var arraylist = std.ArrayList(ptrInfo.child){};
var arraylist = std.ArrayList(ptrInfo.child).empty;
errdefer {
while (arraylist.pop()) |v| {
parseFree(ptrInfo.child, v, options);

View file

@ -4,7 +4,6 @@ const std = @import("std");
const case = @import("case");
const date = @import("date");
const json = @import("json");
const zeit = @import("zeit");
const credentials = @import("aws_credentials.zig");
const awshttp = @import("aws_http.zig");
@ -114,6 +113,8 @@ pub const Services = servicemodel.Services;
pub const ClientOptions = struct {
proxy: ?std.http.Client.Proxy = null,
io: std.Io,
map: *const std.process.Environ.Map,
};
pub const Client = struct {
allocator: std.mem.Allocator,
@ -124,7 +125,7 @@ pub const Client = struct {
pub fn init(allocator: std.mem.Allocator, options: ClientOptions) Self {
return Self{
.allocator = allocator,
.aws_http = awshttp.AwsHttp.init(allocator, options.proxy),
.aws_http = awshttp.AwsHttp.init(allocator, options.io, options.map, options.proxy),
};
}
pub fn deinit(self: *Client) void {
@ -195,7 +196,7 @@ pub fn Request(comptime request_action: anytype) type {
log.debug("Rest method: '{s}'", .{aws_request.method});
log.debug("Rest success code: '{d}'", .{Action.http_config.success_code});
log.debug("Rest raw uri: '{s}'", .{Action.http_config.uri});
var al = std.ArrayList([]const u8){};
var al = std.ArrayList([]const u8).empty;
defer al.deinit(options.client.allocator);
aws_request.path = try buildPath(
options.client.allocator,
@ -480,9 +481,7 @@ pub fn Request(comptime request_action: anytype) type {
) catch |e| {
log.err("Could not set header value: Response header {s}. Field {s}. Value {s}", .{ header.name, f.?.name, header.value });
log.err("Error: {}", .{e});
if (@errorReturnTrace()) |trace| {
std.debug.dumpStackTrace(trace.*);
}
std.debug.dumpCurrentStackTrace(.{});
};
break;
@ -1082,45 +1081,37 @@ fn ServerResponse(comptime action: anytype) type {
const ResponseMetadata = struct {
RequestId: []u8,
};
const Result = @Type(.{
.@"struct" = .{
.layout = .auto,
.fields = &[_]std.builtin.Type.StructField{
.{
.name = action.action_name ++ "Result",
.type = T,
.default_value_ptr = null,
.is_comptime = false,
.alignment = std.meta.alignment(T),
},
.{
.name = "ResponseMetadata",
.type = ResponseMetadata,
.default_value_ptr = null,
.is_comptime = false,
.alignment = std.meta.alignment(ResponseMetadata),
},
const Result = @Struct(
.auto,
null,
&[_][]const u8{ action.action_name ++ "Result", "ResponseMetadata" },
&[_]type{ T, ResponseMetadata },
&[_]std.builtin.Type.StructField.Attributes{
.{
.default_value_ptr = null,
.@"comptime" = false,
.@"align" = std.meta.alignment(T),
},
.decls = &[_]std.builtin.Type.Declaration{},
.is_tuple = false,
},
});
return @Type(.{
.@"struct" = .{
.layout = .auto,
.fields = &[_]std.builtin.Type.StructField{
.{
.name = action.action_name ++ "Response",
.type = Result,
.default_value_ptr = null,
.is_comptime = false,
.alignment = std.meta.alignment(Result),
},
.{
.default_value_ptr = null,
.@"comptime" = false,
.@"align" = std.meta.alignment(ResponseMetadata),
},
.decls = &[_]std.builtin.Type.Declaration{},
.is_tuple = false,
},
});
);
return @Struct(
.auto,
null,
&[_][]const u8{action.action_name ++ "Response"},
&[_]type{Result},
&[_]std.builtin.Type.StructField.Attributes{
.{
.default_value_ptr = null,
.@"comptime" = false,
.@"align" = std.meta.alignment(Result),
},
},
);
}
fn FullResponse(comptime action: anytype) type {
return struct {
@ -1441,6 +1432,7 @@ fn reportTraffic(
test {
_ = @import("aws_test.zig");
_ = @import("servicemodel.zig");
}
// buildQuery/buildPath tests, which are here as they are a) generic and b) private
@ -1480,7 +1472,7 @@ test "REST Json v1 serializes lists in queries" {
}
test "REST Json v1 buildpath substitutes" {
const allocator = std.testing.allocator;
var al = std.ArrayList([]const u8){};
var al = std.ArrayList([]const u8).empty;
defer al.deinit(allocator);
const svs = Services(.{.lambda}){};
const request = svs.lambda.list_functions.Request{
@ -1493,7 +1485,7 @@ test "REST Json v1 buildpath substitutes" {
}
test "REST Json v1 buildpath handles restricted characters" {
const allocator = std.testing.allocator;
var al = std.ArrayList([]const u8){};
var al = std.ArrayList([]const u8).empty;
defer al.deinit(allocator);
const svs = Services(.{.lambda}){};
const request = svs.lambda.list_functions.Request{

View file

@ -82,18 +82,18 @@ pub const Options = struct {
pub var static_credentials: ?auth.Credentials = null;
pub fn getCredentials(allocator: std.mem.Allocator, options: Options) !auth.Credentials {
pub fn getCredentials(allocator: std.mem.Allocator, map: *const std.process.Environ.Map, io: std.Io, options: Options) !auth.Credentials {
if (static_credentials) |c| return c;
if (options.profile.prefer_profile_from_file) {
log.debug(
"Command line profile specified. Checking credentials file first. Profile name {s}",
.{options.profile.profile_name orelse "default"},
);
if (try getProfileCredentials(allocator, options.profile)) |cred| return cred;
if (try getProfileCredentials(allocator, io, map, options.profile)) |cred| return cred;
// Profile not found. We'll mirror the cli here and bail early
return error.CredentialsNotFound;
}
if (try getEnvironmentCredentials(allocator)) |cred| {
if (try getEnvironmentCredentials(allocator, map)) |cred| {
log.debug("Found credentials in environment. Access key: {s}", .{cred.access_key});
return cred;
}
@ -101,32 +101,31 @@ pub fn getCredentials(allocator: std.mem.Allocator, options: Options) !auth.Cred
// GetWebIdentity is not currently implemented. The rest are tested and gtg
// Note: Lambda just sets environment variables
if (try getWebIdentityToken(allocator)) |cred| return cred;
if (try getProfileCredentials(allocator, options.profile)) |cred| return cred;
if (try getProfileCredentials(allocator, io, map, options.profile)) |cred| return cred;
if (try getContainerCredentials(allocator)) |cred| return cred;
if (try getContainerCredentials(allocator, io, map)) |cred| return cred;
// I don't think we need v1 at all?
if (try getImdsv2Credentials(allocator)) |cred| return cred;
if (try getImdsv2Credentials(allocator, io)) |cred| return cred;
return error.CredentialsNotFound;
}
fn getEnvironmentCredentials(allocator: std.mem.Allocator) !?auth.Credentials {
const secret_key = (try getEnvironmentVariable(allocator, "AWS_SECRET_ACCESS_KEY")) orelse return null;
defer allocator.free(secret_key); //yes, we're not zeroing. But then, the secret key is in an environment var anyway
fn getEnvironmentCredentials(allocator: std.mem.Allocator, map: *const std.process.Environ.Map) !?auth.Credentials {
const secret_key = getEnvironmentVariable(map, "AWS_SECRET_ACCESS_KEY") orelse return null;
const access_key = getEnvironmentVariable(map, "AWS_ACCESS_KEY_ID") orelse return null;
const token = getEnvironmentVariable(map, "AWS_SESSION_TOKEN") orelse
getEnvironmentVariable(map, "AWS_SECURITY_TOKEN"); // Security token is backward compat only
// Use cross-platform API (requires allocation)
return auth.Credentials.init(
allocator,
(try getEnvironmentVariable(allocator, "AWS_ACCESS_KEY_ID")) orelse return null,
try allocator.dupe(u8, access_key),
try allocator.dupe(u8, secret_key),
(try getEnvironmentVariable(allocator, "AWS_SESSION_TOKEN")) orelse
try getEnvironmentVariable(allocator, "AWS_SECURITY_TOKEN"), // Security token is backward compat only
if (token) |t| try allocator.dupe(u8, t) else null,
);
}
fn getEnvironmentVariable(allocator: std.mem.Allocator, key: []const u8) !?[]const u8 {
return std.process.getEnvVarOwned(allocator, key) catch |e| switch (e) {
std.process.GetEnvVarOwnedError.EnvironmentVariableNotFound => return null,
else => return e,
};
fn getEnvironmentVariable(map: *const std.process.Environ.Map, key: []const u8) ?[]const u8 {
if (!map.contains(key)) return null;
return map.get(key);
}
fn getWebIdentityToken(allocator: std.mem.Allocator) !?auth.Credentials {
@ -139,7 +138,7 @@ fn getWebIdentityToken(allocator: std.mem.Allocator) !?auth.Credentials {
// TODO: implement
return null;
}
fn getContainerCredentials(allocator: std.mem.Allocator) !?auth.Credentials {
fn getContainerCredentials(allocator: std.mem.Allocator, io: std.Io, map: *const std.process.Environ.Map) !?auth.Credentials {
// A note on testing: The best way I have found to test this process is
// the following. Setup an ECS Fargate cluster and create a task definition
// with the command ["/bin/bash","-c","while true; do sleep 10; done"].
@ -180,12 +179,11 @@ fn getContainerCredentials(allocator: std.mem.Allocator) !?auth.Credentials {
//
// Compile code, copy to S3, install AWS CLI within the session, download
// from s3 and run
const container_relative_uri = (try getEnvironmentVariable(allocator, "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI")) orelse return null;
defer allocator.free(container_relative_uri);
const container_relative_uri = getEnvironmentVariable(map, "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI") orelse return null;
const container_uri = try std.fmt.allocPrint(allocator, "http://169.254.170.2{s}", .{container_relative_uri});
defer allocator.free(container_uri);
var cl = std.http.Client{ .allocator = allocator };
var cl = std.http.Client{ .allocator = allocator, .io = io };
defer cl.deinit(); // I don't belive connection pooling would help much here as it's non-ssl and local
var aw: std.Io.Writer.Allocating = .init(allocator);
defer aw.deinit();
@ -214,9 +212,7 @@ fn getContainerCredentials(allocator: std.mem.Allocator) !?auth.Credentials {
const res = std.json.parseFromSlice(CredsResponse, allocator, aw.written(), .{}) catch |e| {
log.err("Unexpected Json response from container credentials endpoint: {s}", .{aw.written()});
log.err("Error parsing json: {}", .{e});
if (@errorReturnTrace()) |trace| {
std.debug.dumpStackTrace(trace.*);
}
std.debug.dumpCurrentStackTrace(.{});
return null;
};
@ -232,10 +228,10 @@ fn getContainerCredentials(allocator: std.mem.Allocator) !?auth.Credentials {
);
}
fn getImdsv2Credentials(allocator: std.mem.Allocator) !?auth.Credentials {
fn getImdsv2Credentials(allocator: std.mem.Allocator, io: std.Io) !?auth.Credentials {
var token: ?[]u8 = null;
defer if (token) |t| allocator.free(t);
var cl = std.http.Client{ .allocator = allocator };
var cl = std.http.Client{ .allocator = allocator, .io = io };
defer cl.deinit(); // I don't belive connection pooling would help much here as it's non-ssl and local
// Get token
{
@ -312,9 +308,7 @@ fn getImdsRoleName(allocator: std.mem.Allocator, client: *std.http.Client, imds_
const imds_response = std.json.parseFromSlice(ImdsResponse, allocator, aw.written(), .{}) catch |e| {
log.err("Unexpected Json response from IMDS endpoint: {s}", .{aw.written()});
log.err("Error parsing json: {}", .{e});
if (@errorReturnTrace()) |trace| {
std.debug.dumpStackTrace(trace.*);
}
std.debug.dumpCurrentStackTrace(.{});
return null;
};
defer imds_response.deinit();
@ -367,10 +361,7 @@ fn getImdsCredentials(allocator: std.mem.Allocator, client: *std.http.Client, ro
const imds_response = std.json.parseFromSlice(ImdsResponse, allocator, aw.written(), .{}) catch |e| {
log.err("Unexpected Json response from IMDS endpoint: {s}", .{aw.written()});
log.err("Error parsing json: {}", .{e});
if (@errorReturnTrace()) |trace| {
std.debug.dumpStackTrace(trace.*);
}
std.debug.dumpCurrentStackTrace(.{});
return null;
};
defer imds_response.deinit();
@ -397,12 +388,13 @@ fn getImdsCredentials(allocator: std.mem.Allocator, client: *std.http.Client, ro
}
fn getProfileCredentials(allocator: std.mem.Allocator, options: Profile) !?auth.Credentials {
fn getProfileCredentials(allocator: std.mem.Allocator, io: std.Io, map: *const std.process.Environ.Map, options: Profile) !?auth.Credentials {
var default_path: ?[]const u8 = null;
defer if (default_path) |p| allocator.free(p);
const creds_file_path = try filePath(
allocator,
map,
options.credential_file,
"AWS_SHARED_CREDENTIALS_FILE",
default_path,
@ -412,6 +404,7 @@ fn getProfileCredentials(allocator: std.mem.Allocator, options: Profile) !?auth.
default_path = default_path orelse creds_file_path.home;
const config_file_path = try filePath(
allocator,
map,
options.config_file,
"AWS_CONFIG_FILE",
default_path,
@ -421,22 +414,22 @@ fn getProfileCredentials(allocator: std.mem.Allocator, options: Profile) !?auth.
default_path = default_path orelse config_file_path.home;
// Get active profile
const profile = (try getEnvironmentVariable(allocator, "AWS_PROFILE")) orelse
try allocator.dupe(u8, options.profile_name orelse "default");
const profile = try allocator.dupe(u8, getEnvironmentVariable(map, "AWS_PROFILE") orelse
options.profile_name orelse "default");
defer allocator.free(profile);
log.debug("Looking for file credentials using profile '{s}'", .{profile});
log.debug("Checking credentials file: {s}", .{creds_file_path.evaluated_path});
const credentials_file = std.fs.openFileAbsolute(creds_file_path.evaluated_path, .{}) catch null;
defer if (credentials_file) |f| f.close();
const credentials_file = std.Io.Dir.openFileAbsolute(io, creds_file_path.evaluated_path, .{}) catch null;
defer if (credentials_file) |f| f.close(io);
// It's much more likely that we'll find credentials in the credentials file
// so we'll try that first
const creds_file_creds = try credsForFile(allocator, credentials_file, profile);
const creds_file_creds = try credsForFile(allocator, io, credentials_file, profile);
var conf_file_creds = PartialCredentials{};
if (creds_file_creds.access_key == null or creds_file_creds.secret_key == null) {
log.debug("Checking config file: {s}", .{config_file_path.evaluated_path});
const config_file = std.fs.openFileAbsolute(creds_file_path.evaluated_path, .{}) catch null;
defer if (config_file) |f| f.close();
conf_file_creds = try credsForFile(allocator, config_file, profile);
const config_file = std.Io.Dir.openFileAbsolute(io, creds_file_path.evaluated_path, .{}) catch null;
defer if (config_file) |f| f.close(io);
conf_file_creds = try credsForFile(allocator, io, config_file, profile);
}
const access_key = keyFrom(allocator, creds_file_creds.access_key, conf_file_creds.access_key);
const secret_key = keyFrom(allocator, creds_file_creds.secret_key, conf_file_creds.secret_key);
@ -475,10 +468,10 @@ const PartialCredentials = struct {
access_key: ?[]const u8 = null,
secret_key: ?[]const u8 = null,
};
fn credsForFile(allocator: std.mem.Allocator, file: ?std.fs.File, profile: []const u8) !PartialCredentials {
fn credsForFile(allocator: std.mem.Allocator, io: std.Io, file: ?std.Io.File, profile: []const u8) !PartialCredentials {
if (file == null) return PartialCredentials{};
var fbuf: [1024]u8 = undefined;
var freader = file.?.reader(&fbuf);
var freader = file.?.reader(io, &fbuf);
var reader = &freader.interface;
const text = try reader.allocRemaining(allocator, .unlimited);
defer allocator.free(text);
@ -610,6 +603,7 @@ fn trimmed(text: []const u8, start: ?usize, end: ?usize) []const u8 {
fn filePath(
allocator: std.mem.Allocator,
map: *const std.process.Environ.Map,
specified_path: ?[]const u8,
env_var_name: []const u8,
config_dir: ?[]const u8,
@ -617,39 +611,28 @@ fn filePath(
) !EvaluatedPath {
if (specified_path) |p| return EvaluatedPath{ .evaluated_path = try allocator.dupe(u8, p) };
// Not specified. Check environment variable, otherwise, hard coded default
if (try getEnvironmentVariable(allocator, env_var_name)) |v| return EvaluatedPath{ .evaluated_path = v };
if (getEnvironmentVariable(map, env_var_name)) |v| return EvaluatedPath{ .evaluated_path = try allocator.dupe(u8, v) };
// Not in environment variable either. Go fish
return try getDefaultPath(allocator, config_dir, ".aws", config_file_name);
return try getDefaultPath(allocator, map, config_dir, ".aws", config_file_name);
}
const EvaluatedPath = struct {
home: ?[]const u8 = null,
evaluated_path: []const u8,
};
fn getDefaultPath(allocator: std.mem.Allocator, home_dir: ?[]const u8, dir: []const u8, file: []const u8) !EvaluatedPath {
const home = home_dir orelse try getHomeDir(allocator);
fn getDefaultPath(allocator: std.mem.Allocator, map: *const std.process.Environ.Map, home_dir: ?[]const u8, dir: []const u8, file: []const u8) !EvaluatedPath {
const home = home_dir orelse try getHomeDir(allocator, map);
log.debug("Home directory: {s}", .{home});
const rc = try std.fs.path.join(allocator, &[_][]const u8{ home, dir, file });
log.debug("Path evaluated as: {s}", .{rc});
return EvaluatedPath{ .home = home, .evaluated_path = rc };
}
fn getHomeDir(allocator: std.mem.Allocator) ![]const u8 {
switch (builtin.os.tag) {
.windows => {
return std.process.getEnvVarOwned(allocator, "USERPROFILE") catch |err| switch (err) {
error.OutOfMemory => |e| return e,
else => return error.HomeDirUnavailable,
};
},
.macos, .linux, .freebsd, .netbsd, .dragonfly, .openbsd, .solaris => {
const home_dir = std.posix.getenv("HOME") orelse {
// TODO look in /etc/passwd
return error.HomeDirUnavailable;
};
return allocator.dupe(u8, home_dir);
},
fn getHomeDir(allocator: std.mem.Allocator, map: *const std.process.Environ.Map) ![]const u8 {
const env_key = switch (builtin.os.tag) {
.windows => "USERPROFILE",
.macos, .linux, .freebsd, .netbsd, .dragonfly, .openbsd, .illumos => "HOME",
// Code from https://github.com/ziglang/zig/blob/9f9f215305389c08a21730859982b68bf2681932/lib/std/fs/get_app_data_dir.zig
// be_user_settings magic number is probably different for home directory
// .haiku => {
@ -665,17 +648,26 @@ fn getHomeDir(allocator: std.mem.Allocator) ![]const u8 {
// }
// },
else => @compileError("Unsupported OS"),
}
};
return try allocator.dupe(u8, getEnvironmentVariable(map, env_key) orelse return error.HomeDirUnavailable);
}
test "filePath" {
const allocator = std.testing.allocator;
var map = std.process.Environ.Map.init(allocator);
defer map.deinit();
try map.put("USERPROFILE", "c:\\users\\myuser");
try map.put("HOME", "/home/user");
// std.testing.log_level = .debug;
// log.debug("\n", .{});
const path = try filePath(allocator, null, "NOTHING", null, "hello");
const path = try filePath(allocator, &map, null, "NOTHING", null, "hello");
defer allocator.free(path.evaluated_path);
defer allocator.free(path.home.?);
try std.testing.expect(path.evaluated_path.len > 10);
// try std.testing.expect(path.evaluated_path.len > 10);
if (builtin.os.tag == .windows)
try std.testing.expectEqualStrings("c:\\users\\myuser\\.aws\\hello", path.evaluated_path)
else
try std.testing.expectEqualStrings("/home/user/.aws/hello", path.evaluated_path);
try std.testing.expectEqualStrings("hello", path.evaluated_path[path.evaluated_path.len - 5 ..]);
try std.testing.expect(path.home != null);
}

View file

@ -103,6 +103,7 @@ pub const Mock = struct {
context: usize = 0,
request_fn: *const fn (
usize,
std.Io,
std.http.Method,
std.Uri,
std.http.Client.RequestOptions,
@ -111,8 +112,8 @@ pub const Mock = struct {
receive_head: *const fn (usize) std.http.Client.Request.ReceiveHeadError!std.http.Client.Response,
reader_decompressing: *const fn (usize) *std.Io.Reader,
fn request(m: Mock, method: std.http.Method, uri: std.Uri, options: std.http.Client.RequestOptions) std.http.Client.RequestError!std.http.Client.Request {
return m.request_fn(m.context, method, uri, options);
fn request(m: Mock, io: std.Io, method: std.http.Method, uri: std.Uri, options: std.http.Client.RequestOptions) std.http.Client.RequestError!std.http.Client.Request {
return m.request_fn(m.context, io, method, uri, options);
}
fn sendBodyComplete(m: Mock, body: []u8) std.Io.Writer.Error!void {
return m.send_body_complete(m.context, body);
@ -146,13 +147,17 @@ const EndPoint = struct {
pub const AwsHttp = struct {
allocator: std.mem.Allocator,
proxy: ?std.http.Client.Proxy,
io: std.Io,
map: *const std.process.Environ.Map,
const Self = @This();
pub fn init(allocator: std.mem.Allocator, proxy: ?std.http.Client.Proxy) Self {
pub fn init(allocator: std.mem.Allocator, io: std.Io, map: *const std.process.Environ.Map, proxy: ?std.http.Client.Proxy) Self {
return Self{
.allocator = allocator,
.proxy = proxy,
.io = io,
.map = map,
// .credentialsProvider = // creds provider could be useful
};
}
@ -184,11 +189,11 @@ pub const AwsHttp = struct {
// S3 control uses <account-id>.s3-control.<region>.amazonaws.com
//
// So this regionSubDomain call needs to handle generic customization
const endpoint = try endpointForRequest(self.allocator, service, request, options);
const endpoint = try endpointForRequest(self.allocator, service, request, self.map, options);
defer endpoint.deinit();
log.debug("Calling endpoint {s}", .{endpoint.uri});
// TODO: Should we allow customization here?
const creds = try credentials.getCredentials(self.allocator, options.credential_options);
const creds = try credentials.getCredentials(self.allocator, self.map, self.io, options.credential_options);
defer creds.deinit();
const signing_config: signing.Config = .{
.region = getRegion(service, options.region),
@ -236,21 +241,21 @@ pub const AwsHttp = struct {
// We will use endpoint instead
request_cp.path = endpoint.path;
var request_headers = std.ArrayList(std.http.Header){};
var request_headers: std.ArrayList(std.http.Header) = .empty;
defer request_headers.deinit(self.allocator);
const len = try addHeaders(self.allocator, &request_headers, endpoint.host, request_cp.body, request_cp.content_type, request_cp.headers);
defer if (len) |l| self.allocator.free(l);
request_cp.headers = request_headers.items;
if (signing_config) |opts| request_cp = try signing.signRequest(self.allocator, request_cp, opts);
if (signing_config) |opts| request_cp = try signing.signRequest(self.allocator, self.io, request_cp, opts);
defer {
if (signing_config) |opts| {
signing.freeSignedRequest(self.allocator, &request_cp, opts);
}
}
var headers = std.ArrayList(std.http.Header){};
var headers: std.ArrayList(std.http.Header) = .empty;
defer headers.deinit(self.allocator);
for (request_cp.headers) |header|
try headers.append(self.allocator, .{ .name = header.name, .value = header.value });
@ -263,7 +268,7 @@ pub const AwsHttp = struct {
defer self.allocator.free(url);
log.debug("Request url: {s}", .{url});
// TODO: Fix this proxy stuff. This is all a kludge just to compile, but std.http.Client has it all built in now
var cl = std.http.Client{ .allocator = self.allocator, .https_proxy = if (self.proxy) |*p| @constCast(p) else null };
var cl = std.http.Client{ .allocator = self.allocator, .io = self.io, .https_proxy = if (self.proxy) |*p| @constCast(p) else null };
defer cl.deinit(); // TODO: Connection pooling
const method = std.meta.stringToEnum(std.http.Method, request_cp.method).?;
@ -285,7 +290,7 @@ pub const AwsHttp = struct {
};
var req = if (options.mock) |m|
try m.request(method, uri, req_options) // This will call the test harness
try m.request(self.io, method, uri, req_options) // This will call the test harness
else
try cl.request(method, uri, req_options);
defer req.deinit();
@ -326,7 +331,7 @@ pub const AwsHttp = struct {
.{ @intFromEnum(response.head.status), response.head.status.phrase() },
);
log.debug("Response headers:", .{});
var resp_headers = std.ArrayList(Header){};
var resp_headers: std.ArrayList(Header) = .empty;
defer resp_headers.deinit(self.allocator);
var it = response.head.iterateHeaders();
while (it.next()) |h| { // even though we don't expect to fill the buffer,
@ -367,6 +372,7 @@ pub const AwsHttp = struct {
.body = try aw.toOwnedSlice(),
.headers = try resp_headers.toOwnedSlice(self.allocator),
.allocator = self.allocator,
.io = self.io,
};
return rc;
}
@ -410,25 +416,22 @@ fn addHeaders(
return null;
}
fn getEnvironmentVariable(allocator: std.mem.Allocator, key: []const u8) !?[]const u8 {
return std.process.getEnvVarOwned(allocator, key) catch |e| switch (e) {
std.process.GetEnvVarOwnedError.EnvironmentVariableNotFound => return null,
else => return e,
};
fn getEnvironmentVariable(map: *const std.process.Environ.Map, key: []const u8) ?[]const u8 {
if (!map.contains(key)) return null;
return map.get(key);
}
/// override endpoint url. Intended for use in testing. Normally, you should
/// rely on AWS_ENDPOINT_URL environment variable for this
pub var endpoint_override: ?[]const u8 = null;
fn endpointForRequest(allocator: std.mem.Allocator, service: []const u8, request: HttpRequest, options: Options) !EndPoint {
fn endpointForRequest(allocator: std.mem.Allocator, service: []const u8, request: HttpRequest, map: *const std.process.Environ.Map, options: Options) !EndPoint {
if (endpoint_override) |override| {
const uri = try allocator.dupe(u8, override);
return endPointFromUri(allocator, uri, request.path);
}
const environment_override = try getEnvironmentVariable(allocator, "AWS_ENDPOINT_URL");
const environment_override = getEnvironmentVariable(map, "AWS_ENDPOINT_URL");
if (environment_override) |override| {
defer allocator.free(override);
const uri = try allocator.dupe(u8, override);
return endPointFromUri(allocator, uri, request.path);
}
@ -575,7 +578,8 @@ test "endpointForRequest standard operation" {
const allocator = std.testing.allocator;
const service = "dynamodb";
const endpoint = try endpointForRequest(allocator, service, request, options);
const map = std.process.Environ.Map.init(allocator);
const endpoint = try endpointForRequest(allocator, service, request, &map, options);
defer endpoint.deinit();
try std.testing.expectEqualStrings("https://dynamodb.us-west-2.amazonaws.com", endpoint.uri);
}
@ -590,7 +594,8 @@ test "endpointForRequest for cloudfront" {
const allocator = std.testing.allocator;
const service = "cloudfront";
const endpoint = try endpointForRequest(allocator, service, request, options);
const map = std.process.Environ.Map.init(allocator);
const endpoint = try endpointForRequest(allocator, service, request, &map, options);
defer endpoint.deinit();
try std.testing.expectEqualStrings("https://cloudfront.amazonaws.com", endpoint.uri);
}
@ -605,7 +610,8 @@ test "endpointForRequest for s3" {
const allocator = std.testing.allocator;
const service = "s3";
const endpoint = try endpointForRequest(allocator, service, request, options);
const map = std.process.Environ.Map.init(allocator);
const endpoint = try endpointForRequest(allocator, service, request, &map, options);
defer endpoint.deinit();
try std.testing.expectEqualStrings("https://s3.us-east-2.amazonaws.com", endpoint.uri);
}
@ -621,7 +627,8 @@ test "endpointForRequest for s3 - specific bucket" {
const allocator = std.testing.allocator;
const service = "s3";
const endpoint = try endpointForRequest(allocator, service, request, options);
const map = std.process.Environ.Map.init(allocator);
const endpoint = try endpointForRequest(allocator, service, request, &map, options);
defer endpoint.deinit();
try std.testing.expectEqualStrings("https://bucket.s3.us-east-2.amazonaws.com", endpoint.uri);
try std.testing.expectEqualStrings("/key", endpoint.path);

View file

@ -13,6 +13,8 @@ pub const Result = struct {
body: []const u8,
headers: []const std.http.Header,
allocator: std.mem.Allocator,
/// The io that was used for the request
io: std.Io,
pub fn deinit(self: Result) void {
self.allocator.free(self.body);

View file

@ -157,7 +157,7 @@ pub const SigningError = error{
XAmzExpiresHeaderInRequest,
/// Used if the request headers already includes x-amz-region-set
XAmzRegionSetHeaderInRequest,
} || error{OutOfMemory};
} || error{OutOfMemory}; // || std.Io.Clock.Error;
const forbidden_headers = .{
.{ .name = "x-amz-content-sha256", .err = SigningError.XAmzContentSha256HeaderInRequest },
@ -185,7 +185,7 @@ const skipped_headers = .{
/// Signs a request. Only header signing is currently supported. Note that
/// This adds two headers to the request, which will need to be freed by the
/// caller. Use freeSignedRequest with the same parameters to free
pub fn signRequest(allocator: std.mem.Allocator, request: base.Request, config: Config) SigningError!base.Request {
pub fn signRequest(allocator: std.mem.Allocator, io: std.Io, request: base.Request, config: Config) SigningError!base.Request {
try validateConfig(config);
for (request.headers) |h| {
inline for (forbidden_headers) |f| {
@ -195,7 +195,10 @@ pub fn signRequest(allocator: std.mem.Allocator, request: base.Request, config:
}
var rc = request;
const signing_time = config.signing_time orelse std.time.timestamp();
const signing_time = config.signing_time orelse blk: {
const now = std.Io.Clock.Timestamp.now(io, .real);
break :blk @as(i64, @intCast(@divFloor(now.raw.nanoseconds, std.time.ns_per_s)));
};
const signed_date = date.timestampToDateTime(signing_time);
@ -333,9 +336,7 @@ pub fn signRequest(allocator: std.mem.Allocator, request: base.Request, config:
pub fn freeSignedRequest(allocator: std.mem.Allocator, request: *base.Request, config: Config) void {
validateConfig(config) catch |e| {
log.err("Signing validation failed during signature free: {}", .{e});
if (@errorReturnTrace()) |trace| {
std.debug.dumpStackTrace(trace.*);
}
std.debug.dumpCurrentStackTrace(.{});
return;
};
@ -352,10 +353,10 @@ pub fn freeSignedRequest(allocator: std.mem.Allocator, request: *base.Request, c
pub const credentialsFn = *const fn ([]const u8) ?Credentials;
pub fn verifyServerRequest(allocator: std.mem.Allocator, request: *std.http.Server.Request, request_body_reader: *std.Io.Reader, credentials_fn: credentialsFn) !bool {
pub fn verifyServerRequest(allocator: std.mem.Allocator, io: std.Io, request: *std.http.Server.Request, request_body_reader: *std.Io.Reader, credentials_fn: credentialsFn) !bool {
var unverified_request = try UnverifiedRequest.init(allocator, request);
defer unverified_request.deinit();
return verify(allocator, unverified_request, request_body_reader, credentials_fn);
return verify(allocator, io, unverified_request, request_body_reader, credentials_fn);
}
pub const UnverifiedRequest = struct {
@ -366,7 +367,7 @@ pub const UnverifiedRequest = struct {
raw: *std.http.Server.Request,
pub fn init(allocator: std.mem.Allocator, request: *std.http.Server.Request) !UnverifiedRequest {
var al = std.ArrayList(std.http.Header){};
var al = std.ArrayList(std.http.Header).empty;
defer al.deinit(allocator);
var it = request.iterateHeaders();
while (it.next()) |h| try al.append(allocator, h);
@ -393,7 +394,7 @@ pub const UnverifiedRequest = struct {
}
};
pub fn verify(allocator: std.mem.Allocator, request: UnverifiedRequest, request_body_reader: *std.Io.Reader, credentials_fn: credentialsFn) !bool {
pub fn verify(allocator: std.mem.Allocator, io: std.Io, request: UnverifiedRequest, request_body_reader: *std.Io.Reader, credentials_fn: credentialsFn) !bool {
var arena = std.heap.ArenaAllocator.init(allocator);
defer arena.deinit();
const aa = arena.allocator();
@ -425,6 +426,7 @@ pub fn verify(allocator: std.mem.Allocator, request: UnverifiedRequest, request_
if (signature == null) return error.AuthorizationHeaderMissingSignature;
return verifyParsedAuthorization(
aa,
io,
request,
credential.?,
signed_headers.?,
@ -436,6 +438,7 @@ pub fn verify(allocator: std.mem.Allocator, request: UnverifiedRequest, request_
fn verifyParsedAuthorization(
allocator: std.mem.Allocator,
io: std.Io,
request: UnverifiedRequest,
credential: []const u8,
signed_headers: []const u8,
@ -502,7 +505,7 @@ fn verifyParsedAuthorization(
signed_request.query = request.target[signed_request.path.len..]; // TODO: should this be +1? query here would include '?'
signed_request.body = try request_body_reader.allocRemaining(allocator, .unlimited);
defer allocator.free(signed_request.body);
signed_request = try signRequest(allocator, signed_request, config);
signed_request = try signRequest(allocator, io, signed_request, config);
defer freeSignedRequest(allocator, &signed_request, config);
return verifySignedRequest(signed_request, signature);
}
@ -806,7 +809,7 @@ fn canonicalQueryString(allocator: std.mem.Allocator, path: []const u8) ![]const
// Split this by component
var portions = std.mem.splitScalar(u8, query, '&');
var sort_me = std.ArrayList([]const u8){};
var sort_me = std.ArrayList([]const u8).empty;
defer sort_me.deinit(allocator);
while (portions.next()) |item|
try sort_me.append(allocator, item);
@ -1100,6 +1103,7 @@ test "can sign" {
// [debug] (awshttp): Content-Length: 43
const allocator = std.testing.allocator;
const io = std.testing.io;
var headers = try std.ArrayList(std.http.Header).initCapacity(allocator, 5);
defer headers.deinit(allocator);
try headers.append(allocator, .{ .name = "Content-Type", .value = "application/x-www-form-urlencoded; charset=utf-8" });
@ -1131,7 +1135,7 @@ test "can sign" {
.signing_time = 1440938160, // 20150830T123600Z
};
// TODO: There is an x-amz-content-sha256. Investigate
var signed_req = try signRequest(allocator, req, config);
var signed_req = try signRequest(allocator, io, req, config);
defer freeSignedRequest(allocator, &signed_req, config);
try std.testing.expectEqualStrings("X-Amz-Date", signed_req.headers[signed_req.headers.len - 3].name);
@ -1151,6 +1155,7 @@ test "can sign" {
var test_credential: ?Credentials = null;
test "can verify server request" {
const allocator = std.testing.allocator;
const io = std.testing.io;
const access_key = try allocator.dupe(u8, "ACCESS");
const secret_key = try allocator.dupe(u8, "SECRET");
@ -1191,7 +1196,7 @@ test "can verify server request" {
// const old_level = std.testing.log_level;
// std.testing.log_level = .debug;
// defer std.testing.log_level = old_level;
try std.testing.expect(try verifyServerRequest(allocator, &request, &body_reader, struct {
try std.testing.expect(try verifyServerRequest(allocator, io, &request, &body_reader, struct {
cred: Credentials,
const Self = @This();
@ -1203,6 +1208,7 @@ test "can verify server request" {
}
test "can verify server request without x-amz-content-sha256" {
const allocator = std.testing.allocator;
const io = std.testing.io;
const access_key = try allocator.dupe(u8, "ACCESS");
const secret_key = try allocator.dupe(u8, "SECRET");
@ -1293,7 +1299,7 @@ test "can verify server request without x-amz-content-sha256" {
}
{ // verification
try std.testing.expect(try verifyServerRequest(allocator, &request, &body_reader, struct {
try std.testing.expect(try verifyServerRequest(allocator, io, &request, &body_reader, struct {
cred: Credentials,
const Self = @This();

View file

@ -253,6 +253,8 @@ const TestOptions = struct {
};
const TestSetup = struct {
allocator: std.mem.Allocator,
io: std.Io,
map: *const std.process.Environ.Map,
options: TestOptions,
creds: aws_auth.Credentials,
client: aws.Client,
@ -263,7 +265,6 @@ const TestSetup = struct {
pub const RequestActuals = struct {
request: *std.http.Client.Request,
trace: []const u8,
// Looks like uri might be getting trounced before deinit
request_uri: []const u8,
@ -302,9 +303,9 @@ const TestSetup = struct {
}
allocator.free(self.extra_headers);
allocator.free(self.trace);
allocator.free(self.request_uri);
allocator.destroy(self.request.reader.in);
allocator.destroy(self.request.client);
allocator.destroy(self.request);
}
};
@ -322,32 +323,39 @@ const TestSetup = struct {
fn request(
self_ptr: usize,
io: std.Io,
method: std.http.Method,
uri: std.Uri,
options: std.http.Client.RequestOptions,
) std.http.Client.RequestError!std.http.Client.Request {
_ = io;
const self: *Self = @ptrFromInt(self_ptr);
if (self.request_actuals) |r| {
std.debug.print("request has been called twice. Previous stack trace:\n", .{});
var stderr = std.fs.File.stderr().writer(&.{});
stderr.interface.writeAll(r.trace) catch @panic("could not write to stderr");
if (self.request_actuals) |_| {
std.debug.print("request has been called twice:\n", .{});
std.debug.print("Current stack trace:\n", .{});
std.debug.dumpCurrentStackTrace(null);
std.debug.dumpCurrentStackTrace(.{});
return error.ConnectionRefused; // we should not be called twice
}
const acts = try self.allocator.create(RequestActuals);
errdefer self.allocator.destroy(acts);
var aw = std.Io.Writer.Allocating.init(self.allocator);
defer aw.deinit();
std.debug.dumpCurrentStackTraceToWriter(null, &aw.writer) catch return error.OutOfMemory;
const req = try self.allocator.create(std.http.Client.Request);
errdefer self.allocator.destroy(req);
const reader = try self.allocator.create(std.Io.Reader);
errdefer self.allocator.destroy(reader);
reader.* = .fixed(self.options.server_response);
// Create a minimal mock client that only provides io for deinit
// By creating it with the allocator, we leave critical fields like
// connection_pool as undefined, which will fail spectacularly if
// a real request were to be attempted
const mock_client = try self.allocator.create(std.http.Client);
errdefer self.allocator.destroy(mock_client);
mock_client.* = .{
.allocator = self.allocator,
.io = self.io,
};
req.* = .{
.uri = uri,
.client = undefined,
.client = mock_client,
.connection = options.connection,
.reader = .{
.in = reader,
@ -373,7 +381,6 @@ const TestSetup = struct {
});
acts.* = .{
.trace = try self.allocator.dupe(u8, aw.written()),
.request = req,
.request_uri = try std.fmt.allocPrint(self.allocator, "{f}", .{uri}),
.extra_headers = try al.toOwnedSlice(self.allocator),
@ -432,7 +439,10 @@ const TestSetup = struct {
return self.request_actuals.?.request.reader.in;
}
fn init(options: TestOptions) !*Self {
const client = aws.Client.init(options.allocator, .{});
const io = std.testing.io;
const map = try options.allocator.create(std.process.Environ.Map);
map.* = std.process.Environ.Map.init(options.allocator);
const client = aws.Client.init(options.allocator, .{ .io = io, .map = map });
const call_options = try options.allocator.create(aws.Options);
const self = try options.allocator.create(Self);
call_options.* = .{
@ -452,6 +462,8 @@ const TestSetup = struct {
self.* = .{
.options = options,
.allocator = options.allocator,
.io = io,
.map = map,
.creds = aws_auth.Credentials.init(
options.allocator,
try options.allocator.dupe(u8, "ACCESS"),
@ -465,6 +477,7 @@ const TestSetup = struct {
return self;
}
fn deinit(self: *Self) void {
self.options.allocator.destroy(self.map);
if (self.response_actuals) |r| {
self.allocator.free(r.body);
self.allocator.destroy(r);
@ -1164,13 +1177,9 @@ test "json_1_1: ECR timestamps" {
try std.testing.expectEqualStrings("https://146325435496.dkr.ecr.us-west-2.amazonaws.com", call.response.authorization_data.?[0].proxy_endpoint.?);
// try std.testing.expectEqual(@as(i64, 1.73859841557E9), call.response.authorization_data.?[0].expires_at.?);
const zeit = @import("zeit");
const expected_ins = try zeit.instant(.{
.source = .{ .iso8601 = "2022-05-17T06:56:13.652000+00:00" },
});
const expected_ts: date.Timestamp = @enumFromInt(expected_ins.timestamp);
try std.testing.expectEqual(expected_ts, call.response.authorization_data.?[0].expires_at.?);
const expected_ts = try date.Timestamp.parse("2022-05-17T06:56:13.652000+00:00");
const actual = call.response.authorization_data.?[0].expires_at.?;
try std.testing.expectEqual(expected_ts, actual);
}
test "jsonStringify: structure + enums" {
@ -1321,7 +1330,8 @@ test "jsonStringify nullable object" {
test "works against a live server" {
const Server = struct {
allocator: std.mem.Allocator,
ready: std.Thread.Semaphore = .{},
io: std.Io,
ready: std.Io.Semaphore = .{},
requests_received: usize = 0,
thread: ?std.Thread = null,
listening_uri: []const u8 = undefined,
@ -1343,7 +1353,7 @@ test "works against a live server" {
threadMain,
.{self},
);
try self.ready.timedWait(1000 * std.time.ns_per_ms);
try self.ready.wait(self.io); // This could hang the test...
awshttp.endpoint_override = self.listening_uri;
if (awshttp.endpoint_override == null) return error.TestSetupStartFailure;
std.log.debug("endpoint override set to {?s}", .{awshttp.endpoint_override});
@ -1352,7 +1362,7 @@ test "works against a live server" {
pub fn stop(self: *Server) !void {
if (self.thread == null) return; // thread not started, nothing to do
// post stop message
var client = std.http.Client{ .allocator = self.allocator };
var client = std.http.Client{ .allocator = self.allocator, .io = self.io };
_ = try client.fetch(.{ // we ignore return because that should just shut down
.method = .POST,
.payload = "quit",
@ -1363,24 +1373,24 @@ test "works against a live server" {
}
fn threadMain(self: *Server) !void {
const address = try std.net.Address.parseIp("127.0.0.1", 0);
var server = try address.listen(.{});
defer server.deinit();
const server_port = server.listen_address.in.getPort();
const address = try std.Io.net.IpAddress.parseLiteral("127.0.0.1:0");
var server = try address.listen(self.io, .{});
defer server.deinit(self.io);
const server_port = server.socket.address.getPort();
self.listening_uri = try std.fmt.allocPrint(self.allocator, "http://127.0.0.1:{d}", .{server_port});
defer {
self.allocator.free(self.listening_uri);
self.listening_uri = undefined;
}
self.ready.post();
self.ready.post(self.io);
while (true) {
var connection = try server.accept();
defer connection.stream.close();
var connection = try server.accept(self.io);
defer connection.close(self.io);
var recv_buffer: [4000]u8 = undefined;
var send_buffer: [4000]u8 = undefined;
var conn_reader = connection.stream.reader(&recv_buffer);
var conn_writer = connection.stream.writer(&send_buffer);
var http_server = std.http.Server.init(conn_reader.interface(), &conn_writer.interface);
var conn_reader = connection.reader(self.io, &recv_buffer);
var conn_writer = connection.writer(self.io, &send_buffer);
var http_server = std.http.Server.init(&conn_reader.interface, &conn_writer.interface);
while (http_server.reader.state == .ready) {
var req = try http_server.receiveHead();
if (req.head.content_length) |l| {
@ -1405,7 +1415,9 @@ test "works against a live server" {
}
};
const allocator = std.testing.allocator;
var server = Server{ .allocator = allocator };
const io = std.testing.io;
const map = std.process.Environ.Map.init(allocator);
var server = Server{ .allocator = allocator, .io = io };
try server.start();
var stopped = false;
defer if (!stopped) server.stop() catch log.err("error stopping server", .{});
@ -1425,7 +1437,7 @@ test "works against a live server" {
// }
const sts = (Services(.{.sts}){}).sts;
const client = aws.Client.init(std.testing.allocator, .{});
const client = aws.Client.init(std.testing.allocator, .{ .io = io, .map = &map });
const creds = aws_auth.Credentials.init(
allocator,
try allocator.dupe(u8, "ACCESS"),

View file

@ -32,10 +32,9 @@ pub fn log(
const prefix = "[" ++ @tagName(level) ++ "] " ++ scope_prefix;
// Print the message to stderr, silently ignoring any errors
std.debug.lockStdErr();
defer std.debug.unlockStdErr();
var stderr_writer = std.fs.File.stderr().writer(&.{});
const stderr = &stderr_writer.interface;
const locked = std.debug.lockStderr(&.{});
defer std.debug.unlockStderr();
const stderr = &locked.file_writer.interface;
nosuspend stderr.print(prefix ++ format ++ "\n", args) catch return;
}
@ -59,16 +58,15 @@ const Tests = enum {
rest_xml_work_with_s3,
};
pub fn main() anyerror!void {
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
defer _ = gpa.deinit();
const allocator = gpa.allocator();
pub fn main(init: std.process.Init) anyerror!void {
const allocator = init.gpa;
const io = init.io;
const map = init.environ_map;
var tests = try std.ArrayList(Tests).initCapacity(allocator, @typeInfo(Tests).@"enum".fields.len);
defer tests.deinit(allocator);
var args = try std.process.argsWithAllocator(allocator);
defer args.deinit();
var args = try init.minimal.args.iterateAllocator(init.arena.allocator());
var stdout_buf: [4096]u8 = undefined;
const stdout_raw = std.fs.File.stdout().writer(&stdout_buf);
const stdout_raw = std.Io.File.stdout().writer(io, &stdout_buf);
var stdout = stdout_raw.interface;
defer stdout.flush() catch @panic("could not flush stdout");
var arg0: ?[]const u8 = null;
@ -111,7 +109,7 @@ pub fn main() anyerror!void {
}
std.log.info("Start\n", .{});
const client_options = aws.ClientOptions{ .proxy = proxy };
const client_options = aws.ClientOptions{ .proxy = proxy, .io = io, .map = map };
var client = aws.Client.init(allocator, client_options);
const options = aws.Options{
.region = "us-west-2",
@ -373,7 +371,8 @@ fn proxyFromString(string: []const u8) !std.http.Client.Proxy {
rc.protocol = .tls;
} else return error.InvalidScheme;
var split_iterator = std.mem.splitScalar(u8, remaining, ':');
rc.host = std.mem.trimRight(u8, split_iterator.first(), "/");
const host_str = std.mem.trimEnd(u8, split_iterator.first(), "/");
rc.host = try std.Io.net.HostName.init(host_str);
if (split_iterator.next()) |port|
rc.port = try std.fmt.parseInt(u16, port, 10);
return rc;

View file

@ -1,32 +1,27 @@
const std = @import("std");
const service_list = @import("service_manifest");
const expectEqualStrings = std.testing.expectEqualStrings;
pub fn Services(comptime service_imports: anytype) type {
if (service_imports.len == 0) return services;
// From here, the fields of our structure can be generated at comptime...
var fields: [serviceCount(service_imports)]std.builtin.Type.StructField = undefined;
const fields_len = serviceCount(service_imports);
var field_names: [fields_len][]const u8 = undefined;
var field_types: [fields_len]type = undefined;
var field_attrs: [fields_len]std.builtin.Type.StructField.Attributes = undefined;
for (&fields, 0..) |*item, i| {
for (0..fields_len) |i| {
const import_field = @field(service_list, @tagName(service_imports[i]));
item.* = .{
.name = @tagName(service_imports[i]),
.type = @TypeOf(import_field),
field_names[i] = @tagName(service_imports[i]);
field_types[i] = @TypeOf(import_field);
field_attrs[i] = .{
.default_value_ptr = &import_field,
.is_comptime = false,
.alignment = std.meta.alignment(@TypeOf(import_field)),
.@"comptime" = false,
.@"align" = std.meta.alignment(field_types[i]),
};
}
// finally, generate the type
return @Type(.{
.@"struct" = .{
.layout = .auto,
.fields = &fields,
.decls = &[_]std.builtin.Type.Declaration{},
.is_tuple = false,
},
});
return @Struct(.auto, null, &field_names, &field_types, &field_attrs);
}
fn serviceCount(desired_services: anytype) usize {
@ -39,17 +34,23 @@ fn serviceCount(desired_services: anytype) usize {
pub const services = service_list;
test "services includes sts" {
try expectEqualStrings("2011-06-15", services.sts.version.?);
try std.testing.expectEqualStrings("2011-06-15", services.sts.version.?);
}
test "sts includes get_caller_identity" {
try expectEqualStrings("GetCallerIdentity", services.sts.get_caller_identity.action_name);
try std.testing.expectEqualStrings("GetCallerIdentity", services.sts.get_caller_identity.action_name);
}
test "can get service and action name from request" {
// get request object. This call doesn't have parameters
const metadata = services.sts.get_caller_identity.Request.metaInfo();
try expectEqualStrings("2011-06-15", metadata.service_metadata.version.?);
try std.testing.expectEqualStrings("2011-06-15", metadata.service_metadata.version.?);
}
test "can filter services" {
const filtered_services = Services(.{ .sts, .wafv2 }){};
try expectEqualStrings("2011-06-15", filtered_services.sts.version.?);
try std.testing.expectEqualStrings("2011-06-15", filtered_services.sts.version.?);
}
test "can reify type" {
const F = Services(.{.lambda});
const info = @typeInfo(F).@"struct";
try std.testing.expectEqual(@as(usize, 1), info.fields.len);
try std.testing.expectEqualStrings("lambda", info.fields[0].name);
}

View file

@ -31,8 +31,8 @@ pub const Element = struct {
fn init(tag: []const u8, alloc: Allocator) Element {
return .{
.tag = tag,
.attributes = AttributeList{},
.children = ContentList{},
.attributes = .empty,
.children = .empty,
.allocator = alloc,
};
}

View file

@ -168,7 +168,7 @@ fn parseInternal(comptime T: type, element: *xml.Element, options: ParseOptions)
},
);
if (@errorReturnTrace()) |trace| {
std.debug.dumpStackTrace(trace.*);
std.debug.dumpStackTrace(trace);
}
}
return e;
@ -192,9 +192,7 @@ fn parseInternal(comptime T: type, element: *xml.Element, options: ParseOptions)
e,
},
);
if (@errorReturnTrace()) |trace| {
std.debug.dumpStackTrace(trace.*);
}
std.debug.dumpCurrentStackTrace(.{});
}
return e;
};
@ -381,7 +379,7 @@ fn parseInternal(comptime T: type, element: *xml.Element, options: ParseOptions)
log.debug("type = {s}, style = {s}, ptr_info.child == {s}, element = {s}", .{ @typeName(T), @tagName(array_style), @typeName(ptr_info.child), element.tag });
var children = std.ArrayList(ptr_info.child){};
var children = std.ArrayList(ptr_info.child).empty;
defer children.deinit(allocator);
switch (array_style) {

View file

@ -1,3 +1,3 @@
{
"ignore": ["lib/json/src/json.zig"]
"ignore": ["lib/json/src/json.zig", "codegen/src/Hasher.zig"]
}