Compare commits

..

No commits in common. "master" and "zig-0.15.x" have entirely different histories.

28 changed files with 438 additions and 470 deletions

View file

@@ -3,7 +3,7 @@ on:
workflow_dispatch: workflow_dispatch:
push: push:
branches: branches:
- 'zig-0.15.x' - 'zig-0.14.x'
env: env:
ACTIONS_RUNTIME_TOKEN: ${{ secrets.GITHUB_TOKEN }} ACTIONS_RUNTIME_TOKEN: ${{ secrets.GITHUB_TOKEN }}
ACTIONS_RUNTIME_URL: ${{ env.GITHUB_SERVER_URL }}/api/actions_pipeline/ ACTIONS_RUNTIME_URL: ${{ env.GITHUB_SERVER_URL }}/api/actions_pipeline/
@@ -18,9 +18,11 @@ jobs:
- name: Check out repository code - name: Check out repository code
uses: actions/checkout@v4 uses: actions/checkout@v4
with: with:
ref: zig-0.15.x ref: zig-0.14.x
- name: Setup Zig - name: Setup Zig
uses: https://codeberg.org/mlugg/setup-zig@v2.2.1 uses: https://codeberg.org/mlugg/setup-zig@v2.2.1
with:
version: 0.14.0
- name: Run smoke test - name: Run smoke test
run: zig build smoke-test --verbose run: zig build smoke-test --verbose
- name: Run full tests - name: Run full tests

1
.gitignore vendored
View file

@@ -12,4 +12,3 @@ src/git_version.zig
zig-out zig-out
core core
.zig-cache .zig-cache
zig-pkg/

View file

@@ -1,5 +1,5 @@
[tools] [tools]
prek = "0.3.1" prek = "0.3.1"
"ubi:DonIsaac/zlint" = "0.7.9" "ubi:DonIsaac/zlint" = "0.7.9"
zig = "0.16.0" zig = "0.15.2"
zls = "0.16.0" zls = "0.15.1"

15
Makefile Normal file
View file

@@ -0,0 +1,15 @@
start-hand-test: src/main.zig src/aws.zig src/xml.zig
@zig build-exe -static -I/usr/local/include -Isrc/ -lc --strip \
--name start-hand-test src/main.zig src/bitfield-workaround.c \
/usr/local/lib64/libaws-c-*.a \
/usr/local/lib64/libs2n.a \
/usr/local/lib/libcrypto.a \
/usr/local/lib/libssl.a
elasticurl: curl.c
@zig build-exe -static -I/usr/local/include -Isrc/ -lc --strip \
--name elasticurl curl.c \
/usr/local/lib64/libaws-c-*.a \
/usr/local/lib64/libs2n.a \
/usr/local/lib/libcrypto.a \
/usr/local/lib/libssl.a

View file

@@ -1,17 +1,17 @@
AWS SDK for Zig AWS SDK for Zig
=============== ===============
[Zig 0.16.0](https://ziglang.org/download/#release-0.16.0): [Zig 0.15.1](https://ziglang.org/download/#release-0.15.1):
[![Build Status: Zig 0.16.0](https://git.lerch.org/lobo/aws-sdk-for-zig/actions/workflows/build.yaml/badge.svg)](https://git.lerch.org/lobo/aws-sdk-for-zig/actions?workflow=build.yaml&state=closed) [![Build Status: Zig 0.15.1](https://git.lerch.org/lobo/aws-sdk-for-zig/actions/workflows/build.yaml/badge.svg)](https://git.lerch.org/lobo/aws-sdk-for-zig/actions?workflow=build.yaml&state=closed)
[Nightly Zig](https://ziglang.org/download/): [Nightly Zig](https://ziglang.org/download/):
[![Build Status: Zig Nightly](https://git.lerch.org/lobo/aws-sdk-for-zig/actions/workflows/zig-nightly.yaml/badge.svg)](https://git.lerch.org/lobo/aws-sdk-for-zig/actions?workflow=zig-nightly.yaml&state=closed) [![Build Status: Zig Nightly](https://git.lerch.org/lobo/aws-sdk-for-zig/actions/workflows/zig-nightly.yaml/badge.svg)](https://git.lerch.org/lobo/aws-sdk-for-zig/actions?workflow=zig-nightly.yaml&state=closed)
[Zig 0.15.2](https://ziglang.org/download/#release-0.15.2): [Zig 0.14.1](https://ziglang.org/download/#release-0.14.1):
[![Build Status: Zig 0.15.2](https://git.lerch.org/lobo/aws-sdk-for-zig/actions/workflows/zig-previous.yaml/badge.svg)](https://git.lerch.org/lobo/aws-sdk-for-zig/actions?workflow=zig-previous.yaml&state=closed) [![Build Status: Zig 0.14.x](https://git.lerch.org/lobo/aws-sdk-for-zig/actions/workflows/zig-previous.yaml/badge.svg)](https://git.lerch.org/lobo/aws-sdk-for-zig/actions?workflow=zig-previous.yaml&state=closed)
Current executable size for the demo is 980k after compiling with -Doptimize=ReleaseSmall Current executable size for the demo is 980k after compiling with -Doptimize=ReleaseSmall
in x86_64-linux, and will vary based on services used. Tested targets: in x86_64-linux, and will vary based on services used. Tested targets:
@@ -34,7 +34,7 @@ Branches
a new zig release appears. Expect significant delays in any a new zig release appears. Expect significant delays in any
build failures (PRs always welcome!). build failures (PRs always welcome!).
* **master**: This branch tracks the latest released zig version * **master**: This branch tracks the latest released zig version
* **zig-0.15.x**: This branch tracks the 0.15.2 released zig version. * **zig-0.14.x**: This branch tracks the 0.14/0.14.1 released zig versions.
Support for these previous version is best effort, generally Support for these previous version is best effort, generally
degrading over time. Fixes will generally appear in master, then degrading over time. Fixes will generally appear in master, then
backported into the previous version. backported into the previous version.

View file

@@ -47,7 +47,7 @@ pub fn build(b: *Builder) !void {
.target = target, .target = target,
.optimize = optimize, .optimize = optimize,
}); });
configure(mod_exe, dep_mods); configure(mod_exe, dep_mods, true);
const exe = b.addExecutable(.{ const exe = b.addExecutable(.{
.name = "demo", .name = "demo",
@@ -72,7 +72,7 @@ pub fn build(b: *Builder) !void {
.target = b.graph.host, .target = b.graph.host,
.optimize = if (b.verbose) .Debug else .ReleaseSafe, .optimize = if (b.verbose) .Debug else .ReleaseSafe,
}); });
configure(cg_mod, dep_mods); configure(cg_mod, dep_mods, false);
const cg_exe = b.addExecutable(.{ const cg_exe = b.addExecutable(.{
.name = "codegen", .name = "codegen",
@@ -133,7 +133,7 @@ pub fn build(b: *Builder) !void {
// consuming build.zig files to be able to use the SDK at build time for // consuming build.zig files to be able to use the SDK at build time for
// things like code deployments, e.g. https://git.lerch.org/lobo/lambda-zig // things like code deployments, e.g. https://git.lerch.org/lobo/lambda-zig
const has_pre_generated = const has_pre_generated =
if (b.build_root.handle.access(b.graph.io, "src/models/service_manifest.zig", .{})) true else |_| false; if (b.build_root.handle.access("src/models/service_manifest.zig", .{})) true else |_| false;
// Only depend on codegen if we don't have pre-generated models // Only depend on codegen if we don't have pre-generated models
if (!has_pre_generated) if (!has_pre_generated)
@@ -150,7 +150,7 @@ pub fn build(b: *Builder) !void {
.target = target, .target = target,
.optimize = optimize, .optimize = optimize,
}); });
configure(service_manifest_module, dep_mods); configure(service_manifest_module, dep_mods, true);
mod_exe.addImport("service_manifest", service_manifest_module); mod_exe.addImport("service_manifest", service_manifest_module);
@@ -161,13 +161,13 @@ pub fn build(b: *Builder) !void {
.optimize = optimize, .optimize = optimize,
}); });
mod_aws.addImport("service_manifest", service_manifest_module); mod_aws.addImport("service_manifest", service_manifest_module);
configure(mod_aws, dep_mods); configure(mod_aws, dep_mods, true);
// Expose module to others // Expose module to others
const mod_aws_signing = b.addModule("aws-signing", .{ const mod_aws_signing = b.addModule("aws-signing", .{
.root_source_file = b.path("src/aws_signing.zig"), .root_source_file = b.path("src/aws_signing.zig"),
}); });
configure(mod_aws_signing, dep_mods); configure(mod_aws_signing, dep_mods, false);
// Similar to creating the run step earlier, this exposes a `test` step to // Similar to creating the run step earlier, this exposes a `test` step to
// the `zig build --help` menu, providing a way for the user to request // the `zig build --help` menu, providing a way for the user to request
@@ -197,7 +197,7 @@ pub fn build(b: *Builder) !void {
.optimize = optimize, .optimize = optimize,
}); });
mod_unit_tests.addImport("service_manifest", service_manifest_module); mod_unit_tests.addImport("service_manifest", service_manifest_module);
configure(mod_unit_tests, dep_mods); configure(mod_unit_tests, dep_mods, true);
// Creates a step for unit testing. This only builds the test executable // Creates a step for unit testing. This only builds the test executable
// but does not run it. // but does not run it.
@@ -250,11 +250,12 @@ pub fn build(b: *Builder) !void {
package.dependOn(&pkg_step.step); package.dependOn(&pkg_step.step);
} }
fn configure(compile: *std.Build.Module, modules: std.StringHashMap(*std.Build.Module)) void { fn configure(compile: *std.Build.Module, modules: std.StringHashMap(*std.Build.Module), include_time: bool) void {
compile.addImport("smithy", modules.get("smithy").?); compile.addImport("smithy", modules.get("smithy").?);
compile.addImport("date", modules.get("date").?); compile.addImport("date", modules.get("date").?);
compile.addImport("json", modules.get("json").?); compile.addImport("json", modules.get("json").?);
compile.addImport("case", modules.get("case").?); compile.addImport("case", modules.get("case").?);
if (include_time) compile.addImport("zeit", modules.get("zeit").?);
} }
fn getDependencyModules(b: *std.Build, args: anytype) !std.StringHashMap(*std.Build.Module) { fn getDependencyModules(b: *std.Build, args: anytype) !std.StringHashMap(*std.Build.Module) {
@@ -265,6 +266,10 @@ fn getDependencyModules(b: *std.Build, args: anytype) !std.StringHashMap(*std.Bu
const mod_smithy = dep_smithy.module("smithy"); const mod_smithy = dep_smithy.module("smithy");
try result.putNoClobber("smithy", mod_smithy); try result.putNoClobber("smithy", mod_smithy);
const dep_zeit = b.dependency("zeit", args);
const mod_zeit = dep_zeit.module("zeit");
try result.putNoClobber("zeit", mod_zeit);
const dep_case = b.dependency("case", args); const dep_case = b.dependency("case", args);
const mod_case = dep_case.module("case"); const mod_case = dep_case.module("case");
try result.putNoClobber("case", mod_case); try result.putNoClobber("case", mod_case);
@@ -324,7 +329,6 @@ const PackageStep = struct {
_ = options; _ = options;
const self: *PackageStep = @fieldParentPtr("step", step); const self: *PackageStep = @fieldParentPtr("step", step);
const b = step.owner; const b = step.owner;
const io = b.graph.io;
// Get the path to generated models // Get the path to generated models
const models_path = self.cg_output_dir.getPath2(b, &self.step); const models_path = self.cg_output_dir.getPath2(b, &self.step);
@@ -332,17 +336,17 @@ const PackageStep = struct {
// Create output directory for packaging // Create output directory for packaging
const package_dir = b.pathJoin(&.{ "zig-out", "package" }); const package_dir = b.pathJoin(&.{ "zig-out", "package" });
const models_dest_dir = b.pathJoin(&.{ package_dir, "src", "models" }); const models_dest_dir = b.pathJoin(&.{ package_dir, "src", "models" });
std.Io.Dir.cwd().createDirPath(io, models_dest_dir) catch |err| { std.fs.cwd().makePath(models_dest_dir) catch |err| {
return step.fail("Failed to create package directory: {}", .{err}); return step.fail("Failed to create package directory: {}", .{err});
}; };
// Copy all source files to package directory // Copy all source files to package directory
for (package_files) |file_name| for (package_files) |file_name|
copyFile(io, b, b.build_root.handle, file_name, package_dir) catch {}; copyFile(b, b.build_root.handle, file_name, package_dir) catch {};
// Copy directories // Copy directories
for (package_dirs) |dir_name| for (package_dirs) |dir_name|
copyDirRecursive(io, b, b.build_root.handle, dir_name, package_dir) catch |err| { copyDirRecursive(b, b.build_root.handle, dir_name, package_dir) catch |err| {
return step.fail("Failed to copy directory '{s}': {}", .{ dir_name, err }); return step.fail("Failed to copy directory '{s}': {}", .{ dir_name, err });
}; };
@@ -354,24 +358,24 @@ const PackageStep = struct {
step.result_cached = false; step.result_cached = false;
} }
fn copyFile(io: std.Io, b: *std.Build, src_dir: std.Io.Dir, file_path: []const u8, dest_prefix: []const u8) !void { fn copyFile(b: *std.Build, src_dir: std.fs.Dir, file_path: []const u8, dest_prefix: []const u8) !void {
const dest_path = b.pathJoin(&.{ dest_prefix, file_path }); const dest_path = b.pathJoin(&.{ dest_prefix, file_path });
// Ensure parent directory exists // Ensure parent directory exists
if (std.fs.path.dirname(dest_path)) |parent| if (std.fs.path.dirname(dest_path)) |parent|
std.Io.Dir.cwd().createDirPath(io, parent) catch {}; std.fs.cwd().makePath(parent) catch {};
src_dir.copyFile(file_path, std.Io.Dir.cwd(), dest_path, io, .{}) catch return; src_dir.copyFile(file_path, std.fs.cwd(), dest_path, .{}) catch return;
} }
fn copyDirRecursive(io: std.Io, b: *std.Build, src_base: std.Io.Dir, dir_path: []const u8, dest_prefix: []const u8) !void { fn copyDirRecursive(b: *std.Build, src_base: std.fs.Dir, dir_path: []const u8, dest_prefix: []const u8) !void {
var src_dir = src_base.openDir(io, dir_path, .{ .iterate = true }) catch return; var src_dir = src_base.openDir(dir_path, .{ .iterate = true }) catch return;
defer src_dir.close(io); defer src_dir.close();
var walker = try src_dir.walk(b.allocator); var walker = try src_dir.walk(b.allocator);
defer walker.deinit(); defer walker.deinit();
while (try walker.next(io)) |entry| { while (try walker.next()) |entry| {
// Skip zig build artifact directories // Skip zig build artifact directories
if (std.mem.indexOf(u8, entry.path, "zig-out") != null or if (std.mem.indexOf(u8, entry.path, "zig-out") != null or
std.mem.indexOf(u8, entry.path, ".zig-cache") != null or std.mem.indexOf(u8, entry.path, ".zig-cache") != null or
@@ -382,22 +386,22 @@ const PackageStep = struct {
const dest_path = b.pathJoin(&.{ dest_prefix, dir_path, entry.path }); const dest_path = b.pathJoin(&.{ dest_prefix, dir_path, entry.path });
switch (entry.kind) { switch (entry.kind) {
.directory => std.Io.Dir.cwd().createDirPath(io, dest_path) catch {}, .directory => std.fs.cwd().makePath(dest_path) catch {},
.file => { .file => {
// Ensure parent directory exists // Ensure parent directory exists
if (std.fs.path.dirname(dest_path)) |parent| { if (std.fs.path.dirname(dest_path)) |parent| {
std.Io.Dir.cwd().createDirPath(io, parent) catch {}; std.fs.cwd().makePath(parent) catch {};
} }
src_base.copyFile(src_path, std.Io.Dir.cwd(), dest_path, io, .{}) catch {}; src_base.copyFile(src_path, std.fs.cwd(), dest_path, .{}) catch {};
}, },
.sym_link => { .sym_link => {
var link_buf: [std.fs.max_path_bytes]u8 = undefined; var link_buf: [std.fs.max_path_bytes]u8 = undefined;
const link_target = entry.dir.readLink(io, entry.basename, &link_buf) catch continue; const link_target = entry.dir.readLink(entry.basename, &link_buf) catch continue;
// Ensure parent directory exists // Ensure parent directory exists
if (std.fs.path.dirname(dest_path)) |parent| { if (std.fs.path.dirname(dest_path)) |parent| {
std.Io.Dir.cwd().createDirPath(io, parent) catch {}; std.fs.cwd().makePath(parent) catch {};
} }
std.Io.Dir.cwd().symLink(io, link_buf[0..link_target], dest_path, .{}) catch {}; std.fs.cwd().symLink(link_target, dest_path, .{}) catch {};
}, },
else => {}, else => {},
} }
@@ -405,17 +409,16 @@ const PackageStep = struct {
} }
fn copyGeneratedModels(b: *std.Build, models_path: []const u8, models_dest_dir: []const u8) !void { fn copyGeneratedModels(b: *std.Build, models_path: []const u8, models_dest_dir: []const u8) !void {
const io = b.graph.io; var models_dir = std.fs.cwd().openDir(models_path, .{ .iterate = true }) catch
var models_dir = std.Io.Dir.cwd().openDir(io, models_path, .{ .iterate = true }) catch
return error.ModelsNotFound; return error.ModelsNotFound;
defer models_dir.close(io); defer models_dir.close();
var iter = models_dir.iterate(); var iter = models_dir.iterate();
while (try iter.next(io)) |entry| { while (try iter.next()) |entry| {
if (entry.kind != .file) continue; if (entry.kind != .file) continue;
const dest_path = b.pathJoin(&.{ models_dest_dir, entry.name }); const dest_path = b.pathJoin(&.{ models_dest_dir, entry.name });
models_dir.copyFile(entry.name, std.Io.Dir.cwd(), dest_path, io, .{}) catch continue; models_dir.copyFile(entry.name, std.fs.cwd(), dest_path, .{}) catch continue;
} }
} }
}; };

View file

@@ -11,7 +11,7 @@
"README.md", "README.md",
"LICENSE", "LICENSE",
}, },
.minimum_zig_version = "0.16.0", .minimum_zig_version = "0.15.1",
.dependencies = .{ .dependencies = .{
.smithy = .{ .smithy = .{
@@ -22,6 +22,10 @@
.url = "https://github.com/aws/aws-sdk-go-v2/archive/refs/tags/release-2025-05-05.tar.gz", .url = "https://github.com/aws/aws-sdk-go-v2/archive/refs/tags/release-2025-05-05.tar.gz",
.hash = "N-V-__8AAKWdeiawujEcrfukQbb8lLAiQIRT0uG5gCcm4b7W", .hash = "N-V-__8AAKWdeiawujEcrfukQbb8lLAiQIRT0uG5gCcm4b7W",
}, },
.zeit = .{
.url = "git+https://github.com/rockorager/zeit?ref=zig-0.15#ed2ca60db118414bda2b12df2039e33bad3b0b88",
.hash = "zeit-0.6.0-5I6bk0J9AgCVa0nnyL0lNY9Xa9F68hHq-ZarhuXNV-Jb",
},
.date = .{ .date = .{
.path = "lib/date", .path = "lib/date",
}, },
@@ -29,8 +33,8 @@
.path = "lib/json", .path = "lib/json",
}, },
.case = .{ .case = .{
.url = "git+https://github.com/elerch/case?ref=zig-0.16.0#82017a92e179031f21896d02262ae1e216459e4f", .url = "git+https://github.com/travisstaloch/case.git#f8003fe5f93b65f673d10d41323e347225e8cb87",
.hash = "case-0.0.1-chGYq1fEAAAN4h3YRmkh9OTDvuUyjmoXr6PhYAUzjU0D", .hash = "case-0.0.1-chGYqx_EAADaGJjmoln5M1iMBDTrMdd8to5wdEVpfXm4",
}, },
}, },
} }

View file

@@ -39,7 +39,7 @@ pub fn indent(self: @This()) GenerationState {
pub fn deindent(self: @This()) GenerationState { pub fn deindent(self: @This()) GenerationState {
var new_state = self.clone(); var new_state = self.clone();
new_state.indent_level = if (new_state.indent_level == 0) 0 else new_state.indent_level - 1; new_state.indent_level = @max(0, new_state.indent_level - 1);
return new_state; return new_state;
} }

View file

@@ -8,7 +8,7 @@ pub const HashedFile = struct {
hash: [Hash.digest_length]u8, hash: [Hash.digest_length]u8,
failure: Error!void, failure: Error!void,
const Error = std.Io.File.OpenError || std.Io.File.ReadStreamingError || std.Io.File.StatError; const Error = std.fs.File.OpenError || std.fs.File.ReadError || std.fs.File.StatError;
fn lessThan(context: void, lhs: *const HashedFile, rhs: *const HashedFile) bool { fn lessThan(context: void, lhs: *const HashedFile, rhs: *const HashedFile) bool {
_ = context; _ = context;
@@ -76,13 +76,13 @@ pub fn hex64(x: u64) [16]u8 {
return result; return result;
} }
pub const walkerFn = *const fn (std.Io.Dir.Walker.Entry) bool; pub const walkerFn = *const fn (std.fs.Dir.Walker.Entry) bool;
fn included(entry: std.Io.Dir.Walker.Entry) bool { fn included(entry: std.fs.Dir.Walker.Entry) bool {
_ = entry; _ = entry;
return true; return true;
} }
fn excluded(entry: std.Io.Dir.Walker.Entry) bool { fn excluded(entry: std.fs.Dir.Walker.Entry) bool {
_ = entry; _ = entry;
return false; return false;
} }
@@ -94,33 +94,33 @@ pub const ComputeDirectoryOptions = struct {
}; };
pub fn computeDirectoryHash( pub fn computeDirectoryHash(
allocator: std.mem.Allocator, thread_pool: *std.Thread.Pool,
io: std.Io, dir: std.fs.Dir,
dir: std.Io.Dir,
options: *ComputeDirectoryOptions, options: *ComputeDirectoryOptions,
) ![Hash.digest_length]u8 { ) ![Hash.digest_length]u8 {
const gpa = thread_pool.allocator;
// We'll use an arena allocator for the path name strings since they all // We'll use an arena allocator for the path name strings since they all
// need to be in memory for sorting. // need to be in memory for sorting.
var arena_instance = std.heap.ArenaAllocator.init(allocator); var arena_instance = std.heap.ArenaAllocator.init(gpa);
defer arena_instance.deinit(); defer arena_instance.deinit();
const arena = arena_instance.allocator(); const arena = arena_instance.allocator();
// Collect all files, recursively, then sort. // Collect all files, recursively, then sort.
// Normally we're looking at around 300 model files // Normally we're looking at around 300 model files
var all_files = try std.ArrayList(*HashedFile).initCapacity(allocator, 300); var all_files = try std.ArrayList(*HashedFile).initCapacity(gpa, 300);
defer all_files.deinit(allocator); defer all_files.deinit(gpa);
var walker = try dir.walk(allocator); var walker = try dir.walk(gpa);
defer walker.deinit(); defer walker.deinit();
{ {
// The final hash will be a hash of each file hashed independently. This // The final hash will be a hash of each file hashed independently. This
// allows hashing in parallel. // allows hashing in parallel.
var g: std.Io.Group = .init; var wait_group: std.Thread.WaitGroup = .{};
errdefer g.cancel(io); defer wait_group.wait();
while (try walker.next(io)) |entry| { while (try walker.next()) |entry| {
switch (entry.kind) { switch (entry.kind) {
.directory => continue, .directory => continue,
.file => {}, .file => {},
@@ -128,7 +128,7 @@ pub fn computeDirectoryHash(
} }
if (options.isExcluded(entry) or !options.isIncluded(entry)) if (options.isExcluded(entry) or !options.isIncluded(entry))
continue; continue;
const alloc = if (options.needFileHashes) allocator else arena; const alloc = if (options.needFileHashes) gpa else arena;
const hashed_file = try alloc.create(HashedFile); const hashed_file = try alloc.create(HashedFile);
const fs_path = try alloc.dupe(u8, entry.path); const fs_path = try alloc.dupe(u8, entry.path);
hashed_file.* = .{ hashed_file.* = .{
@@ -137,11 +137,11 @@ pub fn computeDirectoryHash(
.hash = undefined, // to be populated by the worker .hash = undefined, // to be populated by the worker
.failure = undefined, // to be populated by the worker .failure = undefined, // to be populated by the worker
}; };
g.async(io, workerHashFile, .{ io, dir, hashed_file, &g }); wait_group.start();
try thread_pool.spawn(workerHashFile, .{ dir, hashed_file, &wait_group });
try all_files.append(allocator, hashed_file); try all_files.append(gpa, hashed_file);
} }
try g.await(io);
} }
std.mem.sort(*HashedFile, all_files.items, {}, HashedFile.lessThan); std.mem.sort(*HashedFile, all_files.items, {}, HashedFile.lessThan);
@@ -156,26 +156,23 @@ pub fn computeDirectoryHash(
hasher.update(&hashed_file.hash); hasher.update(&hashed_file.hash);
} }
if (any_failures) return error.DirectoryHashUnavailable; if (any_failures) return error.DirectoryHashUnavailable;
if (options.needFileHashes) options.fileHashes = try all_files.toOwnedSlice(allocator); if (options.needFileHashes) options.fileHashes = try all_files.toOwnedSlice(gpa);
return hasher.finalResult(); return hasher.finalResult();
} }
fn workerHashFile(io: std.Io, dir: std.Io.Dir, hashed_file: *HashedFile, wg: *std.Io.Group) void { fn workerHashFile(dir: std.fs.Dir, hashed_file: *HashedFile, wg: *std.Thread.WaitGroup) void {
_ = wg; // assume here that 0.16.0 Io.Group no longer needs to be notified at the time of completion defer wg.finish();
hashed_file.failure = hashFileFallible(io, dir, hashed_file); hashed_file.failure = hashFileFallible(dir, hashed_file);
} }
fn hashFileFallible(io: std.Io, dir: std.Io.Dir, hashed_file: *HashedFile) HashedFile.Error!void { fn hashFileFallible(dir: std.fs.Dir, hashed_file: *HashedFile) HashedFile.Error!void {
var buf: [8000]u8 = undefined; var buf: [8000]u8 = undefined;
var file = try dir.openFile(io, hashed_file.fs_path, .{}); var file = try dir.openFile(hashed_file.fs_path, .{});
defer file.close(io); defer file.close();
var hasher = Hash.init(.{}); var hasher = Hash.init(.{});
hasher.update(hashed_file.normalized_path); hasher.update(hashed_file.normalized_path);
hasher.update(&.{ 0, @intFromBool(try isExecutable(io, file)) }); hasher.update(&.{ 0, @intFromBool(try isExecutable(file)) });
while (true) { while (true) {
const bytes_read = file.readStreaming(io, &.{&buf}) catch |err| switch (err) { const bytes_read = try file.read(&buf);
error.EndOfStream => break,
else => return err,
};
if (bytes_read == 0) break; if (bytes_read == 0) break;
hasher.update(buf[0..bytes_read]); hasher.update(buf[0..bytes_read]);
} }
@@ -200,7 +197,7 @@ fn normalizePath(arena: std.mem.Allocator, fs_path: []const u8) ![]const u8 {
return normalized; return normalized;
} }
fn isExecutable(io: std.Io, file: std.Io.File) !bool { fn isExecutable(file: std.fs.File) !bool {
if (builtin.os.tag == .windows) { if (builtin.os.tag == .windows) {
// TODO check the ACL on Windows. // TODO check the ACL on Windows.
// Until this is implemented, this could be a false negative on // Until this is implemented, this could be a false negative on
@@ -208,7 +205,7 @@ fn isExecutable(io: std.Io, file: std.Io.File) !bool {
// when unpacking the tarball. // when unpacking the tarball.
return false; return false;
} else { } else {
const stat = try file.stat(io); const stat = try file.stat();
return stat.kind == .file and (stat.permissions.toMode() & std.posix.S.IXUSR != 0); return (stat.mode & std.posix.S.IXUSR) != 0;
} }
} }

View file

@@ -25,21 +25,23 @@ const next_version = std.SemanticVersion.parse(next_version_str) catch unreachab
const zig_version = @import("builtin").zig_version; const zig_version = @import("builtin").zig_version;
const is_next = zig_version.order(next_version) == .eq or zig_version.order(next_version) == .gt; const is_next = zig_version.order(next_version) == .eq or zig_version.order(next_version) == .gt;
pub fn main(init: std.process.Init) anyerror!void { pub fn main() anyerror!void {
const io = init.io; const root_progress_node = std.Progress.start(.{});
const root_progress_node = std.Progress.start(io, .{});
defer root_progress_node.end(); defer root_progress_node.end();
const allocator = init.arena.allocator(); var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
const allocator = arena.allocator();
const args = try init.minimal.args.toSlice(allocator); const args = try std.process.argsAlloc(allocator);
var stdout_writer = std.Io.File.stdout().writer(io, &.{}); defer std.process.argsFree(allocator, args);
var stdout_writer = std.fs.File.stdout().writer(&.{});
const stdout = &stdout_writer.interface; const stdout = &stdout_writer.interface;
var output_dir = std.Io.Dir.cwd(); var output_dir = std.fs.cwd();
defer if (output_dir.handle > 0) output_dir.close(io); defer if (output_dir.fd > 0) output_dir.close();
var models_dir: ?std.Io.Dir = null; var models_dir: ?std.fs.Dir = null;
defer if (models_dir) |*m| m.close(io); defer if (models_dir) |*m| m.close();
for (args, 0..) |arg, i| { for (args, 0..) |arg, i| {
if (std.mem.eql(u8, "--help", arg) or if (std.mem.eql(u8, "--help", arg) or
std.mem.eql(u8, "-h", arg)) std.mem.eql(u8, "-h", arg))
@@ -49,20 +51,17 @@ pub fn main(init: std.process.Init) anyerror!void {
try stdout.print(" --output specifies an output directory, otherwise the current working directory will be used\n", .{}); try stdout.print(" --output specifies an output directory, otherwise the current working directory will be used\n", .{});
std.process.exit(0); std.process.exit(0);
} }
if (std.mem.eql(u8, "--output", arg)) { if (std.mem.eql(u8, "--output", arg))
try output_dir.createDirPath(io, args[i + 1]); output_dir = try output_dir.makeOpenPath(args[i + 1], .{});
output_dir = try std.Io.Dir.cwd().openDir(io, args[i + 1], .{ .iterate = true });
}
if (std.mem.eql(u8, "--models", arg)) if (std.mem.eql(u8, "--models", arg))
models_dir = try std.Io.Dir.cwd().openDir(io, args[i + 1], .{ .iterate = true }); models_dir = try std.fs.cwd().openDir(args[i + 1], .{ .iterate = true });
} }
var files_processed: usize = 0; var manifest_file = try output_dir.createFile("service_manifest.zig", .{});
{ defer manifest_file.close();
var manifest_file = try output_dir.createFile(io, "service_manifest.zig", .{}); var manifest = manifest_file.writer(&manifest_buf).interface;
defer manifest_file.close(io);
var manifest = manifest_file.writer(io, &manifest_buf).interface;
defer manifest.flush() catch @panic("Could not flush service manifest"); defer manifest.flush() catch @panic("Could not flush service manifest");
var files_processed: usize = 0;
var skip_next = true; var skip_next = true;
for (args) |arg| { for (args) |arg| {
if (skip_next) { if (skip_next) {
@@ -80,39 +79,30 @@ pub fn main(init: std.process.Init) anyerror!void {
skip_next = true; skip_next = true;
continue; continue;
} }
try processFile(io, arg, output_dir, &manifest); try processFile(arg, output_dir, &manifest);
files_processed += 1; files_processed += 1;
} }
}
if (files_processed == 0) { if (files_processed == 0) {
// no files specified, look for json files in models directory or cwd // no files specified, look for json files in models directory or cwd
// this is our normal mode of operation and where initial optimizations // this is our normal mode of operation and where initial optimizations
// can be made // can be made
if (models_dir) |m| { if (models_dir) |m| {
var cwd = try std.Io.Dir.cwd().openDir(io, ".", .{}); var cwd = try std.fs.cwd().openDir(".", .{});
defer cwd.close(io); defer cwd.close();
defer std.process.setCurrentDir(io, cwd) catch unreachable; defer cwd.setAsCwd() catch unreachable;
try std.process.setCurrentDir(io, m); try m.setAsCwd();
try processDirectories(io, m, output_dir, &root_progress_node); try processDirectories(m, output_dir, &root_progress_node);
} }
} }
if (args.len == 0) if (args.len == 0)
_ = try generateServices(allocator, io, ";", std.Io.File.stdin(), stdout); _ = try generateServices(allocator, ";", std.fs.File.stdin(), stdout);
if (verbose) { if (verbose) {
const output_path = try output_dir.realPathFileAlloc(io, ".", allocator); const output_path = try output_dir.realpathAlloc(allocator, ".");
// Build system suppresses stdout, we have to send this to stderr
std.debug.print("Output path: {s}\n", .{output_path}); std.debug.print("Output path: {s}\n", .{output_path});
std.debug.print(
\\Note: if this is run from within zig build, output from verbose mode will
\\ trigger zig to say 'failed command'. This program has succeeded,
\\ and the message from the build system will not effect actual processing
\\ of the build. It is simply indicative of the build runner detecting
\\ output
, .{});
} }
} }
@@ -120,23 +110,25 @@ const OutputManifest = struct {
model_dir_hash_digest: [Hasher.hex_multihash_len]u8, model_dir_hash_digest: [Hasher.hex_multihash_len]u8,
output_dir_hash_digest: [Hasher.hex_multihash_len]u8, output_dir_hash_digest: [Hasher.hex_multihash_len]u8,
}; };
fn processDirectories(io: std.Io, models_dir: std.Io.Dir, output_dir: std.Io.Dir, parent_progress: *const std.Progress.Node) !void { fn processDirectories(models_dir: std.fs.Dir, output_dir: std.fs.Dir, parent_progress: *const std.Progress.Node) !void {
// Let's get ready to hash!! // Let's get ready to hash!!
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit(); defer arena.deinit();
const allocator = arena.allocator(); const allocator = arena.allocator();
var thread_pool: std.Thread.Pool = undefined;
try thread_pool.init(.{ .allocator = allocator });
defer thread_pool.deinit();
const count, var calculated_manifest = const count, var calculated_manifest =
try calculateDigests( try calculateDigests(
allocator,
io,
models_dir, models_dir,
output_dir, output_dir,
&thread_pool,
); );
const output_stored_manifest = if (is_next) const output_stored_manifest = if (is_next)
output_dir.readFileAlloc(io, "output_manifest.json", allocator, .unlimited) catch null output_dir.readFileAlloc("output_manifest.json", allocator, .unlimited) catch null
else else
output_dir.readFileAlloc(io, allocator, "output_manifest.json", std.math.maxInt(usize)) catch null; output_dir.readFileAlloc(allocator, "output_manifest.json", std.math.maxInt(usize)) catch null;
if (output_stored_manifest) |o| { if (output_stored_manifest) |o| {
// we have a stored manifest. Parse it and compare to our calculations // we have a stored manifest. Parse it and compare to our calculations
// we can leak as we're using an arena allocator // we can leak as we're using an arena allocator
@@ -151,43 +143,43 @@ fn processDirectories(io: std.Io, models_dir: std.Io.Dir, output_dir: std.Io.Dir
} }
} }
// Do this in a brain dead fashion from here, no optimization // Do this in a brain dead fashion from here, no optimization
const manifest_file = try output_dir.createFile(io, "service_manifest.zig", .{}); const manifest_file = try output_dir.createFile("service_manifest.zig", .{});
defer manifest_file.close(io); defer manifest_file.close();
var manifest = manifest_file.writer(io, &manifest_buf); var manifest = manifest_file.writer(&manifest_buf);
defer manifest.interface.flush() catch @panic("Error flushing service_manifest.zig"); defer manifest.interface.flush() catch @panic("Error flushing service_manifest.zig");
var mi = models_dir.iterate(); var mi = models_dir.iterate();
const generating_models_progress = parent_progress.start("generating models", count); const generating_models_progress = parent_progress.start("generating models", count);
defer generating_models_progress.end(); defer generating_models_progress.end();
while (try mi.next(io)) |e| { while (try mi.next()) |e| {
if ((e.kind == .file or e.kind == .sym_link) and std.mem.endsWith(u8, e.name, ".json")) { if ((e.kind == .file or e.kind == .sym_link) and std.mem.endsWith(u8, e.name, ".json")) {
try processFile(io, e.name, output_dir, &manifest.interface); try processFile(e.name, output_dir, &manifest.interface);
generating_models_progress.completeOne(); generating_models_progress.completeOne();
} }
} }
// re-calculate so we can store the manifest // re-calculate so we can store the manifest
model_digest = calculated_manifest.model_dir_hash_digest; model_digest = calculated_manifest.model_dir_hash_digest;
_, calculated_manifest = try calculateDigests(allocator, io, models_dir, output_dir); _, calculated_manifest = try calculateDigests(models_dir, output_dir, &thread_pool);
const data = try std.fmt.allocPrint(allocator, "{f}", .{std.json.fmt(calculated_manifest, .{ .whitespace = .indent_2 })}); const data = try std.fmt.allocPrint(allocator, "{f}", .{std.json.fmt(calculated_manifest, .{ .whitespace = .indent_2 })});
try output_dir.writeFile(io, .{ .sub_path = "output_manifest.json", .data = data }); try output_dir.writeFile(.{ .sub_path = "output_manifest.json", .data = data });
} }
var model_digest: ?[Hasher.hex_multihash_len]u8 = null; var model_digest: ?[Hasher.hex_multihash_len]u8 = null;
fn calculateDigests(allocator: std.mem.Allocator, io: std.Io, models_dir: std.Io.Dir, output_dir: std.Io.Dir) !struct { usize, OutputManifest } { fn calculateDigests(models_dir: std.fs.Dir, output_dir: std.fs.Dir, thread_pool: *std.Thread.Pool) !struct { usize, OutputManifest } {
const Include = struct { const Include = struct {
threadlocal var count: usize = 0; threadlocal var count: usize = 0;
pub fn include(entry: std.Io.Dir.Walker.Entry) bool { pub fn include(entry: std.fs.Dir.Walker.Entry) bool {
const included = std.mem.endsWith(u8, entry.basename, ".json"); const included = std.mem.endsWith(u8, entry.basename, ".json");
if (included) count += 1; if (included) count += 1;
return included; return included;
} }
}; };
const model_hash = if (model_digest) |m| m[0..Hasher.digest_len].* else try Hasher.computeDirectoryHash(allocator, io, models_dir, @constCast(&Hasher.ComputeDirectoryOptions{ const model_hash = if (model_digest) |m| m[0..Hasher.digest_len].* else try Hasher.computeDirectoryHash(thread_pool, models_dir, @constCast(&Hasher.ComputeDirectoryOptions{
.isIncluded = Include.include, .isIncluded = Include.include,
.isExcluded = struct { .isExcluded = struct {
pub fn exclude(entry: std.Io.Dir.Walker.Entry) bool { pub fn exclude(entry: std.fs.Dir.Walker.Entry) bool {
_ = entry; _ = entry;
return false; return false;
} }
@ -196,18 +188,14 @@ fn calculateDigests(allocator: std.mem.Allocator, io: std.Io, models_dir: std.Io
})); }));
if (verbose) std.log.info("Model directory hash: {s}", .{model_digest orelse Hasher.hexDigest(model_hash)}); if (verbose) std.log.info("Model directory hash: {s}", .{model_digest orelse Hasher.hexDigest(model_hash)});
const output_hash = try Hasher.computeDirectoryHash(allocator, io, try output_dir.openDir( const output_hash = try Hasher.computeDirectoryHash(thread_pool, try output_dir.openDir(".", .{ .iterate = true }), @constCast(&Hasher.ComputeDirectoryOptions{
io,
".",
.{ .iterate = true },
), @constCast(&Hasher.ComputeDirectoryOptions{
.isIncluded = struct { .isIncluded = struct {
pub fn include(entry: std.Io.Dir.Walker.Entry) bool { pub fn include(entry: std.fs.Dir.Walker.Entry) bool {
return std.mem.endsWith(u8, entry.basename, ".zig"); return std.mem.endsWith(u8, entry.basename, ".zig");
} }
}.include, }.include,
.isExcluded = struct { .isExcluded = struct {
pub fn exclude(entry: std.Io.Dir.Walker.Entry) bool { pub fn exclude(entry: std.fs.Dir.Walker.Entry) bool {
_ = entry; _ = entry;
return false; return false;
} }
@ -216,14 +204,13 @@ fn calculateDigests(allocator: std.mem.Allocator, io: std.Io, models_dir: std.Io
})); }));
if (verbose) std.log.info("Output directory hash: {s}", .{Hasher.hexDigest(output_hash)}); if (verbose) std.log.info("Output directory hash: {s}", .{Hasher.hexDigest(output_hash)});
return .{ return .{
Include.count, Include.count, .{
.{
.model_dir_hash_digest = model_digest orelse Hasher.hexDigest(model_hash), .model_dir_hash_digest = model_digest orelse Hasher.hexDigest(model_hash),
.output_dir_hash_digest = Hasher.hexDigest(output_hash), .output_dir_hash_digest = Hasher.hexDigest(output_hash),
}, },
}; };
} }
fn processFile(io: std.Io, file_name: []const u8, output_dir: std.Io.Dir, manifest: *std.Io.Writer) !void { fn processFile(file_name: []const u8, output_dir: std.fs.Dir, manifest: *std.Io.Writer) !void {
// It's probably best to create our own allocator here so we can deint at the end and // It's probably best to create our own allocator here so we can deint at the end and
// toss all allocations related to the services in this file // toss all allocations related to the services in this file
// I can't guarantee we're not leaking something, and at the end of the // I can't guarantee we're not leaking something, and at the end of the
@ -241,6 +228,7 @@ fn processFile(io: std.Io, file_name: []const u8, output_dir: std.Io.Dir, manife
_ = try writer.write("const smithy = @import(\"smithy\");\n"); _ = try writer.write("const smithy = @import(\"smithy\");\n");
_ = try writer.write("const json = @import(\"json\");\n"); _ = try writer.write("const json = @import(\"json\");\n");
_ = try writer.write("const date = @import(\"date\");\n"); _ = try writer.write("const date = @import(\"date\");\n");
_ = try writer.write("const zeit = @import(\"zeit\");\n");
_ = try writer.write("\n"); _ = try writer.write("\n");
_ = try writer.write("const serializeMap = json.serializeMap;\n"); _ = try writer.write("const serializeMap = json.serializeMap;\n");
_ = try writer.write("\n"); _ = try writer.write("\n");
@ -249,7 +237,6 @@ fn processFile(io: std.Io, file_name: []const u8, output_dir: std.Io.Dir, manife
const service_names = generateServicesForFilePath( const service_names = generateServicesForFilePath(
allocator, allocator,
io,
";", ";",
file_name, file_name,
writer, writer,
@ -280,9 +267,9 @@ fn processFile(io: std.Io, file_name: []const u8, output_dir: std.Io.Dir, manife
const formatted = try zigFmt(allocator, unformatted); const formatted = try zigFmt(allocator, unformatted);
// Dump our buffer out to disk // Dump our buffer out to disk
var file = try output_dir.createFile(io, output_file_name, .{ .truncate = true }); var file = try output_dir.createFile(output_file_name, .{ .truncate = true });
defer file.close(io); defer file.close();
try file.writeStreamingAll(io, formatted); try file.writeAll(formatted);
for (service_names) |name| { for (service_names) |name| {
try manifest.print("pub const {s} = @import(\"{s}\");\n", .{ name, std.fs.path.basename(output_file_name) }); try manifest.print("pub const {s} = @import(\"{s}\");\n", .{ name, std.fs.path.basename(output_file_name) });
@ -301,14 +288,13 @@ fn zigFmt(allocator: std.mem.Allocator, buffer: [:0]const u8) ![]const u8 {
fn generateServicesForFilePath( fn generateServicesForFilePath(
allocator: std.mem.Allocator, allocator: std.mem.Allocator,
io: std.Io,
comptime terminator: []const u8, comptime terminator: []const u8,
path: []const u8, path: []const u8,
writer: *std.Io.Writer, writer: *std.Io.Writer,
) ![][]const u8 { ) ![][]const u8 {
const file = try std.Io.Dir.cwd().openFile(io, path, .{}); const file = try std.fs.cwd().openFile(path, .{});
defer file.close(io); defer file.close();
return try generateServices(allocator, io, terminator, file, writer); return try generateServices(allocator, terminator, file, writer);
} }
fn addReference(id: []const u8, map: *std.StringHashMap(u64)) !void { fn addReference(id: []const u8, map: *std.StringHashMap(u64)) !void {
@ -410,13 +396,12 @@ fn countReferences(
fn generateServices( fn generateServices(
allocator: std.mem.Allocator, allocator: std.mem.Allocator,
io: std.Io,
comptime _: []const u8, comptime _: []const u8,
file: std.Io.File, file: std.fs.File,
writer: *std.Io.Writer, writer: *std.Io.Writer,
) ![][]const u8 { ) ![][]const u8 {
var fbuf: [1024]u8 = undefined; var fbuf: [1024]u8 = undefined;
var freader = file.reader(io, &fbuf); var freader = file.reader(&fbuf);
var reader = &freader.interface; var reader = &freader.interface;
const json = try reader.allocRemaining(allocator, .limited(1024 * 1024 * 1024)); const json = try reader.allocRemaining(allocator, .limited(1024 * 1024 * 1024));
defer allocator.free(json); defer allocator.free(json);
@ -437,14 +422,14 @@ fn generateServices(
// a reference count in case there are recursive data structures // a reference count in case there are recursive data structures
var shape_references = std.StringHashMap(u64).init(allocator); var shape_references = std.StringHashMap(u64).init(allocator);
defer shape_references.deinit(); defer shape_references.deinit();
var stack: std.ArrayList([]const u8) = .empty; var stack: std.ArrayList([]const u8) = .{};
defer stack.deinit(allocator); defer stack.deinit(allocator);
for (services.items) |service| for (services.items) |service|
try countReferences(allocator, service, shapes, &shape_references, &stack); try countReferences(allocator, service, shapes, &shape_references, &stack);
var constant_names = try std.ArrayList([]const u8).initCapacity(allocator, services.items.len); var constant_names = try std.ArrayList([]const u8).initCapacity(allocator, services.items.len);
defer constant_names.deinit(allocator); defer constant_names.deinit(allocator);
var unresolved: std.ArrayList(smithy.ShapeInfo) = .empty; var unresolved: std.ArrayList(smithy.ShapeInfo) = .{};
defer unresolved.deinit(allocator); defer unresolved.deinit(allocator);
var generated = std.StringHashMap(void).init(allocator); var generated = std.StringHashMap(void).init(allocator);
defer generated.deinit(); defer generated.deinit();
@ -534,7 +519,7 @@ fn generateAdditionalTypes(allocator: std.mem.Allocator, file_state: FileGenerat
while (file_state.additional_types_to_generate.pop()) |t| { while (file_state.additional_types_to_generate.pop()) |t| {
if (file_state.additional_types_generated.getEntry(t.name) != null) continue; if (file_state.additional_types_generated.getEntry(t.name) != null) continue;
// std.log.info("\t\t{s}", .{t.name}); // std.log.info("\t\t{s}", .{t.name});
var type_stack: std.ArrayList(*const smithy.ShapeInfo) = .empty; var type_stack: std.ArrayList(*const smithy.ShapeInfo) = .{};
defer type_stack.deinit(allocator); defer type_stack.deinit(allocator);
const state = GenerationState{ const state = GenerationState{
.type_stack = &type_stack, .type_stack = &type_stack,
@ -585,7 +570,7 @@ fn generateOperation(allocator: std.mem.Allocator, operation: smithy.ShapeInfo,
const snake_case_name = try support.constantName(allocator, operation.name, .snake); const snake_case_name = try support.constantName(allocator, operation.name, .snake);
defer allocator.free(snake_case_name); defer allocator.free(snake_case_name);
var type_stack: std.ArrayList(*const smithy.ShapeInfo) = .empty; var type_stack: std.ArrayList(*const smithy.ShapeInfo) = .{};
defer type_stack.deinit(allocator); defer type_stack.deinit(allocator);
const state = GenerationState{ const state = GenerationState{
.type_stack = &type_stack, .type_stack = &type_stack,
@ -593,7 +578,8 @@ fn generateOperation(allocator: std.mem.Allocator, operation: smithy.ShapeInfo,
.allocator = allocator, .allocator = allocator,
.indent_level = 1, .indent_level = 1,
}; };
const child_state = state.indent(); var child_state = state;
child_state.indent_level += 1;
// indent should start at 4 spaces here // indent should start at 4 spaces here
const operation_name = avoidReserved(snake_case_name); const operation_name = avoidReserved(snake_case_name);
@ -694,17 +680,17 @@ fn generateMetadataFunction(operation_name: []const u8, state: GenerationState,
// } // }
// We want to add a short "get my parents" function into the response // We want to add a short "get my parents" function into the response
var child_state = state; var child_state = state;
child_state = child_state.indent(); child_state.indent_level += 1;
try outputIndent(child_state, writer); try outputIndent(child_state, writer);
_ = try writer.write("pub fn metaInfo() struct { "); _ = try writer.write("pub fn metaInfo() struct { ");
try writer.print("service_metadata: @TypeOf(service_metadata), action: @TypeOf({s})", .{operation_name}); try writer.print("service_metadata: @TypeOf(service_metadata), action: @TypeOf({s})", .{operation_name});
_ = try writer.write(" } {\n"); _ = try writer.write(" } {\n");
child_state = child_state.indent(); child_state.indent_level += 1;
try outputIndent(child_state, writer); try outputIndent(child_state, writer);
_ = try writer.write("return .{ .service_metadata = service_metadata, "); _ = try writer.write("return .{ .service_metadata = service_metadata, ");
try writer.print(".action = {s}", .{operation_name}); try writer.print(".action = {s}", .{operation_name});
_ = try writer.write(" };\n"); _ = try writer.write(" };\n");
child_state = child_state.deindent(); child_state.indent_level -= 1;
try outputIndent(child_state, writer); try outputIndent(child_state, writer);
_ = try writer.write("}\n"); _ = try writer.write("}\n");
try outputIndent(state, writer); try outputIndent(state, writer);
@ -880,7 +866,8 @@ fn generateMapTypeFor(map: anytype, writer: *std.Io.Writer, state: GenerationSta
try writer.writeAll("pub const is_map_type = true;\n\n"); try writer.writeAll("pub const is_map_type = true;\n\n");
const child_state = state.indent(); var child_state = state;
child_state.indent_level += 1;
_ = try writer.write("key: "); _ = try writer.write("key: ");
_ = try generateTypeFor(map.key, writer, child_state, options.endStructure(true)); _ = try generateTypeFor(map.key, writer, child_state, options.endStructure(true));
@ -929,7 +916,8 @@ fn generateComplexTypeFor(shape_id: []const u8, members: []smithy.TypeMember, ty
// prolog. We'll rely on caller to get the spacing correct here // prolog. We'll rely on caller to get the spacing correct here
_ = try writer.write(type_type_name); _ = try writer.write(type_type_name);
_ = try writer.write(" {\n"); _ = try writer.write(" {\n");
const child_state = state.indent(); var child_state = state;
child_state.indent_level += 1;
var payload: ?[]const u8 = null; var payload: ?[]const u8 = null;
for (members) |member| { for (members) |member| {
// This is our mapping // This is our mapping
@ -1016,7 +1004,8 @@ fn generateComplexTypeFor(shape_id: []const u8, members: []smithy.TypeMember, ty
try writer.writeByte('\n'); try writer.writeByte('\n');
try outputIndent(child_state, writer); try outputIndent(child_state, writer);
_ = try writer.write("pub fn fieldNameFor(_: @This(), comptime field_name: []const u8) []const u8 {\n"); _ = try writer.write("pub fn fieldNameFor(_: @This(), comptime field_name: []const u8) []const u8 {\n");
const grandchild_state = child_state.indent(); var grandchild_state = child_state;
grandchild_state.indent_level += 1;
// We need to force output here becaseu we're referencing the field in the return statement below // We need to force output here becaseu we're referencing the field in the return statement below
try writeMappings(grandchild_state, "", "mappings", field_name_mappings, true, writer); try writeMappings(grandchild_state, "", "mappings", field_name_mappings, true, writer);
try outputIndent(grandchild_state, writer); try outputIndent(grandchild_state, writer);
@ -1042,7 +1031,8 @@ fn writeMappings(
} }
try writer.print("{s}const {s} = .", .{ @"pub", mapping_name }); try writer.print("{s}const {s} = .", .{ @"pub", mapping_name });
_ = try writer.write("{\n"); _ = try writer.write("{\n");
const child_state = state.indent(); var child_state = state;
child_state.indent_level += 1;
for (mappings.items) |mapping| { for (mappings.items) |mapping| {
try outputIndent(child_state, writer); try outputIndent(child_state, writer);
try writer.print(".{s} = \"{s}\",\n", .{ avoidReserved(mapping.snake), mapping.original }); try writer.print(".{s} = \"{s}\",\n", .{ avoidReserved(mapping.snake), mapping.original });

View file

@ -87,7 +87,7 @@ fn getJsonMembers(allocator: Allocator, shape: Shape, state: GenerationState) !?
return null; return null;
} }
var json_members: std.ArrayListUnmanaged(JsonMember) = .empty; var json_members = std.ArrayListUnmanaged(JsonMember){};
var iter = hash_map.iterator(); var iter = hash_map.iterator();
while (iter.next()) |kvp| { while (iter.next()) |kvp| {

View file

@ -6,8 +6,8 @@
.dependencies = .{ .dependencies = .{
.aws = .{ .aws = .{
.url = "https://git.lerch.org/api/packages/lobo/generic/aws-sdk-with-models/2fddf4f122198ba64fbb2320e702b317b0b86837/2fddf4f122198ba64fbb2320e702b317b0b86837-with-models.tar.gz", .url = "git+https://git.lerch.org/lobo/aws-sdk-for-zig.git?ref=master#efdef66fdbb2500d33a79a0b8d1855dd1bb20d56",
.hash = "aws-0.0.1-SbsFcLhoAwQ5TcclMwXhIljwW0Zz_Kcjd4yrIeQq5uHt", .hash = "aws-0.0.1-SbsFcLgtCgAndtGhoOyzQfmFtUux4tadFZv0tC6TAnL8",
}, },
}, },
} }

View file

@ -11,11 +11,12 @@ pub const std_options: std.Options = .{
}, },
}; };
pub fn main(init: std.process.Init) anyerror!void { pub fn main() anyerror!void {
const allocator = init.gpa; var gpa = std.heap.GeneralPurposeAllocator(.{}){};
const io = init.io; defer _ = gpa.deinit();
const allocator = gpa.allocator();
var stdout_buffer: [1024]u8 = undefined; var stdout_buffer: [1024]u8 = undefined;
var stdout_raw = std.Io.File.stdout().writer(io, &stdout_buffer); var stdout_raw = std.fs.File.stdout().writer(&stdout_buffer);
const stdout = &stdout_raw.interface; const stdout = &stdout_raw.interface;
defer stdout.flush() catch unreachable; defer stdout.flush() catch unreachable;
@ -27,7 +28,7 @@ pub fn main(init: std.process.Init) anyerror!void {
// }; // };
// //
// var client = aws.Client.init(allocator, .{ .proxy = proxy }); // var client = aws.Client.init(allocator, .{ .proxy = proxy });
var client = aws.Client.init(allocator, .{ .io = io, .map = init.environ_map }); var client = aws.Client.init(allocator, .{});
defer client.deinit(); defer client.deinit();
const options = aws.Options{ const options = aws.Options{

View file

@ -5,8 +5,8 @@
.minimum_zig_version = "0.14.0", .minimum_zig_version = "0.14.0",
.dependencies = .{ .dependencies = .{
.zeit = .{ .zeit = .{
.url = "git+https://github.com/rockorager/zeit#2a79678e05e4e82cd4efd4fd6b754dcf029c3a64", .url = "git+https://github.com/rockorager/zeit?ref=zig-0.15#ed2ca60db118414bda2b12df2039e33bad3b0b88",
.hash = "zeit-0.6.0-5I6bk7q6AgBdMJxze3D4l9ylQhkviQ_BX9FigDt13MFn", .hash = "zeit-0.6.0-5I6bk0J9AgCVa0nnyL0lNY9Xa9F68hHq-ZarhuXNV-Jb",
}, },
.json = .{ .json = .{
.path = "../json", .path = "../json",

View file

@ -1,7 +1,6 @@
const std = @import("std"); const std = @import("std");
const log = std.log.scoped(.date); const log = std.log.scoped(.date);
const zeit = @import("zeit"); const zeit = @import("zeit");
const instantWithoutIo = @import("timestamp.zig").instantWithoutIo;
pub const DateTime = struct { pub const DateTime = struct {
day: u8, day: u8,
@ -38,13 +37,12 @@ pub const DateTime = struct {
} }
pub fn instant(self: DateTime) !zeit.Instant { pub fn instant(self: DateTime) !zeit.Instant {
return try instantWithoutIo(.{ .source = .{ .time = self.time() } }); return try zeit.instant(.{ .source = .{ .time = self.time() } });
} }
}; };
pub fn timestampToDateTime(timestamp: i64) DateTime { pub fn timestampToDateTime(timestamp: zeit.Seconds) DateTime {
// zeit.Seconds is i64, so this should be identical const ins = zeit.instant(.{ .source = .{ .unix_timestamp = timestamp } }) catch @panic("Failed to create instant from timestamp");
const ins = instantWithoutIo(.{ .source = .{ .unix_timestamp = timestamp } }) catch @panic("Failed to create instant from timestamp");
return DateTime.fromInstant(ins); return DateTime.fromInstant(ins);
} }
@ -55,7 +53,7 @@ pub fn parseEnglishToTimestamp(data: []const u8) !i64 {
/// Converts a string to a timestamp value. May not handle dates before the /// Converts a string to a timestamp value. May not handle dates before the
/// epoch. Dates should look like "Fri, 03 Jun 2022 18:12:36 GMT" /// epoch. Dates should look like "Fri, 03 Jun 2022 18:12:36 GMT"
pub fn parseEnglishToDateTime(data: []const u8) !DateTime { pub fn parseEnglishToDateTime(data: []const u8) !DateTime {
const ins = try instantWithoutIo(.{ .source = .{ .rfc1123 = data } }); const ins = try zeit.instant(.{ .source = .{ .rfc1123 = data } });
return DateTime.fromInstant(ins); return DateTime.fromInstant(ins);
} }
@ -66,7 +64,7 @@ pub fn parseIso8601ToTimestamp(data: []const u8) !i64 {
/// Converts a string to a timestamp value. May not handle dates before the /// Converts a string to a timestamp value. May not handle dates before the
/// epoch /// epoch
pub fn parseIso8601ToDateTime(data: []const u8) !DateTime { pub fn parseIso8601ToDateTime(data: []const u8) !DateTime {
const ins = try instantWithoutIo(.{ .source = .{ .iso8601 = data } }); const ins = try zeit.instant(.{ .source = .{ .iso8601 = data } });
return DateTime.fromInstant(ins); return DateTime.fromInstant(ins);
} }
@ -85,10 +83,8 @@ fn printDateTime(dt: DateTime) void {
}); });
} }
pub fn printNowUtc(io: std.Io) void { pub fn printNowUtc() void {
const now = std.Io.Clock.Timestamp.now(io, .awake); printDateTime(timestampToDateTime(std.time.timestamp()));
const timestamp = @as(i64, @intCast(@divFloor(now.raw.nanoseconds, std.time.ns_per_s)));
printDateTime(timestampToDateTime(timestamp));
} }
test "Convert timestamp to datetime" { test "Convert timestamp to datetime" {

View file

@ -10,7 +10,7 @@ pub const Timestamp = enum(zeit.Nanoseconds) {
_, _,
pub fn jsonStringify(value: Timestamp, jw: anytype) !void { pub fn jsonStringify(value: Timestamp, jw: anytype) !void {
const instant = instantWithoutIo(.{ const instant = zeit.instant(.{
.source = .{ .source = .{
.unix_nano = @intFromEnum(value), .unix_nano = @intFromEnum(value),
}, },
@ -34,7 +34,7 @@ pub const Timestamp = enum(zeit.Nanoseconds) {
} }
}; };
const ins = try instantWithoutIo(.{ const ins = try zeit.instant(.{
.source = switch (date_format) { .source = switch (date_format) {
DateFormat.iso8601 => .{ DateFormat.iso8601 => .{
.iso8601 = val, .iso8601 = val,
@ -49,36 +49,6 @@ pub const Timestamp = enum(zeit.Nanoseconds) {
} }
}; };
/// create a new Instant
pub fn instantWithoutIo(cfg: zeit.Instant.Config) !zeit.Instant {
const ts: zeit.Nanoseconds = switch (cfg.source) {
.now => return error.UseZeitInstantWithIoForNowInstants,
.unix_timestamp => |unix| @as(i128, unix) * std.time.ns_per_s,
.unix_nano => |nano| nano,
.time => |time| time.instant().timestamp,
.iso8601,
.rfc3339,
=> |iso| blk: {
const t = try zeit.Time.fromISO8601(iso);
break :blk t.instant().timestamp;
},
.rfc2822,
.rfc5322,
=> |eml| blk: {
const t = try zeit.Time.fromRFC5322(eml);
break :blk t.instant().timestamp;
},
.rfc1123 => |http_date| blk: {
const t = try zeit.Time.fromRFC1123(http_date);
break :blk t.instant().timestamp;
},
};
return .{
.timestamp = ts,
.timezone = cfg.timezone,
};
}
test Timestamp { test Timestamp {
const in_date = "Wed, 23 Apr 2025 11:23:45 GMT"; const in_date = "Wed, 23 Apr 2025 11:23:45 GMT";

View file

@ -1346,7 +1346,7 @@ test "json.validate" {
const Allocator = std.mem.Allocator; const Allocator = std.mem.Allocator;
const ArenaAllocator = std.heap.ArenaAllocator; const ArenaAllocator = std.heap.ArenaAllocator;
const ArrayList = std.ArrayList; const ArrayList = std.ArrayList;
const StringArrayHashMap = std.array_hash_map.String; const StringArrayHashMap = std.StringArrayHashMap;
pub const ValueTree = struct { pub const ValueTree = struct {
arena: ArenaAllocator, arena: ArenaAllocator,
@ -1580,11 +1580,11 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
if (!numberToken.is_integer) { if (!numberToken.is_integer) {
// probably is in scientific notation // probably is in scientific notation
const n = try std.fmt.parseFloat(f128, numberToken.slice(tokens.slice, tokens.i - 1)); const n = try std.fmt.parseFloat(f128, numberToken.slice(tokens.slice, tokens.i - 1));
return std.enums.fromInt(T, @as(i128, @intFromFloat(n))) orelse error.InvalidEnumTag; return try std.meta.intToEnum(T, @as(i128, @intFromFloat(n)));
} }
const n = try std.fmt.parseInt(enumInfo.tag_type, numberToken.slice(tokens.slice, tokens.i - 1), 10); const n = try std.fmt.parseInt(enumInfo.tag_type, numberToken.slice(tokens.slice, tokens.i - 1), 10);
return std.enums.fromInt(T, n) orelse error.InvalidEnumTag; return try std.meta.intToEnum(T, n);
}, },
.String => |stringToken| { .String => |stringToken| {
const source_slice = stringToken.slice(tokens.slice, tokens.i - 1); const source_slice = stringToken.slice(tokens.slice, tokens.i - 1);
@ -1772,7 +1772,7 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
.slice => { .slice => {
switch (token) { switch (token) {
.ArrayBegin => { .ArrayBegin => {
var arraylist = std.ArrayList(ptrInfo.child).empty; var arraylist = std.ArrayList(ptrInfo.child){};
errdefer { errdefer {
while (arraylist.pop()) |v| { while (arraylist.pop()) |v| {
parseFree(ptrInfo.child, v, options); parseFree(ptrInfo.child, v, options);
@ -1817,7 +1817,7 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
if (key_type == null) return error.UnexpectedToken; if (key_type == null) return error.UnexpectedToken;
const value_type = typeForField(ptrInfo.child, "value"); const value_type = typeForField(ptrInfo.child, "value");
if (value_type == null) return error.UnexpectedToken; if (value_type == null) return error.UnexpectedToken;
var arraylist = std.ArrayList(ptrInfo.child).empty; var arraylist = std.ArrayList(ptrInfo.child){};
errdefer { errdefer {
while (arraylist.pop()) |v| { while (arraylist.pop()) |v| {
parseFree(ptrInfo.child, v, options); parseFree(ptrInfo.child, v, options);

View file

@ -4,6 +4,7 @@ const std = @import("std");
const case = @import("case"); const case = @import("case");
const date = @import("date"); const date = @import("date");
const json = @import("json"); const json = @import("json");
const zeit = @import("zeit");
const credentials = @import("aws_credentials.zig"); const credentials = @import("aws_credentials.zig");
const awshttp = @import("aws_http.zig"); const awshttp = @import("aws_http.zig");
@ -113,8 +114,6 @@ pub const Services = servicemodel.Services;
pub const ClientOptions = struct { pub const ClientOptions = struct {
proxy: ?std.http.Client.Proxy = null, proxy: ?std.http.Client.Proxy = null,
io: std.Io,
map: *const std.process.Environ.Map,
}; };
pub const Client = struct { pub const Client = struct {
allocator: std.mem.Allocator, allocator: std.mem.Allocator,
@ -125,7 +124,7 @@ pub const Client = struct {
pub fn init(allocator: std.mem.Allocator, options: ClientOptions) Self { pub fn init(allocator: std.mem.Allocator, options: ClientOptions) Self {
return Self{ return Self{
.allocator = allocator, .allocator = allocator,
.aws_http = awshttp.AwsHttp.init(allocator, options.io, options.map, options.proxy), .aws_http = awshttp.AwsHttp.init(allocator, options.proxy),
}; };
} }
pub fn deinit(self: *Client) void { pub fn deinit(self: *Client) void {
@ -196,7 +195,7 @@ pub fn Request(comptime request_action: anytype) type {
log.debug("Rest method: '{s}'", .{aws_request.method}); log.debug("Rest method: '{s}'", .{aws_request.method});
log.debug("Rest success code: '{d}'", .{Action.http_config.success_code}); log.debug("Rest success code: '{d}'", .{Action.http_config.success_code});
log.debug("Rest raw uri: '{s}'", .{Action.http_config.uri}); log.debug("Rest raw uri: '{s}'", .{Action.http_config.uri});
var al = std.ArrayList([]const u8).empty; var al = std.ArrayList([]const u8){};
defer al.deinit(options.client.allocator); defer al.deinit(options.client.allocator);
aws_request.path = try buildPath( aws_request.path = try buildPath(
options.client.allocator, options.client.allocator,
@ -481,7 +480,9 @@ pub fn Request(comptime request_action: anytype) type {
) catch |e| { ) catch |e| {
log.err("Could not set header value: Response header {s}. Field {s}. Value {s}", .{ header.name, f.?.name, header.value }); log.err("Could not set header value: Response header {s}. Field {s}. Value {s}", .{ header.name, f.?.name, header.value });
log.err("Error: {}", .{e}); log.err("Error: {}", .{e});
std.debug.dumpCurrentStackTrace(.{}); if (@errorReturnTrace()) |trace| {
std.debug.dumpStackTrace(trace.*);
}
}; };
break; break;
@ -1081,37 +1082,45 @@ fn ServerResponse(comptime action: anytype) type {
const ResponseMetadata = struct { const ResponseMetadata = struct {
RequestId: []u8, RequestId: []u8,
}; };
const Result = @Struct( const Result = @Type(.{
.auto, .@"struct" = .{
null, .layout = .auto,
&[_][]const u8{ action.action_name ++ "Result", "ResponseMetadata" }, .fields = &[_]std.builtin.Type.StructField{
&[_]type{ T, ResponseMetadata },
&[_]std.builtin.Type.StructField.Attributes{
.{ .{
.name = action.action_name ++ "Result",
.type = T,
.default_value_ptr = null, .default_value_ptr = null,
.@"comptime" = false, .is_comptime = false,
.@"align" = std.meta.alignment(T), .alignment = std.meta.alignment(T),
}, },
.{ .{
.name = "ResponseMetadata",
.type = ResponseMetadata,
.default_value_ptr = null, .default_value_ptr = null,
.@"comptime" = false, .is_comptime = false,
.@"align" = std.meta.alignment(ResponseMetadata), .alignment = std.meta.alignment(ResponseMetadata),
}, },
}, },
); .decls = &[_]std.builtin.Type.Declaration{},
return @Struct( .is_tuple = false,
.auto, },
null, });
&[_][]const u8{action.action_name ++ "Response"}, return @Type(.{
&[_]type{Result}, .@"struct" = .{
&[_]std.builtin.Type.StructField.Attributes{ .layout = .auto,
.fields = &[_]std.builtin.Type.StructField{
.{ .{
.name = action.action_name ++ "Response",
.type = Result,
.default_value_ptr = null, .default_value_ptr = null,
.@"comptime" = false, .is_comptime = false,
.@"align" = std.meta.alignment(Result), .alignment = std.meta.alignment(Result),
}, },
}, },
); .decls = &[_]std.builtin.Type.Declaration{},
.is_tuple = false,
},
});
} }
fn FullResponse(comptime action: anytype) type { fn FullResponse(comptime action: anytype) type {
return struct { return struct {
@ -1432,7 +1441,6 @@ fn reportTraffic(
test { test {
_ = @import("aws_test.zig"); _ = @import("aws_test.zig");
_ = @import("servicemodel.zig");
} }
// buildQuery/buildPath tests, which are here as they are a) generic and b) private // buildQuery/buildPath tests, which are here as they are a) generic and b) private
@ -1472,7 +1480,7 @@ test "REST Json v1 serializes lists in queries" {
} }
test "REST Json v1 buildpath substitutes" { test "REST Json v1 buildpath substitutes" {
const allocator = std.testing.allocator; const allocator = std.testing.allocator;
var al = std.ArrayList([]const u8).empty; var al = std.ArrayList([]const u8){};
defer al.deinit(allocator); defer al.deinit(allocator);
const svs = Services(.{.lambda}){}; const svs = Services(.{.lambda}){};
const request = svs.lambda.list_functions.Request{ const request = svs.lambda.list_functions.Request{
@ -1485,7 +1493,7 @@ test "REST Json v1 buildpath substitutes" {
} }
test "REST Json v1 buildpath handles restricted characters" { test "REST Json v1 buildpath handles restricted characters" {
const allocator = std.testing.allocator; const allocator = std.testing.allocator;
var al = std.ArrayList([]const u8).empty; var al = std.ArrayList([]const u8){};
defer al.deinit(allocator); defer al.deinit(allocator);
const svs = Services(.{.lambda}){}; const svs = Services(.{.lambda}){};
const request = svs.lambda.list_functions.Request{ const request = svs.lambda.list_functions.Request{

View file

@ -82,18 +82,18 @@ pub const Options = struct {
pub var static_credentials: ?auth.Credentials = null; pub var static_credentials: ?auth.Credentials = null;
pub fn getCredentials(allocator: std.mem.Allocator, map: *const std.process.Environ.Map, io: std.Io, options: Options) !auth.Credentials { pub fn getCredentials(allocator: std.mem.Allocator, options: Options) !auth.Credentials {
if (static_credentials) |c| return c; if (static_credentials) |c| return c;
if (options.profile.prefer_profile_from_file) { if (options.profile.prefer_profile_from_file) {
log.debug( log.debug(
"Command line profile specified. Checking credentials file first. Profile name {s}", "Command line profile specified. Checking credentials file first. Profile name {s}",
.{options.profile.profile_name orelse "default"}, .{options.profile.profile_name orelse "default"},
); );
if (try getProfileCredentials(allocator, io, map, options.profile)) |cred| return cred; if (try getProfileCredentials(allocator, options.profile)) |cred| return cred;
// Profile not found. We'll mirror the cli here and bail early // Profile not found. We'll mirror the cli here and bail early
return error.CredentialsNotFound; return error.CredentialsNotFound;
} }
if (try getEnvironmentCredentials(allocator, map)) |cred| { if (try getEnvironmentCredentials(allocator)) |cred| {
log.debug("Found credentials in environment. Access key: {s}", .{cred.access_key}); log.debug("Found credentials in environment. Access key: {s}", .{cred.access_key});
return cred; return cred;
} }
@ -101,31 +101,32 @@ pub fn getCredentials(allocator: std.mem.Allocator, map: *const std.process.Envi
// GetWebIdentity is not currently implemented. The rest are tested and gtg // GetWebIdentity is not currently implemented. The rest are tested and gtg
// Note: Lambda just sets environment variables // Note: Lambda just sets environment variables
if (try getWebIdentityToken(allocator)) |cred| return cred; if (try getWebIdentityToken(allocator)) |cred| return cred;
if (try getProfileCredentials(allocator, io, map, options.profile)) |cred| return cred; if (try getProfileCredentials(allocator, options.profile)) |cred| return cred;
if (try getContainerCredentials(allocator, io, map)) |cred| return cred; if (try getContainerCredentials(allocator)) |cred| return cred;
// I don't think we need v1 at all? // I don't think we need v1 at all?
if (try getImdsv2Credentials(allocator, io)) |cred| return cred; if (try getImdsv2Credentials(allocator)) |cred| return cred;
return error.CredentialsNotFound; return error.CredentialsNotFound;
} }
fn getEnvironmentCredentials(allocator: std.mem.Allocator, map: *const std.process.Environ.Map) !?auth.Credentials { fn getEnvironmentCredentials(allocator: std.mem.Allocator) !?auth.Credentials {
const secret_key = getEnvironmentVariable(map, "AWS_SECRET_ACCESS_KEY") orelse return null; const secret_key = (try getEnvironmentVariable(allocator, "AWS_SECRET_ACCESS_KEY")) orelse return null;
const access_key = getEnvironmentVariable(map, "AWS_ACCESS_KEY_ID") orelse return null; defer allocator.free(secret_key); //yes, we're not zeroing. But then, the secret key is in an environment var anyway
const token = getEnvironmentVariable(map, "AWS_SESSION_TOKEN") orelse
getEnvironmentVariable(map, "AWS_SECURITY_TOKEN"); // Security token is backward compat only
// Use cross-platform API (requires allocation) // Use cross-platform API (requires allocation)
return auth.Credentials.init( return auth.Credentials.init(
allocator, allocator,
try allocator.dupe(u8, access_key), (try getEnvironmentVariable(allocator, "AWS_ACCESS_KEY_ID")) orelse return null,
try allocator.dupe(u8, secret_key), try allocator.dupe(u8, secret_key),
if (token) |t| try allocator.dupe(u8, t) else null, (try getEnvironmentVariable(allocator, "AWS_SESSION_TOKEN")) orelse
try getEnvironmentVariable(allocator, "AWS_SECURITY_TOKEN"), // Security token is backward compat only
); );
} }
fn getEnvironmentVariable(map: *const std.process.Environ.Map, key: []const u8) ?[]const u8 { fn getEnvironmentVariable(allocator: std.mem.Allocator, key: []const u8) !?[]const u8 {
if (!map.contains(key)) return null; return std.process.getEnvVarOwned(allocator, key) catch |e| switch (e) {
return map.get(key); std.process.GetEnvVarOwnedError.EnvironmentVariableNotFound => return null,
else => return e,
};
} }
fn getWebIdentityToken(allocator: std.mem.Allocator) !?auth.Credentials { fn getWebIdentityToken(allocator: std.mem.Allocator) !?auth.Credentials {
@ -138,7 +139,7 @@ fn getWebIdentityToken(allocator: std.mem.Allocator) !?auth.Credentials {
// TODO: implement // TODO: implement
return null; return null;
} }
fn getContainerCredentials(allocator: std.mem.Allocator, io: std.Io, map: *const std.process.Environ.Map) !?auth.Credentials { fn getContainerCredentials(allocator: std.mem.Allocator) !?auth.Credentials {
// A note on testing: The best way I have found to test this process is // A note on testing: The best way I have found to test this process is
// the following. Setup an ECS Fargate cluster and create a task definition // the following. Setup an ECS Fargate cluster and create a task definition
// with the command ["/bin/bash","-c","while true; do sleep 10; done"]. // with the command ["/bin/bash","-c","while true; do sleep 10; done"].
@ -179,11 +180,12 @@ fn getContainerCredentials(allocator: std.mem.Allocator, io: std.Io, map: *const
// //
// Compile code, copy to S3, install AWS CLI within the session, download // Compile code, copy to S3, install AWS CLI within the session, download
// from s3 and run // from s3 and run
const container_relative_uri = getEnvironmentVariable(map, "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI") orelse return null; const container_relative_uri = (try getEnvironmentVariable(allocator, "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI")) orelse return null;
defer allocator.free(container_relative_uri);
const container_uri = try std.fmt.allocPrint(allocator, "http://169.254.170.2{s}", .{container_relative_uri}); const container_uri = try std.fmt.allocPrint(allocator, "http://169.254.170.2{s}", .{container_relative_uri});
defer allocator.free(container_uri); defer allocator.free(container_uri);
var cl = std.http.Client{ .allocator = allocator, .io = io }; var cl = std.http.Client{ .allocator = allocator };
defer cl.deinit(); // I don't belive connection pooling would help much here as it's non-ssl and local defer cl.deinit(); // I don't belive connection pooling would help much here as it's non-ssl and local
var aw: std.Io.Writer.Allocating = .init(allocator); var aw: std.Io.Writer.Allocating = .init(allocator);
defer aw.deinit(); defer aw.deinit();
@ -212,7 +214,9 @@ fn getContainerCredentials(allocator: std.mem.Allocator, io: std.Io, map: *const
const res = std.json.parseFromSlice(CredsResponse, allocator, aw.written(), .{}) catch |e| { const res = std.json.parseFromSlice(CredsResponse, allocator, aw.written(), .{}) catch |e| {
log.err("Unexpected Json response from container credentials endpoint: {s}", .{aw.written()}); log.err("Unexpected Json response from container credentials endpoint: {s}", .{aw.written()});
log.err("Error parsing json: {}", .{e}); log.err("Error parsing json: {}", .{e});
std.debug.dumpCurrentStackTrace(.{}); if (@errorReturnTrace()) |trace| {
std.debug.dumpStackTrace(trace.*);
}
return null; return null;
}; };
@ -228,10 +232,10 @@ fn getContainerCredentials(allocator: std.mem.Allocator, io: std.Io, map: *const
); );
} }
fn getImdsv2Credentials(allocator: std.mem.Allocator, io: std.Io) !?auth.Credentials { fn getImdsv2Credentials(allocator: std.mem.Allocator) !?auth.Credentials {
var token: ?[]u8 = null; var token: ?[]u8 = null;
defer if (token) |t| allocator.free(t); defer if (token) |t| allocator.free(t);
var cl = std.http.Client{ .allocator = allocator, .io = io }; var cl = std.http.Client{ .allocator = allocator };
defer cl.deinit(); // I don't belive connection pooling would help much here as it's non-ssl and local defer cl.deinit(); // I don't belive connection pooling would help much here as it's non-ssl and local
// Get token // Get token
{ {
@ -308,7 +312,9 @@ fn getImdsRoleName(allocator: std.mem.Allocator, client: *std.http.Client, imds_
const imds_response = std.json.parseFromSlice(ImdsResponse, allocator, aw.written(), .{}) catch |e| { const imds_response = std.json.parseFromSlice(ImdsResponse, allocator, aw.written(), .{}) catch |e| {
log.err("Unexpected Json response from IMDS endpoint: {s}", .{aw.written()}); log.err("Unexpected Json response from IMDS endpoint: {s}", .{aw.written()});
log.err("Error parsing json: {}", .{e}); log.err("Error parsing json: {}", .{e});
std.debug.dumpCurrentStackTrace(.{}); if (@errorReturnTrace()) |trace| {
std.debug.dumpStackTrace(trace.*);
}
return null; return null;
}; };
defer imds_response.deinit(); defer imds_response.deinit();
@ -361,7 +367,10 @@ fn getImdsCredentials(allocator: std.mem.Allocator, client: *std.http.Client, ro
const imds_response = std.json.parseFromSlice(ImdsResponse, allocator, aw.written(), .{}) catch |e| { const imds_response = std.json.parseFromSlice(ImdsResponse, allocator, aw.written(), .{}) catch |e| {
log.err("Unexpected Json response from IMDS endpoint: {s}", .{aw.written()}); log.err("Unexpected Json response from IMDS endpoint: {s}", .{aw.written()});
log.err("Error parsing json: {}", .{e}); log.err("Error parsing json: {}", .{e});
std.debug.dumpCurrentStackTrace(.{}); if (@errorReturnTrace()) |trace| {
std.debug.dumpStackTrace(trace.*);
}
return null; return null;
}; };
defer imds_response.deinit(); defer imds_response.deinit();
@ -388,13 +397,12 @@ fn getImdsCredentials(allocator: std.mem.Allocator, client: *std.http.Client, ro
} }
fn getProfileCredentials(allocator: std.mem.Allocator, io: std.Io, map: *const std.process.Environ.Map, options: Profile) !?auth.Credentials { fn getProfileCredentials(allocator: std.mem.Allocator, options: Profile) !?auth.Credentials {
var default_path: ?[]const u8 = null; var default_path: ?[]const u8 = null;
defer if (default_path) |p| allocator.free(p); defer if (default_path) |p| allocator.free(p);
const creds_file_path = try filePath( const creds_file_path = try filePath(
allocator, allocator,
map,
options.credential_file, options.credential_file,
"AWS_SHARED_CREDENTIALS_FILE", "AWS_SHARED_CREDENTIALS_FILE",
default_path, default_path,
@ -404,7 +412,6 @@ fn getProfileCredentials(allocator: std.mem.Allocator, io: std.Io, map: *const s
default_path = default_path orelse creds_file_path.home; default_path = default_path orelse creds_file_path.home;
const config_file_path = try filePath( const config_file_path = try filePath(
allocator, allocator,
map,
options.config_file, options.config_file,
"AWS_CONFIG_FILE", "AWS_CONFIG_FILE",
default_path, default_path,
@ -414,22 +421,22 @@ fn getProfileCredentials(allocator: std.mem.Allocator, io: std.Io, map: *const s
default_path = default_path orelse config_file_path.home; default_path = default_path orelse config_file_path.home;
// Get active profile // Get active profile
const profile = try allocator.dupe(u8, getEnvironmentVariable(map, "AWS_PROFILE") orelse const profile = (try getEnvironmentVariable(allocator, "AWS_PROFILE")) orelse
options.profile_name orelse "default"); try allocator.dupe(u8, options.profile_name orelse "default");
defer allocator.free(profile); defer allocator.free(profile);
log.debug("Looking for file credentials using profile '{s}'", .{profile}); log.debug("Looking for file credentials using profile '{s}'", .{profile});
log.debug("Checking credentials file: {s}", .{creds_file_path.evaluated_path}); log.debug("Checking credentials file: {s}", .{creds_file_path.evaluated_path});
const credentials_file = std.Io.Dir.openFileAbsolute(io, creds_file_path.evaluated_path, .{}) catch null; const credentials_file = std.fs.openFileAbsolute(creds_file_path.evaluated_path, .{}) catch null;
defer if (credentials_file) |f| f.close(io); defer if (credentials_file) |f| f.close();
// It's much more likely that we'll find credentials in the credentials file // It's much more likely that we'll find credentials in the credentials file
// so we'll try that first // so we'll try that first
const creds_file_creds = try credsForFile(allocator, io, credentials_file, profile); const creds_file_creds = try credsForFile(allocator, credentials_file, profile);
var conf_file_creds = PartialCredentials{}; var conf_file_creds = PartialCredentials{};
if (creds_file_creds.access_key == null or creds_file_creds.secret_key == null) { if (creds_file_creds.access_key == null or creds_file_creds.secret_key == null) {
log.debug("Checking config file: {s}", .{config_file_path.evaluated_path}); log.debug("Checking config file: {s}", .{config_file_path.evaluated_path});
const config_file = std.Io.Dir.openFileAbsolute(io, creds_file_path.evaluated_path, .{}) catch null; const config_file = std.fs.openFileAbsolute(creds_file_path.evaluated_path, .{}) catch null;
defer if (config_file) |f| f.close(io); defer if (config_file) |f| f.close();
conf_file_creds = try credsForFile(allocator, io, config_file, profile); conf_file_creds = try credsForFile(allocator, config_file, profile);
} }
const access_key = keyFrom(allocator, creds_file_creds.access_key, conf_file_creds.access_key); const access_key = keyFrom(allocator, creds_file_creds.access_key, conf_file_creds.access_key);
const secret_key = keyFrom(allocator, creds_file_creds.secret_key, conf_file_creds.secret_key); const secret_key = keyFrom(allocator, creds_file_creds.secret_key, conf_file_creds.secret_key);
@ -468,10 +475,10 @@ const PartialCredentials = struct {
access_key: ?[]const u8 = null, access_key: ?[]const u8 = null,
secret_key: ?[]const u8 = null, secret_key: ?[]const u8 = null,
}; };
fn credsForFile(allocator: std.mem.Allocator, io: std.Io, file: ?std.Io.File, profile: []const u8) !PartialCredentials { fn credsForFile(allocator: std.mem.Allocator, file: ?std.fs.File, profile: []const u8) !PartialCredentials {
if (file == null) return PartialCredentials{}; if (file == null) return PartialCredentials{};
var fbuf: [1024]u8 = undefined; var fbuf: [1024]u8 = undefined;
var freader = file.?.reader(io, &fbuf); var freader = file.?.reader(&fbuf);
var reader = &freader.interface; var reader = &freader.interface;
const text = try reader.allocRemaining(allocator, .unlimited); const text = try reader.allocRemaining(allocator, .unlimited);
defer allocator.free(text); defer allocator.free(text);
@ -603,7 +610,6 @@ fn trimmed(text: []const u8, start: ?usize, end: ?usize) []const u8 {
fn filePath( fn filePath(
allocator: std.mem.Allocator, allocator: std.mem.Allocator,
map: *const std.process.Environ.Map,
specified_path: ?[]const u8, specified_path: ?[]const u8,
env_var_name: []const u8, env_var_name: []const u8,
config_dir: ?[]const u8, config_dir: ?[]const u8,
@ -611,28 +617,39 @@ fn filePath(
) !EvaluatedPath { ) !EvaluatedPath {
if (specified_path) |p| return EvaluatedPath{ .evaluated_path = try allocator.dupe(u8, p) }; if (specified_path) |p| return EvaluatedPath{ .evaluated_path = try allocator.dupe(u8, p) };
// Not specified. Check environment variable, otherwise, hard coded default // Not specified. Check environment variable, otherwise, hard coded default
if (getEnvironmentVariable(map, env_var_name)) |v| return EvaluatedPath{ .evaluated_path = try allocator.dupe(u8, v) }; if (try getEnvironmentVariable(allocator, env_var_name)) |v| return EvaluatedPath{ .evaluated_path = v };
// Not in environment variable either. Go fish // Not in environment variable either. Go fish
return try getDefaultPath(allocator, map, config_dir, ".aws", config_file_name); return try getDefaultPath(allocator, config_dir, ".aws", config_file_name);
} }
const EvaluatedPath = struct { const EvaluatedPath = struct {
home: ?[]const u8 = null, home: ?[]const u8 = null,
evaluated_path: []const u8, evaluated_path: []const u8,
}; };
fn getDefaultPath(allocator: std.mem.Allocator, map: *const std.process.Environ.Map, home_dir: ?[]const u8, dir: []const u8, file: []const u8) !EvaluatedPath { fn getDefaultPath(allocator: std.mem.Allocator, home_dir: ?[]const u8, dir: []const u8, file: []const u8) !EvaluatedPath {
const home = home_dir orelse try getHomeDir(allocator, map); const home = home_dir orelse try getHomeDir(allocator);
log.debug("Home directory: {s}", .{home}); log.debug("Home directory: {s}", .{home});
const rc = try std.fs.path.join(allocator, &[_][]const u8{ home, dir, file }); const rc = try std.fs.path.join(allocator, &[_][]const u8{ home, dir, file });
log.debug("Path evaluated as: {s}", .{rc}); log.debug("Path evaluated as: {s}", .{rc});
return EvaluatedPath{ .home = home, .evaluated_path = rc }; return EvaluatedPath{ .home = home, .evaluated_path = rc };
} }
fn getHomeDir(allocator: std.mem.Allocator, map: *const std.process.Environ.Map) ![]const u8 { fn getHomeDir(allocator: std.mem.Allocator) ![]const u8 {
const env_key = switch (builtin.os.tag) { switch (builtin.os.tag) {
.windows => "USERPROFILE", .windows => {
.macos, .linux, .freebsd, .netbsd, .dragonfly, .openbsd, .illumos => "HOME", return std.process.getEnvVarOwned(allocator, "USERPROFILE") catch |err| switch (err) {
error.OutOfMemory => |e| return e,
else => return error.HomeDirUnavailable,
};
},
.macos, .linux, .freebsd, .netbsd, .dragonfly, .openbsd, .solaris => {
const home_dir = std.posix.getenv("HOME") orelse {
// TODO look in /etc/passwd
return error.HomeDirUnavailable;
};
return allocator.dupe(u8, home_dir);
},
// Code from https://github.com/ziglang/zig/blob/9f9f215305389c08a21730859982b68bf2681932/lib/std/fs/get_app_data_dir.zig // Code from https://github.com/ziglang/zig/blob/9f9f215305389c08a21730859982b68bf2681932/lib/std/fs/get_app_data_dir.zig
// be_user_settings magic number is probably different for home directory // be_user_settings magic number is probably different for home directory
// .haiku => { // .haiku => {
@ -648,26 +665,17 @@ fn getHomeDir(allocator: std.mem.Allocator, map: *const std.process.Environ.Map)
// } // }
// }, // },
else => @compileError("Unsupported OS"), else => @compileError("Unsupported OS"),
}; }
return try allocator.dupe(u8, getEnvironmentVariable(map, env_key) orelse return error.HomeDirUnavailable);
} }
test "filePath" { test "filePath" {
const allocator = std.testing.allocator; const allocator = std.testing.allocator;
var map = std.process.Environ.Map.init(allocator);
defer map.deinit();
try map.put("USERPROFILE", "c:\\users\\myuser");
try map.put("HOME", "/home/user");
// std.testing.log_level = .debug; // std.testing.log_level = .debug;
// log.debug("\n", .{}); // log.debug("\n", .{});
const path = try filePath(allocator, &map, null, "NOTHING", null, "hello"); const path = try filePath(allocator, null, "NOTHING", null, "hello");
defer allocator.free(path.evaluated_path); defer allocator.free(path.evaluated_path);
defer allocator.free(path.home.?); defer allocator.free(path.home.?);
// try std.testing.expect(path.evaluated_path.len > 10); try std.testing.expect(path.evaluated_path.len > 10);
if (builtin.os.tag == .windows)
try std.testing.expectEqualStrings("c:\\users\\myuser\\.aws\\hello", path.evaluated_path)
else
try std.testing.expectEqualStrings("/home/user/.aws/hello", path.evaluated_path);
try std.testing.expectEqualStrings("hello", path.evaluated_path[path.evaluated_path.len - 5 ..]); try std.testing.expectEqualStrings("hello", path.evaluated_path[path.evaluated_path.len - 5 ..]);
try std.testing.expect(path.home != null); try std.testing.expect(path.home != null);
} }

View file

@ -103,7 +103,6 @@ pub const Mock = struct {
context: usize = 0, context: usize = 0,
request_fn: *const fn ( request_fn: *const fn (
usize, usize,
std.Io,
std.http.Method, std.http.Method,
std.Uri, std.Uri,
std.http.Client.RequestOptions, std.http.Client.RequestOptions,
@ -112,8 +111,8 @@ pub const Mock = struct {
receive_head: *const fn (usize) std.http.Client.Request.ReceiveHeadError!std.http.Client.Response, receive_head: *const fn (usize) std.http.Client.Request.ReceiveHeadError!std.http.Client.Response,
reader_decompressing: *const fn (usize) *std.Io.Reader, reader_decompressing: *const fn (usize) *std.Io.Reader,
fn request(m: Mock, io: std.Io, method: std.http.Method, uri: std.Uri, options: std.http.Client.RequestOptions) std.http.Client.RequestError!std.http.Client.Request { fn request(m: Mock, method: std.http.Method, uri: std.Uri, options: std.http.Client.RequestOptions) std.http.Client.RequestError!std.http.Client.Request {
return m.request_fn(m.context, io, method, uri, options); return m.request_fn(m.context, method, uri, options);
} }
fn sendBodyComplete(m: Mock, body: []u8) std.Io.Writer.Error!void { fn sendBodyComplete(m: Mock, body: []u8) std.Io.Writer.Error!void {
return m.send_body_complete(m.context, body); return m.send_body_complete(m.context, body);
@ -147,17 +146,13 @@ const EndPoint = struct {
pub const AwsHttp = struct { pub const AwsHttp = struct {
allocator: std.mem.Allocator, allocator: std.mem.Allocator,
proxy: ?std.http.Client.Proxy, proxy: ?std.http.Client.Proxy,
io: std.Io,
map: *const std.process.Environ.Map,
const Self = @This(); const Self = @This();
pub fn init(allocator: std.mem.Allocator, io: std.Io, map: *const std.process.Environ.Map, proxy: ?std.http.Client.Proxy) Self { pub fn init(allocator: std.mem.Allocator, proxy: ?std.http.Client.Proxy) Self {
return Self{ return Self{
.allocator = allocator, .allocator = allocator,
.proxy = proxy, .proxy = proxy,
.io = io,
.map = map,
// .credentialsProvider = // creds provider could be useful // .credentialsProvider = // creds provider could be useful
}; };
} }
@ -189,11 +184,11 @@ pub const AwsHttp = struct {
// S3 control uses <account-id>.s3-control.<region>.amazonaws.com // S3 control uses <account-id>.s3-control.<region>.amazonaws.com
// //
// So this regionSubDomain call needs to handle generic customization // So this regionSubDomain call needs to handle generic customization
const endpoint = try endpointForRequest(self.allocator, service, request, self.map, options); const endpoint = try endpointForRequest(self.allocator, service, request, options);
defer endpoint.deinit(); defer endpoint.deinit();
log.debug("Calling endpoint {s}", .{endpoint.uri}); log.debug("Calling endpoint {s}", .{endpoint.uri});
// TODO: Should we allow customization here? // TODO: Should we allow customization here?
const creds = try credentials.getCredentials(self.allocator, self.map, self.io, options.credential_options); const creds = try credentials.getCredentials(self.allocator, options.credential_options);
defer creds.deinit(); defer creds.deinit();
const signing_config: signing.Config = .{ const signing_config: signing.Config = .{
.region = getRegion(service, options.region), .region = getRegion(service, options.region),
@ -241,21 +236,21 @@ pub const AwsHttp = struct {
// We will use endpoint instead // We will use endpoint instead
request_cp.path = endpoint.path; request_cp.path = endpoint.path;
var request_headers: std.ArrayList(std.http.Header) = .empty; var request_headers = std.ArrayList(std.http.Header){};
defer request_headers.deinit(self.allocator); defer request_headers.deinit(self.allocator);
const len = try addHeaders(self.allocator, &request_headers, endpoint.host, request_cp.body, request_cp.content_type, request_cp.headers); const len = try addHeaders(self.allocator, &request_headers, endpoint.host, request_cp.body, request_cp.content_type, request_cp.headers);
defer if (len) |l| self.allocator.free(l); defer if (len) |l| self.allocator.free(l);
request_cp.headers = request_headers.items; request_cp.headers = request_headers.items;
if (signing_config) |opts| request_cp = try signing.signRequest(self.allocator, self.io, request_cp, opts); if (signing_config) |opts| request_cp = try signing.signRequest(self.allocator, request_cp, opts);
defer { defer {
if (signing_config) |opts| { if (signing_config) |opts| {
signing.freeSignedRequest(self.allocator, &request_cp, opts); signing.freeSignedRequest(self.allocator, &request_cp, opts);
} }
} }
var headers: std.ArrayList(std.http.Header) = .empty; var headers = std.ArrayList(std.http.Header){};
defer headers.deinit(self.allocator); defer headers.deinit(self.allocator);
for (request_cp.headers) |header| for (request_cp.headers) |header|
try headers.append(self.allocator, .{ .name = header.name, .value = header.value }); try headers.append(self.allocator, .{ .name = header.name, .value = header.value });
@ -268,7 +263,7 @@ pub const AwsHttp = struct {
defer self.allocator.free(url); defer self.allocator.free(url);
log.debug("Request url: {s}", .{url}); log.debug("Request url: {s}", .{url});
// TODO: Fix this proxy stuff. This is all a kludge just to compile, but std.http.Client has it all built in now // TODO: Fix this proxy stuff. This is all a kludge just to compile, but std.http.Client has it all built in now
var cl = std.http.Client{ .allocator = self.allocator, .io = self.io, .https_proxy = if (self.proxy) |*p| @constCast(p) else null }; var cl = std.http.Client{ .allocator = self.allocator, .https_proxy = if (self.proxy) |*p| @constCast(p) else null };
defer cl.deinit(); // TODO: Connection pooling defer cl.deinit(); // TODO: Connection pooling
const method = std.meta.stringToEnum(std.http.Method, request_cp.method).?; const method = std.meta.stringToEnum(std.http.Method, request_cp.method).?;
@ -290,7 +285,7 @@ pub const AwsHttp = struct {
}; };
var req = if (options.mock) |m| var req = if (options.mock) |m|
try m.request(self.io, method, uri, req_options) // This will call the test harness try m.request(method, uri, req_options) // This will call the test harness
else else
try cl.request(method, uri, req_options); try cl.request(method, uri, req_options);
defer req.deinit(); defer req.deinit();
@ -331,7 +326,7 @@ pub const AwsHttp = struct {
.{ @intFromEnum(response.head.status), response.head.status.phrase() }, .{ @intFromEnum(response.head.status), response.head.status.phrase() },
); );
log.debug("Response headers:", .{}); log.debug("Response headers:", .{});
var resp_headers: std.ArrayList(Header) = .empty; var resp_headers = std.ArrayList(Header){};
defer resp_headers.deinit(self.allocator); defer resp_headers.deinit(self.allocator);
var it = response.head.iterateHeaders(); var it = response.head.iterateHeaders();
while (it.next()) |h| { // even though we don't expect to fill the buffer, while (it.next()) |h| { // even though we don't expect to fill the buffer,
@ -372,7 +367,6 @@ pub const AwsHttp = struct {
.body = try aw.toOwnedSlice(), .body = try aw.toOwnedSlice(),
.headers = try resp_headers.toOwnedSlice(self.allocator), .headers = try resp_headers.toOwnedSlice(self.allocator),
.allocator = self.allocator, .allocator = self.allocator,
.io = self.io,
}; };
return rc; return rc;
} }
@ -416,22 +410,25 @@ fn addHeaders(
return null; return null;
} }
fn getEnvironmentVariable(map: *const std.process.Environ.Map, key: []const u8) ?[]const u8 { fn getEnvironmentVariable(allocator: std.mem.Allocator, key: []const u8) !?[]const u8 {
if (!map.contains(key)) return null; return std.process.getEnvVarOwned(allocator, key) catch |e| switch (e) {
return map.get(key); std.process.GetEnvVarOwnedError.EnvironmentVariableNotFound => return null,
else => return e,
};
} }
/// override endpoint url. Intended for use in testing. Normally, you should /// override endpoint url. Intended for use in testing. Normally, you should
/// rely on AWS_ENDPOINT_URL environment variable for this /// rely on AWS_ENDPOINT_URL environment variable for this
pub var endpoint_override: ?[]const u8 = null; pub var endpoint_override: ?[]const u8 = null;
fn endpointForRequest(allocator: std.mem.Allocator, service: []const u8, request: HttpRequest, map: *const std.process.Environ.Map, options: Options) !EndPoint { fn endpointForRequest(allocator: std.mem.Allocator, service: []const u8, request: HttpRequest, options: Options) !EndPoint {
if (endpoint_override) |override| { if (endpoint_override) |override| {
const uri = try allocator.dupe(u8, override); const uri = try allocator.dupe(u8, override);
return endPointFromUri(allocator, uri, request.path); return endPointFromUri(allocator, uri, request.path);
} }
const environment_override = getEnvironmentVariable(map, "AWS_ENDPOINT_URL"); const environment_override = try getEnvironmentVariable(allocator, "AWS_ENDPOINT_URL");
if (environment_override) |override| { if (environment_override) |override| {
defer allocator.free(override);
const uri = try allocator.dupe(u8, override); const uri = try allocator.dupe(u8, override);
return endPointFromUri(allocator, uri, request.path); return endPointFromUri(allocator, uri, request.path);
} }
@ -578,8 +575,7 @@ test "endpointForRequest standard operation" {
const allocator = std.testing.allocator; const allocator = std.testing.allocator;
const service = "dynamodb"; const service = "dynamodb";
const map = std.process.Environ.Map.init(allocator); const endpoint = try endpointForRequest(allocator, service, request, options);
const endpoint = try endpointForRequest(allocator, service, request, &map, options);
defer endpoint.deinit(); defer endpoint.deinit();
try std.testing.expectEqualStrings("https://dynamodb.us-west-2.amazonaws.com", endpoint.uri); try std.testing.expectEqualStrings("https://dynamodb.us-west-2.amazonaws.com", endpoint.uri);
} }
@ -594,8 +590,7 @@ test "endpointForRequest for cloudfront" {
const allocator = std.testing.allocator; const allocator = std.testing.allocator;
const service = "cloudfront"; const service = "cloudfront";
const map = std.process.Environ.Map.init(allocator); const endpoint = try endpointForRequest(allocator, service, request, options);
const endpoint = try endpointForRequest(allocator, service, request, &map, options);
defer endpoint.deinit(); defer endpoint.deinit();
try std.testing.expectEqualStrings("https://cloudfront.amazonaws.com", endpoint.uri); try std.testing.expectEqualStrings("https://cloudfront.amazonaws.com", endpoint.uri);
} }
@ -610,8 +605,7 @@ test "endpointForRequest for s3" {
const allocator = std.testing.allocator; const allocator = std.testing.allocator;
const service = "s3"; const service = "s3";
const map = std.process.Environ.Map.init(allocator); const endpoint = try endpointForRequest(allocator, service, request, options);
const endpoint = try endpointForRequest(allocator, service, request, &map, options);
defer endpoint.deinit(); defer endpoint.deinit();
try std.testing.expectEqualStrings("https://s3.us-east-2.amazonaws.com", endpoint.uri); try std.testing.expectEqualStrings("https://s3.us-east-2.amazonaws.com", endpoint.uri);
} }
@ -627,8 +621,7 @@ test "endpointForRequest for s3 - specific bucket" {
const allocator = std.testing.allocator; const allocator = std.testing.allocator;
const service = "s3"; const service = "s3";
const map = std.process.Environ.Map.init(allocator); const endpoint = try endpointForRequest(allocator, service, request, options);
const endpoint = try endpointForRequest(allocator, service, request, &map, options);
defer endpoint.deinit(); defer endpoint.deinit();
try std.testing.expectEqualStrings("https://bucket.s3.us-east-2.amazonaws.com", endpoint.uri); try std.testing.expectEqualStrings("https://bucket.s3.us-east-2.amazonaws.com", endpoint.uri);
try std.testing.expectEqualStrings("/key", endpoint.path); try std.testing.expectEqualStrings("/key", endpoint.path);

View file

@ -13,8 +13,6 @@ pub const Result = struct {
body: []const u8, body: []const u8,
headers: []const std.http.Header, headers: []const std.http.Header,
allocator: std.mem.Allocator, allocator: std.mem.Allocator,
/// The io that was used for the request
io: std.Io,
pub fn deinit(self: Result) void { pub fn deinit(self: Result) void {
self.allocator.free(self.body); self.allocator.free(self.body);

View file

@ -157,7 +157,7 @@ pub const SigningError = error{
XAmzExpiresHeaderInRequest, XAmzExpiresHeaderInRequest,
/// Used if the request headers already includes x-amz-region-set /// Used if the request headers already includes x-amz-region-set
XAmzRegionSetHeaderInRequest, XAmzRegionSetHeaderInRequest,
} || error{OutOfMemory}; // || std.Io.Clock.Error; } || error{OutOfMemory};
const forbidden_headers = .{ const forbidden_headers = .{
.{ .name = "x-amz-content-sha256", .err = SigningError.XAmzContentSha256HeaderInRequest }, .{ .name = "x-amz-content-sha256", .err = SigningError.XAmzContentSha256HeaderInRequest },
@ -185,7 +185,7 @@ const skipped_headers = .{
/// Signs a request. Only header signing is currently supported. Note that /// Signs a request. Only header signing is currently supported. Note that
/// This adds two headers to the request, which will need to be freed by the /// This adds two headers to the request, which will need to be freed by the
/// caller. Use freeSignedRequest with the same parameters to free /// caller. Use freeSignedRequest with the same parameters to free
pub fn signRequest(allocator: std.mem.Allocator, io: std.Io, request: base.Request, config: Config) SigningError!base.Request { pub fn signRequest(allocator: std.mem.Allocator, request: base.Request, config: Config) SigningError!base.Request {
try validateConfig(config); try validateConfig(config);
for (request.headers) |h| { for (request.headers) |h| {
inline for (forbidden_headers) |f| { inline for (forbidden_headers) |f| {
@ -195,10 +195,7 @@ pub fn signRequest(allocator: std.mem.Allocator, io: std.Io, request: base.Reque
} }
var rc = request; var rc = request;
const signing_time = config.signing_time orelse blk: { const signing_time = config.signing_time orelse std.time.timestamp();
const now = std.Io.Clock.Timestamp.now(io, .real);
break :blk @as(i64, @intCast(@divFloor(now.raw.nanoseconds, std.time.ns_per_s)));
};
const signed_date = date.timestampToDateTime(signing_time); const signed_date = date.timestampToDateTime(signing_time);
@ -336,7 +333,9 @@ pub fn signRequest(allocator: std.mem.Allocator, io: std.Io, request: base.Reque
pub fn freeSignedRequest(allocator: std.mem.Allocator, request: *base.Request, config: Config) void { pub fn freeSignedRequest(allocator: std.mem.Allocator, request: *base.Request, config: Config) void {
validateConfig(config) catch |e| { validateConfig(config) catch |e| {
log.err("Signing validation failed during signature free: {}", .{e}); log.err("Signing validation failed during signature free: {}", .{e});
std.debug.dumpCurrentStackTrace(.{}); if (@errorReturnTrace()) |trace| {
std.debug.dumpStackTrace(trace.*);
}
return; return;
}; };
@ -353,10 +352,10 @@ pub fn freeSignedRequest(allocator: std.mem.Allocator, request: *base.Request, c
pub const credentialsFn = *const fn ([]const u8) ?Credentials; pub const credentialsFn = *const fn ([]const u8) ?Credentials;
pub fn verifyServerRequest(allocator: std.mem.Allocator, io: std.Io, request: *std.http.Server.Request, request_body_reader: *std.Io.Reader, credentials_fn: credentialsFn) !bool { pub fn verifyServerRequest(allocator: std.mem.Allocator, request: *std.http.Server.Request, request_body_reader: *std.Io.Reader, credentials_fn: credentialsFn) !bool {
var unverified_request = try UnverifiedRequest.init(allocator, request); var unverified_request = try UnverifiedRequest.init(allocator, request);
defer unverified_request.deinit(); defer unverified_request.deinit();
return verify(allocator, io, unverified_request, request_body_reader, credentials_fn); return verify(allocator, unverified_request, request_body_reader, credentials_fn);
} }
pub const UnverifiedRequest = struct { pub const UnverifiedRequest = struct {
@ -367,7 +366,7 @@ pub const UnverifiedRequest = struct {
raw: *std.http.Server.Request, raw: *std.http.Server.Request,
pub fn init(allocator: std.mem.Allocator, request: *std.http.Server.Request) !UnverifiedRequest { pub fn init(allocator: std.mem.Allocator, request: *std.http.Server.Request) !UnverifiedRequest {
var al = std.ArrayList(std.http.Header).empty; var al = std.ArrayList(std.http.Header){};
defer al.deinit(allocator); defer al.deinit(allocator);
var it = request.iterateHeaders(); var it = request.iterateHeaders();
while (it.next()) |h| try al.append(allocator, h); while (it.next()) |h| try al.append(allocator, h);
@ -394,7 +393,7 @@ pub const UnverifiedRequest = struct {
} }
}; };
pub fn verify(allocator: std.mem.Allocator, io: std.Io, request: UnverifiedRequest, request_body_reader: *std.Io.Reader, credentials_fn: credentialsFn) !bool { pub fn verify(allocator: std.mem.Allocator, request: UnverifiedRequest, request_body_reader: *std.Io.Reader, credentials_fn: credentialsFn) !bool {
var arena = std.heap.ArenaAllocator.init(allocator); var arena = std.heap.ArenaAllocator.init(allocator);
defer arena.deinit(); defer arena.deinit();
const aa = arena.allocator(); const aa = arena.allocator();
@ -426,7 +425,6 @@ pub fn verify(allocator: std.mem.Allocator, io: std.Io, request: UnverifiedReque
if (signature == null) return error.AuthorizationHeaderMissingSignature; if (signature == null) return error.AuthorizationHeaderMissingSignature;
return verifyParsedAuthorization( return verifyParsedAuthorization(
aa, aa,
io,
request, request,
credential.?, credential.?,
signed_headers.?, signed_headers.?,
@ -438,7 +436,6 @@ pub fn verify(allocator: std.mem.Allocator, io: std.Io, request: UnverifiedReque
fn verifyParsedAuthorization( fn verifyParsedAuthorization(
allocator: std.mem.Allocator, allocator: std.mem.Allocator,
io: std.Io,
request: UnverifiedRequest, request: UnverifiedRequest,
credential: []const u8, credential: []const u8,
signed_headers: []const u8, signed_headers: []const u8,
@ -505,7 +502,7 @@ fn verifyParsedAuthorization(
signed_request.query = request.target[signed_request.path.len..]; // TODO: should this be +1? query here would include '?' signed_request.query = request.target[signed_request.path.len..]; // TODO: should this be +1? query here would include '?'
signed_request.body = try request_body_reader.allocRemaining(allocator, .unlimited); signed_request.body = try request_body_reader.allocRemaining(allocator, .unlimited);
defer allocator.free(signed_request.body); defer allocator.free(signed_request.body);
signed_request = try signRequest(allocator, io, signed_request, config); signed_request = try signRequest(allocator, signed_request, config);
defer freeSignedRequest(allocator, &signed_request, config); defer freeSignedRequest(allocator, &signed_request, config);
return verifySignedRequest(signed_request, signature); return verifySignedRequest(signed_request, signature);
} }
@ -809,7 +806,7 @@ fn canonicalQueryString(allocator: std.mem.Allocator, path: []const u8) ![]const
// Split this by component // Split this by component
var portions = std.mem.splitScalar(u8, query, '&'); var portions = std.mem.splitScalar(u8, query, '&');
var sort_me = std.ArrayList([]const u8).empty; var sort_me = std.ArrayList([]const u8){};
defer sort_me.deinit(allocator); defer sort_me.deinit(allocator);
while (portions.next()) |item| while (portions.next()) |item|
try sort_me.append(allocator, item); try sort_me.append(allocator, item);
@ -1103,7 +1100,6 @@ test "can sign" {
// [debug] (awshttp): Content-Length: 43 // [debug] (awshttp): Content-Length: 43
const allocator = std.testing.allocator; const allocator = std.testing.allocator;
const io = std.testing.io;
var headers = try std.ArrayList(std.http.Header).initCapacity(allocator, 5); var headers = try std.ArrayList(std.http.Header).initCapacity(allocator, 5);
defer headers.deinit(allocator); defer headers.deinit(allocator);
try headers.append(allocator, .{ .name = "Content-Type", .value = "application/x-www-form-urlencoded; charset=utf-8" }); try headers.append(allocator, .{ .name = "Content-Type", .value = "application/x-www-form-urlencoded; charset=utf-8" });
@ -1135,7 +1131,7 @@ test "can sign" {
.signing_time = 1440938160, // 20150830T123600Z .signing_time = 1440938160, // 20150830T123600Z
}; };
// TODO: There is an x-amz-content-sha256. Investigate // TODO: There is an x-amz-content-sha256. Investigate
var signed_req = try signRequest(allocator, io, req, config); var signed_req = try signRequest(allocator, req, config);
defer freeSignedRequest(allocator, &signed_req, config); defer freeSignedRequest(allocator, &signed_req, config);
try std.testing.expectEqualStrings("X-Amz-Date", signed_req.headers[signed_req.headers.len - 3].name); try std.testing.expectEqualStrings("X-Amz-Date", signed_req.headers[signed_req.headers.len - 3].name);
@ -1155,7 +1151,6 @@ test "can sign" {
var test_credential: ?Credentials = null; var test_credential: ?Credentials = null;
test "can verify server request" { test "can verify server request" {
const allocator = std.testing.allocator; const allocator = std.testing.allocator;
const io = std.testing.io;
const access_key = try allocator.dupe(u8, "ACCESS"); const access_key = try allocator.dupe(u8, "ACCESS");
const secret_key = try allocator.dupe(u8, "SECRET"); const secret_key = try allocator.dupe(u8, "SECRET");
@ -1196,7 +1191,7 @@ test "can verify server request" {
// const old_level = std.testing.log_level; // const old_level = std.testing.log_level;
// std.testing.log_level = .debug; // std.testing.log_level = .debug;
// defer std.testing.log_level = old_level; // defer std.testing.log_level = old_level;
try std.testing.expect(try verifyServerRequest(allocator, io, &request, &body_reader, struct { try std.testing.expect(try verifyServerRequest(allocator, &request, &body_reader, struct {
cred: Credentials, cred: Credentials,
const Self = @This(); const Self = @This();
@ -1208,7 +1203,6 @@ test "can verify server request" {
} }
test "can verify server request without x-amz-content-sha256" { test "can verify server request without x-amz-content-sha256" {
const allocator = std.testing.allocator; const allocator = std.testing.allocator;
const io = std.testing.io;
const access_key = try allocator.dupe(u8, "ACCESS"); const access_key = try allocator.dupe(u8, "ACCESS");
const secret_key = try allocator.dupe(u8, "SECRET"); const secret_key = try allocator.dupe(u8, "SECRET");
@ -1299,7 +1293,7 @@ test "can verify server request without x-amz-content-sha256" {
} }
{ // verification { // verification
try std.testing.expect(try verifyServerRequest(allocator, io, &request, &body_reader, struct { try std.testing.expect(try verifyServerRequest(allocator, &request, &body_reader, struct {
cred: Credentials, cred: Credentials,
const Self = @This(); const Self = @This();

View file

@ -253,8 +253,6 @@ const TestOptions = struct {
}; };
const TestSetup = struct { const TestSetup = struct {
allocator: std.mem.Allocator, allocator: std.mem.Allocator,
io: std.Io,
map: *const std.process.Environ.Map,
options: TestOptions, options: TestOptions,
creds: aws_auth.Credentials, creds: aws_auth.Credentials,
client: aws.Client, client: aws.Client,
@ -265,6 +263,7 @@ const TestSetup = struct {
pub const RequestActuals = struct { pub const RequestActuals = struct {
request: *std.http.Client.Request, request: *std.http.Client.Request,
trace: []const u8,
// Looks like uri might be getting trounced before deinit // Looks like uri might be getting trounced before deinit
request_uri: []const u8, request_uri: []const u8,
@ -303,9 +302,9 @@ const TestSetup = struct {
} }
allocator.free(self.extra_headers); allocator.free(self.extra_headers);
allocator.free(self.trace);
allocator.free(self.request_uri); allocator.free(self.request_uri);
allocator.destroy(self.request.reader.in); allocator.destroy(self.request.reader.in);
allocator.destroy(self.request.client);
allocator.destroy(self.request); allocator.destroy(self.request);
} }
}; };
@ -323,39 +322,32 @@ const TestSetup = struct {
fn request( fn request(
self_ptr: usize, self_ptr: usize,
io: std.Io,
method: std.http.Method, method: std.http.Method,
uri: std.Uri, uri: std.Uri,
options: std.http.Client.RequestOptions, options: std.http.Client.RequestOptions,
) std.http.Client.RequestError!std.http.Client.Request { ) std.http.Client.RequestError!std.http.Client.Request {
_ = io;
const self: *Self = @ptrFromInt(self_ptr); const self: *Self = @ptrFromInt(self_ptr);
if (self.request_actuals) |_| { if (self.request_actuals) |r| {
std.debug.print("request has been called twice:\n", .{}); std.debug.print("request has been called twice. Previous stack trace:\n", .{});
var stderr = std.fs.File.stderr().writer(&.{});
stderr.interface.writeAll(r.trace) catch @panic("could not write to stderr");
std.debug.print("Current stack trace:\n", .{}); std.debug.print("Current stack trace:\n", .{});
std.debug.dumpCurrentStackTrace(.{}); std.debug.dumpCurrentStackTrace(null);
return error.ConnectionRefused; // we should not be called twice return error.ConnectionRefused; // we should not be called twice
} }
const acts = try self.allocator.create(RequestActuals); const acts = try self.allocator.create(RequestActuals);
errdefer self.allocator.destroy(acts); errdefer self.allocator.destroy(acts);
var aw = std.Io.Writer.Allocating.init(self.allocator);
defer aw.deinit();
std.debug.dumpCurrentStackTraceToWriter(null, &aw.writer) catch return error.OutOfMemory;
const req = try self.allocator.create(std.http.Client.Request); const req = try self.allocator.create(std.http.Client.Request);
errdefer self.allocator.destroy(req); errdefer self.allocator.destroy(req);
const reader = try self.allocator.create(std.Io.Reader); const reader = try self.allocator.create(std.Io.Reader);
errdefer self.allocator.destroy(reader); errdefer self.allocator.destroy(reader);
reader.* = .fixed(self.options.server_response); reader.* = .fixed(self.options.server_response);
// Create a minimal mock client that only provides io for deinit
// By creating it with the allocator, we leave critical fields like
// connection_pool as undefined, which will fail spectacularly if
// a real request were to be attempted
const mock_client = try self.allocator.create(std.http.Client);
errdefer self.allocator.destroy(mock_client);
mock_client.* = .{
.allocator = self.allocator,
.io = self.io,
};
req.* = .{ req.* = .{
.uri = uri, .uri = uri,
.client = mock_client, .client = undefined,
.connection = options.connection, .connection = options.connection,
.reader = .{ .reader = .{
.in = reader, .in = reader,
@ -381,6 +373,7 @@ const TestSetup = struct {
}); });
acts.* = .{ acts.* = .{
.trace = try self.allocator.dupe(u8, aw.written()),
.request = req, .request = req,
.request_uri = try std.fmt.allocPrint(self.allocator, "{f}", .{uri}), .request_uri = try std.fmt.allocPrint(self.allocator, "{f}", .{uri}),
.extra_headers = try al.toOwnedSlice(self.allocator), .extra_headers = try al.toOwnedSlice(self.allocator),
@ -439,10 +432,7 @@ const TestSetup = struct {
return self.request_actuals.?.request.reader.in; return self.request_actuals.?.request.reader.in;
} }
fn init(options: TestOptions) !*Self { fn init(options: TestOptions) !*Self {
const io = std.testing.io; const client = aws.Client.init(options.allocator, .{});
const map = try options.allocator.create(std.process.Environ.Map);
map.* = std.process.Environ.Map.init(options.allocator);
const client = aws.Client.init(options.allocator, .{ .io = io, .map = map });
const call_options = try options.allocator.create(aws.Options); const call_options = try options.allocator.create(aws.Options);
const self = try options.allocator.create(Self); const self = try options.allocator.create(Self);
call_options.* = .{ call_options.* = .{
@ -462,8 +452,6 @@ const TestSetup = struct {
self.* = .{ self.* = .{
.options = options, .options = options,
.allocator = options.allocator, .allocator = options.allocator,
.io = io,
.map = map,
.creds = aws_auth.Credentials.init( .creds = aws_auth.Credentials.init(
options.allocator, options.allocator,
try options.allocator.dupe(u8, "ACCESS"), try options.allocator.dupe(u8, "ACCESS"),
@ -477,7 +465,6 @@ const TestSetup = struct {
return self; return self;
} }
fn deinit(self: *Self) void { fn deinit(self: *Self) void {
self.options.allocator.destroy(self.map);
if (self.response_actuals) |r| { if (self.response_actuals) |r| {
self.allocator.free(r.body); self.allocator.free(r.body);
self.allocator.destroy(r); self.allocator.destroy(r);
@ -1177,9 +1164,13 @@ test "json_1_1: ECR timestamps" {
try std.testing.expectEqualStrings("https://146325435496.dkr.ecr.us-west-2.amazonaws.com", call.response.authorization_data.?[0].proxy_endpoint.?); try std.testing.expectEqualStrings("https://146325435496.dkr.ecr.us-west-2.amazonaws.com", call.response.authorization_data.?[0].proxy_endpoint.?);
// try std.testing.expectEqual(@as(i64, 1.73859841557E9), call.response.authorization_data.?[0].expires_at.?); // try std.testing.expectEqual(@as(i64, 1.73859841557E9), call.response.authorization_data.?[0].expires_at.?);
const expected_ts = try date.Timestamp.parse("2022-05-17T06:56:13.652000+00:00"); const zeit = @import("zeit");
const actual = call.response.authorization_data.?[0].expires_at.?; const expected_ins = try zeit.instant(.{
try std.testing.expectEqual(expected_ts, actual); .source = .{ .iso8601 = "2022-05-17T06:56:13.652000+00:00" },
});
const expected_ts: date.Timestamp = @enumFromInt(expected_ins.timestamp);
try std.testing.expectEqual(expected_ts, call.response.authorization_data.?[0].expires_at.?);
} }
test "jsonStringify: structure + enums" { test "jsonStringify: structure + enums" {
@ -1330,8 +1321,7 @@ test "jsonStringify nullable object" {
test "works against a live server" { test "works against a live server" {
const Server = struct { const Server = struct {
allocator: std.mem.Allocator, allocator: std.mem.Allocator,
io: std.Io, ready: std.Thread.Semaphore = .{},
ready: std.Io.Semaphore = .{},
requests_received: usize = 0, requests_received: usize = 0,
thread: ?std.Thread = null, thread: ?std.Thread = null,
listening_uri: []const u8 = undefined, listening_uri: []const u8 = undefined,
@ -1353,7 +1343,7 @@ test "works against a live server" {
threadMain, threadMain,
.{self}, .{self},
); );
try self.ready.wait(self.io); // This could hang the test... try self.ready.timedWait(1000 * std.time.ns_per_ms);
awshttp.endpoint_override = self.listening_uri; awshttp.endpoint_override = self.listening_uri;
if (awshttp.endpoint_override == null) return error.TestSetupStartFailure; if (awshttp.endpoint_override == null) return error.TestSetupStartFailure;
std.log.debug("endpoint override set to {?s}", .{awshttp.endpoint_override}); std.log.debug("endpoint override set to {?s}", .{awshttp.endpoint_override});
@ -1362,7 +1352,7 @@ test "works against a live server" {
pub fn stop(self: *Server) !void { pub fn stop(self: *Server) !void {
if (self.thread == null) return; // thread not started, nothing to do if (self.thread == null) return; // thread not started, nothing to do
// post stop message // post stop message
var client = std.http.Client{ .allocator = self.allocator, .io = self.io }; var client = std.http.Client{ .allocator = self.allocator };
_ = try client.fetch(.{ // we ignore return because that should just shut down _ = try client.fetch(.{ // we ignore return because that should just shut down
.method = .POST, .method = .POST,
.payload = "quit", .payload = "quit",
@ -1373,24 +1363,24 @@ test "works against a live server" {
} }
fn threadMain(self: *Server) !void { fn threadMain(self: *Server) !void {
const address = try std.Io.net.IpAddress.parseLiteral("127.0.0.1:0"); const address = try std.net.Address.parseIp("127.0.0.1", 0);
var server = try address.listen(self.io, .{}); var server = try address.listen(.{});
defer server.deinit(self.io); defer server.deinit();
const server_port = server.socket.address.getPort(); const server_port = server.listen_address.in.getPort();
self.listening_uri = try std.fmt.allocPrint(self.allocator, "http://127.0.0.1:{d}", .{server_port}); self.listening_uri = try std.fmt.allocPrint(self.allocator, "http://127.0.0.1:{d}", .{server_port});
defer { defer {
self.allocator.free(self.listening_uri); self.allocator.free(self.listening_uri);
self.listening_uri = undefined; self.listening_uri = undefined;
} }
self.ready.post(self.io); self.ready.post();
while (true) { while (true) {
var connection = try server.accept(self.io); var connection = try server.accept();
defer connection.close(self.io); defer connection.stream.close();
var recv_buffer: [4000]u8 = undefined; var recv_buffer: [4000]u8 = undefined;
var send_buffer: [4000]u8 = undefined; var send_buffer: [4000]u8 = undefined;
var conn_reader = connection.reader(self.io, &recv_buffer); var conn_reader = connection.stream.reader(&recv_buffer);
var conn_writer = connection.writer(self.io, &send_buffer); var conn_writer = connection.stream.writer(&send_buffer);
var http_server = std.http.Server.init(&conn_reader.interface, &conn_writer.interface); var http_server = std.http.Server.init(conn_reader.interface(), &conn_writer.interface);
while (http_server.reader.state == .ready) { while (http_server.reader.state == .ready) {
var req = try http_server.receiveHead(); var req = try http_server.receiveHead();
if (req.head.content_length) |l| { if (req.head.content_length) |l| {
@ -1415,9 +1405,7 @@ test "works against a live server" {
} }
}; };
const allocator = std.testing.allocator; const allocator = std.testing.allocator;
const io = std.testing.io; var server = Server{ .allocator = allocator };
const map = std.process.Environ.Map.init(allocator);
var server = Server{ .allocator = allocator, .io = io };
try server.start(); try server.start();
var stopped = false; var stopped = false;
defer if (!stopped) server.stop() catch log.err("error stopping server", .{}); defer if (!stopped) server.stop() catch log.err("error stopping server", .{});
@ -1437,7 +1425,7 @@ test "works against a live server" {
// } // }
const sts = (Services(.{.sts}){}).sts; const sts = (Services(.{.sts}){}).sts;
const client = aws.Client.init(std.testing.allocator, .{ .io = io, .map = &map }); const client = aws.Client.init(std.testing.allocator, .{});
const creds = aws_auth.Credentials.init( const creds = aws_auth.Credentials.init(
allocator, allocator,
try allocator.dupe(u8, "ACCESS"), try allocator.dupe(u8, "ACCESS"),

View file

@ -32,9 +32,10 @@ pub fn log(
const prefix = "[" ++ @tagName(level) ++ "] " ++ scope_prefix; const prefix = "[" ++ @tagName(level) ++ "] " ++ scope_prefix;
// Print the message to stderr, silently ignoring any errors // Print the message to stderr, silently ignoring any errors
const locked = std.debug.lockStderr(&.{}); std.debug.lockStdErr();
defer std.debug.unlockStderr(); defer std.debug.unlockStdErr();
const stderr = &locked.file_writer.interface; var stderr_writer = std.fs.File.stderr().writer(&.{});
const stderr = &stderr_writer.interface;
nosuspend stderr.print(prefix ++ format ++ "\n", args) catch return; nosuspend stderr.print(prefix ++ format ++ "\n", args) catch return;
} }
@ -58,15 +59,16 @@ const Tests = enum {
rest_xml_work_with_s3, rest_xml_work_with_s3,
}; };
pub fn main(init: std.process.Init) anyerror!void { pub fn main() anyerror!void {
const allocator = init.gpa; var gpa = std.heap.GeneralPurposeAllocator(.{}){};
const io = init.io; defer _ = gpa.deinit();
const map = init.environ_map; const allocator = gpa.allocator();
var tests = try std.ArrayList(Tests).initCapacity(allocator, @typeInfo(Tests).@"enum".fields.len); var tests = try std.ArrayList(Tests).initCapacity(allocator, @typeInfo(Tests).@"enum".fields.len);
defer tests.deinit(allocator); defer tests.deinit(allocator);
var args = try init.minimal.args.iterateAllocator(init.arena.allocator()); var args = try std.process.argsWithAllocator(allocator);
defer args.deinit();
var stdout_buf: [4096]u8 = undefined; var stdout_buf: [4096]u8 = undefined;
const stdout_raw = std.Io.File.stdout().writer(io, &stdout_buf); const stdout_raw = std.fs.File.stdout().writer(&stdout_buf);
var stdout = stdout_raw.interface; var stdout = stdout_raw.interface;
defer stdout.flush() catch @panic("could not flush stdout"); defer stdout.flush() catch @panic("could not flush stdout");
var arg0: ?[]const u8 = null; var arg0: ?[]const u8 = null;
@ -109,7 +111,7 @@ pub fn main(init: std.process.Init) anyerror!void {
} }
std.log.info("Start\n", .{}); std.log.info("Start\n", .{});
const client_options = aws.ClientOptions{ .proxy = proxy, .io = io, .map = map }; const client_options = aws.ClientOptions{ .proxy = proxy };
var client = aws.Client.init(allocator, client_options); var client = aws.Client.init(allocator, client_options);
const options = aws.Options{ const options = aws.Options{
.region = "us-west-2", .region = "us-west-2",
@ -371,8 +373,7 @@ fn proxyFromString(string: []const u8) !std.http.Client.Proxy {
rc.protocol = .tls; rc.protocol = .tls;
} else return error.InvalidScheme; } else return error.InvalidScheme;
var split_iterator = std.mem.splitScalar(u8, remaining, ':'); var split_iterator = std.mem.splitScalar(u8, remaining, ':');
const host_str = std.mem.trimEnd(u8, split_iterator.first(), "/"); rc.host = std.mem.trimRight(u8, split_iterator.first(), "/");
rc.host = try std.Io.net.HostName.init(host_str);
if (split_iterator.next()) |port| if (split_iterator.next()) |port|
rc.port = try std.fmt.parseInt(u16, port, 10); rc.port = try std.fmt.parseInt(u16, port, 10);
return rc; return rc;

View file

@ -1,27 +1,32 @@
const std = @import("std"); const std = @import("std");
const service_list = @import("service_manifest"); const service_list = @import("service_manifest");
const expectEqualStrings = std.testing.expectEqualStrings;
pub fn Services(comptime service_imports: anytype) type { pub fn Services(comptime service_imports: anytype) type {
if (service_imports.len == 0) return services; if (service_imports.len == 0) return services;
// From here, the fields of our structure can be generated at comptime... // From here, the fields of our structure can be generated at comptime...
const fields_len = serviceCount(service_imports); var fields: [serviceCount(service_imports)]std.builtin.Type.StructField = undefined;
var field_names: [fields_len][]const u8 = undefined;
var field_types: [fields_len]type = undefined;
var field_attrs: [fields_len]std.builtin.Type.StructField.Attributes = undefined;
for (0..fields_len) |i| { for (&fields, 0..) |*item, i| {
const import_field = @field(service_list, @tagName(service_imports[i])); const import_field = @field(service_list, @tagName(service_imports[i]));
field_names[i] = @tagName(service_imports[i]); item.* = .{
field_types[i] = @TypeOf(import_field); .name = @tagName(service_imports[i]),
field_attrs[i] = .{ .type = @TypeOf(import_field),
.default_value_ptr = &import_field, .default_value_ptr = &import_field,
.@"comptime" = false, .is_comptime = false,
.@"align" = std.meta.alignment(field_types[i]), .alignment = std.meta.alignment(@TypeOf(import_field)),
}; };
} }
// finally, generate the type // finally, generate the type
return @Struct(.auto, null, &field_names, &field_types, &field_attrs); return @Type(.{
.@"struct" = .{
.layout = .auto,
.fields = &fields,
.decls = &[_]std.builtin.Type.Declaration{},
.is_tuple = false,
},
});
} }
fn serviceCount(desired_services: anytype) usize { fn serviceCount(desired_services: anytype) usize {
@ -34,23 +39,17 @@ fn serviceCount(desired_services: anytype) usize {
pub const services = service_list; pub const services = service_list;
test "services includes sts" { test "services includes sts" {
try std.testing.expectEqualStrings("2011-06-15", services.sts.version.?); try expectEqualStrings("2011-06-15", services.sts.version.?);
} }
test "sts includes get_caller_identity" { test "sts includes get_caller_identity" {
try std.testing.expectEqualStrings("GetCallerIdentity", services.sts.get_caller_identity.action_name); try expectEqualStrings("GetCallerIdentity", services.sts.get_caller_identity.action_name);
} }
test "can get service and action name from request" { test "can get service and action name from request" {
// get request object. This call doesn't have parameters // get request object. This call doesn't have parameters
const metadata = services.sts.get_caller_identity.Request.metaInfo(); const metadata = services.sts.get_caller_identity.Request.metaInfo();
try std.testing.expectEqualStrings("2011-06-15", metadata.service_metadata.version.?); try expectEqualStrings("2011-06-15", metadata.service_metadata.version.?);
} }
test "can filter services" { test "can filter services" {
const filtered_services = Services(.{ .sts, .wafv2 }){}; const filtered_services = Services(.{ .sts, .wafv2 }){};
try std.testing.expectEqualStrings("2011-06-15", filtered_services.sts.version.?); try expectEqualStrings("2011-06-15", filtered_services.sts.version.?);
}
test "can reify type" {
const F = Services(.{.lambda});
const info = @typeInfo(F).@"struct";
try std.testing.expectEqual(@as(usize, 1), info.fields.len);
try std.testing.expectEqualStrings("lambda", info.fields[0].name);
} }

View file

@ -31,8 +31,8 @@ pub const Element = struct {
fn init(tag: []const u8, alloc: Allocator) Element { fn init(tag: []const u8, alloc: Allocator) Element {
return .{ return .{
.tag = tag, .tag = tag,
.attributes = .empty, .attributes = AttributeList{},
.children = .empty, .children = ContentList{},
.allocator = alloc, .allocator = alloc,
}; };
} }

View file

@ -168,7 +168,7 @@ fn parseInternal(comptime T: type, element: *xml.Element, options: ParseOptions)
}, },
); );
if (@errorReturnTrace()) |trace| { if (@errorReturnTrace()) |trace| {
std.debug.dumpStackTrace(trace); std.debug.dumpStackTrace(trace.*);
} }
} }
return e; return e;
@ -192,7 +192,9 @@ fn parseInternal(comptime T: type, element: *xml.Element, options: ParseOptions)
e, e,
}, },
); );
std.debug.dumpCurrentStackTrace(.{}); if (@errorReturnTrace()) |trace| {
std.debug.dumpStackTrace(trace.*);
}
} }
return e; return e;
}; };
@ -379,7 +381,7 @@ fn parseInternal(comptime T: type, element: *xml.Element, options: ParseOptions)
log.debug("type = {s}, style = {s}, ptr_info.child == {s}, element = {s}", .{ @typeName(T), @tagName(array_style), @typeName(ptr_info.child), element.tag }); log.debug("type = {s}, style = {s}, ptr_info.child == {s}, element = {s}", .{ @typeName(T), @tagName(array_style), @typeName(ptr_info.child), element.tag });
var children = std.ArrayList(ptr_info.child).empty; var children = std.ArrayList(ptr_info.child){};
defer children.deinit(allocator); defer children.deinit(allocator);
switch (array_style) { switch (array_style) {

View file

@ -1,3 +1,3 @@
{ {
"ignore": ["lib/json/src/json.zig", "codegen/src/Hasher.zig"] "ignore": ["lib/json/src/json.zig"]
} }