Compare commits
9 commits: master...zig-develop

Commits (SHA1):
- 1cff425ff6
- 9b870aa969
- b9a18d30b4
- f15887b550
- aec39b2103
- d400e50a9c
- 3f5d9d9542
- ef74739b9b
- 10a0e0ab99

23 changed files with 238 additions and 428 deletions
@@ -18,7 +18,7 @@ jobs:
       - name: Check out repository code
         uses: actions/checkout@v4
       - name: Setup Zig
-        uses: https://codeberg.org/mlugg/setup-zig@v2.2.1
+        uses: https://github.com/mlugg/setup-zig@v2.0.5
         # We will let setup-zig use minimum_zig_version from build.zig.zon
         # setup-zig also sets up the zig cache appropriately
       - name: Ulimit
@@ -44,8 +44,11 @@ jobs:
       # should be using git archive, but we need our generated code to be part of it
       - name: Package source code with generated models
        run: |
-          zig build package
-          (cd zig-out/package && tar -czf ${{ runner.temp }}/${{ github.sha }}-with-models.tar.gz --format ustar *)
+          sed -i 's#// UNCOMMENT AFTER MODEL GEN TO USE IN BUILD SCRIPTS //##' build.zig
+          tar -czf ${{ runner.temp }}/${{ github.sha }}-with-models.tar.gz \
+            --format ustar \
+            --exclude 'zig-*' \
+            *
+        # Something in this PR broke this transform. I don't mind removing it, but
+        # the PR attempts to handle situations with or without a prefix, but it
+        # doesn't. I have not yet determined what the problem is, though
@@ -26,7 +26,7 @@ jobs:
         with:
           ref: zig-mach
       - name: Setup Zig
-        uses: https://codeberg.org/mlugg/setup-zig@v2.2.1
+        uses: https://github.com/mlugg/setup-zig@v2.0.1
         with:
           version: mach-latest
       - name: Restore Zig caches
@@ -26,7 +26,7 @@ jobs:
         with:
           ref: zig-develop
       - name: Setup Zig
-        uses: https://codeberg.org/mlugg/setup-zig@v2.2.1
+        uses: https://github.com/mlugg/setup-zig@v2.0.5
         with:
           version: master
       - name: Run smoke test
@@ -20,7 +20,7 @@ jobs:
         with:
           ref: zig-0.14.x
       - name: Setup Zig
-        uses: https://codeberg.org/mlugg/setup-zig@v2.2.1
+        uses: https://github.com/mlugg/setup-zig@v2.0.1
         with:
           version: 0.14.0
       - name: Run smoke test
@@ -1,5 +1,5 @@
 [tools]
-prek = "0.3.1"
-"ubi:DonIsaac/zlint" = "0.7.9"
-zig = "0.15.2"
-zls = "0.15.1"
+pre-commit = "latest"
+"ubi:DonIsaac/zlint" = "latest"
+zig = "master"
+zls = "0.15.0"
@@ -2,7 +2,7 @@
 # See https://pre-commit.com/hooks.html for more hooks
 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v6.0.0
+    rev: v3.2.0
     hooks:
       - id: trailing-whitespace
       - id: end-of-file-fixer
build.zig (181 lines changed)
@@ -3,6 +3,8 @@ const Builder = @import("std").Build;

 const models_subdir = "codegen/sdk-codegen/aws-models/"; // note will probably not work on windows

+// UNCOMMENT AFTER MODEL GEN TO USE IN BUILD SCRIPTS //pub const aws = @import("src/aws.zig");
+
 const test_targets = [_]std.Target.Query{
     .{}, // native
     .{ .cpu_arch = .x86_64, .os_tag = .linux },
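A note on the marker line added above: the packaging workflow earlier in this compare runs sed -i 's#// UNCOMMENT AFTER MODEL GEN TO USE IN BUILD SCRIPTS //##' build.zig, so in the packaged tarball the comment becomes an active declaration. The effect, shown on the line itself:

    // As committed:
    // UNCOMMENT AFTER MODEL GEN TO USE IN BUILD SCRIPTS //pub const aws = @import("src/aws.zig");

    // After the sed transform, in the packaged copy:
    pub const aws = @import("src/aws.zig");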
@@ -112,41 +114,12 @@ pub fn build(b: *Builder) !void {

     cg.dependOn(&cg_cmd.step);

-    // Each module will need access to the generated AWS modules. These
-    // are all imported by service_manifest.zig, which is a generated list
-    // of services created by the codegen process.
-    //
-    // First, we need to check if pre-generated models exist, which only happens
-    // for packaged distribution.
-    //
-    // The idea here is that if we have a packaged distribution (tarball with
-    // models available), we are pre-generated, do not need the codegen step
-    // (and in fact do not have that available), and our service_manifest
-    // module needs to be the pre-packaged file.
-    //
-    // If we do not have a packaged distribution, the file will not exist,
-    // because it is generated by codegen and will live in the zig cache directory,
-    // so we depend on the codegen step and the service_manifest module will
-    // be based on the codegen output itself.
-    //
-    // Most of this complication comes from the fact that we want to enable
-    // consuming build.zig files to be able to use the SDK at build time for
-    // things like code deployments, e.g. https://git.lerch.org/lobo/lambda-zig
-    const has_pre_generated =
-        if (b.build_root.handle.access("src/models/service_manifest.zig", .{})) true else |_| false;
-
-    // Only depend on codegen if we don't have pre-generated models
-    if (!has_pre_generated)
     exe.step.dependOn(cg);
-
-    // Use pre-generated models if available, otherwise use codegen output
-    const service_manifest_source: std.Build.LazyPath = if (has_pre_generated)
-        b.path("src/models/service_manifest.zig")
-    else
-        cg_output_dir.path(b, "service_manifest.zig");

     // This allows us to have each module depend on the
     // generated service manifest.
     const service_manifest_module = b.createModule(.{
-        .root_source_file = service_manifest_source,
+        .root_source_file = cg_output_dir.path(b, "service_manifest.zig"),
         .target = target,
         .optimize = optimize,
     });
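The logic removed above hinged on a single probe: std.fs.Dir.access on the build root to detect whether pre-generated models shipped with the source. A minimal sketch of that pattern, assembled from the removed lines (illustrative only; the zig-develop side drops it):

    // access() returns void on success and an error otherwise, so the
    // if/else-|_| form collapses the probe into a bool.
    const has_pre_generated =
        if (b.build_root.handle.access("src/models/service_manifest.zig", .{})) true else |_| false;
    const manifest: std.Build.LazyPath = if (has_pre_generated)
        b.path("src/models/service_manifest.zig") // packaged distribution
    else
        cg_output_dir.path(b, "service_manifest.zig"); // fresh codegen output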
@@ -206,7 +179,6 @@ pub fn build(b: *Builder) !void {
         .filters = test_filters,
     });

-    if (!has_pre_generated)
     unit_tests.step.dependOn(cg);
     unit_tests.use_llvm = !no_llvm;
@@ -230,7 +202,6 @@ pub fn build(b: *Builder) !void {
         .filters = test_filters,
     });
     smoke_test.use_llvm = !no_llvm;
-    if (!has_pre_generated)
     smoke_test.step.dependOn(cg);

     const run_smoke_test = b.addRunArtifact(smoke_test);
@@ -241,13 +212,6 @@ pub fn build(b: *Builder) !void {
     } else {
         b.installArtifact(exe);
     }
-
-    // Package step - creates distribution source directory
-    const pkg_step = PackageStep.create(b, cg_output_dir);
-    pkg_step.step.dependOn(cg);
-
-    const package = b.step("package", "Copy code to zig-out/package with generated models");
-    package.dependOn(&pkg_step.step);
 }

 fn configure(compile: *std.Build.Module, modules: std.StringHashMap(*std.Build.Module), include_time: bool) void {
@@ -287,138 +251,3 @@ fn getDependencyModules(b: *std.Build, args: anytype) !std.StringHashMap(*std.Bu

     return result;
 }
-
-/// Custom build step that creates a distribution source directory
-/// This copies all source files plus the generated service models into a
-/// package directory suitable for distribution
-const PackageStep = struct {
-    step: std.Build.Step,
-    cg_output_dir: std.Build.LazyPath,
-
-    const base_id: std.Build.Step.Id = .custom;
-
-    /// Files to include in the package (relative to build root)
-    const package_files = [_][]const u8{
-        "build.zig",
-        "build.zig.zon",
-        "README.md",
-        "LICENSE",
-    };
-
-    /// Directories to include in the package (relative to build root)
-    const package_dirs = [_][]const u8{
-        "src",
-        "lib",
-    };
-
-    pub fn create(owner: *std.Build, cg_output_dir: std.Build.LazyPath) *PackageStep {
-        const self = owner.allocator.create(PackageStep) catch @panic("OOM");
-        self.* = .{
-            .step = std.Build.Step.init(.{
-                .id = base_id,
-                .name = "copy generated files",
-                .owner = owner,
-                .makeFn = make,
-            }),
-            .cg_output_dir = cg_output_dir,
-        };
-        return self;
-    }
-
-    fn make(step: *std.Build.Step, options: std.Build.Step.MakeOptions) anyerror!void {
-        _ = options;
-        const self: *PackageStep = @fieldParentPtr("step", step);
-        const b = step.owner;
-
-        // Get the path to generated models
-        const models_path = self.cg_output_dir.getPath2(b, &self.step);
-
-        // Create output directory for packaging
-        const package_dir = b.pathJoin(&.{ "zig-out", "package" });
-        const models_dest_dir = b.pathJoin(&.{ package_dir, "src", "models" });
-        std.fs.cwd().makePath(models_dest_dir) catch |err| {
-            return step.fail("Failed to create package directory: {}", .{err});
-        };
-
-        // Copy all source files to package directory
-        for (package_files) |file_name|
-            copyFile(b, b.build_root.handle, file_name, package_dir) catch {};
-
-        // Copy directories
-        for (package_dirs) |dir_name|
-            copyDirRecursive(b, b.build_root.handle, dir_name, package_dir) catch |err| {
-                return step.fail("Failed to copy directory '{s}': {}", .{ dir_name, err });
-            };
-
-        // Copy generated models to src/models/
-        copyGeneratedModels(b, models_path, models_dest_dir) catch |err| {
-            return step.fail("Failed to copy generated models: {}", .{err});
-        };
-
-        step.result_cached = false;
-    }
-
-    fn copyFile(b: *std.Build, src_dir: std.fs.Dir, file_path: []const u8, dest_prefix: []const u8) !void {
-        const dest_path = b.pathJoin(&.{ dest_prefix, file_path });
-
-        // Ensure parent directory exists
-        if (std.fs.path.dirname(dest_path)) |parent|
-            std.fs.cwd().makePath(parent) catch {};
-
-        src_dir.copyFile(file_path, std.fs.cwd(), dest_path, .{}) catch return;
-    }
-
-    fn copyDirRecursive(b: *std.Build, src_base: std.fs.Dir, dir_path: []const u8, dest_prefix: []const u8) !void {
-        var src_dir = src_base.openDir(dir_path, .{ .iterate = true }) catch return;
-        defer src_dir.close();
-
-        var walker = try src_dir.walk(b.allocator);
-        defer walker.deinit();
-
-        while (try walker.next()) |entry| {
-            // Skip zig build artifact directories
-            if (std.mem.indexOf(u8, entry.path, "zig-out") != null or
-                std.mem.indexOf(u8, entry.path, ".zig-cache") != null or
-                std.mem.indexOf(u8, entry.path, "zig-cache") != null)
-                continue;
-
-            const src_path = b.pathJoin(&.{ dir_path, entry.path });
-            const dest_path = b.pathJoin(&.{ dest_prefix, dir_path, entry.path });
-
-            switch (entry.kind) {
-                .directory => std.fs.cwd().makePath(dest_path) catch {},
-                .file => {
-                    // Ensure parent directory exists
-                    if (std.fs.path.dirname(dest_path)) |parent| {
-                        std.fs.cwd().makePath(parent) catch {};
-                    }
-                    src_base.copyFile(src_path, std.fs.cwd(), dest_path, .{}) catch {};
-                },
-                .sym_link => {
-                    var link_buf: [std.fs.max_path_bytes]u8 = undefined;
-                    const link_target = entry.dir.readLink(entry.basename, &link_buf) catch continue;
-                    // Ensure parent directory exists
-                    if (std.fs.path.dirname(dest_path)) |parent| {
-                        std.fs.cwd().makePath(parent) catch {};
-                    }
-                    std.fs.cwd().symLink(link_target, dest_path, .{}) catch {};
-                },
-                else => {},
-            }
-        }
-    }
-
-    fn copyGeneratedModels(b: *std.Build, models_path: []const u8, models_dest_dir: []const u8) !void {
-        var models_dir = std.fs.cwd().openDir(models_path, .{ .iterate = true }) catch
-            return error.ModelsNotFound;
-        defer models_dir.close();
-
-        var iter = models_dir.iterate();
-        while (try iter.next()) |entry| {
-            if (entry.kind != .file) continue;
-
-            const dest_path = b.pathJoin(&.{ models_dest_dir, entry.name });
-            models_dir.copyFile(entry.name, std.fs.cwd(), dest_path, .{}) catch continue;
-        }
-    }
-};
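The deleted PackageStep is also a complete worked example of a custom std.Build.Step: embed a Step, register a makeFn at init, and recover the parent struct with @fieldParentPtr inside make. Reduced to its skeleton (every call below appears in the deleted code; the step name is a placeholder):

    const CustomStep = struct {
        step: std.Build.Step,

        pub fn create(owner: *std.Build) *CustomStep {
            const self = owner.allocator.create(CustomStep) catch @panic("OOM");
            self.* = .{ .step = std.Build.Step.init(.{
                .id = .custom,
                .name = "my custom step", // placeholder name
                .owner = owner,
                .makeFn = make,
            }) };
            return self;
        }

        fn make(step: *std.Build.Step, options: std.Build.Step.MakeOptions) anyerror!void {
            _ = options;
            const self: *CustomStep = @fieldParentPtr("step", step);
            _ = self; // real work goes here
            step.result_cached = false;
        }
    };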
@@ -22,10 +22,6 @@
         .url = "https://github.com/aws/aws-sdk-go-v2/archive/refs/tags/release-2025-05-05.tar.gz",
         .hash = "N-V-__8AAKWdeiawujEcrfukQbb8lLAiQIRT0uG5gCcm4b7W",
     },
-    .zeit = .{
-        .url = "git+https://github.com/rockorager/zeit?ref=zig-0.15#ed2ca60db118414bda2b12df2039e33bad3b0b88",
-        .hash = "zeit-0.6.0-5I6bk0J9AgCVa0nnyL0lNY9Xa9F68hHq-ZarhuXNV-Jb",
-    },
     .date = .{
         .path = "lib/date",
     },
@@ -36,5 +32,9 @@
         .url = "git+https://github.com/travisstaloch/case.git#f8003fe5f93b65f673d10d41323e347225e8cb87",
         .hash = "case-0.0.1-chGYqx_EAADaGJjmoln5M1iMBDTrMdd8to5wdEVpfXm4",
     },
+    .zeit = .{
+        .url = "git+https://github.com/elerch/zeit#8190461dc1f892f6370fa9d5cd76690aac0e1c71",
+        .hash = "zeit-0.6.0-5I6bk99-AgDNMIDuw2Zcoe_9QYIpzwZJqeqMpU54egTd",
+    },
 },
}
@@ -33,6 +33,10 @@ pub fn main() anyerror!void {
     defer arena.deinit();
     const allocator = arena.allocator();

+    var threaded: std.Io.Threaded = .init(allocator);
+    defer threaded.deinit();
+    const io = threaded.io();
+
     const args = try std.process.argsAlloc(allocator);
     defer std.process.argsFree(allocator, args);
     var stdout_writer = std.fs.File.stdout().writer(&.{});
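This is the pattern that recurs throughout the rest of this compare: on Zig master, code that performs I/O receives a std.Io interface, and std.Io.Threaded is the concrete implementation this branch uses to produce one. Condensed from the changes in this file (the file variable is a stand-in for any open std.fs.File):

    var threaded: std.Io.Threaded = .init(allocator);
    defer threaded.deinit();
    const io = threaded.io(); // handed down to readers, clients, and clocks

    var fbuf: [1024]u8 = undefined;
    var freader = file.reader(io, &fbuf); // file readers now take io first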
@@ -79,7 +83,7 @@ pub fn main() anyerror!void {
             skip_next = true;
             continue;
         }
-        try processFile(arg, output_dir, &manifest);
+        try processFile(io, arg, output_dir, &manifest);
         files_processed += 1;
     }
     if (files_processed == 0) {
@@ -93,12 +97,12 @@ pub fn main() anyerror!void {
             defer cwd.setAsCwd() catch unreachable;

             try m.setAsCwd();
-            try processDirectories(m, output_dir, &root_progress_node);
+            try processDirectories(io, m, output_dir, &root_progress_node);
         }
     }

     if (args.len == 0)
-        _ = try generateServices(allocator, ";", std.fs.File.stdin(), stdout);
+        _ = try generateServices(allocator, io, ";", std.fs.File.stdin(), stdout);

     if (verbose) {
         const output_path = try output_dir.realpathAlloc(allocator, ".");
@@ -110,7 +114,7 @@ const OutputManifest = struct {
     model_dir_hash_digest: [Hasher.hex_multihash_len]u8,
     output_dir_hash_digest: [Hasher.hex_multihash_len]u8,
 };
-fn processDirectories(models_dir: std.fs.Dir, output_dir: std.fs.Dir, parent_progress: *const std.Progress.Node) !void {
+fn processDirectories(io: std.Io, models_dir: std.fs.Dir, output_dir: std.fs.Dir, parent_progress: *const std.Progress.Node) !void {
     // Let's get ready to hash!!
     var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
     defer arena.deinit();
@@ -154,7 +158,7 @@ fn processDirectories(models_dir: std.fs.Dir, output_dir: std.fs.Dir, parent_pro

     while (try mi.next()) |e| {
         if ((e.kind == .file or e.kind == .sym_link) and std.mem.endsWith(u8, e.name, ".json")) {
-            try processFile(e.name, output_dir, &manifest.interface);
+            try processFile(io, e.name, output_dir, &manifest.interface);
             generating_models_progress.completeOne();
         }
     }
@@ -210,7 +214,7 @@ fn calculateDigests(models_dir: std.fs.Dir, output_dir: std.fs.Dir, thread_pool:
         },
     };
 }
-fn processFile(file_name: []const u8, output_dir: std.fs.Dir, manifest: *std.Io.Writer) !void {
+fn processFile(io: std.Io, file_name: []const u8, output_dir: std.fs.Dir, manifest: *std.Io.Writer) !void {
     // It's probably best to create our own allocator here so we can deinit at the end and
     // toss all allocations related to the services in this file
     // I can't guarantee we're not leaking something, and at the end of the
@@ -237,6 +241,7 @@ fn processFile(file_name: []const u8, output_dir: std.fs.Dir, manifest: *std.Io.

     const service_names = generateServicesForFilePath(
         allocator,
+        io,
         ";",
         file_name,
         writer,
@@ -288,13 +293,14 @@ fn zigFmt(allocator: std.mem.Allocator, buffer: [:0]const u8) ![]const u8 {

 fn generateServicesForFilePath(
     allocator: std.mem.Allocator,
+    io: std.Io,
     comptime terminator: []const u8,
     path: []const u8,
     writer: *std.Io.Writer,
 ) ![][]const u8 {
     const file = try std.fs.cwd().openFile(path, .{});
     defer file.close();
-    return try generateServices(allocator, terminator, file, writer);
+    return try generateServices(allocator, io, terminator, file, writer);
 }

 fn addReference(id: []const u8, map: *std.StringHashMap(u64)) !void {
@@ -396,12 +402,13 @@ fn countReferences(

 fn generateServices(
     allocator: std.mem.Allocator,
+    io: std.Io,
     comptime _: []const u8,
     file: std.fs.File,
     writer: *std.Io.Writer,
 ) ![][]const u8 {
     var fbuf: [1024]u8 = undefined;
-    var freader = file.reader(&fbuf);
+    var freader = file.reader(io, &fbuf);
     var reader = &freader.interface;
     const json = try reader.allocRemaining(allocator, .limited(1024 * 1024 * 1024));
     defer allocator.free(json);
@@ -645,7 +652,7 @@ fn generateOperation(allocator: std.mem.Allocator, operation: smithy.ShapeInfo,
     try outputIndent(child_state, writer);
     try writer.print(".uri = \"{s}\",\n", .{trait.http.uri});
     try outputIndent(child_state, writer);
-    try writer.print(".success_code = @as(u10, {d}),\n", .{trait.http.code});
+    try writer.print(".success_code = {d},\n", .{trait.http.code});
     try outputIndent(state, writer);
     _ = try writer.write("};\n\n");
 }
@@ -34,9 +34,9 @@ pub fn generateToJsonFunction(shape_id: []const u8, writer: *std.Io.Writer, stat
     const member_value = try getMemberValueJson(allocator, "self", member);
     defer allocator.free(member_value);

+    try writer.print("try jw.objectField(\"{s}\");\n", .{member.json_key});
     try writeMemberJson(
         .{
-            .object_field_name = member.json_key,
             .shape_id = member.target,
             .field_name = member.field_name,
             .field_value = member_value,
@@ -146,8 +146,6 @@ fn writeMemberValue(
 }

 const WriteMemberJsonParams = struct {
-    object_field_name: []const u8,
-    quote_object_field_name: bool = true,
     shape_id: []const u8,
     field_name: []const u8,
     field_value: []const u8,
@@ -198,9 +196,9 @@ fn writeStructureJson(params: WriteMemberJsonParams, writer: *std.Io.Writer) !vo
     const member_value = try getMemberValueJson(allocator, object_value, member);
     defer allocator.free(member_value);

+    try writer.print("try jw.objectField(\"{s}\");\n", .{member.json_key});
     try writeMemberJson(
         .{
-            .object_field_name = member.json_key,
             .shape_id = member.target,
             .field_name = member.field_name,
             .field_value = member_value,
@@ -216,7 +214,7 @@ fn writeStructureJson(params: WriteMemberJsonParams, writer: *std.Io.Writer) !vo

     if (is_optional) {
         try writer.writeAll("} else {\n");
-        try writer.writeAll("//try jw.write(null);\n");
+        try writer.writeAll("try jw.write(null);\n");
         try writer.writeAll("}\n");
     }
 }
@@ -270,7 +268,7 @@ fn writeListJson(list: smithy_tools.ListShape, params: WriteMemberJsonParams, wr

     if (list_is_optional) {
         try writer.writeAll("} else {\n");
-        try writer.writeAll("//try jw.write(null);\n");
+        try writer.writeAll("try jw.write(null);\n");
         try writer.writeAll("}\n");
     }
 }
@@ -329,10 +327,9 @@ fn writeMapJson(map: smithy_tools.MapShape, params: WriteMemberJsonParams, write
     // start loop
     try writer.print("for ({s}) |{s}|", .{ map_value, map_value_capture });
     try writer.writeAll("{\n");
+    try writer.print("try jw.objectField({s});\n", .{map_capture_key});

     try writeMemberJson(.{
-        .object_field_name = map_capture_key,
-        .quote_object_field_name = false,
         .shape_id = map.value,
         .field_name = "value",
         .field_value = map_capture_value,
@@ -348,7 +345,7 @@ fn writeMapJson(map: smithy_tools.MapShape, params: WriteMemberJsonParams, write

     if (map_is_optional) {
         try writer.writeAll("} else {\n");
-        try writer.writeAll("//try jw.write(null);\n");
+        try writer.writeAll("try jw.write(null);\n");
         try writer.writeAll("}\n");
     }
 }
@@ -364,16 +361,7 @@ fn writeMemberJson(params: WriteMemberJsonParams, writer: *std.Io.Writer) anyerr
     const shape_info = try smithy_tools.getShapeInfo(shape_id, state.file_state.shapes);
     const shape = shape_info.shape;

-    const quote = if (params.quote_object_field_name) "\"" else "";
-    const is_optional = smithy_tools.shapeIsOptional(params.member.traits);
-    if (is_optional) {
-        try writer.print("if ({s}) |_|\n", .{params.field_value});
-        try writer.writeAll("{\n");
-    }
-    try writer.print("try jw.objectField({s}{s}{s});\n", .{ quote, params.object_field_name, quote });
-
     if (state.getTypeRecurrenceCount(shape_id) > 2) {
-        if (is_optional) try writer.writeAll("\n}\n");
         return;
     }
@@ -401,5 +389,4 @@ fn writeMemberJson(params: WriteMemberJsonParams, writer: *std.Io.Writer) anyerr
         .short => try writeScalarJson("short", params, writer),
         .service, .resource, .operation, .member, .set => std.debug.panic("Shape type not supported: {}", .{shape}),
     }
-    if (is_optional) try writer.writeAll("\n}\n");
 }
@@ -6,8 +6,8 @@

     .dependencies = .{
         .aws = .{
-            .url = "git+https://git.lerch.org/lobo/aws-sdk-for-zig.git?ref=master#efdef66fdbb2500d33a79a0b8d1855dd1bb20d56",
-            .hash = "aws-0.0.1-SbsFcLgtCgAndtGhoOyzQfmFtUux4tadFZv0tC6TAnL8",
+            .url = "https://git.lerch.org/api/packages/lobo/generic/aws-sdk-with-models/9b870aa969124de05de2a71e0afb9050a2998b14/9b870aa969124de05de2a71e0afb9050a2998b14nightly-zig-with-models.tar.gz",
+            .hash = "aws-0.0.1-SbsFcFsaCgBDSmjnC9Lue34UN_csGkkAEBJ4EkUl9r6w",
         },
     },
}
@@ -28,7 +28,10 @@ pub fn main() anyerror!void {
     // };
     //
     // var client = aws.Client.init(allocator, .{ .proxy = proxy });
-    var client = aws.Client.init(allocator, .{});
+    var threaded: std.Io.Threaded = .init(allocator);
+    defer threaded.deinit();
+    const io = threaded.io();
+    var client = aws.Client.init(allocator, .{ .io = io });
     defer client.deinit();

     const options = aws.Options{
@@ -5,8 +5,8 @@
     .minimum_zig_version = "0.14.0",
     .dependencies = .{
         .zeit = .{
-            .url = "git+https://github.com/rockorager/zeit?ref=zig-0.15#ed2ca60db118414bda2b12df2039e33bad3b0b88",
-            .hash = "zeit-0.6.0-5I6bk0J9AgCVa0nnyL0lNY9Xa9F68hHq-ZarhuXNV-Jb",
+            .url = "git+https://github.com/elerch/zeit#8190461dc1f892f6370fa9d5cd76690aac0e1c71",
+            .hash = "zeit-0.6.0-5I6bk99-AgDNMIDuw2Zcoe_9QYIpzwZJqeqMpU54egTd",
         },
         .json = .{
             .path = "../json",
@@ -83,8 +83,10 @@ fn printDateTime(dt: DateTime) void {
     });
 }

-pub fn printNowUtc() void {
-    printDateTime(timestampToDateTime(std.time.timestamp()));
+pub fn printNowUtc(io: std.Io) void {
+    const now = std.Io.Clock.Timestamp.now(io, .awake) catch return;
+    const timestamp = @as(i64, @intCast(@divFloor(now.raw.nanoseconds, std.time.ns_per_s)));
+    printDateTime(timestampToDateTime(timestamp));
 }

 test "Convert timestamp to datetime" {
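The new body shows the clock path on Zig master: wall-clock time comes through the std.Io interface instead of std.time.timestamp(). Timestamp.now yields nanoseconds, so @divFloor by std.time.ns_per_s produces the epoch seconds that timestampToDateTime expects. The same conversion on its own (assuming an io built as elsewhere in this compare):

    const now = try std.Io.Clock.Timestamp.now(io, .awake);
    const seconds: i64 = @intCast(@divFloor(now.raw.nanoseconds, std.time.ns_per_s));
    // For reference, 1440938160 is 2015-08-30T12:36:00Z, the fixture used
    // by the signing tests later in this compare.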
src/aws.zig (121 lines changed)
@@ -6,7 +6,6 @@ const date = @import("date");
 const json = @import("json");
 const zeit = @import("zeit");

-const credentials = @import("aws_credentials.zig");
 const awshttp = @import("aws_http.zig");
 const url = @import("url.zig");
 const servicemodel = @import("servicemodel.zig");
@@ -20,6 +19,7 @@ const scoped_log = std.log.scoped(.aws);
 /// controls are insufficient (e.g. use in build script)
 pub fn globalLogControl(aws_level: std.log.Level, http_level: std.log.Level, signing_level: std.log.Level, off: bool) void {
     const signing = @import("aws_signing.zig");
+    const credentials = @import("aws_credentials.zig");
     logs_off = off;
     signing.logs_off = off;
     credentials.logs_off = off;
@@ -82,9 +82,8 @@ const log = struct {
 pub const Options = struct {
     region: []const u8 = "aws-global",
     dualstack: bool = false,
-    success_http_status: std.http.Status = .ok,
+    success_http_code: i64 = 200,
     client: Client,
-    credential_options: credentials.Options = .{},

     diagnostics: ?*Diagnostics = null,
@@ -92,7 +91,7 @@ pub const Options = struct {
 };

 pub const Diagnostics = struct {
-    response_status: std.http.Status,
+    http_code: i64,
     response_body: []const u8,
     allocator: std.mem.Allocator,
@@ -114,6 +113,7 @@ pub const Services = servicemodel.Services;

 pub const ClientOptions = struct {
     proxy: ?std.http.Client.Proxy = null,
+    io: std.Io,
 };
 pub const Client = struct {
     allocator: std.mem.Allocator,
@@ -124,7 +124,7 @@ pub const Client = struct {
     pub fn init(allocator: std.mem.Allocator, options: ClientOptions) Self {
         return Self{
             .allocator = allocator,
-            .aws_http = awshttp.AwsHttp.init(allocator, options.proxy),
+            .aws_http = awshttp.AwsHttp.init(allocator, options.io, options.proxy),
         };
     }
     pub fn deinit(self: *Client) void {
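With ClientOptions gaining a required io field, client construction now follows the shape the example app adopted earlier in this compare:

    var threaded: std.Io.Threaded = .init(allocator);
    defer threaded.deinit();
    var client = aws.Client.init(allocator, .{ .io = threaded.io() });
    defer client.deinit();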
@@ -232,18 +232,8 @@ pub fn Request(comptime request_action: anytype) type {
         var buffer = std.Io.Writer.Allocating.init(options.client.allocator);
         defer buffer.deinit();
         if (Self.service_meta.aws_protocol == .rest_json_1) {
-            if (std.mem.eql(u8, "PUT", aws_request.method) or std.mem.eql(u8, "POST", aws_request.method)) {
-                // Buried in the tests are our answer here:
-                // https://github.com/smithy-lang/smithy/blob/main/smithy-aws-protocol-tests/model/restJson1/json-structs.smithy#L71C24-L71C78
-                // documentation: "Rest Json should not serialize null structure values",
-                try buffer.writer.print(
-                    "{f}",
-                    .{std.json.fmt(request, .{
-                        .whitespace = .indent_4,
-                        .emit_null_optional_fields = false,
-                    })},
-                );
-            }
+            if (std.mem.eql(u8, "PUT", aws_request.method) or std.mem.eql(u8, "POST", aws_request.method))
+                try buffer.writer.print("{f}", .{std.json.fmt(request, .{ .whitespace = .indent_4 })});
         }
         aws_request.body = buffer.written();
         var rest_xml_body: ?[]const u8 = null;
@@ -304,9 +294,14 @@ pub fn Request(comptime request_action: anytype) type {
             }
         }

-        var rest_options = options;
-        rest_options.success_http_status = @enumFromInt(Action.http_config.success_code);
-        return try Self.callAws(aws_request, rest_options);
+        return try Self.callAws(aws_request, .{
+            .success_http_code = Action.http_config.success_code,
+            .region = options.region,
+            .dualstack = options.dualstack,
+            .client = options.client,
+            .diagnostics = options.diagnostics,
+            .mock = options.mock,
+        });
     }

     /// Calls using one of the json protocols (json_1_0, json_1_1)
@@ -326,20 +321,11 @@ pub fn Request(comptime request_action: anytype) type {
         // smithy spec, "A null value MAY be provided or omitted
         // for a boxed member with no observable difference." But we're
         // seeing a lot of differences here between spec and reality
-        //
-        // This is deliciously unclear:
-        // https://github.com/smithy-lang/smithy/blob/main/smithy-aws-protocol-tests/model/awsJson1_1/null.smithy#L36
-        //
-        // It looks like struct nulls are meant to be dropped, but sparse
-        // lists/maps included. We'll err here on the side of eliminating them

         const body = try std.fmt.allocPrint(
             options.client.allocator,
             "{f}",
-            .{std.json.fmt(request, .{
-                .whitespace = .indent_4,
-                .emit_null_optional_fields = false,
-            })},
+            .{std.json.fmt(request, .{ .whitespace = .indent_4 })},
         );
         defer options.client.allocator.free(body);
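This change (and the matching one in the rest_json_1 branch above) is the whole null-handling story in one option: with .emit_null_optional_fields = false, unset optionals are omitted; with the defaults now used, they serialize as null. A sketch, assuming a request struct whose optional field is unset:

    // removed form produces: {"Limit": 1}
    // added form produces:   {"ExclusiveStartTableName": null, "Limit": 1}
    try buffer.writer.print("{f}", .{std.json.fmt(request, .{ .whitespace = .indent_4 })});

The "basic json request serialization" test later in this compare adds exactly that "ExclusiveStartTableName": null line to its expected output.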
@@ -411,27 +397,16 @@ pub fn Request(comptime request_action: anytype) type {
                 .dualstack = options.dualstack,
                 .sigv4_service_name = Self.service_meta.sigv4_name,
                 .mock = options.mock,
-                .credential_options = options.credential_options,
             },
         );
         defer response.deinit();

-        if (response.response_code != options.success_http_status) {
-            // If the consumer provided diagnostics, they are likely handling
-            // this error themselves. We'll not spam them with log.err
-            // output. Note that we may need to add additional information
-            // in diagnostics, as reportTraffic provides more information
-            // than what exists in the diagnostics data
+        if (response.response_code != options.success_http_code and response.response_code != 404) {
+            try reportTraffic(options.client.allocator, "Call Failed", aws_request, response, log.err);
             if (options.diagnostics) |d| {
-                d.response_status = response.response_code;
+                d.http_code = response.response_code;
                 d.response_body = try d.allocator.dupe(u8, response.body);
-            } else try reportTraffic(
-                options.client.allocator,
-                "Call Failed",
-                aws_request,
-                response,
-                log.err,
-            );
+            }
             return error.HttpFailure;
         }
@@ -481,7 +456,7 @@ pub fn Request(comptime request_action: anytype) type {
             log.err("Could not set header value: Response header {s}. Field {s}. Value {s}", .{ header.name, f.?.name, header.value });
             log.err("Error: {}", .{e});
             if (@errorReturnTrace()) |trace| {
-                std.debug.dumpStackTrace(trace.*);
+                std.debug.dumpStackTrace(trace);
             }
         };
@@ -1082,45 +1057,37 @@ fn ServerResponse(comptime action: anytype) type {
     const ResponseMetadata = struct {
         RequestId: []u8,
     };
-    const Result = @Type(.{
-        .@"struct" = .{
-            .layout = .auto,
-            .fields = &[_]std.builtin.Type.StructField{
+    const Result = @Struct(
+        .auto,
+        null,
+        &[_][]const u8{ action.action_name ++ "Result", "ResponseMetadata" },
+        &[_]type{ T, ResponseMetadata },
+        &[_]std.builtin.Type.StructField.Attributes{
             .{
-                .name = action.action_name ++ "Result",
-                .type = T,
                 .default_value_ptr = null,
-                .is_comptime = false,
-                .alignment = std.meta.alignment(T),
+                .@"comptime" = false,
+                .@"align" = std.meta.alignment(T),
             },
             .{
-                .name = "ResponseMetadata",
-                .type = ResponseMetadata,
                 .default_value_ptr = null,
-                .is_comptime = false,
-                .alignment = std.meta.alignment(ResponseMetadata),
+                .@"comptime" = false,
+                .@"align" = std.meta.alignment(ResponseMetadata),
             },
         },
-            .decls = &[_]std.builtin.Type.Declaration{},
-            .is_tuple = false,
-        },
-    });
-    return @Type(.{
-        .@"struct" = .{
-            .layout = .auto,
-            .fields = &[_]std.builtin.Type.StructField{
+    );
+    return @Struct(
+        .auto,
+        null,
+        &[_][]const u8{action.action_name ++ "Response"},
+        &[_]type{Result},
+        &[_]std.builtin.Type.StructField.Attributes{
             .{
-                .name = action.action_name ++ "Response",
-                .type = Result,
                 .default_value_ptr = null,
-                .is_comptime = false,
-                .alignment = std.meta.alignment(Result),
+                .@"comptime" = false,
+                .@"align" = std.meta.alignment(Result),
             },
         },
-            .decls = &[_]std.builtin.Type.Declaration{},
-            .is_tuple = false,
-        },
-    });
+    );
 }
 fn FullResponse(comptime action: anytype) type {
     return struct {
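The removed code built these types with @Type and a std.builtin.Type value carrying full StructField entries; the added code uses an @Struct builtin that takes layout, backing integer, field names, field types, and per-field attributes as separate positional arguments (this appears to track a newer reflection API on Zig master; the shape below assumes the builtin behaves as used in this hunk):

    // Hypothetical two-field struct in the added style:
    const Pair = @Struct(
        .auto, // layout
        null,  // backing integer (none for .auto)
        &[_][]const u8{ "a", "b" },
        &[_]type{ u32, u64 },
        &[_]std.builtin.Type.StructField.Attributes{
            .{ .default_value_ptr = null, .@"comptime" = false, .@"align" = std.meta.alignment(u32) },
            .{ .default_value_ptr = null, .@"comptime" = false, .@"align" = std.meta.alignment(u64) },
        },
    );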
@@ -1219,10 +1186,7 @@ fn buildPath(
         "{f}",
         .{std.json.fmt(
             @field(request, field.name),
-            .{
-                .whitespace = .indent_4,
-                .emit_null_optional_fields = false,
-            },
+            .{ .whitespace = .indent_4 },
         )},
     );
     const trimmed_replacement_val = std.mem.trim(u8, replacement_buffer.written(), "\"");
@@ -1441,6 +1405,7 @@ fn reportTraffic(

 test {
     _ = @import("aws_test.zig");
+    _ = @import("servicemodel.zig");
 }

 // buildQuery/buildPath tests, which are here as they are a) generic and b) private
@@ -69,11 +69,6 @@ pub const Profile = struct {
     config_file: ?[]const u8 = null,
     /// Config file. Defaults to AWS_PROFILE or default
     profile_name: ?[]const u8 = null,
-    /// Profile name specified via command line should change precedence of operation,
-    /// moves credential file checking to the top. The sdk does not have a
-    /// way to know if this is coming from a command line, so this field
-    /// serves as a way to accomplish that task
-    prefer_profile_from_file: bool = false,
 };

 pub const Options = struct {
@@ -82,17 +77,8 @@ pub const Options = struct {

 pub var static_credentials: ?auth.Credentials = null;

-pub fn getCredentials(allocator: std.mem.Allocator, options: Options) !auth.Credentials {
+pub fn getCredentials(allocator: std.mem.Allocator, io: std.Io, options: Options) !auth.Credentials {
     if (static_credentials) |c| return c;
-    if (options.profile.prefer_profile_from_file) {
-        log.debug(
-            "Command line profile specified. Checking credentials file first. Profile name {s}",
-            .{options.profile.profile_name orelse "default"},
-        );
-        if (try getProfileCredentials(allocator, options.profile)) |cred| return cred;
-        // Profile not found. We'll mirror the cli here and bail early
-        return error.CredentialsNotFound;
-    }
     if (try getEnvironmentCredentials(allocator)) |cred| {
         log.debug("Found credentials in environment. Access key: {s}", .{cred.access_key});
         return cred;
@@ -101,11 +87,11 @@ pub fn getCredentials(allocator: std.mem.Allocator, options: Options) !auth.Cred
     // GetWebIdentity is not currently implemented. The rest are tested and gtg
     // Note: Lambda just sets environment variables
     if (try getWebIdentityToken(allocator)) |cred| return cred;
-    if (try getProfileCredentials(allocator, options.profile)) |cred| return cred;
+    if (try getProfileCredentials(allocator, io, options.profile)) |cred| return cred;

-    if (try getContainerCredentials(allocator)) |cred| return cred;
+    if (try getContainerCredentials(allocator, io)) |cred| return cred;
     // I don't think we need v1 at all?
-    if (try getImdsv2Credentials(allocator)) |cred| return cred;
+    if (try getImdsv2Credentials(allocator, io)) |cred| return cred;
     return error.CredentialsNotFound;
 }
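The resolution order preserved here: environment variables, then web identity (still unimplemented), then profile files, then container credentials, then IMDSv2; every provider now threads io through. A call site, as aws_http.zig uses it later in this compare:

    const creds = try credentials.getCredentials(allocator, io, .{});
    defer creds.deinit();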
@@ -139,7 +125,7 @@ fn getWebIdentityToken(allocator: std.mem.Allocator) !?auth.Credentials {
     // TODO: implement
     return null;
 }
-fn getContainerCredentials(allocator: std.mem.Allocator) !?auth.Credentials {
+fn getContainerCredentials(allocator: std.mem.Allocator, io: std.Io) !?auth.Credentials {
     // A note on testing: The best way I have found to test this process is
     // the following. Setup an ECS Fargate cluster and create a task definition
     // with the command ["/bin/bash","-c","while true; do sleep 10; done"].
@@ -185,7 +171,7 @@ fn getContainerCredentials(allocator: std.mem.Allocator) !?auth.Credentials {
     const container_uri = try std.fmt.allocPrint(allocator, "http://169.254.170.2{s}", .{container_relative_uri});
     defer allocator.free(container_uri);

-    var cl = std.http.Client{ .allocator = allocator };
+    var cl = std.http.Client{ .allocator = allocator, .io = io };
     defer cl.deinit(); // I don't believe connection pooling would help much here as it's non-ssl and local
     var aw: std.Io.Writer.Allocating = .init(allocator);
     defer aw.deinit();
@@ -215,7 +201,7 @@ fn getContainerCredentials(allocator: std.mem.Allocator) !?auth.Credentials {
     log.err("Unexpected Json response from container credentials endpoint: {s}", .{aw.written()});
     log.err("Error parsing json: {}", .{e});
     if (@errorReturnTrace()) |trace| {
-        std.debug.dumpStackTrace(trace.*);
+        std.debug.dumpStackTrace(trace);
     }

     return null;
@@ -232,10 +218,10 @@ fn getContainerCredentials(allocator: std.mem.Allocator) !?auth.Credentials {
     );
 }

-fn getImdsv2Credentials(allocator: std.mem.Allocator) !?auth.Credentials {
+fn getImdsv2Credentials(allocator: std.mem.Allocator, io: std.Io) !?auth.Credentials {
     var token: ?[]u8 = null;
     defer if (token) |t| allocator.free(t);
-    var cl = std.http.Client{ .allocator = allocator };
+    var cl = std.http.Client{ .allocator = allocator, .io = io };
     defer cl.deinit(); // I don't believe connection pooling would help much here as it's non-ssl and local
     // Get token
     {
@@ -313,7 +299,7 @@ fn getImdsRoleName(allocator: std.mem.Allocator, client: *std.http.Client, imds_
     log.err("Unexpected Json response from IMDS endpoint: {s}", .{aw.written()});
     log.err("Error parsing json: {}", .{e});
     if (@errorReturnTrace()) |trace| {
-        std.debug.dumpStackTrace(trace.*);
+        std.debug.dumpStackTrace(trace);
     }
     return null;
 };
@@ -368,7 +354,7 @@ fn getImdsCredentials(allocator: std.mem.Allocator, client: *std.http.Client, ro
     log.err("Unexpected Json response from IMDS endpoint: {s}", .{aw.written()});
     log.err("Error parsing json: {}", .{e});
     if (@errorReturnTrace()) |trace| {
-        std.debug.dumpStackTrace(trace.*);
+        std.debug.dumpStackTrace(trace);
     }

     return null;
@@ -397,7 +383,7 @@ fn getImdsCredentials(allocator: std.mem.Allocator, client: *std.http.Client, ro

 }

-fn getProfileCredentials(allocator: std.mem.Allocator, options: Profile) !?auth.Credentials {
+fn getProfileCredentials(allocator: std.mem.Allocator, io: std.Io, options: Profile) !?auth.Credentials {
     var default_path: ?[]const u8 = null;
     defer if (default_path) |p| allocator.free(p);
@@ -412,8 +398,8 @@ fn getProfileCredentials(allocator: std.mem.Allocator, options: Profile) !?auth.
     default_path = default_path orelse creds_file_path.home;
     const config_file_path = try filePath(
         allocator,
-        options.config_file,
-        "AWS_CONFIG_FILE",
+        options.credential_file,
+        "AWS_SHARED_CREDENTIALS_FILE",
         default_path,
         "config",
     );
@@ -422,7 +408,7 @@ fn getProfileCredentials(allocator: std.mem.Allocator, options: Profile) !?auth.

     // Get active profile
     const profile = (try getEnvironmentVariable(allocator, "AWS_PROFILE")) orelse
-        try allocator.dupe(u8, options.profile_name orelse "default");
+        try allocator.dupe(u8, "default");
     defer allocator.free(profile);
     log.debug("Looking for file credentials using profile '{s}'", .{profile});
     log.debug("Checking credentials file: {s}", .{creds_file_path.evaluated_path});
@@ -430,13 +416,13 @@ fn getProfileCredentials(allocator: std.mem.Allocator, options: Profile) !?auth.
     defer if (credentials_file) |f| f.close();
     // It's much more likely that we'll find credentials in the credentials file
     // so we'll try that first
-    const creds_file_creds = try credsForFile(allocator, credentials_file, profile);
+    const creds_file_creds = try credsForFile(allocator, io, credentials_file, profile);
     var conf_file_creds = PartialCredentials{};
     if (creds_file_creds.access_key == null or creds_file_creds.secret_key == null) {
         log.debug("Checking config file: {s}", .{config_file_path.evaluated_path});
         const config_file = std.fs.openFileAbsolute(creds_file_path.evaluated_path, .{}) catch null;
         defer if (config_file) |f| f.close();
-        conf_file_creds = try credsForFile(allocator, config_file, profile);
+        conf_file_creds = try credsForFile(allocator, io, config_file, profile);
     }
     const access_key = keyFrom(allocator, creds_file_creds.access_key, conf_file_creds.access_key);
     const secret_key = keyFrom(allocator, creds_file_creds.secret_key, conf_file_creds.secret_key);
@@ -475,10 +461,10 @@ const PartialCredentials = struct {
     access_key: ?[]const u8 = null,
     secret_key: ?[]const u8 = null,
 };
-fn credsForFile(allocator: std.mem.Allocator, file: ?std.fs.File, profile: []const u8) !PartialCredentials {
+fn credsForFile(allocator: std.mem.Allocator, io: std.Io, file: ?std.fs.File, profile: []const u8) !PartialCredentials {
     if (file == null) return PartialCredentials{};
     var fbuf: [1024]u8 = undefined;
-    var freader = file.?.reader(&fbuf);
+    var freader = file.?.reader(io, &fbuf);
     var reader = &freader.interface;
     const text = try reader.allocRemaining(allocator, .unlimited);
     defer allocator.free(text);
@@ -643,7 +629,7 @@ fn getHomeDir(allocator: std.mem.Allocator) ![]const u8 {
             else => return error.HomeDirUnavailable,
         };
     },
-    .macos, .linux, .freebsd, .netbsd, .dragonfly, .openbsd, .solaris => {
+    .macos, .linux, .freebsd, .netbsd, .dragonfly, .openbsd, .illumos => {
         const home_dir = std.posix.getenv("HOME") orelse {
             // TODO look in /etc/passwd
             return error.HomeDirUnavailable;
@@ -90,8 +90,6 @@ pub const Options = struct {
     dualstack: bool = false,
     sigv4_service_name: ?[]const u8 = null,

-    credential_options: credentials.Options = .{},
-
     mock: ?Mock = null,
 };
@@ -146,13 +144,15 @@ const EndPoint = struct {
 pub const AwsHttp = struct {
     allocator: std.mem.Allocator,
     proxy: ?std.http.Client.Proxy,
+    io: std.Io,

     const Self = @This();

-    pub fn init(allocator: std.mem.Allocator, proxy: ?std.http.Client.Proxy) Self {
+    pub fn init(allocator: std.mem.Allocator, io: std.Io, proxy: ?std.http.Client.Proxy) Self {
         return Self{
             .allocator = allocator,
             .proxy = proxy,
+            .io = io,
             // .credentialsProvider = // creds provider could be useful
         };
     }
@@ -188,7 +188,7 @@ pub const AwsHttp = struct {
         defer endpoint.deinit();
         log.debug("Calling endpoint {s}", .{endpoint.uri});
         // TODO: Should we allow customization here?
-        const creds = try credentials.getCredentials(self.allocator, options.credential_options);
+        const creds = try credentials.getCredentials(self.allocator, self.io, .{});
         defer creds.deinit();
         const signing_config: signing.Config = .{
             .region = getRegion(service, options.region),
@@ -243,7 +243,7 @@ pub const AwsHttp = struct {
         defer if (len) |l| self.allocator.free(l);
         request_cp.headers = request_headers.items;

-        if (signing_config) |opts| request_cp = try signing.signRequest(self.allocator, request_cp, opts);
+        if (signing_config) |opts| request_cp = try signing.signRequest(self.allocator, self.io, request_cp, opts);
         defer {
             if (signing_config) |opts| {
                 signing.freeSignedRequest(self.allocator, &request_cp, opts);
@@ -263,7 +263,7 @@ pub const AwsHttp = struct {
         defer self.allocator.free(url);
         log.debug("Request url: {s}", .{url});
         // TODO: Fix this proxy stuff. This is all a kludge just to compile, but std.http.Client has it all built in now
-        var cl = std.http.Client{ .allocator = self.allocator, .https_proxy = if (self.proxy) |*p| @constCast(p) else null };
+        var cl = std.http.Client{ .allocator = self.allocator, .io = self.io, .https_proxy = if (self.proxy) |*p| @constCast(p) else null };
         defer cl.deinit(); // TODO: Connection pooling
         const method = std.meta.stringToEnum(std.http.Method, request_cp.method).?;
@@ -363,7 +363,7 @@ pub const AwsHttp = struct {
         log.debug("raw response body:\n{s}", .{aw.written()});

         const rc = HttpResult{
-            .response_code = response.head.status,
+            .response_code = @intFromEnum(response.head.status),
             .body = try aw.toOwnedSlice(),
             .headers = try resp_headers.toOwnedSlice(self.allocator),
             .allocator = self.allocator,
@@ -9,7 +9,7 @@ pub const Request = struct {
     headers: []const std.http.Header = &.{},
 };
 pub const Result = struct {
-    response_code: std.http.Status,
+    response_code: u16, // actually 3 digits can fit in u10
     body: []const u8,
     headers: []const std.http.Header,
     allocator: std.mem.Allocator,
@@ -157,7 +157,7 @@ pub const SigningError = error{
     XAmzExpiresHeaderInRequest,
     /// Used if the request headers already includes x-amz-region-set
     XAmzRegionSetHeaderInRequest,
-} || error{OutOfMemory};
+} || error{OutOfMemory} || std.Io.Clock.Error;

 const forbidden_headers = .{
     .{ .name = "x-amz-content-sha256", .err = SigningError.XAmzContentSha256HeaderInRequest },
@@ -185,7 +185,7 @@ const skipped_headers = .{
 /// Signs a request. Only header signing is currently supported. Note that
 /// this adds two headers to the request, which will need to be freed by the
 /// caller. Use freeSignedRequest with the same parameters to free
-pub fn signRequest(allocator: std.mem.Allocator, request: base.Request, config: Config) SigningError!base.Request {
+pub fn signRequest(allocator: std.mem.Allocator, io: std.Io, request: base.Request, config: Config) SigningError!base.Request {
     try validateConfig(config);
     for (request.headers) |h| {
         inline for (forbidden_headers) |f| {
@@ -195,7 +195,10 @@ pub fn signRequest(allocator: std.mem.Allocator, request: base.Request, config:
     }
     var rc = request;

-    const signing_time = config.signing_time orelse std.time.timestamp();
+    const signing_time = config.signing_time orelse blk: {
+        const now = try std.Io.Clock.Timestamp.now(io, .awake);
+        break :blk @as(i64, @intCast(@divFloor(now.raw.nanoseconds, std.time.ns_per_s)));
+    };

     const signed_date = date.timestampToDateTime(signing_time);
@@ -334,7 +337,7 @@ pub fn freeSignedRequest(allocator: std.mem.Allocator, request: *base.Request, c
     validateConfig(config) catch |e| {
         log.err("Signing validation failed during signature free: {}", .{e});
         if (@errorReturnTrace()) |trace| {
-            std.debug.dumpStackTrace(trace.*);
+            std.debug.dumpStackTrace(trace);
         }
         return;
     };
@@ -352,10 +355,10 @@ pub fn freeSignedRequest(allocator: std.mem.Allocator, request: *base.Request, c

 pub const credentialsFn = *const fn ([]const u8) ?Credentials;

-pub fn verifyServerRequest(allocator: std.mem.Allocator, request: *std.http.Server.Request, request_body_reader: *std.Io.Reader, credentials_fn: credentialsFn) !bool {
+pub fn verifyServerRequest(allocator: std.mem.Allocator, io: std.Io, request: *std.http.Server.Request, request_body_reader: *std.Io.Reader, credentials_fn: credentialsFn) !bool {
     var unverified_request = try UnverifiedRequest.init(allocator, request);
     defer unverified_request.deinit();
-    return verify(allocator, unverified_request, request_body_reader, credentials_fn);
+    return verify(allocator, io, unverified_request, request_body_reader, credentials_fn);
 }

 pub const UnverifiedRequest = struct {
@@ -393,7 +396,7 @@ pub const UnverifiedRequest = struct {
     }
 };

-pub fn verify(allocator: std.mem.Allocator, request: UnverifiedRequest, request_body_reader: *std.Io.Reader, credentials_fn: credentialsFn) !bool {
+pub fn verify(allocator: std.mem.Allocator, io: std.Io, request: UnverifiedRequest, request_body_reader: *std.Io.Reader, credentials_fn: credentialsFn) !bool {
     var arena = std.heap.ArenaAllocator.init(allocator);
     defer arena.deinit();
     const aa = arena.allocator();
@@ -425,6 +428,7 @@ pub fn verify(allocator: std.mem.Allocator, request: UnverifiedRequest, request_
     if (signature == null) return error.AuthorizationHeaderMissingSignature;
     return verifyParsedAuthorization(
         aa,
+        io,
         request,
         credential.?,
         signed_headers.?,
@@ -436,6 +440,7 @@ pub fn verify(allocator: std.mem.Allocator, request: UnverifiedRequest, request_

 fn verifyParsedAuthorization(
     allocator: std.mem.Allocator,
+    io: std.Io,
     request: UnverifiedRequest,
     credential: []const u8,
     signed_headers: []const u8,
@@ -502,7 +507,7 @@ fn verifyParsedAuthorization(
     signed_request.query = request.target[signed_request.path.len..]; // TODO: should this be +1? query here would include '?'
     signed_request.body = try request_body_reader.allocRemaining(allocator, .unlimited);
     defer allocator.free(signed_request.body);
-    signed_request = try signRequest(allocator, signed_request, config);
+    signed_request = try signRequest(allocator, io, signed_request, config);
     defer freeSignedRequest(allocator, &signed_request, config);
     return verifySignedRequest(signed_request, signature);
 }
@@ -1100,6 +1105,9 @@ test "can sign" {
     // [debug] (awshttp): Content-Length: 43

     const allocator = std.testing.allocator;
+    var threaded: std.Io.Threaded = .init(allocator);
+    defer threaded.deinit();
+    const io = threaded.io();
     var headers = try std.ArrayList(std.http.Header).initCapacity(allocator, 5);
     defer headers.deinit(allocator);
     try headers.append(allocator, .{ .name = "Content-Type", .value = "application/x-www-form-urlencoded; charset=utf-8" });
@@ -1131,7 +1139,7 @@ test "can sign" {
         .signing_time = 1440938160, // 20150830T123600Z
     };
     // TODO: There is an x-amz-content-sha256. Investigate
-    var signed_req = try signRequest(allocator, req, config);
+    var signed_req = try signRequest(allocator, io, req, config);

     defer freeSignedRequest(allocator, &signed_req, config);
     try std.testing.expectEqualStrings("X-Amz-Date", signed_req.headers[signed_req.headers.len - 3].name);
@@ -1151,6 +1159,9 @@ test "can sign" {
 var test_credential: ?Credentials = null;
 test "can verify server request" {
     const allocator = std.testing.allocator;
+    var threaded: std.Io.Threaded = .init(allocator);
+    defer threaded.deinit();
+    const io = threaded.io();

     const access_key = try allocator.dupe(u8, "ACCESS");
     const secret_key = try allocator.dupe(u8, "SECRET");
@@ -1191,7 +1202,7 @@ test "can verify server request" {
     // const old_level = std.testing.log_level;
     // std.testing.log_level = .debug;
     // defer std.testing.log_level = old_level;
-    try std.testing.expect(try verifyServerRequest(allocator, &request, &body_reader, struct {
+    try std.testing.expect(try verifyServerRequest(allocator, io, &request, &body_reader, struct {
         cred: Credentials,

         const Self = @This();
@@ -1203,6 +1214,9 @@ test "can verify server request" {
 }
 test "can verify server request without x-amz-content-sha256" {
     const allocator = std.testing.allocator;
+    var threaded: std.Io.Threaded = .init(allocator);
+    defer threaded.deinit();
+    const io = threaded.io();

     const access_key = try allocator.dupe(u8, "ACCESS");
     const secret_key = try allocator.dupe(u8, "SECRET");
@@ -1293,7 +1307,7 @@ test "can verify server request without x-amz-content-sha256" {
     }

     { // verification
-        try std.testing.expect(try verifyServerRequest(allocator, &request, &body_reader, struct {
+        try std.testing.expect(try verifyServerRequest(allocator, io, &request, &body_reader, struct {
            cred: Credentials,

            const Self = @This();
@@ -129,7 +129,7 @@ test "proper serialization for kms" {
     const parsed_body = try std.json.parseFromSlice(struct {
         KeyId: []const u8,
         Plaintext: []const u8,
-        EncryptionContext: ?struct {} = null,
+        EncryptionContext: ?struct {},
         GrantTokens: [][]const u8,
         EncryptionAlgorithm: []const u8,
         DryRun: bool,
@@ -166,6 +166,7 @@ test "basic json request serialization" {
     try buffer.writer.print("{f}", .{std.json.fmt(request, .{ .whitespace = .indent_4 })});
     try std.testing.expectEqualStrings(
         \\{
+        \\  "ExclusiveStartTableName": null,
         \\  "Limit": 1
         \\}
     , buffer.written());
@@ -253,6 +254,8 @@ const TestOptions = struct {
 };
 const TestSetup = struct {
     allocator: std.mem.Allocator,
+    threaded: std.Io.Threaded,
+    io: std.Io,
     options: TestOptions,
     creds: aws_auth.Credentials,
     client: aws.Client,
@@ -305,6 +308,7 @@ const TestSetup = struct {
         allocator.free(self.trace);
         allocator.free(self.request_uri);
         allocator.destroy(self.request.reader.in);
+        allocator.destroy(self.request.client);
         allocator.destroy(self.request);
     }
 };
@@ -332,22 +336,32 @@ const TestSetup = struct {
             var stderr = std.fs.File.stderr().writer(&.{});
             stderr.interface.writeAll(r.trace) catch @panic("could not write to stderr");
             std.debug.print("Current stack trace:\n", .{});
-            std.debug.dumpCurrentStackTrace(null);
+            std.debug.dumpCurrentStackTrace(.{});
             return error.ConnectionRefused; // we should not be called twice
         }
         const acts = try self.allocator.create(RequestActuals);
         errdefer self.allocator.destroy(acts);
         var aw = std.Io.Writer.Allocating.init(self.allocator);
         defer aw.deinit();
-        std.debug.dumpCurrentStackTraceToWriter(null, &aw.writer) catch return error.OutOfMemory;
+        std.debug.writeCurrentStackTrace(.{}, &aw.writer, .no_color) catch return error.OutOfMemory;
         const req = try self.allocator.create(std.http.Client.Request);
         errdefer self.allocator.destroy(req);
         const reader = try self.allocator.create(std.Io.Reader);
         errdefer self.allocator.destroy(reader);
         reader.* = .fixed(self.options.server_response);
+        // Create a minimal mock client that only provides io for deinit
+        // By creating it with the allocator, we leave critical fields like
+        // connection_pool as undefined, which will fail spectacularly if
+        // a real request were to be attempted
+        const mock_client = try self.allocator.create(std.http.Client);
+        errdefer self.allocator.destroy(mock_client);
+        mock_client.* = .{
+            .allocator = self.allocator,
+            .io = self.io,
+        };
         req.* = .{
             .uri = uri,
-            .client = undefined,
+            .client = mock_client,
             .connection = options.connection,
             .reader = .{
                 .in = reader,
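The mock client added here relies on the trick spelled out in its comment: only .allocator and .io are initialized, and the remaining fields keep their declared defaults or stay undefined, so request teardown has the io it needs while any attempt at a real request fails fast. Condensed:

    const mock_client = try allocator.create(std.http.Client);
    mock_client.* = .{ .allocator = allocator, .io = io };
    // later: req.client = mock_client, instead of undefined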
@ -432,7 +446,9 @@ const TestSetup = struct {
|
|||
return self.request_actuals.?.request.reader.in;
|
||||
}
|
||||
fn init(options: TestOptions) !*Self {
|
||||
const client = aws.Client.init(options.allocator, .{});
|
||||
var threaded: std.Io.Threaded = .init(options.allocator);
|
||||
const io = threaded.io();
|
||||
const client = aws.Client.init(options.allocator, .{ .io = io });
|
||||
const call_options = try options.allocator.create(aws.Options);
|
||||
const self = try options.allocator.create(Self);
|
||||
call_options.* = .{
@@ -452,6 +468,8 @@
         self.* = .{
             .options = options,
             .allocator = options.allocator,
+            .threaded = threaded,
+            .io = io,
             .creds = aws_auth.Credentials.init(
                 options.allocator,
                 try options.allocator.dupe(u8, "ACCESS"),

@@ -475,6 +493,7 @@
         }
         self.allocator.destroy(self.call_options);
         self.call_options = undefined;
+        self.threaded.deinit();
         self.allocator.destroy(self);
         aws_creds.static_credentials = null;
     }

@@ -631,7 +650,7 @@ test "json_1_0_query_with_input: dynamodb listTables runtime" {
     try req_actuals.expectHeader("X-Amz-Target", "DynamoDB_20120810.ListTables");

     const parsed_body = try std.json.parseFromSlice(struct {
-        ExclusiveStartTableName: ?[]const u8 = null,
+        ExclusiveStartTableName: ?[]const u8,
         Limit: u8,
     }, std.testing.allocator, req_actuals.body.?, .{});
     defer parsed_body.deinit();

@@ -700,7 +719,7 @@ test "json_1_1_query_with_input: ecs listClusters runtime" {
     try req_actuals.expectHeader("X-Amz-Target", "AmazonEC2ContainerServiceV20141113.ListClusters");

     const parsed_body = try std.json.parseFromSlice(struct {
-        nextToken: ?[]const u8 = null,
+        nextToken: ?[]const u8,
         maxResults: u8,
     }, std.testing.allocator, req_actuals.body.?, .{});
     defer parsed_body.deinit();

@@ -740,8 +759,8 @@ test "json_1_1_query_no_input: ecs listClusters runtime" {
     try req_actuals.expectHeader("X-Amz-Target", "AmazonEC2ContainerServiceV20141113.ListClusters");

     const parsed_body = try std.json.parseFromSlice(struct {
-        nextToken: ?[]const u8 = null,
-        maxResults: ?u8 = null,
+        nextToken: ?[]const u8,
+        maxResults: ?u8,
     }, std.testing.allocator, req_actuals.body.?, .{});
     defer parsed_body.deinit();
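
Dropping the `= null` defaults in these parse structs means std.json.parseFromSlice now requires each key to be present in the captured body, which matches the serializer change above: optionals are always emitted, possibly as explicit JSON null. A sketch mirroring the test bodies (`body` stands in for the captured request payload):

    // Without a default value, a missing key fails the parse
    // (error.MissingField); an explicit JSON null is still accepted
    // for the optional type.
    const parsed = try std.json.parseFromSlice(struct {
        nextToken: ?[]const u8,
        maxResults: ?u8,
    }, std.testing.allocator, body, .{});
    defer parsed.deinit();
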
@@ -1249,20 +1268,6 @@ test "jsonStringify" {
     try std.testing.expectEqualStrings("1234", json_parsed.value.arn);
     try std.testing.expectEqualStrings("bar", json_parsed.value.tags.foo);
 }
-test "jsonStringify does not emit null values on serialization" {
-    {
-        const lambda = (Services(.{.lambda}){}).lambda;
-        const request = lambda.CreateFunctionRequest{
-            .function_name = "foo",
-            .role = "bar",
-            .code = .{},
-        };
-
-        const request_json = try std.fmt.allocPrint(std.testing.allocator, "{f}", .{std.json.fmt(request, .{})});
-        defer std.testing.allocator.free(request_json);
-        try std.testing.expect(std.mem.indexOf(u8, request_json, "null") == null);
-    }
-}

 test "jsonStringify nullable object" {
     // structure is not null

@@ -1285,7 +1290,7 @@ test "jsonStringify nullable object" {
         FunctionVersion: []const u8,
         Name: []const u8,
         RoutingConfig: struct {
-            AdditionalVersionWeights: ?struct {} = null,
+            AdditionalVersionWeights: ?struct {},
         },
     }, std.testing.allocator, request_json, .{ .ignore_unknown_fields = true });
     defer json_parsed.deinit();

@@ -1321,6 +1326,7 @@
 test "works against a live server" {
     const Server = struct {
         allocator: std.mem.Allocator,
+        io: std.Io,
         ready: std.Thread.Semaphore = .{},
         requests_received: usize = 0,
         thread: ?std.Thread = null,

@@ -1352,7 +1358,7 @@
         pub fn stop(self: *Server) !void {
             if (self.thread == null) return; // thread not started, nothing to do
             // post stop message
-            var client = std.http.Client{ .allocator = self.allocator };
+            var client = std.http.Client{ .allocator = self.allocator, .io = self.io };
             _ = try client.fetch(.{ // we ignore return because that should just shut down
                 .method = .POST,
                 .payload = "quit",

@@ -1363,10 +1369,10 @@
         }

         fn threadMain(self: *Server) !void {
-            const address = try std.net.Address.parseIp("127.0.0.1", 0);
-            var server = try address.listen(.{});
-            defer server.deinit();
-            const server_port = server.listen_address.in.getPort();
+            const address = try std.Io.net.IpAddress.parseLiteral("127.0.0.1:0");
+            var server = try address.listen(self.io, .{});
+            defer server.deinit(self.io);
+            const server_port = server.socket.address.getPort();
             self.listening_uri = try std.fmt.allocPrint(self.allocator, "http://127.0.0.1:{d}", .{server_port});
             defer {
                 self.allocator.free(self.listening_uri);

@@ -1374,13 +1380,13 @@
             }
             self.ready.post();
             while (true) {
-                var connection = try server.accept();
-                defer connection.stream.close();
+                var connection = try server.accept(self.io);
+                defer connection.close(self.io);
                 var recv_buffer: [4000]u8 = undefined;
                 var send_buffer: [4000]u8 = undefined;
-                var conn_reader = connection.stream.reader(&recv_buffer);
-                var conn_writer = connection.stream.writer(&send_buffer);
-                var http_server = std.http.Server.init(conn_reader.interface(), &conn_writer.interface);
+                var conn_reader = connection.reader(self.io, &recv_buffer);
+                var conn_writer = connection.writer(self.io, &send_buffer);
+                var http_server = std.http.Server.init(&conn_reader.interface, &conn_writer.interface);
                 while (http_server.reader.state == .ready) {
                     var req = try http_server.receiveHead();
                     if (req.head.content_length) |l| {
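
The embedded test server moves from std.net to the new std.Io.net API, where listen/accept/read/write all take the io handle explicitly. A condensed sketch of the accept path, using only the calls from this hunk (Zig master API, in flux):

    // Bind an ephemeral port, accept one connection, and wire it to
    // std.http.Server via buffered reader/writer interfaces.
    const address = try std.Io.net.IpAddress.parseLiteral("127.0.0.1:0");
    var server = try address.listen(io, .{});
    defer server.deinit(io);
    var connection = try server.accept(io);
    defer connection.close(io);
    var recv_buffer: [4000]u8 = undefined;
    var send_buffer: [4000]u8 = undefined;
    var conn_reader = connection.reader(io, &recv_buffer);
    var conn_writer = connection.writer(io, &send_buffer);
    var http_server = std.http.Server.init(&conn_reader.interface, &conn_writer.interface);
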
@@ -1405,7 +1411,10 @@
         }
     };
     const allocator = std.testing.allocator;
-    var server = Server{ .allocator = allocator };
+    var threaded: std.Io.Threaded = .init(allocator);
+    defer threaded.deinit();
+    const io = threaded.io();
+    var server = Server{ .allocator = allocator, .io = io };
     try server.start();
     var stopped = false;
     defer if (!stopped) server.stop() catch log.err("error stopping server", .{});

@@ -1425,7 +1434,7 @@
     // }

     const sts = (Services(.{.sts}){}).sts;
-    const client = aws.Client.init(std.testing.allocator, .{});
+    const client = aws.Client.init(std.testing.allocator, .{ .io = io });
     const creds = aws_auth.Credentials.init(
         allocator,
         try allocator.dupe(u8, "ACCESS"),

@@ -111,7 +111,10 @@ pub fn main() anyerror!void {
     }

     std.log.info("Start\n", .{});
-    const client_options = aws.ClientOptions{ .proxy = proxy };
+    var threaded: std.Io.Threaded = .init(allocator);
+    defer threaded.deinit();
+    const io = threaded.io();
+    const client_options = aws.ClientOptions{ .proxy = proxy, .io = io };
     var client = aws.Client.init(allocator, client_options);
     const options = aws.Options{
         .region = "us-west-2",

@@ -373,7 +376,8 @@ fn proxyFromString(string: []const u8) !std.http.Client.Proxy {
         rc.protocol = .tls;
     } else return error.InvalidScheme;
     var split_iterator = std.mem.splitScalar(u8, remaining, ':');
-    rc.host = std.mem.trimRight(u8, split_iterator.first(), "/");
+    const host_str = std.mem.trimRight(u8, split_iterator.first(), "/");
+    rc.host = try std.Io.net.HostName.init(host_str);
    if (split_iterator.next()) |port|
         rc.port = try std.fmt.parseInt(u16, port, 10);
     return rc;
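
Proxy.host is now a typed std.Io.net.HostName rather than a raw slice, so the host is validated at parse time and the function gains an error path. A short sketch, assuming the constructor used above (the host string is only illustrative):

    // HostName validates on construction, so a malformed proxy host is
    // rejected here instead of surfacing later during connect.
    const host = try std.Io.net.HostName.init("proxy.example.com");
    _ = host;
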
@@ -1,32 +1,27 @@
 const std = @import("std");
 const service_list = @import("service_manifest");
-const expectEqualStrings = std.testing.expectEqualStrings;

 pub fn Services(comptime service_imports: anytype) type {
     if (service_imports.len == 0) return services;
     // From here, the fields of our structure can be generated at comptime...
-    var fields: [serviceCount(service_imports)]std.builtin.Type.StructField = undefined;
+    const fields_len = serviceCount(service_imports);
+    var field_names: [fields_len][]const u8 = undefined;
+    var field_types: [fields_len]type = undefined;
+    var field_attrs: [fields_len]std.builtin.Type.StructField.Attributes = undefined;

-    for (&fields, 0..) |*item, i| {
+    for (0..fields_len) |i| {
         const import_field = @field(service_list, @tagName(service_imports[i]));
-        item.* = .{
-            .name = @tagName(service_imports[i]),
-            .type = @TypeOf(import_field),
+        field_names[i] = @tagName(service_imports[i]);
+        field_types[i] = @TypeOf(import_field);
+        field_attrs[i] = .{
             .default_value_ptr = &import_field,
-            .is_comptime = false,
-            .alignment = std.meta.alignment(@TypeOf(import_field)),
+            .@"comptime" = false,
+            .@"align" = std.meta.alignment(field_types[i]),
         };
     }

     // finally, generate the type
-    return @Type(.{
-        .@"struct" = .{
-            .layout = .auto,
-            .fields = &fields,
-            .decls = &[_]std.builtin.Type.Declaration{},
-            .is_tuple = false,
-        },
-    });
+    return @Struct(.auto, null, &field_names, &field_types, &field_attrs);
 }

 fn serviceCount(desired_services: anytype) usize {
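
On Zig master this reification drops the @Type(.{ .@"struct" = ... }) incantation in favor of a dedicated @Struct builtin that takes parallel arrays of field names, types, and attributes. A toy sketch of the same mechanism, assuming the builtin signature used above (layout, optional backing integer, names, types, attributes; still subject to change):

    // Reify struct { first: T, second: T } with the new builtin.
    fn Pair(comptime T: type) type {
        const names: [2][]const u8 = .{ "first", "second" };
        const types: [2]type = .{ T, T };
        var attrs: [2]std.builtin.Type.StructField.Attributes = undefined;
        for (&attrs) |*attr| attr.* = .{
            .default_value_ptr = null,
            .@"comptime" = false,
            .@"align" = std.meta.alignment(T),
        };
        return @Struct(.auto, null, &names, &types, &attrs);
    }
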
@@ -39,17 +34,23 @@ fn serviceCount(desired_services: anytype) usize {
 pub const services = service_list;

 test "services includes sts" {
-    try expectEqualStrings("2011-06-15", services.sts.version.?);
+    try std.testing.expectEqualStrings("2011-06-15", services.sts.version.?);
 }
 test "sts includes get_caller_identity" {
-    try expectEqualStrings("GetCallerIdentity", services.sts.get_caller_identity.action_name);
+    try std.testing.expectEqualStrings("GetCallerIdentity", services.sts.get_caller_identity.action_name);
 }
 test "can get service and action name from request" {
     // get request object. This call doesn't have parameters
     const metadata = services.sts.get_caller_identity.Request.metaInfo();
-    try expectEqualStrings("2011-06-15", metadata.service_metadata.version.?);
+    try std.testing.expectEqualStrings("2011-06-15", metadata.service_metadata.version.?);
 }
 test "can filter services" {
     const filtered_services = Services(.{ .sts, .wafv2 }){};
-    try expectEqualStrings("2011-06-15", filtered_services.sts.version.?);
+    try std.testing.expectEqualStrings("2011-06-15", filtered_services.sts.version.?);
 }
+test "can reify type" {
+    const F = Services(.{.lambda});
+    const info = @typeInfo(F).@"struct";
+    try std.testing.expectEqual(@as(usize, 1), info.fields.len);
+    try std.testing.expectEqualStrings("lambda", info.fields[0].name);
+}

@@ -168,7 +168,7 @@ fn parseInternal(comptime T: type, element: *xml.Element, options: ParseOptions)
                 },
             );
             if (@errorReturnTrace()) |trace| {
-                std.debug.dumpStackTrace(trace.*);
+                std.debug.dumpStackTrace(trace);
             }
         }
         return e;

@@ -193,7 +193,7 @@
                 },
             );
             if (@errorReturnTrace()) |trace| {
-                std.debug.dumpStackTrace(trace.*);
+                std.debug.dumpStackTrace(trace);
             }
         }
         return e;
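
Both hunks in this file are the same one-character fix: @errorReturnTrace() yields an optional *StackTrace, and on this Zig version std.debug.dumpStackTrace takes that pointer directly rather than a by-value copy. Usage sketch mirroring the hunks above:

    // Pass the pointer through; no dereference needed anymore.
    if (@errorReturnTrace()) |trace| {
        std.debug.dumpStackTrace(trace);
    }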