Compare commits
30 commits: 36cc72c36a ... 7dcf3d3a2e

7dcf3d3a2e
d442671275
213627c305
47ab9f6064
866a89d8ae
b8df9e3610
d1d0b294d7
8a80cbda4a
444173afd2
b6cdb6f7a7
f7106d0904
3f5e49662f
6df02b1074
55298f7575
298f895bfe
79d73cf09f
3a027b6cd9
69d8151ac8
2db4188dbc
2c9a80e363
d06c8da3bb
e46a008bc5
31324c7e83
ed48901c92
7e3796d416
69da301de2
7f80ae45f0
dd1d86476c
888f763bbe
fcf456137f
@@ -1,6 +1,10 @@
 name: AWS-Zig Build
 run-name: ${{ github.actor }} building AWS Zig SDK
-on: [push]
+on:
+  push:
+    branches:
+      - '*'
+      - '!zig-develop*'
 env:
   ACTIONS_RUNTIME_TOKEN: ${{ secrets.GITHUB_TOKEN }}
   ACTIONS_RUNTIME_URL: ${{ env.GITHUB_SERVER_URL }}/api/actions_pipeline/
.gitea/workflows/zig-mach.yaml (new file, 83 lines)
@@ -0,0 +1,83 @@
name: aws-zig mach nominated build
run-name: ${{ github.actor }} building AWS Zig SDK
on:
  schedule:
    - cron: '0 12 * * *' # noon UTC, 4AM Pacific
  push:
    branches:
      - 'zig-develop*'
env:
  ACTIONS_RUNTIME_TOKEN: ${{ secrets.GITHUB_TOKEN }}
  ACTIONS_RUNTIME_URL: ${{ env.GITHUB_SERVER_URL }}/api/actions_pipeline/
jobs:
  build-zig-nightly:
    runs-on: ubuntu-latest
    # Need to use the default container with node and all that, so we can
    # use JS-based actions like actions/checkout@v3...
    # container:
    #   image: alpine:3.15.0
    env:
      ZIG_VERSION: mach-latest
      ARCH: x86_64
    steps:
      - name: Check out repository code
        uses: actions/checkout@v3
      # ARCH is fine, but we can't substitute directly because zig
      # uses x86_64 instead of amd64. They also use aarch64 instead of arm64.
      #
      # However, arm64/linux isn't quite fully tier 1 yet, so this is more of a
      # TODO: https://github.com/ziglang/zig/issues/2443
      - name: Install zig
        run: |
          apt-get update && apt-get install -y jq
          file="$(curl -Osw '%{filename_effective}' "$(curl -s https://machengine.org/zig/index.json |jq -r '."'${ZIG_VERSION}'"."x86_64-linux".tarball')")"
          tar x -C /usr/local -f "${file}"
          ln -s /usr/local/"${file%%.tar.xz}"/zig /usr/local/bin/zig
          zig version
      - name: Run tests
        run: zig build test --verbose
      - name: Build example
        run: ( cd example && zig build ) # Make sure example builds
      # Zig package manager expects everything to be inside a directory in the archive,
      # which it then strips out on download. So we need to shove everything inside a directory
      # the way GitHub/Gitea does for repo archives
      #
      # Also, zig tar process doesn't handle gnu format for long names, nor does it seem to
      # handle posix long name semantics cleanly either. ustar works. This
      # should be using git archive, but we need our generated code to be part of it
      - name: Package source code with generated models
        run: |
          tar -czf ${{ runner.temp }}/${{ github.sha }}-with-models.tar.gz \
            --format ustar \
            --exclude 'zig-*' \
            --transform 's,^,${{ github.sha }}/,' *
      # - name: Sign
      #   id: sign
      #   uses: https://git.lerch.org/lobo/action-hsm-sign@v1
      #   with:
      #     pin: ${{ secrets.HSM_USER_PIN }}
      #     files: ???
      #     public_key: 'https://emil.lerch.org/serverpublic.pem'
      # - run: |
      #     echo "Source 0 should be ./bar: ${{ steps.sign.outputs.SOURCE_0 }}"
      # - run: |
      #     echo "Signature 0 should be ./bar.sig: ${{ steps.sign.outputs.SIG_0 }}"
      # - run: echo "URL of bar (0) is ${{ steps.sign.outputs.URL_0 }}"
      # - run: |
      #     echo "Source 1 should be ./foo: ${{ steps.sign.outputs.SOURCE_1 }}"
      # - run: |
      #     echo "Signature 1 should be ./foo.sig: ${{ steps.sign.outputs.SIG_1 }}"
      # - run: echo "URL of foo (1) is ${{ steps.sign.outputs.URL_1 }}"
      - name: Publish source code with generated models
        run: |
          curl --user ${{ github.actor }}:${{ secrets.PACKAGE_PUSH }} \
            --upload-file ${{ runner.temp }}/${{ github.sha }}-with-models.tar.gz \
            https://git.lerch.org/api/packages/lobo/generic/aws-sdk-with-models/${{ github.sha }}/${{ github.sha }}-with-models.tar.gz
      - name: Notify
        uses: https://git.lerch.org/lobo/action-notify-ntfy@v2
        if: always()
        with:
          host: ${{ secrets.NTFY_HOST }}
          topic: ${{ secrets.NTFY_TOPIC }}
          user: ${{ secrets.NTFY_USER }}
          password: ${{ secrets.NTFY_PASSWORD }}
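The install step above resolves whatever compiler Mach has nominated at run time rather than pinning a version. A minimal sketch of the lookup it performs, assuming the index is shaped the way the jq filter implies (a top-level version key containing per-target objects with a `tarball` field):

```sh
#!/bin/sh
# Sketch only: resolve the mach-latest tarball URL the same way the workflow does.
ZIG_VERSION=mach-latest
url="$(curl -s https://machengine.org/zig/index.json \
  | jq -r '."'"${ZIG_VERSION}"'"."x86_64-linux".tarball')"
echo "would download: ${url}"
```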
.gitea/workflows/zig-nightly.yaml (new file, 81 lines)
@@ -0,0 +1,81 @@
name: aws-zig nightly build
run-name: ${{ github.actor }} building AWS Zig SDK
on:
  push:
    branches:
      - 'zig-develop*'
env:
  ACTIONS_RUNTIME_TOKEN: ${{ secrets.GITHUB_TOKEN }}
  ACTIONS_RUNTIME_URL: ${{ env.GITHUB_SERVER_URL }}/api/actions_pipeline/
jobs:
  build-zig-nightly:
    runs-on: ubuntu-latest
    # Need to use the default container with node and all that, so we can
    # use JS-based actions like actions/checkout@v3...
    # container:
    #   image: alpine:3.15.0
    env:
      ZIG_VERSION: master
      ARCH: x86_64
    steps:
      - name: Check out repository code
        uses: actions/checkout@v3
      # ARCH is fine, but we can't substitute directly because zig
      # uses x86_64 instead of amd64. They also use aarch64 instead of arm64.
      #
      # However, arm64/linux isn't quite fully tier 1 yet, so this is more of a
      # TODO: https://github.com/ziglang/zig/issues/2443
      - name: Install zig
        run: |
          apt-get update && apt-get install -y jq
          file="$(curl -Osw '%{filename_effective}' "$(curl -s https://ziglang.org/download/index.json |jq -r '."'${ZIG_VERSION}'"."x86_64-linux".tarball')")"
          tar x -C /usr/local -f "${file}"
          ln -s /usr/local/"${file%%.tar.xz}"/zig /usr/local/bin/zig
          zig version
      - name: Run tests
        run: zig build test --verbose
      - name: Build example
        run: ( cd example && zig build ) # Make sure example builds
      # Zig package manager expects everything to be inside a directory in the archive,
      # which it then strips out on download. So we need to shove everything inside a directory
      # the way GitHub/Gitea does for repo archives
      #
      # Also, zig tar process doesn't handle gnu format for long names, nor does it seem to
      # handle posix long name semantics cleanly either. ustar works. This
      # should be using git archive, but we need our generated code to be part of it
      - name: Package source code with generated models
        run: |
          tar -czf ${{ runner.temp }}/${{ github.sha }}-with-models.tar.gz \
            --format ustar \
            --exclude 'zig-*' \
            --transform 's,^,${{ github.sha }}/,' *
      # - name: Sign
      #   id: sign
      #   uses: https://git.lerch.org/lobo/action-hsm-sign@v1
      #   with:
      #     pin: ${{ secrets.HSM_USER_PIN }}
      #     files: ???
      #     public_key: 'https://emil.lerch.org/serverpublic.pem'
      # - run: |
      #     echo "Source 0 should be ./bar: ${{ steps.sign.outputs.SOURCE_0 }}"
      # - run: |
      #     echo "Signature 0 should be ./bar.sig: ${{ steps.sign.outputs.SIG_0 }}"
      # - run: echo "URL of bar (0) is ${{ steps.sign.outputs.URL_0 }}"
      # - run: |
      #     echo "Source 1 should be ./foo: ${{ steps.sign.outputs.SOURCE_1 }}"
      # - run: |
      #     echo "Signature 1 should be ./foo.sig: ${{ steps.sign.outputs.SIG_1 }}"
      # - run: echo "URL of foo (1) is ${{ steps.sign.outputs.URL_1 }}"
      - name: Publish source code with generated models
        run: |
          curl --user ${{ github.actor }}:${{ secrets.PACKAGE_PUSH }} \
            --upload-file ${{ runner.temp }}/${{ github.sha }}-with-models.tar.gz \
            https://git.lerch.org/api/packages/lobo/generic/aws-sdk-with-models/${{ github.sha }}/${{ github.sha }}-with-models.tar.gz
      - name: Notify
        uses: https://git.lerch.org/lobo/action-notify-ntfy@v2
        if: always()
        with:
          host: ${{ secrets.NTFY_HOST }}
          topic: ${{ secrets.NTFY_TOPIC }}
          user: ${{ secrets.NTFY_USER }}
          password: ${{ secrets.NTFY_PASSWORD }}
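To make the packaging comment concrete: `--transform 's,^,<sha>/,'` prefixes every archive member with the commit SHA, so the archive unpacks into a single top-level directory, which is exactly what the Zig package manager strips on download. A sketch with a hypothetical SHA and file names:

```sh
#!/bin/sh
# Sketch only: SHA and listed files are hypothetical.
SHA=0123abc
tar -czf "/tmp/${SHA}-with-models.tar.gz" --format ustar \
    --exclude 'zig-*' --transform "s,^,${SHA}/," *
tar -tzf "/tmp/${SHA}-with-models.tar.gz" | head -3
# expected shape:
#   0123abc/build.zig
#   0123abc/README.md
```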
.github/workflows/build.yaml (vendored, new file, 31 lines)
@@ -0,0 +1,31 @@
name: AWS-Zig Build
run-name: ${{ github.actor }} building AWS Zig SDK
on:
  push:
    branches:
      - '*'
      - '!zig-develop*'
jobs:
  build-zig-0.11.0-amd64-host:
    runs-on: ubuntu-latest
    env:
      ZIG_VERSION: 0.11.0
      ARCH: x86_64
    if: ${{ github.env.GITEA_ACTIONS != 'true' }}
    steps:
      - name: Check out repository code
        uses: actions/checkout@v3
      # ARCH is fine, but we can't substitute directly because zig
      # uses x86_64 instead of amd64. They also use aarch64 instead of arm64.
      #
      # However, arm64/linux isn't quite fully tier 1 yet, so this is more of a
      # TODO: https://github.com/ziglang/zig/issues/2443
      - name: Install zig
        run: |
          wget -q https://ziglang.org/download/${ZIG_VERSION}/zig-linux-${ARCH}-${ZIG_VERSION}.tar.xz
          sudo tar x -C /usr/local -f zig-linux-${ARCH}-${ZIG_VERSION}.tar.xz
          sudo ln -s /usr/local/zig-linux-${ARCH}-${ZIG_VERSION}/zig /usr/local/bin/zig
      - name: Run tests
        run: zig build test --verbose
      - name: Build example
        run: ( cd example && zig build ) # Make sure example builds
.github/workflows/zig-mach.yaml (vendored, new file, 38 lines)
@@ -0,0 +1,38 @@
name: aws-zig mach nominated build
run-name: ${{ github.actor }} building AWS Zig SDK
on:
  schedule:
    - cron: '0 12 * * *' # noon UTC, 4AM Pacific
  push:
    branches:
      - 'zig-develop*'
jobs:
  build-zig-nightly:
    runs-on: ubuntu-latest
    # Need to use the default container with node and all that, so we can
    # use JS-based actions like actions/checkout@v3...
    # container:
    #   image: alpine:3.15.0
    env:
      ZIG_VERSION: mach-latest
      ARCH: x86_64
    if: ${{ github.env.GITEA_ACTIONS != 'true' }}
    steps:
      - name: Check out repository code
        uses: actions/checkout@v3
      # ARCH is fine, but we can't substitute directly because zig
      # uses x86_64 instead of amd64. They also use aarch64 instead of arm64.
      #
      # However, arm64/linux isn't quite fully tier 1 yet, so this is more of a
      # TODO: https://github.com/ziglang/zig/issues/2443
      - name: Install zig
        run: |
          apt-get update && apt-get install -y jq
          file="$(curl -Osw '%{filename_effective}' "$(curl -s https://machengine.org/zig/index.json |jq -r '."'${ZIG_VERSION}'"."x86_64-linux".tarball')")"
          sudo tar x -C /usr/local -f "${file}"
          sudo ln -s /usr/local/"${file%%.tar.xz}"/zig /usr/local/bin/zig
          zig version
      - name: Run tests
        run: zig build test --verbose
      - name: Build example
        run: ( cd example && zig build ) # Make sure example builds
.github/workflows/zig-nightly.yaml (vendored, new file, 36 lines)
@@ -0,0 +1,36 @@
name: aws-zig nightly build
run-name: ${{ github.actor }} building AWS Zig SDK
on:
  push:
    branches:
      - 'zig-develop*'
jobs:
  build-zig-nightly:
    runs-on: ubuntu-latest
    # Need to use the default container with node and all that, so we can
    # use JS-based actions like actions/checkout@v3...
    # container:
    #   image: alpine:3.15.0
    env:
      ZIG_VERSION: master
      ARCH: x86_64
    if: ${{ github.env.GITEA_ACTIONS != 'true' }}
    steps:
      - name: Check out repository code
        uses: actions/checkout@v3
      # ARCH is fine, but we can't substitute directly because zig
      # uses x86_64 instead of amd64. They also use aarch64 instead of arm64.
      #
      # However, arm64/linux isn't quite fully tier 1 yet, so this is more of a
      # TODO: https://github.com/ziglang/zig/issues/2443
      - name: Install zig
        run: |
          apt-get update && apt-get install -y jq
          file="$(curl -Osw '%{filename_effective}' "$(curl -s https://ziglang.org/download/index.json |jq -r '."'${ZIG_VERSION}'"."x86_64-linux".tarball')")"
          sudo tar x -C /usr/local -f "${file}"
          sudo ln -s /usr/local/"${file%%.tar.xz}"/zig /usr/local/bin/zig
          zig version
      - name: Run tests
        run: zig build test --verbose
      - name: Build example
        run: ( cd example && zig build ) # Make sure example builds
Package.zig (deleted, 521 lines)
@@ -1,521 +0,0 @@
const builtin = @import("builtin");
const std = @import("std");
const testing = std.testing;
const Hasher = @import("codegen/src/Hasher.zig");

/// This is 128 bits - Even with 2^54 cache entries, the probability of a collision would be under 10^-6
const bin_digest_len = 16;
const hex_digest_len = bin_digest_len * 2;

const Package = @This();

root_src_directory: std.Build.Cache.Directory,

/// Whether to free `root_src_directory` on `destroy`.
root_src_directory_owned: bool = false,
allocator: std.mem.Allocator,

pub const Dependency = struct {
    url: []const u8,
    hash: ?[]const u8,
};

pub fn deinit(self: *Package) void {
    if (self.root_src_directory_owned)
        self.root_src_directory.closeAndFree(self.allocator);
}

pub fn fetchOneAndUnpack(
    allocator: std.mem.Allocator,
    cache_directory: []const u8, // directory to store things
    dep: Dependency, // thing to download
) !*Package {
    var http_client: std.http.Client = .{ .allocator = allocator };
    defer http_client.deinit();

    const global_cache_directory: std.Build.Cache.Directory = .{
        .handle = try std.fs.cwd().makeOpenPath(cache_directory, .{}),
        .path = cache_directory,
    };
    var thread_pool: std.Thread.Pool = undefined;
    try thread_pool.init(.{ .allocator = allocator });
    defer thread_pool.deinit();
    var progress: std.Progress = .{ .dont_print_on_dumb = true };
    const root_prog_node = progress.start("Fetch Packages", 0);
    defer root_prog_node.end();
    return try fetchAndUnpack(
        &thread_pool,
        &http_client,
        global_cache_directory,
        dep,
        dep.url,
        root_prog_node,
    );
}

pub fn fetchAndUnpack(
    thread_pool: *std.Thread.Pool, // thread pool for hashing things in parallel
    http_client: *std.http.Client, // client to download stuff
    global_cache_directory: std.Build.Cache.Directory, // directory to store things
    dep: Dependency, // thing to download
    fqn: []const u8, // used as name for thing downloaded
    root_prog_node: *std.Progress.Node, // used for outputting to terminal
) !*Package {
    const gpa = http_client.allocator;
    const s = std.fs.path.sep_str;

    // Check if the expected_hash is already present in the global package
    // cache, and thereby avoid both fetching and unpacking.
    if (dep.hash) |h| cached: {
        const hex_digest = h[0..Hasher.hex_multihash_len];
        const pkg_dir_sub_path = "p" ++ s ++ hex_digest;

        const build_root = try global_cache_directory.join(gpa, &.{pkg_dir_sub_path});
        errdefer gpa.free(build_root);

        var pkg_dir = global_cache_directory.handle.openDir(pkg_dir_sub_path, .{}) catch |err| switch (err) {
            error.FileNotFound => break :cached,
            else => |e| return e,
        };
        errdefer pkg_dir.close();

        root_prog_node.completeOne();

        const ptr = try gpa.create(Package);
        errdefer gpa.destroy(ptr);

        ptr.* = .{
            .root_src_directory = .{
                .path = build_root, // TODO: This leaks memory somehow (should be cleaned in deinit()
                .handle = pkg_dir,
            },
            .root_src_directory_owned = true,
            .allocator = gpa,
        };

        return ptr;
    }

    var pkg_prog_node = root_prog_node.start(fqn, 0);
    defer pkg_prog_node.end();
    pkg_prog_node.activate();
    pkg_prog_node.context.refresh();

    const uri = try std.Uri.parse(dep.url);

    const rand_int = std.crypto.random.int(u64);
    const tmp_dir_sub_path = "tmp" ++ s ++ Hasher.hex64(rand_int);

    const actual_hash = a: {
        var tmp_directory: std.Build.Cache.Directory = d: {
            const path = try global_cache_directory.join(gpa, &.{tmp_dir_sub_path});
            errdefer gpa.free(path);

            const iterable_dir = try global_cache_directory.handle.makeOpenPathIterable(tmp_dir_sub_path, .{});
            errdefer iterable_dir.close();

            break :d .{
                .path = path,
                .handle = iterable_dir.dir,
            };
        };
        defer tmp_directory.closeAndFree(gpa);

        var h = std.http.Headers{ .allocator = gpa };
        defer h.deinit();

        var req = try http_client.request(.GET, uri, h, .{});
        defer req.deinit();

        try req.start();
        try req.wait();

        if (req.response.status != .ok) {
            std.log.err("Expected response status '200 OK' got '{} {s}'", .{
                @intFromEnum(req.response.status),
                req.response.status.phrase() orelse "",
            });
            return error.UnexpectedResponseStatus;
        }

        const content_type = req.response.headers.getFirstValue("Content-Type") orelse
            return error.MissingContentTypeHeader;

        var prog_reader: ProgressReader(std.http.Client.Request.Reader) = .{
            .child_reader = req.reader(),
            .prog_node = &pkg_prog_node,
            .unit = if (req.response.content_length) |content_length| unit: {
                const kib = content_length / 1024;
                const mib = kib / 1024;
                if (mib > 0) {
                    pkg_prog_node.setEstimatedTotalItems(@intCast(mib));
                    pkg_prog_node.setUnit("MiB");
                    break :unit .mib;
                } else {
                    pkg_prog_node.setEstimatedTotalItems(@intCast(@max(1, kib)));
                    pkg_prog_node.setUnit("KiB");
                    break :unit .kib;
                }
            } else .any,
        };
        pkg_prog_node.context.refresh();

        if (std.ascii.eqlIgnoreCase(content_type, "application/gzip") or
            std.ascii.eqlIgnoreCase(content_type, "application/x-gzip") or
            std.ascii.eqlIgnoreCase(content_type, "application/tar+gzip"))
        {
            // I observed the gzip stream to read 1 byte at a time, so I am using a
            // buffered reader on the front of it.
            try unpackTarball(gpa, prog_reader.reader(), tmp_directory.handle, std.compress.gzip);
        } else if (std.ascii.eqlIgnoreCase(content_type, "application/x-xz")) {
            // I have not checked what buffer sizes the xz decompression implementation uses
            // by default, so the same logic applies for buffering the reader as for gzip.
            try unpackTarball(gpa, prog_reader.reader(), tmp_directory.handle, std.compress.xz);
        } else if (std.ascii.eqlIgnoreCase(content_type, "application/octet-stream")) {
            // support gitlab tarball urls such as https://gitlab.com/<namespace>/<project>/-/archive/<sha>/<project>-<sha>.tar.gz
            // whose content-disposition header is: 'attachment; filename="<project>-<sha>.tar.gz"'
            const content_disposition = req.response.headers.getFirstValue("Content-Disposition") orelse
                return error.@"Missing 'Content-Disposition' header for Content-Type=application/octet-stream";
            if (isTarAttachment(content_disposition)) {
                try unpackTarball(gpa, prog_reader.reader(), tmp_directory.handle, std.compress.gzip);
            } else {
                std.log.err("Unsupported 'Content-Disposition' header value: '{s}' for Content-Type=application/octet-stream", .{content_disposition});
                return error.UnsupportedContentDispositionHeader;
            }
        } else {
            std.log.err("Unsupported 'Content-Type' header value: '{s}'", .{content_type});
            return error.UnsupportedContentTypeHeader;
        }

        // Download completed - stop showing downloaded amount as progress
        pkg_prog_node.setEstimatedTotalItems(0);
        pkg_prog_node.setCompletedItems(0);
        pkg_prog_node.context.refresh();

        // TODO: delete files not included in the package prior to computing the package hash.
        // for example, if the ini file has directives to include/not include certain files,
        // apply those rules directly to the filesystem right here. This ensures that files
        // not protected by the hash are not present on the file system.

        // TODO: raise an error for files that have illegal paths on some operating systems.
        // For example, on Linux a path with a backslash should raise an error here.
        // Of course, if the ignore rules above omit the file from the package, then everything
        // is fine and no error should be raised.

        break :a try Hasher.computeDirectoryHash(thread_pool, .{ .dir = tmp_directory.handle }, &.{});
    };

    const pkg_dir_sub_path = "p" ++ s ++ Hasher.hexDigest(actual_hash);
    try renameTmpIntoCache(global_cache_directory.handle, tmp_dir_sub_path, pkg_dir_sub_path);

    const actual_hex = Hasher.hexDigest(actual_hash);
    if (dep.hash) |h| {
        if (!std.mem.eql(u8, h, &actual_hex)) {
            std.log.err("hash mismatch: expected: {s}, found: {s}", .{
                h, actual_hex,
            });
            return error.HashMismatch;
        }
    } else {
        std.log.err("No hash supplied. Expecting hash \"{s}\"", .{actual_hex});
        return error.NoHashSupplied;
    }

    const build_root = try global_cache_directory.join(gpa, &.{pkg_dir_sub_path});
    defer gpa.free(build_root);

    const mod = try createWithDir(gpa, global_cache_directory, pkg_dir_sub_path);
    return mod;
}

fn ProgressReader(comptime ReaderType: type) type {
    return struct {
        child_reader: ReaderType,
        bytes_read: u64 = 0,
        prog_node: *std.Progress.Node,
        unit: enum {
            kib,
            mib,
            any,
        },

        pub const Error = ReaderType.Error;
        pub const Reader = std.io.Reader(*@This(), Error, read);

        pub fn read(self: *@This(), buf: []u8) Error!usize {
            const amt = try self.child_reader.read(buf);
            self.bytes_read += amt;
            const kib = self.bytes_read / 1024;
            const mib = kib / 1024;
            switch (self.unit) {
                .kib => self.prog_node.setCompletedItems(@intCast(kib)),
                .mib => self.prog_node.setCompletedItems(@intCast(mib)),
                .any => {
                    if (mib > 0) {
                        self.prog_node.setUnit("MiB");
                        self.prog_node.setCompletedItems(@intCast(mib));
                    } else {
                        self.prog_node.setUnit("KiB");
                        self.prog_node.setCompletedItems(@intCast(kib));
                    }
                },
            }
            self.prog_node.context.maybeRefresh();
            return amt;
        }

        pub fn reader(self: *@This()) Reader {
            return .{ .context = self };
        }
    };
}

fn isTarAttachment(content_disposition: []const u8) bool {
    const disposition_type_end = std.ascii.indexOfIgnoreCase(content_disposition, "attachment;") orelse return false;

    var value_start = std.ascii.indexOfIgnoreCasePos(content_disposition, disposition_type_end + 1, "filename") orelse return false;
    value_start += "filename".len;
    if (content_disposition[value_start] == '*') {
        value_start += 1;
    }
    if (content_disposition[value_start] != '=') return false;
    value_start += 1;

    var value_end = std.mem.indexOfPos(u8, content_disposition, value_start, ";") orelse content_disposition.len;
    if (content_disposition[value_end - 1] == '\"') {
        value_end -= 1;
    }
    return std.ascii.endsWithIgnoreCase(content_disposition[value_start..value_end], ".tar.gz");
}

fn renameTmpIntoCache(
    cache_dir: std.fs.Dir,
    tmp_dir_sub_path: []const u8,
    dest_dir_sub_path: []const u8,
) !void {
    std.debug.assert(dest_dir_sub_path[1] == std.fs.path.sep);
    var handled_missing_dir = false;
    while (true) {
        cache_dir.rename(tmp_dir_sub_path, dest_dir_sub_path) catch |err| switch (err) {
            error.FileNotFound => {
                if (handled_missing_dir) return err;
                cache_dir.makeDir(dest_dir_sub_path[0..1]) catch |mkd_err| switch (mkd_err) {
                    error.PathAlreadyExists => handled_missing_dir = true,
                    else => |e| return e,
                };
                continue;
            },
            error.PathAlreadyExists, error.AccessDenied => {
                // Package has been already downloaded and may already be in use on the system.
                cache_dir.deleteTree(tmp_dir_sub_path) catch |del_err| {
                    std.log.warn("unable to delete temp directory: {s}", .{@errorName(del_err)});
                };
            },
            else => |e| return e,
        };
        break;
    }
}

fn createWithDir(
    gpa: std.mem.Allocator,
    directory: std.Build.Cache.Directory,
    /// Relative to `directory`. If null, means `directory` is the root src dir
    /// and is owned externally.
    root_src_dir_path: ?[]const u8,
) !*Package {
    const ptr = try gpa.create(Package);
    errdefer gpa.destroy(ptr);

    if (root_src_dir_path) |p| {
        const owned_dir_path = try directory.join(gpa, &[1][]const u8{p});
        errdefer gpa.free(owned_dir_path);

        ptr.* = .{
            .root_src_directory = .{
                .path = owned_dir_path,
                .handle = try directory.handle.openDir(p, .{}),
            },
            .root_src_directory_owned = true,
            .allocator = gpa,
        };
    } else {
        ptr.* = .{
            .root_src_directory = directory,
            .root_src_directory_owned = false,
            .allocator = gpa,
        };
    }
    return ptr;
}

// Create/Write a file, close it, then grab its stat.mtime timestamp.
fn testGetCurrentFileTimestamp(dir: std.fs.Dir) !i128 {
    const test_out_file = "test-filetimestamp.tmp";

    var file = try dir.createFile(test_out_file, .{
        .read = true,
        .truncate = true,
    });
    defer {
        file.close();
        dir.deleteFile(test_out_file) catch {};
    }

    return (try file.stat()).mtime;
}

// These functions come from src/Package.zig, src/Manifest.zig in the compiler,
// not the standard library
fn unpackTarball(
    gpa: std.mem.Allocator,
    req_reader: anytype,
    out_dir: std.fs.Dir,
    comptime compression: type,
) !void {
    var br = std.io.bufferedReaderSize(std.crypto.tls.max_ciphertext_record_len, req_reader);

    var decompress = try compression.decompress(gpa, br.reader());
    defer decompress.deinit();

    try std.tar.pipeToFileSystem(out_dir, decompress.reader(), .{
        .strip_components = 1,
        // TODO: we would like to set this to executable_bit_only, but two
        // things need to happen before that:
        // 1. the tar implementation needs to support it
        // 2. the hashing algorithm here needs to support detecting the is_executable
        //    bit on Windows from the ACLs (see the isExecutable function).
        .mode_mode = .ignore,
    });
}

test {
    std.testing.refAllDecls(@This());
}

test "cache a file and recall it" {
    if (builtin.os.tag == .wasi) {
        // https://github.com/ziglang/zig/issues/5437
        return error.SkipZigTest;
    }

    var tmp = testing.tmpDir(.{});
    defer tmp.cleanup();

    const temp_file = "test.txt";
    const temp_file2 = "test2.txt";
    const temp_manifest_dir = "temp_manifest_dir";

    try tmp.dir.writeFile(temp_file, "Hello, world!\n");
    try tmp.dir.writeFile(temp_file2, "yo mamma\n");

    // Wait for file timestamps to tick
    const initial_time = try testGetCurrentFileTimestamp(tmp.dir);
    while ((try testGetCurrentFileTimestamp(tmp.dir)) == initial_time) {
        std.time.sleep(1);
    }

    var digest1: [hex_digest_len]u8 = undefined;
    var digest2: [hex_digest_len]u8 = undefined;

    {
        var cache = std.build.Cache{
            .gpa = testing.allocator,
            .manifest_dir = try tmp.dir.makeOpenPath(temp_manifest_dir, .{}),
        };
        cache.addPrefix(.{ .path = null, .handle = tmp.dir });
        defer cache.manifest_dir.close();

        {
            var ch = cache.obtain();
            defer ch.deinit();

            ch.hash.add(true);
            ch.hash.add(@as(u16, 1234));
            ch.hash.addBytes("1234");
            _ = try ch.addFile(temp_file, null);

            // There should be nothing in the cache
            try testing.expectEqual(false, try ch.hit());

            digest1 = ch.final();
            try ch.writeManifest();
        }
        {
            var ch = cache.obtain();
            defer ch.deinit();

            ch.hash.add(true);
            ch.hash.add(@as(u16, 1234));
            ch.hash.addBytes("1234");
            _ = try ch.addFile(temp_file, null);

            // Cache hit! We just "built" the same file
            try testing.expect(try ch.hit());
            digest2 = ch.final();

            try testing.expectEqual(false, ch.have_exclusive_lock);
        }

        try testing.expectEqual(digest1, digest2);
    }
}

test "fetch and unpack" {
    const alloc = std.testing.allocator;
    var http_client: std.http.Client = .{ .allocator = alloc };
    defer http_client.deinit();

    const global_cache_directory: std.Build.Cache.Directory = .{
        .handle = try std.fs.cwd().makeOpenPath("test-pkg", .{}),
        .path = "test-pkg",
    };
    var thread_pool: std.Thread.Pool = undefined;
    try thread_pool.init(.{ .allocator = alloc });
    defer thread_pool.deinit();
    var progress: std.Progress = .{ .dont_print_on_dumb = true };
    const root_prog_node = progress.start("Fetch Packages", 0);
    defer root_prog_node.end();
    const pkg = try fetchAndUnpack(
        &thread_pool,
        &http_client,
        global_cache_directory,
        .{
            .url = "https://github.com/aws/aws-sdk-go-v2/archive/7502ff360b1c3b79cbe117437327f6ff5fb89f65.tar.gz",
            .hash = "1220a414719bff14c9362fb1c695e3346fa12ec2e728bae5757a57aae7738916ffd2",
        },
        "https://github.com/aws/aws-sdk-go-v2/archive/7502ff360b1c3b79cbe117437327f6ff5fb89f65.tar.gz",
        root_prog_node,
    );
    defer alloc.destroy(pkg);
    defer pkg.deinit();
}

test "fetch one and unpack" {
    const pkg = try fetchOneAndUnpack(
        std.testing.allocator,
        "test-pkg",
        .{
            .url = "https://github.com/aws/aws-sdk-go-v2/archive/7502ff360b1c3b79cbe117437327f6ff5fb89f65.tar.gz",
            .hash = "1220a414719bff14c9362fb1c695e3346fa12ec2e728bae5757a57aae7738916ffd2",
        },
    );
    defer std.testing.allocator.destroy(pkg);
    defer pkg.deinit();
    try std.testing.expectEqualStrings(
        "test-pkg/p/1220a414719bff14c9362fb1c695e3346fa12ec2e728bae5757a57aae7738916ffd2",
        pkg.root_src_directory.path.?,
    );
}

test "isTarAttachment" {
    try std.testing.expect(isTarAttachment("attaChment; FILENAME=\"stuff.tar.gz\"; size=42"));
    try std.testing.expect(isTarAttachment("attachment; filename*=\"stuff.tar.gz\""));
    try std.testing.expect(isTarAttachment("ATTACHMENT; filename=\"stuff.tar.gz\""));
    try std.testing.expect(isTarAttachment("attachment; FileName=\"stuff.tar.gz\""));
    try std.testing.expect(isTarAttachment("attachment; FileName*=UTF-8''xyz%2Fstuff.tar.gz"));

    try std.testing.expect(!isTarAttachment("attachment FileName=\"stuff.tar.gz\""));
    try std.testing.expect(!isTarAttachment("attachment; FileName=\"stuff.tar\""));
    try std.testing.expect(!isTarAttachment("attachment; FileName\"stuff.gz\""));
    try std.testing.expect(!isTarAttachment("attachment; size=42"));
    try std.testing.expect(!isTarAttachment("inline; size=42"));
    try std.testing.expect(!isTarAttachment("FileName=\"stuff.tar.gz\"; attachment;"));
    try std.testing.expect(!isTarAttachment("FileName=\"stuff.tar.gz\";"));
}
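A side note on the `1220...` hash literals that appear in this file and in build.zig.zon: Zig package hashes are multihash-prefixed SHA-256 digests, which is why `hex_multihash_len` shows up in the code above. A sketch of the decoding (the multihash reading is standard, but treat the mapping onto this Zig version as an assumption):

```sh
#!/bin/sh
# '12' = sha2-256 multihash type, '20' = 0x20 = 32-byte digest length,
# followed by the 64 hex characters of the digest itself.
h=1220a414719bff14c9362fb1c695e3346fa12ec2e728bae5757a57aae7738916ffd2
echo "type=$(echo "$h" | cut -c1-2) len=0x$(echo "$h" | cut -c3-4) digest=$(echo "$h" | cut -c5-)"
```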
README.md (107 lines changed)
@@ -1,10 +1,17 @@
 AWS SDK for Zig
 ===============
 
-[![Build Status](https://actions-status.lerch.org/lobo/aws-sdk-for-zig/build)](https://git.lerch.org/lobo/aws-sdk-for-zig/actions?workflow=build.yaml&state=closed)
+[Last Mach Nominated Zig Version](https://machengine.org/about/nominated-zig/):
 
-**NOTE: THIS SDK IS CURRENTLY UNUSABLE FOR SEVERAL IMPORTANT AWS SERVICES
-WITHOUT A PROXY. SEE LIMITATIONS SECTION BELOW**
+[![Build Status: Zig 0.12.0-dev.3180+83e578a18](https://actions-status.lerch.org/lobo/aws-sdk-for-zig/zig-mach)](https://git.lerch.org/lobo/aws-sdk-for-zig/actions?workflow=zig-mach.yaml&state=closed)
+
+[Nightly Zig](https://ziglang.org/download/):
+
+[![Build Status: Zig Nightly](https://actions-status.lerch.org/lobo/aws-sdk-for-zig/zig-nightly)](https://git.lerch.org/lobo/aws-sdk-for-zig/actions?workflow=zig-nightly.yaml&state=closed)
+
+**NOTE: TLS 1.3 support is still deploying across AWS. Some services, especially S3,
+may or may not be available without a proxy, depending on the region.
+See limitations section below**
 
 Current executable size for the demo is 980k after compiling with -Doptimize=ReleaseSmall
 in x86_linux, and will vary based on services used. Tested targets:
@@ -19,6 +26,16 @@ in x86_linux, and will vary based on services used. Tested targets:
 
 Tested targets are built, but not continuously tested, by CI.
 
+Zig-Develop Branch
+------------------
+
+This branch is intended for use with the in-development version of Zig. This
+starts with 0.12.0-dev.3180+83e578a18. I will try to keep this branch up to date
+with latest, but with a special eye towards aligning with [Mach Engine's Nominated
+Zig Versions](https://machengine.org/about/nominated-zig/). As nightly zig versions
+disappear off the downloads page (and back end server), we can use the mirroring
+that the Mach Engine participates in to pull these versions.
+
 Building
 --------
@@ -44,32 +61,15 @@ for working with services. For local testing or alternative endpoints, there's
 no real standard, so there is code to look for `AWS_ENDPOINT_URL` environment
 variable that will supersede all other configuration.
 
-Other branches
---------------
-
-The default branch is fully functional but requires TLS 1.3. Until AWS Services
-support TLS 1.3 at the end of 2023, the [0.9.0 branch](https://git.lerch.org/lobo/aws-sdk-for-zig/src/branch/0.9.0)
-may be of use. More details below in limitations. This branch overall is
-superior, as is the 0.11 compiler, but if you need a service that doesn't support
-TLS 1.3 and you need it right away, feel free to use that branch. Note I do not
-intend to update code in the 0.9.0 branch, but will accept PRs.
-
-An [old branch based on aws-crt](https://github.com/elerch/aws-sdk-for-zig/tree/aws-crt) exists
-for posterity, and supports x86_64 linux. The old branch is deprecated, so if
-there are issues you see that work correctly in the aws-crt branch, please
-file an issue. I can't think of a reason to use this branch any more. I do not
-intend to entertain PRs on this branch, but reach out if you think it is important.
-
 Limitations
 -----------
 
-The zig 0.11 HTTP client supports TLS 1.3 only. This, IMHO, is a reasonable
-restriction given its introduction 5 years ago, but is inflicting some short
-term pain on this project as AWS has not yet fully implemented the protocol. AWS has
-committed to [TLS 1.3 support across all services by the end of 2023](https://aws.amazon.com/blogs/security/faster-aws-cloud-connections-with-tls-1-3/), but many (most) services as of August 28th have not yet
-been upgraded. Proxy support has been added, so to get to the services that
+The zig 0.11 HTTP client supports TLS 1.3 only. AWS has committed to
+[TLS 1.3 support across all services by the end of 2023](https://aws.amazon.com/blogs/security/faster-aws-cloud-connections-with-tls-1-3/),
+but a few services as of April 1, 2024 have not been upgraded, and S3 is
+a bit intermittent. Proxy support has been added, so to get to the services that
 do not yet support TLS 1.3, you can use something like [mitmproxy](https://mitmproxy.org/)
-to proxy those requests. Of course, this is not a good production solution...
+to proxy those requests until roll out is complete.
 
 WebIdentityToken is not yet implemented.
 
@@ -87,62 +87,23 @@ TODO List:
 * Implement timeouts and other TODO's in the code
 * Add option to cache signature keys
 
-Compiler wishlist/watchlist:
-
-* [comptime allocations](https://github.com/ziglang/zig/issues/1291) so we can read files, etc (or is there another way)
-
-Services without TLS 1.3 support (46 services out of 255 total)
+Services without TLS 1.3 support (4 services out of 255 total)
 ---------------------------------------------------------------
 
-NOTE THAT EC2, S3, Lambda, DynamoDB, SNS, SQS are all part of this list!!
+The following service list is based on limited testing against the us-west-2
+region. Your mileage may vary, as there are thousands of endpoints against
+many regions. It appears the TLS 1.3 rollout is fairly far along at
+this point.
+
+NOTE ON S3: For me, S3 is currently intermittently available using TLS 1.3, so
+it appears deployments are in progress. The last couple days it has not been
+available consistently, so I have added it back to the list.
 
 ```
-cloudsearch
-codecommit
-codestar
-cognito-identity
-cognito-idp
-cognito-sync
 data.iot
-data.jobs.iot
-dax
-discovery
-dynamodb
-ec2
-elasticache
-elasticbeanstalk
-elasticloadbalancing
-featurestore-runtime.sagemaker
-forecast
-forecastquery
-glacier
-ingest.timestream
-iotsitewise
-kinesis
-kinesisvideo
-lambda
-logs
 models.lex
-monitoring
-oidc
 opsworks
-personalize-events
-personalize-runtime
-portal.sso
-query.timestream
-redshift
-runtime.lex
-runtime.sagemaker
-runtime-v2-lex
 s3
-sns
-sqs
-sso
-storagegateway
-streams.dynamodb
-sts
-support
-wafv2
 ```
 
 Dependency tree
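For the proxy workaround the limitations section describes, a minimal local sketch (the listen address is an assumption, and how the SDK discovers the proxy depends on its proxy configuration, which this diff does not show):

```sh
#!/bin/sh
# Sketch only: run mitmproxy headless on a local port. The Zig client can
# complete TLS 1.3 with the proxy, and the proxy can negotiate TLS 1.2
# upstream with services that have not finished the 1.3 rollout.
mitmdump --listen-host 127.0.0.1 --listen-port 8080
```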
build.zig (97 lines changed)
@@ -1,12 +1,8 @@
 const std = @import("std");
 const builtin = @import("builtin");
-const Builder = @import("std").build.Builder;
+const Builder = @import("std").Build;
-const Package = @import("Package.zig");
-
-const models_url = "https://github.com/aws/aws-sdk-go-v2/archive/7502ff360b1c3b79cbe117437327f6ff5fb89f65.tar.gz";
-const models_hash: ?[]const u8 = "1220a414719bff14c9362fb1c695e3346fa12ec2e728bae5757a57aae7738916ffd2";
 const models_subdir = "codegen/sdk-codegen/aws-models/"; // note will probably not work on windows
-const models_dir = "p" ++ std.fs.path.sep_str ++ (models_hash orelse "") ++ std.fs.path.sep_str ++ models_subdir;
-
 const test_targets = [_]std.zig.CrossTarget{
     .{}, // native
@@ -79,22 +75,18 @@ pub fn build(b: *Builder) !void {
         .optimize = optimize,
     });
     const smithy_module = smithy_dep.module("smithy");
-    exe.addModule("smithy", smithy_module); // not sure this should be here...
+    exe.root_module.addImport("smithy", smithy_module); // not sure this should be here...
 
     // Expose module to others
     _ = b.addModule("aws", .{
-        .source_file = .{ .path = "src/aws.zig" },
-        .dependencies = &[_]std.build.ModuleDependency{
-            .{ .name = "smithy", .module = smithy_module },
-        },
+        .root_source_file = .{ .path = "src/aws.zig" },
+        .imports = &.{.{ .name = "smithy", .module = smithy_module }},
     });
 
     // Expose module to others
     _ = b.addModule("aws-signing", .{
-        .source_file = .{ .path = "src/aws_signing.zig" },
-        .dependencies = &[_]std.build.ModuleDependency{
-            .{ .name = "smithy", .module = smithy_module },
-        },
+        .root_source_file = .{ .path = "src/aws_signing.zig" },
+        .imports = &.{.{ .name = "smithy", .module = smithy_module }},
     });
     // TODO: This does not work correctly due to https://github.com/ziglang/zig/issues/16354
     //
@@ -118,9 +110,6 @@ pub fn build(b: *Builder) !void {
     const run_step = b.step("run", "Run the app");
     run_step.dependOn(&run_cmd.step);
 
-    const fm = b.step("fetch", "Fetch model files");
-    var fetch_step = FetchStep.create(b, models_url, models_hash);
-    fm.dependOn(&fetch_step.step);
     const gen_step = blk: {
         const cg = b.step("gen", "Generate zig service code from smithy models");
 
@@ -128,21 +117,35 @@ pub fn build(b: *Builder) !void {
             .name = "codegen",
             .root_source_file = .{ .path = "codegen/src/main.zig" },
             // We need this generated for the host, not the real target
-            // .target = target,
+            .target = b.host,
             .optimize = if (b.verbose) .Debug else .ReleaseSafe,
         });
-        cg_exe.addModule("smithy", smithy_dep.module("smithy"));
+        cg_exe.root_module.addImport("smithy", smithy_dep.module("smithy"));
        var cg_cmd = b.addRunArtifact(cg_exe);
         cg_cmd.addArg("--models");
+        const hash = hash_blk: {
+            for (b.available_deps) |dep| {
+                const dep_name = dep.@"0";
+                const dep_hash = dep.@"1";
+                if (std.mem.eql(u8, dep_name, "models"))
+                    break :hash_blk dep_hash;
+            }
+            return error.DependencyNamedModelsNotFoundInBuildZigZon;
+        };
         cg_cmd.addArg(try std.fs.path.join(
             b.allocator,
-            &[_][]const u8{ b.global_cache_root.path.?, models_dir },
+            &[_][]const u8{
+                b.graph.global_cache_root.path.?,
+                "p",
+                hash,
+                models_subdir,
+            },
         ));
         cg_cmd.addArg("--output");
-        cg_cmd.addDirectoryArg(std.Build.FileSource.relative("src/models"));
+        cg_cmd.addDirectoryArg(std.Build.LazyPath.relative("src/models"));
         if (b.verbose)
             cg_cmd.addArg("--verbose");
-        cg_cmd.step.dependOn(&fetch_step.step);
+        // cg_cmd.step.dependOn(&fetch_step.step);
         // TODO: this should use zig_exe from std.Build
         // codegen should store a hash in a comment
         // this would be hash of the exe that created the file
@@ -173,10 +176,10 @@ pub fn build(b: *Builder) !void {
     // but does not run it.
     const unit_tests = b.addTest(.{
         .root_source_file = .{ .path = "src/aws.zig" },
-        .target = t,
+        .target = b.resolveTargetQuery(t),
         .optimize = optimize,
     });
-    unit_tests.addModule("smithy", smithy_dep.module("smithy"));
+    unit_tests.root_module.addImport("smithy", smithy_dep.module("smithy"));
     unit_tests.step.dependOn(gen_step);
 
     const run_unit_tests = b.addRunArtifact(unit_tests);
@@ -186,49 +189,3 @@ pub fn build(b: *Builder) !void {
     }
     b.installArtifact(exe);
 }
-const FetchStep = struct {
-    step: std.Build.Step,
-    url: []const u8,
-    hash: ?[]const u8,
-
-    pub fn create(owner: *std.Build, url: []const u8, hash: ?[]const u8) *FetchStep {
-        const fs = owner.allocator.create(FetchStep) catch @panic("OOM");
-        fs.* = .{
-            .step = std.Build.Step.init(.{
-                .id = .custom,
-                .name = "FetchStep",
-                .owner = owner,
-                .makeFn = make,
-            }),
-            .url = url,
-            .hash = hash,
-        };
-        return fs;
-    }
-
-    fn make(step: *std.Build.Step, prog_node: *std.Progress.Node) !void {
-        const b = step.owner;
-        const self = @fieldParentPtr(FetchStep, "step", step);
-
-        const alloc = b.allocator;
-        var http_client: std.http.Client = .{ .allocator = alloc };
-        defer http_client.deinit();
-
-        var thread_pool: std.Thread.Pool = undefined;
-        try thread_pool.init(.{ .allocator = alloc });
-        defer thread_pool.deinit();
-        const pkg = try Package.fetchAndUnpack(
-            &thread_pool,
-            &http_client,
-            b.global_cache_root,
-            .{
-                .url = self.url,
-                .hash = self.hash,
-            },
-            self.url,
-            prog_node,
-        );
-        defer alloc.destroy(pkg);
-        defer pkg.deinit();
-    }
-};
@@ -1,11 +1,16 @@
 .{
     .name = "aws-zig",
     .version = "0.0.1",
+    .paths = .{""},
 
     .dependencies = .{
         .smithy = .{
-            .url = "https://git.lerch.org/lobo/smithy/archive/41b61745d25a65817209dd5dddbb5f9b66896a99.tar.gz",
-            .hash = "122087deb0ae309b2258d59b40d82fe5921fdfc35b420bb59033244851f7f276fa34",
+            .url = "https://git.lerch.org/lobo/smithy/archive/17f115d9c60ce598a314b18ae89828eef2955915.tar.gz",
+            .hash = "1220dbec78f1a5188d9e4b7a0df885cbb363d7625ceae39d0e05567d4adbc6fb9786",
+        },
+        .models = .{
+            .url = "https://github.com/aws/aws-sdk-go-v2/archive/58cf6509525a12d64fd826da883bfdbacbd2f00e.tar.gz",
+            .hash = "122017a2f3081ce83c23e0c832feb1b8b4176d507b6077f522855dc774bcf83ee315",
         },
     },
 }
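The `.models` entry in this build.zig.zon hunk is what the `hash_blk` loop in build.zig resolves through `b.available_deps`: each manifest dependency surfaces to the build script as a (name, hash) tuple, and the hash doubles as the package's directory name under the global cache's `p/` tree. A sketch of the resulting path (the `~/.cache/zig` root is a common Linux default and an assumption here; `b.graph.global_cache_root` is authoritative):

```sh
#!/bin/sh
# Sketch only: list the fetched model files for the hash pinned above.
ls ~/.cache/zig/p/122017a2f3081ce83c23e0c832feb1b8b4176d507b6077f522855dc774bcf83ee315/codegen/sdk-codegen/aws-models/ | head
```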
@@ -77,13 +77,13 @@ pub fn hex64(x: u64) [16]u8 {
     return result;
 }
 
-pub const walkerFn = *const fn (std.fs.IterableDir.Walker.WalkerEntry) bool;
+pub const walkerFn = *const fn (std.fs.Dir.Walker.WalkerEntry) bool;
 
-fn included(entry: std.fs.IterableDir.Walker.WalkerEntry) bool {
+fn included(entry: std.fs.Dir.Walker.WalkerEntry) bool {
     _ = entry;
     return true;
 }
-fn excluded(entry: std.fs.IterableDir.Walker.WalkerEntry) bool {
+fn excluded(entry: std.fs.Dir.Walker.WalkerEntry) bool {
     _ = entry;
     return false;
 }
@@ -96,7 +96,7 @@ pub const ComputeDirectoryOptions = struct {
 
 pub fn computeDirectoryHash(
     thread_pool: *std.Thread.Pool,
-    dir: std.fs.IterableDir,
+    dir: std.fs.Dir,
     options: *ComputeDirectoryOptions,
 ) ![Hash.digest_length]u8 {
     const gpa = thread_pool.allocator;
@@ -138,7 +138,7 @@ pub fn computeDirectoryHash(
             .failure = undefined, // to be populated by the worker
         };
         wait_group.start();
-        try thread_pool.spawn(workerHashFile, .{ dir.dir, hashed_file, &wait_group });
+        try thread_pool.spawn(workerHashFile, .{ dir, hashed_file, &wait_group });
 
         try all_files.append(hashed_file);
     }
|
@@ -17,7 +17,7 @@ pub fn main() anyerror!void {
 
     var output_dir = std.fs.cwd();
    defer if (output_dir.fd > 0) output_dir.close();
-    var models_dir: ?std.fs.IterableDir = null;
+    var models_dir: ?std.fs.Dir = null;
     defer if (models_dir) |*m| m.close();
     for (args, 0..) |arg, i| {
         if (std.mem.eql(u8, "--help", arg) or
@@ -31,7 +31,7 @@ pub fn main() anyerror!void {
         if (std.mem.eql(u8, "--output", arg))
             output_dir = try output_dir.makeOpenPath(args[i + 1], .{});
         if (std.mem.eql(u8, "--models", arg))
-            models_dir = try std.fs.cwd().openIterableDir(args[i + 1], .{});
+            models_dir = try std.fs.cwd().openDir(args[i + 1], .{ .iterate = true });
     }
     // TODO: Seems like we should remove this in favor of a package
     try output_dir.writeFile("json.zig", json_zig);
@@ -75,7 +75,7 @@ pub fn main() anyerror!void {
         defer cwd.close();
         defer cwd.setAsCwd() catch unreachable;
 
-        try m.dir.setAsCwd();
+        try m.setAsCwd();
         try processDirectories(m, output_dir);
     }
 }
@@ -87,7 +87,7 @@ const OutputManifest = struct {
     model_dir_hash_digest: [Hasher.hex_multihash_len]u8,
     output_dir_hash_digest: [Hasher.hex_multihash_len]u8,
 };
-fn processDirectories(models_dir: std.fs.IterableDir, output_dir: std.fs.Dir) !void {
+fn processDirectories(models_dir: std.fs.Dir, output_dir: std.fs.Dir) !void {
     // Let's get ready to hash!!
     var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
     defer arena.deinit();
@@ -131,15 +131,15 @@ fn processDirectories(models_dir: std.fs.IterableDir, output_dir: std.fs.Dir) !v
 }
 
 var model_digest: ?[Hasher.hex_multihash_len]u8 = null;
-fn calculateDigests(models_dir: std.fs.IterableDir, output_dir: std.fs.Dir, thread_pool: *std.Thread.Pool) !OutputManifest {
+fn calculateDigests(models_dir: std.fs.Dir, output_dir: std.fs.Dir, thread_pool: *std.Thread.Pool) !OutputManifest {
     const model_hash = if (model_digest) |m| m[0..Hasher.digest_len].* else try Hasher.computeDirectoryHash(thread_pool, models_dir, @constCast(&Hasher.ComputeDirectoryOptions{
         .isIncluded = struct {
-            pub fn include(entry: std.fs.IterableDir.Walker.WalkerEntry) bool {
+            pub fn include(entry: std.fs.Dir.Walker.WalkerEntry) bool {
                 return std.mem.endsWith(u8, entry.basename, ".json");
             }
         }.include,
         .isExcluded = struct {
-            pub fn exclude(entry: std.fs.IterableDir.Walker.WalkerEntry) bool {
+            pub fn exclude(entry: std.fs.Dir.Walker.WalkerEntry) bool {
                 _ = entry;
                 return false;
             }
@@ -148,14 +148,14 @@ fn calculateDigests(models_dir: std.fs.IterableDir, output_dir: std.fs.Dir, thre
     }));
     if (verbose) std.log.info("Model directory hash: {s}", .{model_digest orelse Hasher.hexDigest(model_hash)});
 
-    const output_hash = try Hasher.computeDirectoryHash(thread_pool, try output_dir.openIterableDir(".", .{}), @constCast(&Hasher.ComputeDirectoryOptions{
+    const output_hash = try Hasher.computeDirectoryHash(thread_pool, try output_dir.openDir(".", .{ .iterate = true }), @constCast(&Hasher.ComputeDirectoryOptions{
         .isIncluded = struct {
-            pub fn include(entry: std.fs.IterableDir.Walker.WalkerEntry) bool {
+            pub fn include(entry: std.fs.Dir.Walker.WalkerEntry) bool {
                 return std.mem.endsWith(u8, entry.basename, ".zig");
             }
         }.include,
         .isExcluded = struct {
-            pub fn exclude(entry: std.fs.IterableDir.Walker.WalkerEntry) bool {
+            pub fn exclude(entry: std.fs.Dir.Walker.WalkerEntry) bool {
                 _ = entry;
                 return false;
             }
@@ -249,12 +249,22 @@ fn addReference(id: []const u8, map: *std.StringHashMap(u64)) !void {
 }
 fn countAllReferences(shape_ids: [][]const u8, shapes: std.StringHashMap(smithy.ShapeInfo), shape_references: *std.StringHashMap(u64), stack: *std.ArrayList([]const u8)) anyerror!void {
     for (shape_ids) |id| {
-        try countReferences(shapes.get(id).?, shapes, shape_references, stack);
+        const shape = shapes.get(id);
+        if (shape == null) {
+            std.log.err("Error - could not find shape with id {s}", .{id});
+            return error.ShapeNotFound;
+        }
+        try countReferences(shape.?, shapes, shape_references, stack);
     }
 }
 fn countTypeMembersReferences(type_members: []smithy.TypeMember, shapes: std.StringHashMap(smithy.ShapeInfo), shape_references: *std.StringHashMap(u64), stack: *std.ArrayList([]const u8)) anyerror!void {
     for (type_members) |m| {
-        try countReferences(shapes.get(m.target).?, shapes, shape_references, stack);
+        const target = shapes.get(m.target);
+        if (target == null) {
+            std.log.err("Error - could not find target {s}", .{m.target});
+            return error.TargetNotFound;
+        }
+        try countReferences(target.?, shapes, shape_references, stack);
     }
 }
 
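The hunk above replaces hard `.?` unwraps, which panic when a shape id is missing, with a logged, recoverable error. A self-contained sketch of the same idea, using a hypothetical `ShapeInfo` stand-in for the repo's smithy type:

```zig
const std = @import("std");

const ShapeInfo = struct { id: []const u8 }; // stand-in for smithy.ShapeInfo

// Returning an error instead of unwrapping turns a malformed model file
// into a diagnosable failure rather than a runtime panic.
fn lookupShape(shapes: std.StringHashMap(ShapeInfo), id: []const u8) !ShapeInfo {
    return shapes.get(id) orelse {
        std.log.err("could not find shape with id {s}", .{id});
        return error.ShapeNotFound;
    };
}

test "missing ids are reported, not panicked on" {
    var shapes = std.StringHashMap(ShapeInfo).init(std.testing.allocator);
    defer shapes.deinit();
    try std.testing.expectError(error.ShapeNotFound, lookupShape(shapes, "com.example#Missing"));
}
```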
@@ -285,6 +295,7 @@ fn countReferences(shape: smithy.ShapeInfo, shapes: std.StringHashMap(smithy.Sha
         .bigInteger,
         .bigDecimal,
         .timestamp,
+        .unit,
         => {},
         .document, .member, .resource => {}, // less sure about these?
         .list => |i| try countReferences(shapes.get(i.member_target).?, shapes, shape_references, stack),
@@ -297,10 +308,25 @@ fn countReferences(shape: smithy.ShapeInfo, shapes: std.StringHashMap(smithy.Sha
         .uniontype => |m| try countTypeMembersReferences(m.members, shapes, shape_references, stack),
         .service => |i| try countAllReferences(i.operations, shapes, shape_references, stack),
         .operation => |op| {
-            if (op.input) |i| try countReferences(shapes.get(i).?, shapes, shape_references, stack);
-            if (op.output) |i| try countReferences(shapes.get(i).?, shapes, shape_references, stack);
+            if (op.input) |i| {
+                const val = shapes.get(i);
+                if (val == null) {
+                    std.log.err("Error processing shape with id \"{s}\". Input shape \"{s}\" was not found", .{ shape.id, i });
+                    return error.ShapeNotFound;
+                }
+                try countReferences(val.?, shapes, shape_references, stack);
+            }
+            if (op.output) |i| {
+                const val = shapes.get(i);
+                if (val == null) {
+                    std.log.err("Error processing shape with id \"{s}\". Output shape \"{s}\" was not found", .{ shape.id, i });
+                    return error.ShapeNotFound;
+                }
+                try countReferences(val.?, shapes, shape_references, stack);
+            }
             if (op.errors) |i| try countAllReferences(i, shapes, shape_references, stack);
         },
+        .@"enum" => |m| try countTypeMembersReferences(m.members, shapes, shape_references, stack),
     }
 }
 
@@ -347,8 +373,8 @@ fn generateServices(allocator: std.mem.Allocator, comptime _: []const u8, file:
         var sdk_id: []const u8 = undefined;
         const version: []const u8 = service.shape.service.version;
         const name: []const u8 = service.name;
-        var arn_namespace: []const u8 = undefined;
-        var sigv4_name: []const u8 = undefined;
+        var arn_namespace: ?[]const u8 = undefined;
+        var sigv4_name: ?[]const u8 = null;
         var endpoint_prefix: []const u8 = undefined;
         var aws_protocol: smithy.AwsProtocol = undefined;
         for (service.shape.service.traits) |trait| {
@@ -364,6 +390,11 @@ fn generateServices(allocator: std.mem.Allocator, comptime _: []const u8, file:
                 else => {},
             }
         }
+        if (sigv4_name == null) {
+            // This is true for CodeCatalyst, that operates a bit differently
+            std.log.debug("No sigv4 name found. Service '{s}' cannot be accessed via standard methods. Skipping", .{name});
+            continue;
+        }
 
         // Service struct
         // name of the field will be snake_case of whatever comes in from
@@ -373,18 +404,22 @@ fn generateServices(allocator: std.mem.Allocator, comptime _: []const u8, file:
         try writer.print("const Self = @This();\n", .{});
         try writer.print("pub const version: []const u8 = \"{s}\";\n", .{version});
         try writer.print("pub const sdk_id: []const u8 = \"{s}\";\n", .{sdk_id});
-        try writer.print("pub const arn_namespace: []const u8 = \"{s}\";\n", .{arn_namespace});
+        if (arn_namespace) |a| {
+            try writer.print("pub const arn_namespace: ?[]const u8 = \"{s}\";\n", .{a});
+        } else try writer.print("pub const arn_namespace: ?[]const u8 = null;\n", .{});
         try writer.print("pub const endpoint_prefix: []const u8 = \"{s}\";\n", .{endpoint_prefix});
-        try writer.print("pub const sigv4_name: []const u8 = \"{s}\";\n", .{sigv4_name});
+        try writer.print("pub const sigv4_name: []const u8 = \"{s}\";\n", .{sigv4_name.?});
         try writer.print("pub const name: []const u8 = \"{s}\";\n", .{name});
         // TODO: This really should just be ".whatevs". We're fully qualifying here, which isn't typical
         try writer.print("pub const aws_protocol: smithy.AwsProtocol = {};\n\n", .{aws_protocol});
         _ = try writer.write("pub const service_metadata: struct {\n");
         try writer.print(" version: []const u8 = \"{s}\",\n", .{version});
         try writer.print(" sdk_id: []const u8 = \"{s}\",\n", .{sdk_id});
-        try writer.print(" arn_namespace: []const u8 = \"{s}\",\n", .{arn_namespace});
+        if (arn_namespace) |a| {
+            try writer.print(" arn_namespace: ?[]const u8 = \"{s}\",\n", .{a});
+        } else try writer.print(" arn_namespace: ?[]const u8 = null,\n", .{});
         try writer.print(" endpoint_prefix: []const u8 = \"{s}\",\n", .{endpoint_prefix});
-        try writer.print(" sigv4_name: []const u8 = \"{s}\",\n", .{sigv4_name});
+        try writer.print(" sigv4_name: []const u8 = \"{s}\",\n", .{sigv4_name.?});
         try writer.print(" name: []const u8 = \"{s}\",\n", .{name});
         // TODO: This really should just be ".whatevs". We're fully qualifying here, which isn't typical
         try writer.print(" aws_protocol: smithy.AwsProtocol = {},\n", .{aws_protocol});
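Since the generated source now has to express "no ARN namespace" as a typed `null`, the emitter branches on the optional before printing. A small self-contained sketch of that emit pattern (`emitOptionalDecl` is a hypothetical helper, not code from this repo):

```zig
const std = @import("std");

// Emit either `pub const name: ?[]const u8 = "value";` or `... = null;`
// depending on whether the trait was present in the model.
fn emitOptionalDecl(writer: anytype, name: []const u8, value: ?[]const u8) !void {
    if (value) |v| {
        try writer.print("pub const {s}: ?[]const u8 = \"{s}\";\n", .{ name, v });
    } else {
        try writer.print("pub const {s}: ?[]const u8 = null;\n", .{name});
    }
}

test "optional decls render both ways" {
    var buf = std.ArrayList(u8).init(std.testing.allocator);
    defer buf.deinit();
    try emitOptionalDecl(buf.writer(), "arn_namespace", "sts");
    try emitOptionalDecl(buf.writer(), "arn_namespace", null);
    try std.testing.expectEqualStrings(
        "pub const arn_namespace: ?[]const u8 = \"sts\";\n" ++
            "pub const arn_namespace: ?[]const u8 = null;\n",
        buf.items,
    );
}
```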
@@ -496,20 +531,26 @@ fn generateOperation(allocator: std.mem.Allocator, operation: smithy.ShapeInfo,
     try writer.print("action_name: []const u8 = \"{s}\",\n", .{operation.name});
     try outputIndent(state, writer);
     _ = try writer.write("Request: type = ");
-    if (operation.shape.operation.input) |member| {
+    if (operation.shape.operation.input == null or
+        (try shapeInfoForId(operation.shape.operation.input.?, state)).shape == .unit)
+    {
+        _ = try writer.write("struct {\n");
+        try generateMetadataFunction(operation_name, state, writer);
+    } else if (operation.shape.operation.input) |member| {
         if (try generateTypeFor(member, writer, state, false)) unreachable; // we expect only structs here
         _ = try writer.write("\n");
         try generateMetadataFunction(operation_name, state, writer);
-    } else {
-        _ = try writer.write("struct {\n");
-        try generateMetadataFunction(operation_name, state, writer);
     }
     _ = try writer.write(",\n");
     try outputIndent(state, writer);
     _ = try writer.write("Response: type = ");
-    if (operation.shape.operation.output) |member| {
+    if (operation.shape.operation.output == null or
+        (try shapeInfoForId(operation.shape.operation.output.?, state)).shape == .unit)
+    {
+        _ = try writer.write("struct {}"); // we want to maintain consistency with other ops
+    } else if (operation.shape.operation.output) |member| {
         if (try generateTypeFor(member, writer, state, true)) unreachable; // we expect only structs here
-    } else _ = try writer.write("struct {}"); // we want to maintain consistency with other ops
+    }
     _ = try writer.write(",\n");
 
     if (operation.shape.operation.errors) |errors| {
@@ -589,16 +630,21 @@ fn reuseCommonType(shape: smithy.ShapeInfo, writer: anytype, state: GenerationSt
     }
     return rc;
 }
+fn shapeInfoForId(id: []const u8, state: GenerationState) !smithy.ShapeInfo {
+    return state.file_state.shapes.get(id) orelse {
+        std.debug.print("Shape ID not found. This is most likely a bug. Shape ID: {s}\n", .{id});
+        return error.InvalidType;
+    };
+}
 
 /// return type is anyerror!void as this is a recursive function, so the compiler cannot properly infer error types
 fn generateTypeFor(shape_id: []const u8, writer: anytype, state: GenerationState, end_structure: bool) anyerror!bool {
     var rc = false;
 
     // We assume it must exist
-    const shape_info = state.file_state.shapes.get(shape_id) orelse {
-        std.debug.print("Shape ID not found. This is most likely a bug. Shape ID: {s}\n", .{shape_id});
-        return error.InvalidType;
-    };
+    const shape_info = try shapeInfoForId(shape_id, state);
     const shape = shape_info.shape;
 
     // Check for ourselves up the stack
     var self_occurences: u8 = 0;
     for (state.type_stack.items) |i| {
@@ -653,7 +699,12 @@ fn generateTypeFor(shape_id: []const u8, writer: anytype, state: GenerationState
                 _ = try writer.write("}");
             }
         },
+        // Document is unstructured data, so bag of bytes it is
+        // https://smithy.io/2.0/spec/simple-types.html#document
+        .document => |s| try generateSimpleTypeFor(s, "[]const u8", writer),
         .string => |s| try generateSimpleTypeFor(s, "[]const u8", writer),
+        .unit => |s| try generateSimpleTypeFor(s, "struct {}", writer), // Would be better as void, but doing so creates inconsistency we don't want clients to have to deal with
+        .@"enum" => |s| try generateSimpleTypeFor(s, "[]const u8", writer), // This should be closer to uniontype, but the generated code will look ugly, and Smithy 2.0 requires that enums are open (clients accept unspecified values). So string is the best analog
         .integer => |s| try generateSimpleTypeFor(s, "i64", writer),
         .list => {
             _ = try writer.write("[]");
@@ -908,5 +959,8 @@ fn avoidReserved(snake_name: []const u8) []const u8 {
     if (std.mem.eql(u8, snake_name, "or")) return "@\"or\"";
     if (std.mem.eql(u8, snake_name, "test")) return "@\"test\"";
     if (std.mem.eql(u8, snake_name, "null")) return "@\"null\"";
+    if (std.mem.eql(u8, snake_name, "export")) return "@\"export\"";
+    if (std.mem.eql(u8, snake_name, "union")) return "@\"union\"";
+    if (std.mem.eql(u8, snake_name, "enum")) return "@\"enum\"";
    return snake_name;
 }
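The `@"..."` escapes above are Zig's raw-identifier syntax, which lets generated fields keep names that collide with keywords. A quick illustrative test (not from this repo):

```zig
const std = @import("std");

test "keywords survive as identifiers via @\"...\"" {
    const S = struct {
        @"export": bool = true,
        @"union": u8 = 0,
        @"enum": []const u8 = "open",
    };
    const s = S{};
    try std.testing.expect(s.@"export");
    try std.testing.expectEqualStrings("open", s.@"enum");
}
```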
@@ -28,10 +28,22 @@ pub fn fromPascalCase(allocator: std.mem.Allocator, name: []const u8) ![]u8 {
             // prev_codepoint/ascii_prev_char (and target_inx)
             target_inx = setNext(lowercase(curr_char), rc, target_inx);
             target_inx = setNext('_', rc, target_inx);
-            curr_char = (try isAscii(utf8_name.nextCodepoint())).?;
+            var maybe_curr_char = (try isAscii(utf8_name.nextCodepoint()));
+            if (maybe_curr_char == null) {
+                std.log.err("Error on fromPascalCase processing name '{s}'", .{name});
+            }
+            curr_char = maybe_curr_char.?;
+            maybe_curr_char = (try isAscii(utf8_name.nextCodepoint()));
+            if (maybe_curr_char == null) {
+                // We have reached the end of the string (e.g. "Resource Explorer 2")
+                // We need to do this check before we setNext, so that we don't
+                // end up duplicating the last character
+                break;
+                // std.log.err("Error on fromPascalCase processing name '{s}', curr_char = '{}'", .{ name, curr_char });
+            }
             target_inx = setNext(lowercase(curr_char), rc, target_inx);
             prev_char = curr_char;
-            curr_char = (try isAscii(utf8_name.nextCodepoint())).?;
+            curr_char = maybe_curr_char.?;
             continue;
         }
         if (between(curr_char, 'A', 'Z')) {
@@ -60,6 +72,7 @@ pub fn fromPascalCase(allocator: std.mem.Allocator, name: []const u8) ![]u8 {
     target_inx = setNext(lowercase(curr_char), rc, target_inx);
 
     rc[target_inx] = 0;
+    _ = allocator.resize(rc, target_inx);
     return rc[0..target_inx];
 }
 
@@ -134,3 +147,11 @@ test "IoT 1Click Devices Service" {
     // turn into. Should it be iot_1click_... or iot_1_click...?
     try expectEqualStrings("iot_1_click_devices_service", snake_case);
 }
+test "Resource Explorer 2" {
+    const allocator = std.testing.allocator;
+    const snake_case = try fromPascalCase(allocator, "Resource Explorer 2");
+    defer allocator.free(snake_case);
+    // NOTE: There is some debate amoung humans about what this should
+    // turn into. Should it be iot_1click_... or iot_1_click...?
+    try expectEqualStrings("resource_explorer_2", snake_case);
+}
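The fix hinges on `nextCodepoint()` returning null at end of input; peeking before consuming avoids both the duplicated last character and the `.?` panic. A standalone illustration of the iterator behavior (not repo code):

```zig
const std = @import("std");

test "nextCodepoint returns null at end of input instead of panicking" {
    var it = (try std.unicode.Utf8View.init("A2")).iterator();
    try std.testing.expectEqual(@as(?u21, 'A'), it.nextCodepoint());
    try std.testing.expectEqual(@as(?u21, '2'), it.nextCodepoint());
    // A third call simply yields null; unwrapping it with `.?` would panic.
    try std.testing.expectEqual(@as(?u21, null), it.nextCodepoint());
}
```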
353
src/aws.zig
@@ -31,7 +31,7 @@ pub const services = servicemodel.services;
 pub const Services = servicemodel.Services;
 
 pub const ClientOptions = struct {
-    proxy: ?std.http.Client.HttpProxy = null,
+    proxy: ?std.http.Client.Proxy = null,
 };
 pub const Client = struct {
     allocator: std.mem.Allocator,
@@ -226,7 +226,7 @@ pub fn Request(comptime request_action: anytype) type {
             defer buffer.deinit();
             const writer = buffer.writer();
             try url.encode(options.client.allocator, request, writer, .{
                .field_name_transformer = queryFieldTransformer,
             });
             const continuation = if (buffer.items.len > 0) "&" else "";
 
@@ -457,6 +457,30 @@ pub fn Request(comptime request_action: anytype) type {
             }
         }
 
+        fn findResult(element: *xml_shaper.Element, options: xml_shaper.ParseOptions) *xml_shaper.Element {
+            _ = options;
+            // We're looking for a very specific pattern here. We want only two direct
+            // children. The first one must end with "Result", and the second should
+            // be our ResponseMetadata node
+            var children = element.elements();
+            var found_metadata = false;
+            var result_child: ?*xml_shaper.Element = null;
+            var inx: usize = 0;
+            while (children.next()) |child| : (inx += 1) {
+                if (std.mem.eql(u8, child.tag, "ResponseMetadata")) {
+                    found_metadata = true;
+                    continue;
+                }
+                if (std.mem.endsWith(u8, child.tag, "Result")) {
+                    result_child = child;
+                    continue;
+                }
+                if (inx > 1) return element;
+                return element; // It should only be those two
+            }
+            return result_child orelse element;
+        }
+
         fn xmlReturn(request: awshttp.HttpRequest, options: Options, result: awshttp.HttpResult) !FullResponseType {
             // Server shape be all like:
             //
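Distilled, the selection rule is: if the root has exactly one `*Result` child plus an optional `ResponseMetadata` sibling, parse from the Result child; anything else falls back to the root. A self-contained sketch over a toy `Element` type (the real `xml_shaper.Element` is internal to this repo):

```zig
const std = @import("std");

const Element = struct {
    tag: []const u8,
    children: []const *Element = &.{},
};

fn pickResult(root: *const Element) *const Element {
    var result_child: ?*const Element = null;
    var extras: usize = 0;
    for (root.children) |child| {
        if (std.mem.eql(u8, child.tag, "ResponseMetadata")) continue;
        if (std.mem.endsWith(u8, child.tag, "Result")) {
            result_child = child;
            continue;
        }
        extras += 1; // anything else means the shape is not what we expect
    }
    if (extras > 0) return root;
    return result_child orelse root;
}

test "GetCallerIdentityResponse unwraps to its Result child" {
    var result = Element{ .tag = "GetCallerIdentityResult" };
    var meta = Element{ .tag = "ResponseMetadata" };
    var root = Element{ .tag = "GetCallerIdentityResponse", .children = &.{ &result, &meta } };
    try std.testing.expectEqualStrings("GetCallerIdentityResult", pickResult(&root).tag);
}
```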
@@ -481,7 +505,7 @@ pub fn Request(comptime request_action: anytype) type {
             // }
             //
             // Big thing is that requestid, which we'll need to fetch "manually"
-            const xml_options = xml_shaper.ParseOptions{ .allocator = options.client.allocator };
+            const xml_options = xml_shaper.ParseOptions{ .allocator = options.client.allocator, .elementToParse = findResult };
             var body: []const u8 = result.body;
             var free_body = false;
             if (result.body.len < 20) {
@@ -710,7 +734,7 @@ fn headersFor(allocator: std.mem.Allocator, request: anytype) ![]awshttp.Header
     return headers.toOwnedSlice();
 }
 
-fn freeHeadersFor(allocator: std.mem.Allocator, request: anytype, headers: []awshttp.Header) void {
+fn freeHeadersFor(allocator: std.mem.Allocator, request: anytype, headers: []const awshttp.Header) void {
     if (!@hasDecl(@TypeOf(request), "http_header")) return;
     const http_header = @TypeOf(request).http_header;
     const fields = std.meta.fields(@TypeOf(http_header));
@@ -737,7 +761,7 @@ fn firstJsonKey(data: []const u8) []const u8 {
     log.debug("First json key: {s}", .{key});
     return key;
 }
-fn isJsonResponse(headers: []awshttp.Header) !bool {
+fn isJsonResponse(headers: []const awshttp.Header) !bool {
     // EC2 ignores our accept type, but technically query protocol only
     // returns XML as well. So, we'll ignore the protocol here and just
     // look at the return type
@@ -895,8 +919,7 @@ fn safeFree(allocator: std.mem.Allocator, obj: anytype) void {
         else => {},
     }
 }
-fn queryFieldTransformer(allocator: std.mem.Allocator, field_name: []const u8, options: url.EncodingOptions) anyerror![]const u8 {
-    _ = options;
+fn queryFieldTransformer(allocator: std.mem.Allocator, field_name: []const u8) anyerror![]const u8 {
     return try case.snakeToPascal(allocator, field_name);
 }
 
@@ -1339,16 +1362,17 @@ test {
 }
 const TestOptions = struct {
     allocator: std.mem.Allocator,
+    arena: ?*std.heap.ArenaAllocator = null,
     server_port: ?u16 = null,
     server_remaining_requests: usize = 1,
     server_response: []const u8 = "unset",
     server_response_status: std.http.Status = .ok,
-    server_response_headers: [][2][]const u8 = &[_][2][]const u8{},
+    server_response_headers: []const std.http.Header = &.{},
     server_response_transfer_encoding: ?std.http.TransferEncoding = null,
     request_body: []u8 = "",
     request_method: std.http.Method = undefined,
     request_target: []const u8 = undefined,
-    request_headers: *std.http.Headers = undefined,
+    request_headers: []std.http.Header = undefined,
     test_server_runtime_uri: ?[]u8 = null,
     server_ready: bool = false,
     requests_processed: usize = 0,
@@ -1356,7 +1380,7 @@ const TestOptions = struct {
     const Self = @This();
 
     fn expectHeader(self: *Self, name: []const u8, value: []const u8) !void {
-        for (self.request_headers.list.items) |h|
+        for (self.request_headers) |h|
             if (std.ascii.eqlIgnoreCase(name, h.name) and
                 std.mem.eql(u8, value, h.value)) return;
         return error.HeaderOrValueNotFound;
@@ -1367,17 +1391,6 @@ const TestOptions = struct {
         while (!self.server_ready)
             std.time.sleep(100);
     }
-
-    fn deinit(self: Self) void {
-        if (self.requests_processed > 0) {
-            self.allocator.free(self.request_body);
-            self.allocator.free(self.request_target);
-            self.request_headers.deinit();
-            self.allocator.destroy(self.request_headers);
-        }
-        if (self.test_server_runtime_uri) |_|
-            self.allocator.free(self.test_server_runtime_uri.?);
-    }
 };
 
 /// This starts a test server. We're not testing the server itself,
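The harness drops its ad-hoc `[2][]const u8` pairs for the standard header type. In the Zig 0.12-era std, `std.http.Header` is a plain name/value struct, so a fixed header set can be a const slice literal (values here are illustrative):

```zig
const std = @import("std");

const headers: []const std.http.Header = &.{
    .{ .name = "Content-Type", .value = "application/json" },
    .{ .name = "x-amzn-RequestId", .value = "00000000-0000-0000-0000-000000000000" },
};

test "headers are plain name/value pairs" {
    try std.testing.expectEqualStrings("Content-Type", headers[0].name);
}
```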
@@ -1385,16 +1398,19 @@ const TestOptions = struct {
 /// whole thing so we can just deallocate everything at once at the end,
 /// leaks be damned
 fn threadMain(options: *TestOptions) !void {
-    var server = std.http.Server.init(options.allocator, .{ .reuse_address = true });
-    // defer server.deinit();
+    // https://github.com/ziglang/zig/blob/d2be725e4b14c33dbd39054e33d926913eee3cd4/lib/compiler/std-docs.zig#L22-L54
+    options.arena = try options.allocator.create(std.heap.ArenaAllocator);
+    options.arena.?.* = std.heap.ArenaAllocator.init(options.allocator);
+    const allocator = options.arena.?.allocator();
+    options.allocator = allocator;
+
     const address = try std.net.Address.parseIp("127.0.0.1", 0);
-    try server.listen(address);
-    options.server_port = server.socket.listen_address.in.getPort();
+    var http_server = try address.listen(.{});
+    options.server_port = http_server.listen_address.in.getPort();
+    // TODO: remove
     options.test_server_runtime_uri = try std.fmt.allocPrint(options.allocator, "http://127.0.0.1:{d}", .{options.server_port.?});
     log.debug("server listening at {s}", .{options.test_server_runtime_uri.?});
-    defer server.deinit();
     log.info("starting server thread, tid {d}", .{std.Thread.getCurrentId()});
     // var arena = std.heap.ArenaAllocator.init(options.allocator);
     // defer arena.deinit();
@@ -1403,7 +1419,7 @@ fn threadMain(options: *TestOptions) !void {
     // when it's time to shut down
     while (options.server_remaining_requests > 0) {
         options.server_remaining_requests -= 1;
-        processRequest(options, &server) catch |e| {
+        processRequest(options, &http_server) catch |e| {
             log.err("Unexpected error processing request: {any}", .{e});
             if (@errorReturnTrace()) |trace| {
                 std.debug.dumpStackTrace(trace.*);
@@ -1412,76 +1428,63 @@ fn threadMain(options: *TestOptions) !void {
     }
 }
 
-fn processRequest(options: *TestOptions, server: *std.http.Server) !void {
+fn processRequest(options: *TestOptions, net_server: *std.net.Server) !void {
     options.server_ready = true;
     errdefer options.server_ready = false;
     log.debug(
         "tid {d} (server): server waiting to accept. requests remaining: {d}",
         .{ std.Thread.getCurrentId(), options.server_remaining_requests + 1 },
     );
-    var res = try server.accept(.{ .allocator = options.allocator });
-    options.server_ready = false;
-    defer res.deinit();
-    defer if (res.headers.owned and res.headers.list.items.len > 0) res.headers.deinit();
-    defer _ = res.reset();
-    try res.wait(); // wait for client to send a complete request head
-
-    const errstr = "Internal Server Error\n";
-    var errbuf: [errstr.len]u8 = undefined;
-    @memcpy(&errbuf, errstr);
-    var response_bytes: []const u8 = errbuf[0..];
+    var connection = try net_server.accept();
+    defer connection.stream.close();
+    var read_buffer: [1024 * 16]u8 = undefined;
+    var http_server = std.http.Server.init(connection, &read_buffer);
+    while (http_server.state == .ready) {
+        var request = http_server.receiveHead() catch |err| switch (err) {
+            error.HttpConnectionClosing => return,
+            else => {
+                std.log.err("closing http connection: {s}", .{@errorName(err)});
+                std.log.debug("Error occurred from this request: \n{s}", .{read_buffer[0..http_server.read_buffer_len]});
+                return;
+            },
+        };
+        try serveRequest(options, &request);
+    }
+}
+
+fn serveRequest(options: *TestOptions, request: *std.http.Server.Request) !void {
+    options.server_ready = false;
 
     options.requests_processed += 1;
-    if (res.request.content_length) |l|
-        options.request_body = try res.reader().readAllAlloc(options.allocator, @as(usize, @intCast(l)))
-    else
-        options.request_body = try options.allocator.dupe(u8, "");
-    options.request_method = res.request.method;
-    options.request_target = try options.allocator.dupe(u8, res.request.target);
-    options.request_headers = try options.allocator.create(std.http.Headers);
-    options.request_headers.allocator = options.allocator;
-    options.request_headers.list = .{};
-    options.request_headers.index = .{};
-    options.request_headers.owned = true;
-    for (res.request.headers.list.items) |f|
-        try options.request_headers.append(f.name, f.value);
+    options.request_body = try (try request.reader()).readAllAlloc(options.allocator, std.math.maxInt(usize));
+    options.request_method = request.head.method;
+    options.request_target = try options.allocator.dupe(u8, request.head.target);
+    var req_headers = std.ArrayList(std.http.Header).init(options.allocator);
+    defer req_headers.deinit();
+    var it = request.iterateHeaders();
+    while (it.next()) |f| {
+        const h = try options.allocator.create(std.http.Header);
+        h.* = .{ .name = try options.allocator.dupe(u8, f.name), .value = try options.allocator.dupe(u8, f.value) };
+        try req_headers.append(h.*);
+    }
+    options.request_headers = try req_headers.toOwnedSlice();
     log.debug(
         "tid {d} (server): {d} bytes read from request",
         .{ std.Thread.getCurrentId(), options.request_body.len },
     );
 
     // try response.headers.append("content-type", "text/plain");
-    response_bytes = serve(options, &res) catch |e| brk: {
-        res.status = .internal_server_error;
-        // TODO: more about this particular request
-        log.err("Unexpected error from executor processing request: {any}", .{e});
-        if (@errorReturnTrace()) |trace| {
-            std.debug.dumpStackTrace(trace.*);
-        }
-        break :brk "Unexpected error generating request to lambda";
-    };
-    if (options.server_response_transfer_encoding == null)
-        res.transfer_encoding = .{ .content_length = response_bytes.len }
-    else
-        res.transfer_encoding = .chunked;
-
-    try res.do();
-    _ = try res.writer().writeAll(response_bytes);
-    try res.finish();
+    try request.respond(options.server_response, .{
+        .status = options.server_response_status,
+        .extra_headers = options.server_response_headers,
+    });
     log.debug(
         "tid {d} (server): sent response",
         .{std.Thread.getCurrentId()},
     );
 }
 
-fn serve(options: *TestOptions, res: *std.http.Server.Response) ![]const u8 {
-    res.status = options.server_response_status;
-    for (options.server_response_headers) |h|
-        try res.headers.append(h[0], h[1]);
-    // try res.headers.append("content-length", try std.fmt.allocPrint(allocator, "{d}", .{server_response.len}));
-    return options.server_response;
-}
-
 ////////////////////////////////////////////////////////////////////////
 // These will replicate the tests that were in src/main.zig
 // The server_response and server_response_headers come from logs of
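For reference, the new shape of a minimal Zig 0.12-style test server loop, distilled from the diff above into a standalone sketch (function name and buffer size are illustrative):

```zig
const std = @import("std");

// Accept one connection and answer every request on it with a fixed body.
fn serveOnce(listener: *std.net.Server, body: []const u8) !void {
    var connection = try listener.accept();
    defer connection.stream.close();

    // The server borrows a caller-provided buffer for request heads.
    var read_buffer: [16 * 1024]u8 = undefined;
    var http = std.http.Server.init(connection, &read_buffer);
    while (http.state == .ready) {
        var request = http.receiveHead() catch |err| switch (err) {
            error.HttpConnectionClosing => return, // client hung up; not an error
            else => return err,
        };
        try request.respond(body, .{ .status = .ok });
    }
}
```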
@@ -1503,10 +1506,10 @@ const TestSetup = struct {
     const signing_time =
         date.dateTimeToTimestamp(date.parseIso8601ToDateTime("20230908T170252Z") catch @compileError("Cannot parse date")) catch @compileError("Cannot parse date");
 
-    fn init(allocator: std.mem.Allocator, options: TestOptions) Self {
+    fn init(options: TestOptions) Self {
         return .{
-            .allocator = allocator,
             .request_options = options,
+            .allocator = options.allocator,
         };
     }
 
@@ -1518,7 +1521,10 @@
         );
         self.started = true;
         try self.request_options.waitForReady();
+        // Not sure why we're getting sprayed here, but we have an arena allocator, and this
+        // is testing, so yolo
         awshttp.endpoint_override = self.request_options.test_server_runtime_uri;
+        log.debug("endpoint override set to {?s}", .{awshttp.endpoint_override});
         self.creds = aws_auth.Credentials.init(
             self.allocator,
             try self.allocator.dupe(u8, "ACCESS"),
@@ -1539,9 +1545,11 @@
         self.server_thread.join();
     }
 
-    fn deinit(self: Self) void {
-        self.request_options.deinit();
+    fn deinit(self: *Self) void {
+        if (self.request_options.arena) |a| {
+            a.deinit();
+            self.allocator.destroy(a);
+        }
         if (!self.started) return;
         awshttp.endpoint_override = null;
         // creds.deinit(); Creds will get deinited in the course of the call. We don't want to do it twice
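The harness now parks everything the server thread allocates in one heap-allocated arena and frees it wholesale in `deinit()`. A compact sketch of that ownership pattern, under illustrative names:

```zig
const std = @import("std");

test "heap-allocated arena owned by a long-lived struct" {
    const parent = std.testing.allocator;

    // The arena itself lives on the heap so any copy of the owning struct
    // can reach it; cleanup frees the arena's pages, then the arena object.
    const arena = try parent.create(std.heap.ArenaAllocator);
    arena.* = std.heap.ArenaAllocator.init(parent);
    defer {
        arena.deinit();
        parent.destroy(arena);
    }

    const allocator = arena.allocator();
    _ = try allocator.dupe(u8, "request body"); // no individual free needed
    _ = try allocator.dupe(u8, "request target");
}
```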
@@ -1552,15 +1560,15 @@
 
 test "query_no_input: sts getCallerIdentity comptime" {
     const allocator = std.testing.allocator;
-    var test_harness = TestSetup.init(allocator, .{
+    var test_harness = TestSetup.init(.{
         .allocator = allocator,
         .server_response =
         \\{"GetCallerIdentityResponse":{"GetCallerIdentityResult":{"Account":"123456789012","Arn":"arn:aws:iam::123456789012:user/admin","UserId":"AIDAYAM4POHXHRVANDQBQ"},"ResponseMetadata":{"RequestId":"8f0d54da-1230-40f7-b4ac-95015c4b84cd"}}}
         ,
-        .server_response_headers = @constCast(&[_][2][]const u8{
-            .{ "Content-Type", "application/json" },
-            .{ "x-amzn-RequestId", "8f0d54da-1230-40f7-b4ac-95015c4b84cd" },
-        }),
+        .server_response_headers = &.{
+            .{ .name = "Content-Type", .value = "application/json" },
+            .{ .name = "x-amzn-RequestId", .value = "8f0d54da-1230-40f7-b4ac-95015c4b84cd" },
+        },
     });
     defer test_harness.deinit();
     const options = try test_harness.start();
@@ -1584,23 +1592,31 @@ test "query_no_input: sts getCallerIdentity comptime" {
     try std.testing.expectEqualStrings("123456789012", call.response.account.?);
     try std.testing.expectEqualStrings("8f0d54da-1230-40f7-b4ac-95015c4b84cd", call.response_metadata.request_id);
 }
-test "query_with_input: sqs listQueues runtime" {
+test "query_with_input: sts getAccessKeyInfo runtime" {
+    // sqs switched from query to json in aws sdk for go v2 commit f5a08768ef820ff5efd62a49ba50c61c9ca5dbcb
     const allocator = std.testing.allocator;
-    var test_harness = TestSetup.init(allocator, .{
+    var test_harness = TestSetup.init(.{
         .allocator = allocator,
         .server_response =
-        \\{"ListQueuesResponse":{"ListQueuesResult":{"NextExclusiveStartQueueName":null,"NextToken":null,"queueUrls":null},"ResponseMetadata":{"RequestId":"a85e390b-b866-590e-8cae-645f2bbe59c5"}}}
+        \\<GetAccessKeyInfoResponse xmlns="https://sts.amazonaws.com/doc/2011-06-15/">
+        \\  <GetAccessKeyInfoResult>
+        \\    <Account>123456789012</Account>
+        \\  </GetAccessKeyInfoResult>
+        \\  <ResponseMetadata>
+        \\    <RequestId>ec85bf29-1ef0-459a-930e-6446dd14a286</RequestId>
+        \\  </ResponseMetadata>
+        \\</GetAccessKeyInfoResponse>
         ,
-        .server_response_headers = @constCast(&[_][2][]const u8{
-            .{ "Content-Type", "application/json" },
-            .{ "x-amzn-RequestId", "a85e390b-b866-590e-8cae-645f2bbe59c5" },
-        }),
+        .server_response_headers = &.{
+            .{ .name = "Content-Type", .value = "text/xml" },
+            .{ .name = "x-amzn-RequestId", .value = "ec85bf29-1ef0-459a-930e-6446dd14a286" },
+        },
     });
     defer test_harness.deinit();
     const options = try test_harness.start();
-    const sqs = (Services(.{.sqs}){}).sqs;
-    const call = try test_harness.client.call(sqs.list_queues.Request{
-        .queue_name_prefix = "s",
+    const sts = (Services(.{.sts}){}).sts;
+    const call = try test_harness.client.call(sts.get_access_key_info.Request{
+        .access_key_id = "ASIAYAM4POHXJNKTYFUN",
     }, options);
     defer call.deinit();
     test_harness.stop();
@@ -1608,24 +1624,24 @@
     try std.testing.expectEqual(std.http.Method.POST, test_harness.request_options.request_method);
     try std.testing.expectEqualStrings("/", test_harness.request_options.request_target);
     try std.testing.expectEqualStrings(
-        \\Action=ListQueues&Version=2012-11-05&QueueNamePrefix=s
+        \\Action=GetAccessKeyInfo&Version=2011-06-15&AccessKeyId=ASIAYAM4POHXJNKTYFUN
     , test_harness.request_options.request_body);
     // Response expectations
-    // TODO: We can get a lot better with this under test
-    try std.testing.expect(call.response.queue_urls == null);
-    try std.testing.expectEqualStrings("a85e390b-b866-590e-8cae-645f2bbe59c5", call.response_metadata.request_id);
+    try std.testing.expect(call.response.account != null);
+    try std.testing.expectEqualStrings("123456789012", call.response.account.?);
+    try std.testing.expectEqualStrings("ec85bf29-1ef0-459a-930e-6446dd14a286", call.response_metadata.request_id);
 }
 test "json_1_0_query_with_input: dynamodb listTables runtime" {
     const allocator = std.testing.allocator;
-    var test_harness = TestSetup.init(allocator, .{
+    var test_harness = TestSetup.init(.{
         .allocator = allocator,
         .server_response =
         \\{"LastEvaluatedTableName":"Customer","TableNames":["Customer"]}
         ,
-        .server_response_headers = @constCast(&[_][2][]const u8{
-            .{ "Content-Type", "application/json" },
-            .{ "x-amzn-RequestId", "QBI72OUIN8U9M9AG6PCSADJL4JVV4KQNSO5AEMVJF66Q9ASUAAJG" },
-        }),
+        .server_response_headers = &.{
+            .{ .name = "Content-Type", .value = "application/json" },
+            .{ .name = "x-amzn-RequestId", .value = "QBI72OUIN8U9M9AG6PCSADJL4JVV4KQNSO5AEMVJF66Q9ASUAAJG" },
+        },
     });
     defer test_harness.deinit();
     const options = try test_harness.start();
@@ -1653,15 +1669,15 @@
 
 test "json_1_0_query_no_input: dynamodb listTables runtime" {
     const allocator = std.testing.allocator;
-    var test_harness = TestSetup.init(allocator, .{
+    var test_harness = TestSetup.init(.{
         .allocator = allocator,
         .server_response =
         \\{"AccountMaxReadCapacityUnits":80000,"AccountMaxWriteCapacityUnits":80000,"TableMaxReadCapacityUnits":40000,"TableMaxWriteCapacityUnits":40000}
         ,
-        .server_response_headers = @constCast(&[_][2][]const u8{
-            .{ "Content-Type", "application/json" },
-            .{ "x-amzn-RequestId", "QBI72OUIN8U9M9AG6PCSADJL4JVV4KQNSO5AEMVJF66Q9ASUAAJG" },
-        }),
+        .server_response_headers = &.{
+            .{ .name = "Content-Type", .value = "application/json" },
+            .{ .name = "x-amzn-RequestId", .value = "QBI72OUIN8U9M9AG6PCSADJL4JVV4KQNSO5AEMVJF66Q9ASUAAJG" },
+        },
     });
     defer test_harness.deinit();
     const options = try test_harness.start();
@@ -1682,15 +1698,15 @@
 }
 test "json_1_1_query_with_input: ecs listClusters runtime" {
     const allocator = std.testing.allocator;
-    var test_harness = TestSetup.init(allocator, .{
+    var test_harness = TestSetup.init(.{
         .allocator = allocator,
         .server_response =
         \\{"clusterArns":["arn:aws:ecs:us-west-2:550620852718:cluster/web-applicationehjaf-cluster"],"nextToken":"czE0Og=="}
         ,
-        .server_response_headers = @constCast(&[_][2][]const u8{
-            .{ "Content-Type", "application/json" },
-            .{ "x-amzn-RequestId", "b2420066-ff67-4237-b782-721c4df60744" },
-        }),
+        .server_response_headers = &.{
+            .{ .name = "Content-Type", .value = "application/json" },
+            .{ .name = "x-amzn-RequestId", .value = "b2420066-ff67-4237-b782-721c4df60744" },
+        },
     });
     defer test_harness.deinit();
     const options = try test_harness.start();
@@ -1716,16 +1732,19 @@
     try std.testing.expectEqualStrings("arn:aws:ecs:us-west-2:550620852718:cluster/web-applicationehjaf-cluster", call.response.cluster_arns.?[0]);
 }
 test "json_1_1_query_no_input: ecs listClusters runtime" {
+    // const old = std.testing.log_level;
+    // defer std.testing.log_level = old;
+    // std.testing.log_level = .debug;
     const allocator = std.testing.allocator;
-    var test_harness = TestSetup.init(allocator, .{
+    var test_harness = TestSetup.init(.{
         .allocator = allocator,
         .server_response =
         \\{"clusterArns":["arn:aws:ecs:us-west-2:550620852718:cluster/web-applicationehjaf-cluster"],"nextToken":"czE0Og=="}
         ,
-        .server_response_headers = @constCast(&[_][2][]const u8{
-            .{ "Content-Type", "application/json" },
-            .{ "x-amzn-RequestId", "e65322b2-0065-45f2-ba37-f822bb5ce395" },
-        }),
+        .server_response_headers = &.{
+            .{ .name = "Content-Type", .value = "application/json" },
+            .{ .name = "x-amzn-RequestId", .value = "e65322b2-0065-45f2-ba37-f822bb5ce395" },
+        },
     });
     defer test_harness.deinit();
     const options = try test_harness.start();
@ -1750,15 +1769,15 @@ test "json_1_1_query_no_input: ecs listClusters runtime" {
|
||||||
}
|
}
|
||||||
test "rest_json_1_query_with_input: lambda listFunctions runtime" {
|
test "rest_json_1_query_with_input: lambda listFunctions runtime" {
|
||||||
const allocator = std.testing.allocator;
|
const allocator = std.testing.allocator;
|
||||||
var test_harness = TestSetup.init(allocator, .{
|
var test_harness = TestSetup.init(.{
|
||||||
.allocator = allocator,
|
.allocator = allocator,
|
||||||
.server_response =
|
.server_response =
|
||||||
\\{"Functions":[{"Description":"AWS CDK resource provider framework - onEvent (DevelopmentFrontendStack-g650u/com.amazonaws.cdk.custom-resources.amplify-asset-deployment-provider/amplify-asset-deployment-handler-provider)","TracingConfig":{"Mode":"PassThrough"},"VpcConfig":null,"SigningJobArn":null,"SnapStart":{"OptimizationStatus":"Off","ApplyOn":"None"},"RevisionId":"0c62fc74-a692-403d-9206-5fcbad406424","LastModified":"2023-03-01T18:13:15.704+0000","FileSystemConfigs":null,"FunctionName":"DevelopmentFrontendStack--amplifyassetdeploymentha-aZqB9IbZLIKU","Runtime":"nodejs14.x","Version":"$LATEST","PackageType":"Zip","LastUpdateStatus":null,"Layers":null,"FunctionArn":"arn:aws:lambda:us-west-2:550620852718:function:DevelopmentFrontendStack--amplifyassetdeploymentha-aZqB9IbZLIKU","KMSKeyArn":null,"MemorySize":128,"ImageConfigResponse":null,"LastUpdateStatusReason":null,"DeadLetterConfig":null,"Timeout":900,"Handler":"framework.onEvent","CodeSha256":"m4tt+M0l3p8bZvxIDj83dwGrwRW6atCfS/q8AiXCD3o=","Role":"arn:aws:iam::550620852718:role/DevelopmentFrontendStack-amplifyassetdeploymentha-1782JF7WAPXZ3","SigningProfileVersionArn":null,"MasterArn":null,"RuntimeVersionConfig":null,"CodeSize":4307,"State":null,"StateReason":null,"Environment":{"Variables":{"USER_ON_EVENT_FUNCTION_ARN":"arn:aws:lambda:us-west-2:550620852718:function:DevelopmentFrontendStack--amplifyassetdeploymenton-X9iZJSCSPYDH","WAITER_STATE_MACHINE_ARN":"arn:aws:states:us-west-2:550620852718:stateMachine:amplifyassetdeploymenthandlerproviderwaiterstatemachineB3C2FCBE-Ltggp5wBcHWO","USER_IS_COMPLETE_FUNCTION_ARN":"arn:aws:lambda:us-west-2:550620852718:function:DevelopmentFrontendStack--amplifyassetdeploymentis-jaHopLrSSARV"},"Error":null},"EphemeralStorage":{"Size":512},"StateReasonCode":null,"LastUpdateStatusReasonCode":null,"Architectures":["x86_64"]}],"NextMarker":"lslTXFcbLQKkb0vP9Kgh5hUL7C3VghELNGbWgZfxrRCk3eiDRMkct7D8EmptWfHSXssPdS7Bo66iQPTMpVOHZgANewpgGgFGGr4pVjd6VgLUO6qPe2EMAuNDBjUTxm8z6N28yhlUwEmKbrAV/m0k5qVzizwoxFwvyruMbuMx9kADFACSslcabxXl3/jDI4rfFnIsUVdzTLBgPF1hzwrE1f3lcdkBvUp+QgY+Pn3w5QuJmwsp/di8COzFemY89GgOHbLNqsrBsgR/ee2eXoJp0ZkKM4EcBK3HokqBzefLfgR02PnfNOdXwqTlhkSPW0TKiKGIYu3Bw7lSNrLd+q3+wEr7ZakqOQf0BVo3FMRhMHlVYgwUJzwi3ActyH2q6fuqGG1sS0B8Oa/prUpe5fmp3VaA3WpazioeHtrKF78JwCi6/nfQsrj/8ZtXGQOxlwEgvT1CIUaF+CdHY3biezrK0tRZNpkCtHnkPtF9lq2U7+UiKXSW9yzxT8P2b0M/Qh4IVdnw4rncQK/doYriAeOdrs1wjMEJnHWq9lAaEyipoxYcVr/z5+yaC6Gwxdg45p9X1vIAaYMf6IZxyFuua43SYi0Ls+IBk4VvpR2io7T0dCxHAr3WAo3D2dm0y8OsbM59"}
|
\\{"Functions":[{"Description":"AWS CDK resource provider framework - onEvent (DevelopmentFrontendStack-g650u/com.amazonaws.cdk.custom-resources.amplify-asset-deployment-provider/amplify-asset-deployment-handler-provider)","TracingConfig":{"Mode":"PassThrough"},"VpcConfig":null,"SigningJobArn":null,"SnapStart":{"OptimizationStatus":"Off","ApplyOn":"None"},"RevisionId":"0c62fc74-a692-403d-9206-5fcbad406424","LastModified":"2023-03-01T18:13:15.704+0000","FileSystemConfigs":null,"FunctionName":"DevelopmentFrontendStack--amplifyassetdeploymentha-aZqB9IbZLIKU","Runtime":"nodejs14.x","Version":"$LATEST","PackageType":"Zip","LastUpdateStatus":null,"Layers":null,"FunctionArn":"arn:aws:lambda:us-west-2:550620852718:function:DevelopmentFrontendStack--amplifyassetdeploymentha-aZqB9IbZLIKU","KMSKeyArn":null,"MemorySize":128,"ImageConfigResponse":null,"LastUpdateStatusReason":null,"DeadLetterConfig":null,"Timeout":900,"Handler":"framework.onEvent","CodeSha256":"m4tt+M0l3p8bZvxIDj83dwGrwRW6atCfS/q8AiXCD3o=","Role":"arn:aws:iam::550620852718:role/DevelopmentFrontendStack-amplifyassetdeploymentha-1782JF7WAPXZ3","SigningProfileVersionArn":null,"MasterArn":null,"RuntimeVersionConfig":null,"CodeSize":4307,"State":null,"StateReason":null,"Environment":{"Variables":{"USER_ON_EVENT_FUNCTION_ARN":"arn:aws:lambda:us-west-2:550620852718:function:DevelopmentFrontendStack--amplifyassetdeploymenton-X9iZJSCSPYDH","WAITER_STATE_MACHINE_ARN":"arn:aws:states:us-west-2:550620852718:stateMachine:amplifyassetdeploymenthandlerproviderwaiterstatemachineB3C2FCBE-Ltggp5wBcHWO","USER_IS_COMPLETE_FUNCTION_ARN":"arn:aws:lambda:us-west-2:550620852718:function:DevelopmentFrontendStack--amplifyassetdeploymentis-jaHopLrSSARV"},"Error":null},"EphemeralStorage":{"Size":512},"StateReasonCode":null,"LastUpdateStatusReasonCode":null,"Architectures":["x86_64"]}],"NextMarker":"lslTXFcbLQKkb0vP9Kgh5hUL7C3VghELNGbWgZfxrRCk3eiDRMkct7D8EmptWfHSXssPdS7Bo66iQPTMpVOHZgANewpgGgFGGr4pVjd6VgLUO6qPe2EMAuNDBjUTxm8z6N28yhlUwEmKbrAV/m0k5qVzizwoxFwvyruMbuMx9kADFACSslcabxXl3/jDI4rfFnIsUVdzTLBgPF1hzwrE1f3lcdkBvUp+QgY+Pn3w5QuJmwsp/di8COzFemY89GgOHbLNqsrBsgR/ee2eXoJp0ZkKM4EcBK3HokqBzefLfgR02PnfNOdXwqTlhkSPW0TKiKGIYu3Bw7lSNrLd+q3+wEr7ZakqOQf0BVo3FMRhMHlVYgwUJzwi3ActyH2q6fuqGG1sS0B8Oa/prUpe5fmp3VaA3WpazioeHtrKF78JwCi6/nfQsrj/8ZtXGQOxlwEgvT1CIUaF+CdHY3biezrK0tRZNpkCtHnkPtF9lq2U7+UiKXSW9yzxT8P2b0M/Qh4IVdnw4rncQK/doYriAeOdrs1wjMEJnHWq9lAaEyipoxYcVr/z5+yaC6Gwxdg45p9X1vIAaYMf6IZxyFuua43SYi0Ls+IBk4VvpR2io7T0dCxHAr3WAo3D2dm0y8OsbM59"}
|
||||||
,
|
,
|
||||||
.server_response_headers = @constCast(&[_][2][]const u8{
|
.server_response_headers = &.{
|
||||||
.{ "Content-Type", "application/json" },
|
.{ .name = "Content-Type", .value = "application/json" },
|
||||||
.{ "x-amzn-RequestId", "c4025199-226f-4a16-bb1f-48618e9d2ea6" },
|
.{ .name = "x-amzn-RequestId", .value = "c4025199-226f-4a16-bb1f-48618e9d2ea6" },
|
||||||
}),
|
},
|
||||||
});
|
});
|
||||||
defer test_harness.deinit();
|
defer test_harness.deinit();
|
||||||
const options = try test_harness.start();
|
const options = try test_harness.start();
|
||||||
|
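Every test in this file changes the same two ways: TestSetup.init drops its separate allocator parameter in favor of the .allocator field already in the options struct, and response headers move from mutable two-element string tuples (which required @constCast) to const slices of std.http.Header. A minimal sketch of the before/after shapes, assuming a Zig 0.12-era standard library; illustrative only, not code from this changeset:

    const std = @import("std");

    test "header styles (illustrative only)" {
        // before: array of 2-element string tuples, cast to a mutable slice
        const old_style = @constCast(&[_][2][]const u8{
            .{ "Content-Type", "application/json" },
        });
        // after: const slice of std.http.Header structs; no cast needed
        const new_style: []const std.http.Header = &.{
            .{ .name = "Content-Type", .value = "application/json" },
        };
        _ = old_style;
        _ = new_style;
    }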
@@ -1784,13 +1803,13 @@ test "rest_json_1_query_with_input: lambda listFunctions runtime" {
 }
 test "rest_json_1_query_no_input: lambda listFunctions runtime" {
     const allocator = std.testing.allocator;
-    var test_harness = TestSetup.init(allocator, .{
+    var test_harness = TestSetup.init(.{
         .allocator = allocator,
         .server_response = @embedFile("test_rest_json_1_query_no_input.response"),
-        .server_response_headers = @constCast(&[_][2][]const u8{
-            .{ "Content-Type", "application/json" },
-            .{ "x-amzn-RequestId", "b2aad11f-36fc-4d0d-ae92-fe0167fb0f40" },
-        }),
+        .server_response_headers = &.{
+            .{ .name = "Content-Type", .value = "application/json" },
+            .{ .name = "x-amzn-RequestId", .value = "b2aad11f-36fc-4d0d-ae92-fe0167fb0f40" },
+        },
     });
     defer test_harness.deinit();
     const options = try test_harness.start();
@@ -1818,14 +1837,14 @@ test "rest_json_1_query_no_input: lambda listFunctions runtime" {
 }
 test "rest_json_1_work_with_lambda: lambda tagResource (only), to excercise zig issue 17015" {
     const allocator = std.testing.allocator;
-    var test_harness = TestSetup.init(allocator, .{
+    var test_harness = TestSetup.init(.{
         .allocator = allocator,
         .server_response = "",
         .server_response_status = .no_content,
-        .server_response_headers = @constCast(&[_][2][]const u8{
-            .{ "Content-Type", "application/json" },
-            .{ "x-amzn-RequestId", "a521e152-6e32-4e67-9fb3-abc94e34551b" },
-        }),
+        .server_response_headers = &.{
+            .{ .name = "Content-Type", .value = "application/json" },
+            .{ .name = "x-amzn-RequestId", .value = "a521e152-6e32-4e67-9fb3-abc94e34551b" },
+        },
     });
     defer test_harness.deinit();
     const options = try test_harness.start();
@@ -1854,13 +1873,13 @@ test "rest_json_1_work_with_lambda: lambda tagResource (only), to excercise zig
 }
 test "ec2_query_no_input: EC2 describe regions" {
     const allocator = std.testing.allocator;
-    var test_harness = TestSetup.init(allocator, .{
+    var test_harness = TestSetup.init(.{
         .allocator = allocator,
         .server_response = @embedFile("test_ec2_query_no_input.response"),
-        .server_response_headers = @constCast(&[_][2][]const u8{
-            .{ "Content-Type", "text/xml;charset=UTF-8" },
-            .{ "x-amzn-RequestId", "4cdbdd69-800c-49b5-8474-ae4c17709782" },
-        }),
+        .server_response_headers = &.{
+            .{ .name = "Content-Type", .value = "text/xml;charset=UTF-8" },
+            .{ .name = "x-amzn-RequestId", .value = "4cdbdd69-800c-49b5-8474-ae4c17709782" },
+        },
         .server_response_transfer_encoding = .chunked,
     });
     defer test_harness.deinit();
@@ -1881,13 +1900,13 @@ test "ec2_query_no_input: EC2 describe regions" {
 }
 test "ec2_query_with_input: EC2 describe instances" {
     const allocator = std.testing.allocator;
-    var test_harness = TestSetup.init(allocator, .{
+    var test_harness = TestSetup.init(.{
         .allocator = allocator,
         .server_response = @embedFile("test_ec2_query_with_input.response"),
-        .server_response_headers = @constCast(&[_][2][]const u8{
-            .{ "Content-Type", "text/xml;charset=UTF-8" },
-            .{ "x-amzn-RequestId", "150a14cc-785d-476f-a4c9-2aa4d03b14e2" },
-        }),
+        .server_response_headers = &.{
+            .{ .name = "Content-Type", .value = "text/xml;charset=UTF-8" },
+            .{ .name = "x-amzn-RequestId", .value = "150a14cc-785d-476f-a4c9-2aa4d03b14e2" },
+        },
     });
     defer test_harness.deinit();
     const options = try test_harness.start();
@@ -1911,15 +1930,15 @@ test "ec2_query_with_input: EC2 describe instances" {
 }
 test "rest_xml_no_input: S3 list buckets" {
     const allocator = std.testing.allocator;
-    var test_harness = TestSetup.init(allocator, .{
+    var test_harness = TestSetup.init(.{
         .allocator = allocator,
         .server_response =
         \\<ListAllMyBucketsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Owner><ID>3367189aa775bd98da38e55093705f2051443c1e775fc0971d6d77387a47c8d0</ID><DisplayName>emilerch+sub1</DisplayName></Owner><Buckets><Bucket><Name>550620852718-backup</Name><CreationDate>2020-06-17T16:26:51.000Z</CreationDate></Bucket><Bucket><Name>amplify-letmework-staging-185741-deployment</Name><CreationDate>2023-03-10T18:57:49.000Z</CreationDate></Bucket><Bucket><Name>aws-cloudtrail-logs-550620852718-224022a7</Name><CreationDate>2021-06-21T18:32:44.000Z</CreationDate></Bucket><Bucket><Name>aws-sam-cli-managed-default-samclisourcebucket-1gy0z00mj47xe</Name><CreationDate>2021-10-05T16:38:07.000Z</CreationDate></Bucket><Bucket><Name>awsomeprojectstack-pipelineartifactsbucketaea9a05-1uzwo6c86ecr</Name><CreationDate>2021-10-05T22:55:09.000Z</CreationDate></Bucket><Bucket><Name>cdk-hnb659fds-assets-550620852718-us-west-2</Name><CreationDate>2023-02-28T21:49:36.000Z</CreationDate></Bucket><Bucket><Name>cf-templates-12iy6putgdxtk-us-west-2</Name><CreationDate>2020-06-26T02:31:59.000Z</CreationDate></Bucket><Bucket><Name>codepipeline-us-west-2-46714083637</Name><CreationDate>2021-09-14T18:43:07.000Z</CreationDate></Bucket><Bucket><Name>elasticbeanstalk-us-west-2-550620852718</Name><CreationDate>2022-04-15T16:22:42.000Z</CreationDate></Bucket><Bucket><Name>lobo-west</Name><CreationDate>2021-06-21T17:17:22.000Z</CreationDate></Bucket><Bucket><Name>lobo-west-2</Name><CreationDate>2021-11-19T20:12:31.000Z</CreationDate></Bucket><Bucket><Name>logging-backup-550620852718-us-east-2</Name><CreationDate>2022-05-29T21:55:16.000Z</CreationDate></Bucket><Bucket><Name>mysfitszj3t6webstack-hostingbucketa91a61fe-1ep3ezkgwpxr0</Name><CreationDate>2023-03-01T04:53:55.000Z</CreationDate></Bucket></Buckets></ListAllMyBucketsResult>
         ,
-        .server_response_headers = @constCast(&[_][2][]const u8{
-            .{ "Content-Type", "application/xml" },
-            .{ "x-amzn-RequestId", "9PEYBAZ9J7TPRX43" },
-        }),
+        .server_response_headers = &.{
+            .{ .name = "Content-Type", .value = "application/xml" },
+            .{ .name = "x-amzn-RequestId", .value = "9PEYBAZ9J7TPRX43" },
+        },
     });
     defer test_harness.deinit();
     const options = try test_harness.start();
@@ -1929,7 +1948,12 @@ test "rest_xml_no_input: S3 list buckets" {
     test_harness.stop();
     // Request expectations
     try std.testing.expectEqual(std.http.Method.GET, test_harness.request_options.request_method);
-    try std.testing.expectEqualStrings("/", test_harness.request_options.request_target);
+    // This changed in rev 830202d722c904c7e3da40e8dde7b9338d08752c of the go sdk, and
+    // contrary to the documentation, a query string argument was added. My guess is that
+    // there is no functional reason, and that this is strictly for some AWS reporting function.
+    // Alternatively, it could be to support some customization mechanism, as the commit
+    // title of that commit is "Merge customizations for S3"
+    try std.testing.expectEqualStrings("/?x-id=ListBuckets", test_harness.request_options.request_target);
     try std.testing.expectEqualStrings("", test_harness.request_options.request_body);
     // Response expectations
     try std.testing.expectEqualStrings("9PEYBAZ9J7TPRX43", call.response_metadata.request_id);
@@ -1937,15 +1961,15 @@ test "rest_xml_no_input: S3 list buckets" {
 }
 test "rest_xml_anything_but_s3: CloudFront list key groups" {
     const allocator = std.testing.allocator;
-    var test_harness = TestSetup.init(allocator, .{
+    var test_harness = TestSetup.init(.{
         .allocator = allocator,
         .server_response =
         \\{"Items":null,"MaxItems":100,"NextMarker":null,"Quantity":0}
         ,
-        .server_response_headers = @constCast(&[_][2][]const u8{
-            .{ "Content-Type", "application/json" },
-            .{ "x-amzn-RequestId", "d3382082-5291-47a9-876b-8df3accbb7ea" },
-        }),
+        .server_response_headers = &.{
+            .{ .name = "Content-Type", .value = "application/json" },
+            .{ .name = "x-amzn-RequestId", .value = "d3382082-5291-47a9-876b-8df3accbb7ea" },
+        },
     });
     defer test_harness.deinit();
     const options = try test_harness.start();
@@ -1963,16 +1987,16 @@ test "rest_xml_anything_but_s3: CloudFront list key groups" {
 }
 test "rest_xml_with_input: S3 put object" {
     const allocator = std.testing.allocator;
-    var test_harness = TestSetup.init(allocator, .{
+    var test_harness = TestSetup.init(.{
         .allocator = allocator,
         .server_response = "",
-        .server_response_headers = @constCast(&[_][2][]const u8{
+        .server_response_headers = &.{
             // .{ "Content-Type", "application/xml" },
-            .{ "x-amzn-RequestId", "9PEYBAZ9J7TPRX43" },
-            .{ "x-amz-id-2", "jdRDo30t7Ge9lf6F+4WYpg+YKui8z0mz2+rwinL38xDZzvloJqrmpCAiKG375OSvHA9OBykJS44=" },
-            .{ "x-amz-server-side-encryption", "AES256" },
-            .{ "ETag", "37b51d194a7513e45b56f6524f2d51f2" },
-        }),
+            .{ .name = "x-amzn-RequestId", .value = "9PEYBAZ9J7TPRX43" },
+            .{ .name = "x-amz-id-2", .value = "jdRDo30t7Ge9lf6F+4WYpg+YKui8z0mz2+rwinL38xDZzvloJqrmpCAiKG375OSvHA9OBykJS44=" },
+            .{ .name = "x-amz-server-side-encryption", .value = "AES256" },
+            .{ .name = "ETag", .value = "37b51d194a7513e45b56f6524f2d51f2" },
+        },
     });
     defer test_harness.deinit();
     const options = try test_harness.start();
@@ -1981,7 +2005,6 @@ test "rest_xml_with_input: S3 put object" {
         .client = options.client,
         .signing_time = TestSetup.signing_time,
     };
-    // std.testing.log_level = .debug;
     const result = try Request(services.s3.put_object).call(.{
         .bucket = "mysfitszj3t6webstack-hostingbucketa91a61fe-1ep3ezkgwpxr0",
         .key = "i/am/a/teapot/foo",
@@ -1989,7 +2012,7 @@ test "rest_xml_with_input: S3 put object" {
         .body = "bar",
         .storage_class = "STANDARD",
     }, s3opts);
-    for (test_harness.request_options.request_headers.list.items) |header| {
+    for (test_harness.request_options.request_headers) |header| {
         std.log.info("Request header: {s}: {s}", .{ header.name, header.value });
     }
     std.log.info("PutObject Request id: {s}", .{result.response_metadata.request_id});
@@ -122,29 +122,22 @@ fn getContainerCredentials(allocator: std.mem.Allocator) !?auth.Credentials {
     const container_uri = try std.fmt.allocPrint(allocator, "http://169.254.170.2{s}", .{container_relative_uri});
     defer allocator.free(container_uri);
 
-    var empty_headers = std.http.Headers.init(allocator);
-    defer empty_headers.deinit();
     var cl = std.http.Client{ .allocator = allocator };
     defer cl.deinit(); // I don't believe connection pooling would help much here as it's non-ssl and local
-    var req = try cl.request(.GET, try std.Uri.parse(container_uri), empty_headers, .{});
-    defer req.deinit();
-    try req.start();
-    try req.wait();
-    if (req.response.status != .ok and req.response.status != .not_found) {
-        log.warn("Bad status code received from container credentials endpoint: {}", .{@intFromEnum(req.response.status)});
+    var resp_payload = std.ArrayList(u8).init(allocator);
+    defer resp_payload.deinit();
+    const req = try cl.fetch(.{
+        .location = .{ .url = container_uri },
+        .response_storage = .{ .dynamic = &resp_payload },
+    });
+    if (req.status != .ok and req.status != .not_found) {
+        log.warn("Bad status code received from container credentials endpoint: {}", .{@intFromEnum(req.status)});
         return null;
     }
-    if (req.response.status == .not_found) return null;
-    if (req.response.content_length == null or req.response.content_length.? == 0) return null;
-
-    var resp_payload = try std.ArrayList(u8).initCapacity(allocator, @intCast(req.response.content_length.?));
-    defer resp_payload.deinit();
-    try resp_payload.resize(@intCast(req.response.content_length.?));
-    const response_data = try resp_payload.toOwnedSlice();
-    defer allocator.free(response_data);
-    _ = try req.readAll(response_data);
-    log.debug("Read {d} bytes from container credentials endpoint", .{response_data.len});
-    if (response_data.len == 0) return null;
+    if (req.status == .not_found) return null;
+    log.debug("Read {d} bytes from container credentials endpoint", .{resp_payload.items.len});
+    if (resp_payload.items.len == 0) return null;
 
     const CredsResponse = struct {
         AccessKeyId: []const u8,
@@ -154,8 +147,8 @@ fn getContainerCredentials(allocator: std.mem.Allocator) !?auth.Credentials {
         Token: []const u8,
     };
     const creds_response = blk: {
-        const res = std.json.parseFromSlice(CredsResponse, allocator, response_data, .{}) catch |e| {
-            log.err("Unexpected Json response from container credentials endpoint: {s}", .{response_data});
+        const res = std.json.parseFromSlice(CredsResponse, allocator, resp_payload.items, .{}) catch |e| {
+            log.err("Unexpected Json response from container credentials endpoint: {s}", .{resp_payload.items});
             log.err("Error parsing json: {}", .{e});
             if (@errorReturnTrace()) |trace| {
                 std.debug.dumpStackTrace(trace.*);
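The rewrite above replaces the old request()/start()/wait()/readAll()/deinit() sequence with a single std.http.Client.fetch() call that appends the response body into caller-owned storage. A hedged sketch of that pattern in isolation, assuming a Zig 0.12-era standard library (the function name and url parameter here are placeholders, not part of this changeset):

    const std = @import("std");

    fn fetchBody(allocator: std.mem.Allocator, url: []const u8) ![]u8 {
        var cl = std.http.Client{ .allocator = allocator };
        defer cl.deinit();
        var body = std.ArrayList(u8).init(allocator);
        defer body.deinit();
        // fetch() drives the whole request and appends the body into `body`
        const res = try cl.fetch(.{
            .location = .{ .url = url },
            .response_storage = .{ .dynamic = &body },
        });
        if (res.status != .ok) return error.UnexpectedStatus;
        return body.toOwnedSlice(); // caller frees
    }

This also removes the old content-length bookkeeping: dynamic storage grows as bytes arrive, so chunked responses and missing Content-Length headers need no special casing.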
@@ -182,28 +175,27 @@ fn getImdsv2Credentials(allocator: std.mem.Allocator) !?auth.Credentials {
     defer cl.deinit(); // I don't believe connection pooling would help much here as it's non-ssl and local
     // Get token
     {
-        var headers = std.http.Headers.init(allocator);
-        defer headers.deinit();
-        try headers.append("X-aws-ec2-metadata-token-ttl-seconds", "21600");
-        var req = try cl.request(.PUT, try std.Uri.parse("http://169.254.169.254/latest/api/token"), headers, .{});
-        defer req.deinit();
-        try req.start();
-        try req.wait();
-        if (req.response.status != .ok) {
-            log.warn("Bad status code received from IMDS v2: {}", .{@intFromEnum(req.response.status)});
+        var resp_payload = std.ArrayList(u8).init(allocator);
+        defer resp_payload.deinit();
+        const req = try cl.fetch(.{
+            .method = .PUT,
+            .location = .{ .url = "http://169.254.169.254/latest/api/token" },
+            .extra_headers = &[_]std.http.Header{
+                .{ .name = "X-aws-ec2-metadata-token-ttl-seconds", .value = "21600" },
+            },
+            .response_storage = .{ .dynamic = &resp_payload },
+        });
+        if (req.status != .ok) {
+            log.warn("Bad status code received from IMDS v2: {}", .{@intFromEnum(req.status)});
             return null;
         }
-        if (req.response.content_length == null or req.response.content_length == 0) {
+        if (resp_payload.items.len == 0) {
             log.warn("Unexpected zero response from IMDS v2", .{});
             return null;
         }
 
-        var resp_payload = try std.ArrayList(u8).initCapacity(allocator, @intCast(req.response.content_length.?));
-        defer resp_payload.deinit();
-        try resp_payload.resize(@intCast(req.response.content_length.?));
         token = try resp_payload.toOwnedSlice();
         errdefer if (token) |t| allocator.free(t);
-        _ = try req.readAll(token.?);
     }
     std.debug.assert(token != null);
     log.debug("Got token from IMDSv2: {s}", .{token.?});
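For reference, the IMDSv2 handshake this function implements is two round trips: a PUT to the token endpoint carrying a TTL header, then metadata GETs that echo the token back. A compact sketch of the token step, using the same endpoint and header names as the code above (the surrounding allocator and client setup are assumed):

    var token_buf = std.ArrayList(u8).init(allocator);
    defer token_buf.deinit();
    const token_res = try cl.fetch(.{
        .method = .PUT,
        .location = .{ .url = "http://169.254.169.254/latest/api/token" },
        .extra_headers = &[_]std.http.Header{
            .{ .name = "X-aws-ec2-metadata-token-ttl-seconds", .value = "21600" },
        },
        .response_storage = .{ .dynamic = &token_buf },
    });
    if (token_res.status != .ok) return null;
    // token_buf.items now holds the session token; later metadata requests
    // send it back in an X-aws-ec2-metadata-token header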
@@ -224,28 +216,26 @@ fn getImdsRoleName(allocator: std.mem.Allocator, client: *std.http.Client, imds_
     // "InstanceProfileArn" : "arn:aws:iam::550620852718:instance-profile/ec2-dev",
     // "InstanceProfileId" : "AIPAYAM4POHXCFNKZ7HU2"
     // }
-    var headers = std.http.Headers.init(allocator);
-    defer headers.deinit();
-    try headers.append("X-aws-ec2-metadata-token", imds_token);
-
-    var req = try client.request(.GET, try std.Uri.parse("http://169.254.169.254/latest/meta-data/iam/info"), headers, .{});
-    defer req.deinit();
-
-    try req.start();
-    try req.wait();
-
-    if (req.response.status != .ok and req.response.status != .not_found) {
-        log.warn("Bad status code received from IMDS iam endpoint: {}", .{@intFromEnum(req.response.status)});
+    var resp_payload = std.ArrayList(u8).init(allocator);
+    defer resp_payload.deinit();
+    const req = try client.fetch(.{
+        .method = .GET,
+        .location = .{ .url = "http://169.254.169.254/latest/meta-data/iam/info" },
+        .extra_headers = &[_]std.http.Header{
+            .{ .name = "X-aws-ec2-metadata-token", .value = imds_token },
+        },
+        .response_storage = .{ .dynamic = &resp_payload },
+    });
+
+    if (req.status != .ok and req.status != .not_found) {
+        log.warn("Bad status code received from IMDS iam endpoint: {}", .{@intFromEnum(req.status)});
         return null;
     }
-    if (req.response.status == .not_found) return null;
-    if (req.response.content_length == null or req.response.content_length.? == 0) {
+    if (req.status == .not_found) return null;
+    if (resp_payload.items.len == 0) {
         log.warn("Unexpected empty response from IMDS endpoint post token", .{});
         return null;
     }
-    const resp = try allocator.alloc(u8, @intCast(req.response.content_length.?));
-    defer allocator.free(resp);
-    _ = try req.readAll(resp);
 
     const ImdsResponse = struct {
         Code: []const u8,
@@ -253,8 +243,8 @@ fn getImdsRoleName(allocator: std.mem.Allocator, client: *std.http.Client, imds_
         InstanceProfileArn: []const u8,
         InstanceProfileId: []const u8,
     };
-    const imds_response = std.json.parseFromSlice(ImdsResponse, allocator, resp, .{}) catch |e| {
-        log.err("Unexpected Json response from IMDS endpoint: {s}", .{resp});
+    const imds_response = std.json.parseFromSlice(ImdsResponse, allocator, resp_payload.items, .{}) catch |e| {
+        log.err("Unexpected Json response from IMDS endpoint: {s}", .{resp_payload.items});
         log.err("Error parsing json: {}", .{e});
         if (@errorReturnTrace()) |trace| {
             std.debug.dumpStackTrace(trace.*);
@@ -274,31 +264,28 @@ fn getImdsRoleName(allocator: std.mem.Allocator, client: *std.http.Client, imds_
 
 /// Note - this internal function assumes zfetch is initialized prior to use
 fn getImdsCredentials(allocator: std.mem.Allocator, client: *std.http.Client, role_name: []const u8, imds_token: []u8) !?auth.Credentials {
-    var headers = std.http.Headers.init(allocator);
-    defer headers.deinit();
-    try headers.append("X-aws-ec2-metadata-token", imds_token);
-
     const url = try std.fmt.allocPrint(allocator, "http://169.254.169.254/latest/meta-data/iam/security-credentials/{s}/", .{role_name});
     defer allocator.free(url);
+    var resp_payload = std.ArrayList(u8).init(allocator);
+    defer resp_payload.deinit();
+    const req = try client.fetch(.{
+        .method = .GET,
+        .location = .{ .url = url },
+        .extra_headers = &[_]std.http.Header{
+            .{ .name = "X-aws-ec2-metadata-token", .value = imds_token },
+        },
+        .response_storage = .{ .dynamic = &resp_payload },
+    });
 
-    var req = try client.request(.GET, try std.Uri.parse(url), headers, .{});
-    defer req.deinit();
-
-    try req.start();
-    try req.wait();
-
-    if (req.response.status != .ok and req.response.status != .not_found) {
-        log.warn("Bad status code received from IMDS role endpoint: {}", .{@intFromEnum(req.response.status)});
+    if (req.status != .ok and req.status != .not_found) {
+        log.warn("Bad status code received from IMDS role endpoint: {}", .{@intFromEnum(req.status)});
         return null;
     }
-    if (req.response.status == .not_found) return null;
-    if (req.response.content_length == null or req.response.content_length.? == 0) {
+    if (req.status == .not_found) return null;
+    if (resp_payload.items.len == 0) {
         log.warn("Unexpected empty response from IMDS role endpoint", .{});
         return null;
     }
-    const resp = try allocator.alloc(u8, @intCast(req.response.content_length.?));
-    defer allocator.free(resp);
-    _ = try req.readAll(resp);
 
     // log.debug("Read {d} bytes from imds v2 credentials endpoint", .{read});
     const ImdsResponse = struct {
@@ -310,8 +297,8 @@ fn getImdsCredentials(allocator: std.mem.Allocator, client: *std.http.Client, ro
         Token: []const u8,
         Expiration: []const u8,
     };
-    const imds_response = std.json.parseFromSlice(ImdsResponse, allocator, resp, .{}) catch |e| {
-        log.err("Unexpected Json response from IMDS endpoint: {s}", .{resp});
+    const imds_response = std.json.parseFromSlice(ImdsResponse, allocator, resp_payload.items, .{}) catch |e| {
+        log.err("Unexpected Json response from IMDS endpoint: {s}", .{resp_payload.items});
         log.err("Error parsing json: {}", .{e});
        if (@errorReturnTrace()) |trace| {
            std.debug.dumpStackTrace(trace.*);
src/aws_http.zig (135 changed lines)
@@ -44,7 +44,7 @@ pub const Options = struct {
     signing_time: ?i64 = null,
 };
 
-pub const Header = base.Header;
+pub const Header = std.http.Header;
 pub const HttpRequest = base.Request;
 pub const HttpResult = base.Result;
 
@@ -64,11 +64,11 @@ const EndPoint = struct {
 };
 pub const AwsHttp = struct {
     allocator: std.mem.Allocator,
-    proxy: ?std.http.Client.HttpProxy,
+    proxy: ?std.http.Client.Proxy,
 
     const Self = @This();
 
-    pub fn init(allocator: std.mem.Allocator, proxy: ?std.http.Client.HttpProxy) Self {
+    pub fn init(allocator: std.mem.Allocator, proxy: ?std.http.Client.Proxy) Self {
         return Self{
             .allocator = allocator,
             .proxy = proxy,
@@ -149,7 +149,7 @@ pub const AwsHttp = struct {
         // We will use endpoint instead
         request_cp.path = endpoint.path;
 
-        var request_headers = std.ArrayList(base.Header).init(self.allocator);
+        var request_headers = std.ArrayList(std.http.Header).init(self.allocator);
         defer request_headers.deinit();
 
         const len = try addHeaders(self.allocator, &request_headers, endpoint.host, request_cp.body, request_cp.content_type, request_cp.headers);
@@ -163,108 +163,75 @@ pub const AwsHttp = struct {
             }
         }
 
-        var headers = std.http.Headers.init(self.allocator);
+        var headers = std.ArrayList(std.http.Header).init(self.allocator);
         defer headers.deinit();
         for (request_cp.headers) |header|
-            try headers.append(header.name, header.value);
+            try headers.append(.{ .name = header.name, .value = header.value });
         log.debug("All Request Headers:", .{});
-        for (headers.list.items) |h| {
+        for (headers.items) |h| {
             log.debug("\t{s}: {s}", .{ h.name, h.value });
         }
 
         const url = try std.fmt.allocPrint(self.allocator, "{s}{s}{s}", .{ endpoint.uri, request_cp.path, request_cp.query });
         defer self.allocator.free(url);
         log.debug("Request url: {s}", .{url});
-        var cl = std.http.Client{ .allocator = self.allocator, .proxy = self.proxy };
+        // TODO: Fix this proxy stuff. This is all a kludge just to compile, but std.http.Client has it all built in now
+        var cl = std.http.Client{ .allocator = self.allocator, .https_proxy = if (self.proxy) |*p| @constCast(p) else null };
         defer cl.deinit(); // TODO: Connection pooling
-        //
-        // var req = try zfetch.Request.init(self.allocator, url, self.trust_chain);
-        // defer req.deinit();
 
         const method = std.meta.stringToEnum(std.http.Method, request_cp.method).?;
-        // std.Uri has a format function here that is used by start() (below)
-        // to escape the string we're about to send. But we don't want that...
-        // we need the control, because the signing above relies on the url above.
-        // We can't seem to have our cake and eat it too, because we need escaped
-        // ':' characters, but if we escape them, we'll get them double encoded.
-        // If we don't escape them, they won't get encoded at all. I believe the
-        // only answer may be to copy the Request.start function from the
-        // standard library and tweak the print statements such that they don't
-        // escape (but do still handle full uri (in proxy) vs path only (normal)
-        //
-        // Bug report filed here:
-        // https://github.com/ziglang/zig/issues/17015
-        //
-        // https://github.com/ziglang/zig/blob/0.11.0/lib/std/http/Client.zig#L538-L636
-        //
-        // Look at lines 551 and 553:
-        // https://github.com/ziglang/zig/blob/0.11.0/lib/std/http/Client.zig#L551
-        //
-        // This ends up executing the format function here:
-        // https://github.com/ziglang/zig/blob/0.11.0/lib/std/http/Client.zig#L551
-        //
-        // Which is basically the what we want, without the escaping on lines
-        // 249, 254, and 260:
-        // https://github.com/ziglang/zig/blob/0.11.0/lib/std/Uri.zig#L249
-        //
-        // const unescaped_url = try std.Uri.unescapeString(self.allocator, url);
-        // defer self.allocator.free(unescaped_url);
-        var req = try cl.request(method, try std.Uri.parse(url), headers, .{});
-        defer req.deinit();
-        if (request_cp.body.len > 0)
-            req.transfer_encoding = .{ .content_length = request_cp.body.len };
-        try @import("http_client_17015_issue.zig").start(&req);
-        // try req.start();
-        if (request_cp.body.len > 0) {
-            // Workaround for https://github.com/ziglang/zig/issues/15626
-            const max_bytes: usize = 1 << 14;
-            var inx: usize = 0;
-            while (request_cp.body.len > inx) {
-                try req.writeAll(request_cp.body[inx..@min(request_cp.body.len, inx + max_bytes)]);
-                inx += max_bytes;
-            }
-
-            try req.finish();
-        }
-        try req.wait();
+        var server_header_buffer: [16 * 1024]u8 = undefined;
+        var resp_payload = std.ArrayList(u8).init(self.allocator);
+        defer resp_payload.deinit();
+        const req = try cl.fetch(.{
+            .server_header_buffer = &server_header_buffer,
+            .method = method,
+            .payload = if (request_cp.body.len > 0) request_cp.body else null,
+            .response_storage = .{ .dynamic = &resp_payload },
+            .raw_uri = true,
+            .location = .{ .url = url },
+            .extra_headers = headers.items,
+        });
+        // TODO: Need to test for payloads > 2^14. I believe one of our tests does this, but not sure
+        // if (request_cp.body.len > 0) {
+        //     // Workaround for https://github.com/ziglang/zig/issues/15626
+        //     const max_bytes: usize = 1 << 14;
+        //     var inx: usize = 0;
+        //     while (request_cp.body.len > inx) {
+        //         try req.writeAll(request_cp.body[inx..@min(request_cp.body.len, inx + max_bytes)]);
+        //         inx += max_bytes;
+        //     }
+        //
+        //     try req.finish();
+        // }
+        // try req.wait();
 
         // TODO: Timeout - is this now above us?
         log.debug(
             "Request Complete. Response code {d}: {?s}",
-            .{ @intFromEnum(req.response.status), req.response.status.phrase() },
+            .{ @intFromEnum(req.status), req.status.phrase() },
         );
         log.debug("Response headers:", .{});
-        var resp_headers = try std.ArrayList(Header).initCapacity(
+        var resp_headers = std.ArrayList(Header).init(
             self.allocator,
-            req.response.headers.list.items.len,
         );
         defer resp_headers.deinit();
-        var content_length: usize = 0;
-        for (req.response.headers.list.items) |h| {
+        var it = std.http.HeaderIterator.init(server_header_buffer[0..]);
+        while (it.next()) |h| { // even though we don't expect to fill the buffer,
+            // we don't get a length, but looks via stdlib source
+            // it should be ok to call next on the undefined memory
             log.debug(" {s}: {s}", .{ h.name, h.value });
-            resp_headers.appendAssumeCapacity(.{
+            try resp_headers.append(.{
                 .name = try (self.allocator.dupe(u8, h.name)),
                 .value = try (self.allocator.dupe(u8, h.value)),
             });
-            if (content_length == 0 and std.ascii.eqlIgnoreCase("content-length", h.name))
-                content_length = std.fmt.parseInt(usize, h.value, 10) catch 0;
         }
 
-        var response_data: []u8 =
-            if (req.response.transfer_encoding) |_| // the only value here is "chunked"
-            try req.reader().readAllAlloc(self.allocator, std.math.maxInt(usize))
-        else blk: {
-            // content length
-            const tmp_data = try self.allocator.alloc(u8, content_length);
-            errdefer self.allocator.free(tmp_data);
-            _ = try req.readAll(tmp_data);
-            break :blk tmp_data;
-        };
-        log.debug("raw response body:\n{s}", .{response_data});
+        log.debug("raw response body:\n{s}", .{resp_payload.items});
 
         const rc = HttpResult{
-            .response_code = @intFromEnum(req.response.status),
-            .body = response_data,
+            .response_code = @intFromEnum(req.status),
+            .body = try resp_payload.toOwnedSlice(),
             .headers = try resp_headers.toOwnedSlice(),
             .allocator = self.allocator,
         };
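Because fetch() does not hand back a parsed header list, the code above supplies a fixed server_header_buffer and re-parses it afterwards with std.http.HeaderIterator. A sketch of that recovery step under the same Zig 0.12-era assumptions (buffer size and variable names are illustrative):

    var server_header_buffer: [16 * 1024]u8 = undefined;
    _ = try cl.fetch(.{
        .location = .{ .url = url },
        .server_header_buffer = &server_header_buffer,
        .response_storage = .{ .dynamic = &body },
    });
    // walk the raw status line and headers fetch() wrote into the buffer
    var it = std.http.HeaderIterator.init(server_header_buffer[0..]);
    while (it.next()) |h| {
        std.debug.print("{s}: {s}\n", .{ h.name, h.value });
    }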
@@ -277,7 +244,16 @@ fn getRegion(service: []const u8, region: []const u8) []const u8 {
     return region;
 }
 
-fn addHeaders(allocator: std.mem.Allocator, headers: *std.ArrayList(base.Header), host: []const u8, body: []const u8, content_type: []const u8, additional_headers: []Header) !?[]const u8 {
+fn addHeaders(allocator: std.mem.Allocator, headers: *std.ArrayList(std.http.Header), host: []const u8, body: []const u8, content_type: []const u8, additional_headers: []const Header) !?[]const u8 {
+    // We don't need allocator and body because they were to add a
+    // Content-Length header. But that is being added by the client send()
+    // function, so we don't want it on the request twice. But I also feel
+    // pretty strongly that send() should be providing us control, because
+    // I think if we don't add it here, it won't get signed, and we would
+    // really prefer it to be signed. So, we will wait and watch for this
+    // situation to change in stdlib
+    _ = allocator;
+    _ = body;
     var has_content_type = false;
     for (additional_headers) |h| {
         if (std.ascii.eqlIgnoreCase(h.name, "Content-Type")) {
@@ -291,11 +267,6 @@ fn addHeaders(allocator: std.mem.Allocator, headers: *std.ArrayList(base.Header)
     if (!has_content_type)
         try headers.append(.{ .name = "Content-Type", .value = content_type });
     try headers.appendSlice(additional_headers);
-    if (body.len > 0) {
-        const len = try std.fmt.allocPrint(allocator, "{d}", .{body.len});
-        try headers.append(.{ .name = "Content-Length", .value = len });
-        return len;
-    }
     return null;
 }
@@ -7,12 +7,12 @@ pub const Request = struct {
     body: []const u8 = "",
     method: []const u8 = "POST",
     content_type: []const u8 = "application/json", // Can we get away with this?
-    headers: []Header = &[_]Header{},
+    headers: []const std.http.Header = &.{},
 };
 pub const Result = struct {
     response_code: u16, // actually 3 digits can fit in u10
     body: []const u8,
-    headers: []Header,
+    headers: []const std.http.Header,
     allocator: std.mem.Allocator,
 
     pub fn deinit(self: Result) void {
@@ -26,8 +26,3 @@ pub const Result = struct {
         return;
     }
 };
-
-pub const Header = struct {
-    name: []const u8,
-    value: []const u8,
-};
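The deleted local Header struct was field-for-field identical to std.http.Header in the 0.12-era standard library, so existing anonymous-struct initializers keep working unchanged; only the type name moves. An illustrative one-liner (values borrowed from the tests above):

    const h: std.http.Header = .{ .name = "ETag", .value = "37b51d194a7513e45b56f6524f2d51f2" };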
@@ -167,31 +167,45 @@ pub fn signRequest(allocator: std.mem.Allocator, request: base.Request, config:
     var additional_header_count: u3 = 3;
     if (config.credentials.session_token != null)
         additional_header_count += 1;
-    const newheaders = try allocator.alloc(base.Header, rc.headers.len + additional_header_count);
+    if (config.signed_body_header == .none)
+        additional_header_count -= 1;
+    const newheaders = try allocator.alloc(std.http.Header, rc.headers.len + additional_header_count);
     errdefer allocator.free(newheaders);
     const oldheaders = rc.headers;
     if (config.credentials.session_token) |t| {
-        newheaders[newheaders.len - 4] = base.Header{
+        newheaders[newheaders.len - additional_header_count] = std.http.Header{
             .name = "X-Amz-Security-Token",
             .value = try allocator.dupe(u8, t),
         };
+        additional_header_count -= 1;
     }
     errdefer freeSignedRequest(allocator, &rc, config);
-    std.mem.copy(base.Header, newheaders, oldheaders);
-    newheaders[newheaders.len - 3] = base.Header{
+    @memcpy(newheaders[0..oldheaders.len], oldheaders);
+    newheaders[newheaders.len - additional_header_count] = std.http.Header{
         .name = "X-Amz-Date",
         .value = signing_iso8601,
     };
+    additional_header_count -= 1;
+
+    // We always need the sha256 of the payload for the signature,
+    // regardless of whether we're sticking the header on the request
+    std.debug.assert(config.signed_body_header == .none or
+        config.signed_body_header == .sha256);
+    const payload_hash = try hash(allocator, request.body, .sha256);
+    if (config.signed_body_header == .sha256) {
     // From the AWS nitro enclaves SDK, it appears that there is no reason
     // to avoid *ALWAYS* adding the x-amz-content-sha256 header
     // https://github.com/aws/aws-nitro-enclaves-sdk-c/blob/9ecb83d07fe953636e3c0b861d6dac0a15d00f82/source/rest.c#L464
-    const payload_hash = try hash(allocator, request.body, config.signed_body_header);
+    // However, for signature verification, we need to accommodate clients that
+    // may not add this header
     // This will be freed in freeSignedRequest
     // defer allocator.free(payload_hash);
-    newheaders[newheaders.len - 2] = base.Header{
+        newheaders[newheaders.len - additional_header_count] = std.http.Header{
             .name = "x-amz-content-sha256",
             .value = payload_hash,
         };
+        additional_header_count -= 1;
+    }
 
     rc.headers = newheaders[0 .. newheaders.len - 1];
     log.debug("Signing with access key: {s}", .{config.credentials.access_key});
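The countdown bookkeeping replaces the old hard-coded -4/-3/-2 offsets: each optional header is written at newheaders[len - additional_header_count] and then decrements the count, so the next header lands in the next free slot and Authorization always ends up at len - 1. A worked trace for the fullest case (session token present, signed_body_header == .sha256), derived directly from the code above:

    // additional_header_count = 3 + 1 (token) = 4; newheaders.len = rc.headers.len + 4
    // X-Amz-Security-Token -> newheaders[len - 4]; count -> 3
    // X-Amz-Date           -> newheaders[len - 3]; count -> 2
    // x-amz-content-sha256 -> newheaders[len - 2]; count -> 1
    // Authorization        -> newheaders[len - 1]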
@@ -245,7 +259,7 @@ pub fn signRequest(allocator: std.mem.Allocator, request: base.Request, config:
 
     const signature = try hmac(allocator, signing_key, string_to_sign);
     defer allocator.free(signature);
-    newheaders[newheaders.len - 1] = base.Header{
+    newheaders[newheaders.len - 1] = std.http.Header{
         .name = "Authorization",
         .value = try std.fmt.allocPrint(
             allocator,
@@ -285,24 +299,51 @@ pub fn freeSignedRequest(allocator: std.mem.Allocator, request: *base.Request, c
 
 pub const credentialsFn = *const fn ([]const u8) ?Credentials;
 
-pub fn verifyServerRequest(allocator: std.mem.Allocator, request: std.http.Server.Request, request_body_reader: anytype, credentials_fn: credentialsFn) !bool {
-    const unverified_request = UnverifiedRequest{
-        .headers = request.headers,
-        .target = request.target,
-        .method = request.method,
-    };
+pub fn verifyServerRequest(allocator: std.mem.Allocator, request: *std.http.Server.Request, request_body_reader: anytype, credentials_fn: credentialsFn) !bool {
+    var unverified_request = try UnverifiedRequest.init(allocator, request);
+    defer unverified_request.deinit();
     return verify(allocator, unverified_request, request_body_reader, credentials_fn);
 }
 
 pub const UnverifiedRequest = struct {
-    headers: std.http.Headers,
+    headers: []std.http.Header,
     target: []const u8,
     method: std.http.Method,
+    allocator: std.mem.Allocator,
+
+    pub fn init(allocator: std.mem.Allocator, request: *std.http.Server.Request) !UnverifiedRequest {
+        var al = std.ArrayList(std.http.Header).init(allocator);
+        defer al.deinit();
+        var it = request.iterateHeaders();
+        while (it.next()) |h| try al.append(h);
+        return .{
+            .target = request.head.target,
+            .method = request.head.method,
+            .headers = try al.toOwnedSlice(),
+            .allocator = allocator,
+        };
+    }
+
+    pub fn getFirstHeaderValue(self: UnverifiedRequest, name: []const u8) ?[]const u8 {
+        for (self.headers) |*h| {
+            if (std.ascii.eqlIgnoreCase(name, h.name))
+                return h.value; // I don't think this is the whole story here, but should suffice for now
+            // We need to return the value before the first ';' IIRC
+        }
+        return null;
+    }
+
+    pub fn deinit(self: *UnverifiedRequest) void {
+        self.allocator.free(self.headers);
+    }
 };
 
 pub fn verify(allocator: std.mem.Allocator, request: UnverifiedRequest, request_body_reader: anytype, credentials_fn: credentialsFn) !bool {
+    var arena = std.heap.ArenaAllocator.init(allocator);
+    defer arena.deinit();
+    const aa = arena.allocator();
     // Authorization: AWS4-HMAC-SHA256 Credential=ACCESS/20230908/us-west-2/s3/aws4_request, SignedHeaders=accept;content-length;content-type;host;x-amz-content-sha256;x-amz-date;x-amz-storage-class, Signature=fcc43ce73a34c9bd1ddf17e8a435f46a859812822f944f9eeb2aabcd64b03523
-    const auth_header_or_null = request.headers.getFirstValue("Authorization");
+    const auth_header_or_null = request.getFirstHeaderValue("Authorization");
     const auth_header = if (auth_header_or_null) |a| a else return error.AuthorizationHeaderMissing;
     if (!std.mem.startsWith(u8, auth_header, "AWS4-HMAC-SHA256")) return error.UnsupportedAuthorizationType;
     var credential: ?[]const u8 = null;
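With request now a *std.http.Server.Request, the caller side stays a one-liner; UnverifiedRequest.init snapshots the headers so verification does not depend on the server's transient buffers. A hedged usage sketch (the handler and credentials lookup are placeholders invented for illustration, not part of this changeset):

    fn credentialsForKey(access_key: []const u8) ?Credentials {
        _ = access_key;
        return null; // real code would look up the secret for this access key
    }

    fn handle(allocator: std.mem.Allocator, request: *std.http.Server.Request) !void {
        const body_reader = try request.reader();
        if (!try verifyServerRequest(allocator, request, body_reader, credentialsForKey))
            return error.SignatureMismatch;
    }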
@@ -328,7 +369,7 @@ pub fn verify(allocator: std.mem.Allocator, request: UnverifiedRequest, request_
     if (signed_headers == null) return error.AuthorizationHeaderMissingSignedHeaders;
     if (signature == null) return error.AuthorizationHeaderMissingSignature;
     return verifyParsedAuthorization(
-        allocator,
+        aa,
         request,
         request_body_reader,
         credential.?,
@@ -356,30 +397,32 @@ fn verifyParsedAuthorization(
     const credentials = credentials_fn(access_key) orelse return error.CredentialsNotFound;
     // TODO: https://stackoverflow.com/questions/29276609/aws-authentication-requires-a-valid-date-or-x-amz-date-header-curl
     // For now I want to see this test pass
-    const normalized_iso_date = request.headers.getFirstValue("x-amz-date") orelse
-        request.headers.getFirstValue("Date").?;
+    const normalized_iso_date = request.getFirstHeaderValue("x-amz-date") orelse
+        request.getFirstHeaderValue("Date").?;
     log.debug("Got date: {s}", .{normalized_iso_date});
     _ = credential_iterator.next().?; // skip the date...I don't think we need this
     const region = credential_iterator.next().?;
     const service = credential_iterator.next().?;
     const aws4_request = credential_iterator.next().?;
     if (!std.mem.eql(u8, aws4_request, "aws4_request")) return error.UnexpectedCredentialValue;
-    const config = Config{
+    var config = Config{
         .service = service,
         .credentials = credentials,
         .region = region,
         .algorithm = .v4,
         .signature_type = .headers,
-        .signed_body_header = .sha256,
+        .signed_body_header = .none,
         .expiration_in_seconds = 0,
         .signing_time = try date.dateTimeToTimestamp(try date.parseIso8601ToDateTime(normalized_iso_date)),
     };
 
-    var headers = try allocator.alloc(base.Header, std.mem.count(u8, signed_headers, ";") + 1);
+    var headers = try allocator.alloc(std.http.Header, std.mem.count(u8, signed_headers, ";") + 1);
     defer allocator.free(headers);
     var signed_headers_iterator = std.mem.splitSequence(u8, signed_headers, ";");
     var inx: usize = 0;
     while (signed_headers_iterator.next()) |signed_header| {
+        if (std.ascii.eqlIgnoreCase(signed_header, "x-amz-content-sha256"))
+            config.signed_body_header = .sha256;
         var is_forbidden = false;
         inline for (forbidden_headers) |forbidden| {
             if (std.ascii.eqlIgnoreCase(forbidden.name, signed_header)) {
@@ -390,7 +433,7 @@
         if (is_forbidden) continue;
         headers[inx] = .{
             .name = signed_header,
-            .value = request.headers.getFirstValue(signed_header).?,
+            .value = request.getFirstHeaderValue(signed_header).?,
         };
         inx += 1;
     }
@@ -399,7 +442,7 @@
         .path = target_iterator.first(),
         .headers = headers[0..inx],
         .method = @tagName(request.method),
-        .content_type = request.headers.getFirstValue("content-type").?,
+        .content_type = request.getFirstHeaderValue("content-type").?,
     };
     signed_request.query = request.target[signed_request.path.len..]; // TODO: should this be +1? query here would include '?'
     signed_request.body = try request_body_reader.readAllAlloc(allocator, std.math.maxInt(usize));
@@ -427,6 +470,11 @@ fn verifySignedRequest(signed_request: base.Request, signature: []const u8) !boo
         }
         break :blk null;
     };
+    log.debug(
+        \\Signature Verification
+        \\Request Signature: {s}
+        \\Calculated Signat: {s}
+    , .{ signature, calculated_signature.? });
     return std.mem.eql(u8, signature, calculated_signature.?);
 }
 
@@ -463,7 +511,6 @@ fn getSigningKey(allocator: std.mem.Allocator, signing_date: []const u8, config:
 }
 fn validateConfig(config: Config) SigningError!void {
     if (config.signature_type != .headers or
-        config.signed_body_header != .sha256 or
         config.expiration_in_seconds != 0 or
         config.algorithm != .v4)
         return SigningError.NotImplemented;
@@ -519,7 +566,7 @@ fn createCanonicalRequest(allocator: std.mem.Allocator, request: base.Request, p
     });
     errdefer allocator.free(canonical_request);
     log.debug("Canonical_request (just calculated):\n{s}", .{canonical_request});
-    const hashed = try hash(allocator, canonical_request, config.signed_body_header);
+    const hashed = try hash(allocator, canonical_request, .sha256);
     return Hashed{
         .arr = canonical_request,
         .hash = hashed,
@ -757,7 +804,7 @@ const CanonicalHeaders = struct {
|
||||||
str: []const u8,
|
str: []const u8,
|
||||||
signed_headers: []const u8,
|
signed_headers: []const u8,
|
||||||
};
|
};
|
||||||
fn canonicalHeaders(allocator: std.mem.Allocator, headers: []base.Header, service: []const u8) !CanonicalHeaders {
|
fn canonicalHeaders(allocator: std.mem.Allocator, headers: []const std.http.Header, service: []const u8) !CanonicalHeaders {
|
||||||
//
|
//
|
||||||
// Doc example. Original:
|
// Doc example. Original:
|
||||||
//
|
//
|
||||||
|
@ -773,7 +820,7 @@ fn canonicalHeaders(allocator: std.mem.Allocator, headers: []base.Header, servic
|
||||||
// my-header1:a b c\n
|
// my-header1:a b c\n
|
||||||
// my-header2:"a b c"\n
|
// my-header2:"a b c"\n
|
||||||
// x-amz-date:20150830T123600Z\n
|
// x-amz-date:20150830T123600Z\n
|
||||||
var dest = try std.ArrayList(base.Header).initCapacity(allocator, headers.len);
|
var dest = try std.ArrayList(std.http.Header).initCapacity(allocator, headers.len);
|
||||||
defer {
|
defer {
|
||||||
for (dest.items) |h| {
|
for (dest.items) |h| {
|
||||||
allocator.free(h.name);
|
allocator.free(h.name);
|
||||||
|
@ -812,7 +859,7 @@ fn canonicalHeaders(allocator: std.mem.Allocator, headers: []base.Header, servic
|
||||||
try dest.append(.{ .name = n, .value = v });
|
try dest.append(.{ .name = n, .value = v });
|
||||||
}
|
}
|
||||||
|
|
||||||
std.sort.pdq(base.Header, dest.items, {}, lessThan);
|
std.sort.pdq(std.http.Header, dest.items, {}, lessThan);
|
||||||
|
|
||||||
var dest_str = try std.ArrayList(u8).initCapacity(allocator, total_len);
|
var dest_str = try std.ArrayList(u8).initCapacity(allocator, total_len);
|
||||||
defer dest_str.deinit();
|
defer dest_str.deinit();
|
||||||
|
@ -860,7 +907,7 @@ fn canonicalHeaderValue(allocator: std.mem.Allocator, value: []const u8) ![]cons
|
||||||
_ = allocator.resize(rc, rc_inx);
|
_ = allocator.resize(rc, rc_inx);
|
||||||
return rc[0..rc_inx];
|
return rc[0..rc_inx];
|
||||||
}
|
}
|
||||||
fn lessThan(context: void, lhs: base.Header, rhs: base.Header) bool {
|
fn lessThan(context: void, lhs: std.http.Header, rhs: std.http.Header) bool {
|
||||||
_ = context;
|
_ = context;
|
||||||
return std.ascii.lessThanIgnoreCase(lhs.name, rhs.name);
|
return std.ascii.lessThanIgnoreCase(lhs.name, rhs.name);
|
||||||
}
|
}
|
||||||
|
@ -912,7 +959,7 @@ test "canonical query" {
|
||||||
}
|
}
|
||||||
test "canonical headers" {
|
test "canonical headers" {
|
||||||
const allocator = std.testing.allocator;
|
const allocator = std.testing.allocator;
|
||||||
var headers = try std.ArrayList(base.Header).initCapacity(allocator, 5);
|
var headers = try std.ArrayList(std.http.Header).initCapacity(allocator, 5);
|
||||||
defer headers.deinit();
|
defer headers.deinit();
|
||||||
try headers.append(.{ .name = "Host", .value = "iam.amazonaws.com" });
|
try headers.append(.{ .name = "Host", .value = "iam.amazonaws.com" });
|
||||||
try headers.append(.{ .name = "Content-Type", .value = "application/x-www-form-urlencoded; charset=utf-8" });
|
try headers.append(.{ .name = "Content-Type", .value = "application/x-www-form-urlencoded; charset=utf-8" });
|
||||||
|
@ -937,7 +984,7 @@ test "canonical headers" {
|
||||||
|
|
||||||
test "canonical request" {
|
test "canonical request" {
|
||||||
const allocator = std.testing.allocator;
|
const allocator = std.testing.allocator;
|
||||||
var headers = try std.ArrayList(base.Header).initCapacity(allocator, 5);
|
var headers = try std.ArrayList(std.http.Header).initCapacity(allocator, 5);
|
||||||
defer headers.deinit();
|
defer headers.deinit();
|
||||||
try headers.append(.{ .name = "User-agent", .value = "c sdk v1.0" });
|
try headers.append(.{ .name = "User-agent", .value = "c sdk v1.0" });
|
||||||
// In contrast to AWS CRT (aws-c-auth), we add the date as part of the
|
// In contrast to AWS CRT (aws-c-auth), we add the date as part of the
|
||||||
|
@ -997,7 +1044,7 @@ test "can sign" {
|
||||||
// [debug] (awshttp): Content-Length: 43
|
// [debug] (awshttp): Content-Length: 43
|
||||||
|
|
||||||
const allocator = std.testing.allocator;
|
const allocator = std.testing.allocator;
|
||||||
var headers = try std.ArrayList(base.Header).initCapacity(allocator, 5);
|
var headers = try std.ArrayList(std.http.Header).initCapacity(allocator, 5);
|
||||||
defer headers.deinit();
|
defer headers.deinit();
|
||||||
try headers.append(.{ .name = "Content-Type", .value = "application/x-www-form-urlencoded; charset=utf-8" });
|
try headers.append(.{ .name = "Content-Type", .value = "application/x-www-form-urlencoded; charset=utf-8" });
|
||||||
try headers.append(.{ .name = "Content-Length", .value = "13" });
|
try headers.append(.{ .name = "Content-Length", .value = "13" });
|
||||||
|
@ -1054,34 +1101,39 @@ test "can verify server request" {
|
||||||
test_credential = Credentials.init(allocator, access_key, secret_key, null);
|
test_credential = Credentials.init(allocator, access_key, secret_key, null);
|
||||||
defer test_credential.?.deinit();
|
defer test_credential.?.deinit();
|
||||||
|
|
||||||
var headers = std.http.Headers.init(allocator);
|
const req =
|
||||||
defer headers.deinit();
|
"PUT /mysfitszj3t6webstack-hostingbucketa91a61fe-1ep3ezkgwpxr0/i/am/a/teapot/foo?x-id=PutObject HTTP/1.1\r\n" ++
|
||||||
try headers.append("Connection", "keep-alive");
|
"Connection: keep-alive\r\n" ++
|
||||||
try headers.append("Accept-Encoding", "gzip, deflate, zstd");
|
"Accept-Encoding: gzip, deflate, zstd\r\n" ++
|
||||||
try headers.append("TE", "gzip, deflate, trailers");
|
"TE: gzip, deflate, trailers\r\n" ++
|
||||||
try headers.append("Accept", "application/json");
|
"Accept: application/json\r\n" ++
|
||||||
try headers.append("Host", "127.0.0.1");
|
"Host: 127.0.0.1\r\n" ++
|
||||||
try headers.append("User-Agent", "zig-aws 1.0");
|
"User-Agent: zig-aws 1.0\r\n" ++
|
||||||
try headers.append("Content-Type", "text/plain");
|
"Content-Type: text/plain\r\n" ++
|
||||||
try headers.append("x-amz-storage-class", "STANDARD");
|
"x-amz-storage-class: STANDARD\r\n" ++
|
||||||
try headers.append("Content-Length", "3");
|
"Content-Length: 3\r\n" ++
|
||||||
try headers.append("X-Amz-Date", "20230908T170252Z");
|
"X-Amz-Date: 20230908T170252Z\r\n" ++
|
||||||
try headers.append("x-amz-content-sha256", "fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9");
|
"x-amz-content-sha256: fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9\r\n" ++
|
||||||
try headers.append("Authorization", "AWS4-HMAC-SHA256 Credential=ACCESS/20230908/us-west-2/s3/aws4_request, SignedHeaders=accept;content-length;content-type;host;x-amz-content-sha256;x-amz-date;x-amz-storage-class, Signature=fcc43ce73a34c9bd1ddf17e8a435f46a859812822f944f9eeb2aabcd64b03523");
|
"Authorization: AWS4-HMAC-SHA256 Credential=ACCESS/20230908/us-west-2/s3/aws4_request, SignedHeaders=accept;content-length;content-type;host;x-amz-content-sha256;x-amz-date;x-amz-storage-class, Signature=fcc43ce73a34c9bd1ddf17e8a435f46a859812822f944f9eeb2aabcd64b03523\r\n\r\nbar";
|
||||||
|
var read_buffer: [1024]u8 = undefined;
|
||||||
var buf = "bar".*;
|
@memcpy(read_buffer[0..req.len], req);
|
||||||
var fis = std.io.fixedBufferStream(&buf);
|
var server: std.http.Server = .{
|
||||||
const request = std.http.Server.Request{
|
.connection = undefined,
|
||||||
.method = std.http.Method.PUT,
|
.state = .ready,
|
||||||
.target = "/mysfitszj3t6webstack-hostingbucketa91a61fe-1ep3ezkgwpxr0/i/am/a/teapot/foo?x-id=PutObject",
|
.read_buffer = &read_buffer,
|
||||||
.version = .@"HTTP/1.1",
|
.read_buffer_len = req.len,
|
||||||
.content_length = 3,
|
.next_request_start = 0,
|
||||||
.headers = headers,
|
};
|
||||||
.parser = std.http.protocol.HeadersParser.initDynamic(std.math.maxInt(usize)),
|
var request: std.http.Server.Request = .{
|
||||||
|
.server = &server,
|
||||||
|
.head_end = req.len - 3,
|
||||||
|
.head = try std.http.Server.Request.Head.parse(read_buffer[0 .. req.len - 3]),
|
||||||
|
.reader_state = undefined,
|
||||||
};
|
};
|
||||||
|
|
||||||
// std.testing.log_level = .debug;
|
// std.testing.log_level = .debug;
|
||||||
try std.testing.expect(try verifyServerRequest(allocator, request, fis.reader(), struct {
|
var fbs = std.io.fixedBufferStream("bar");
|
||||||
|
try std.testing.expect(try verifyServerRequest(allocator, &request, fbs.reader(), struct {
|
||||||
cred: Credentials,
|
cred: Credentials,
|
||||||
|
|
||||||
const Self = @This();
|
const Self = @This();
|
||||||
|
@ -1091,3 +1143,105 @@ test "can verify server request" {
|
||||||
}
|
}
|
||||||
}.getCreds));
|
}.getCreds));
|
||||||
}
|
}
|
||||||
|
test "can verify server request without x-amz-content-sha256" {
|
||||||
|
const allocator = std.testing.allocator;
|
||||||
|
|
||||||
|
const access_key = try allocator.dupe(u8, "ACCESS");
|
||||||
|
const secret_key = try allocator.dupe(u8, "SECRET");
|
||||||
|
test_credential = Credentials.init(allocator, access_key, secret_key, null);
|
||||||
|
defer test_credential.?.deinit();
|
||||||
|
|
||||||
|
const head =
|
||||||
|
"POST / HTTP/1.1\r\n" ++
|
||||||
|
"Connection: keep-alive\r\n" ++
|
||||||
|
"Accept-Encoding: gzip, deflate, zstd\r\n" ++
|
||||||
|
"TE: gzip, deflate, trailers\r\n" ++
|
||||||
|
"Accept: application/json\r\n" ++
|
||||||
|
"X-Amz-Target: DynamoDB_20120810.CreateTable\r\n" ++
|
||||||
|
"Host: dynamodb.us-west-2.amazonaws.com\r\n" ++
|
||||||
|
"User-Agent: zig-aws 1.0\r\n" ++
|
||||||
|
"Content-Type: application/x-amz-json-1.0\r\n" ++
|
||||||
|
"Content-Length: 403\r\n" ++
|
||||||
|
"X-Amz-Date: 20240224T154944Z\r\n" ++
|
||||||
|
"x-amz-content-sha256: fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9\r\n" ++
|
||||||
|
"Authorization: AWS4-HMAC-SHA256 Credential=ACCESS/20240224/us-west-2/dynamodb/aws4_request, SignedHeaders=content-type;host;x-amz-date;x-amz-target, Signature=8fd23dc7dbcb36c4aa54207a7118f8b9fcd680da73a0590b498e9577ff68ec33\r\n\r\n";
|
||||||
|
const body =
|
||||||
|
\\{"AttributeDefinitions": [{"AttributeName": "Artist", "AttributeType": "S"}, {"AttributeName": "SongTitle", "AttributeType": "S"}], "TableName": "MusicCollection", "KeySchema": [{"AttributeName": "Artist", "KeyType": "HASH"}, {"AttributeName": "SongTitle", "KeyType": "RANGE"}], "ProvisionedThroughput": {"ReadCapacityUnits": 5, "WriteCapacityUnits": 5}, "Tags": [{"Key": "Owner", "Value": "blueTeam"}]}
|
||||||
|
;
|
||||||
|
const req_data = head ++ body;
|
||||||
|
var read_buffer: [2048]u8 = undefined;
|
||||||
|
@memcpy(read_buffer[0..req_data.len], req_data);
|
||||||
|
var server: std.http.Server = .{
|
||||||
|
.connection = undefined,
|
||||||
|
.state = .ready,
|
||||||
|
.read_buffer = &read_buffer,
|
||||||
|
.read_buffer_len = req_data.len,
|
||||||
|
.next_request_start = 0,
|
||||||
|
};
|
||||||
|
var request: std.http.Server.Request = .{
|
||||||
|
.server = &server,
|
||||||
|
.head_end = head.len,
|
||||||
|
.head = try std.http.Server.Request.Head.parse(read_buffer[0..head.len]),
|
||||||
|
.reader_state = undefined,
|
||||||
|
};
|
||||||
|
{
|
||||||
|
var h = std.ArrayList(std.http.Header).init(allocator);
|
||||||
|
defer h.deinit();
|
||||||
|
const signed_headers = &[_][]const u8{ "content-type", "host", "x-amz-date", "x-amz-target" };
|
||||||
|
var it = request.iterateHeaders();
|
||||||
|
while (it.next()) |source| {
|
||||||
|
var match = false;
|
||||||
|
for (signed_headers) |s| {
|
||||||
|
match = std.ascii.eqlIgnoreCase(s, source.name);
|
||||||
|
if (match) break;
|
||||||
|
}
|
||||||
|
if (match) try h.append(.{ .name = source.name, .value = source.value });
|
||||||
|
}
|
||||||
|
const req = base.Request{
|
||||||
|
.path = "/",
|
||||||
|
.method = "POST",
|
||||||
|
.headers = h.items,
|
||||||
|
};
|
||||||
|
const body_hash = try hash(allocator, body, .sha256);
|
||||||
|
defer allocator.free(body_hash);
|
||||||
|
try std.testing.expectEqualStrings("ebc5118b053c75178df0aa1f10d0443f5efb527a5589df943635834016c9b3bc", body_hash);
|
||||||
|
const canonical_request = try createCanonicalRequest(allocator, req, body_hash, .{
|
||||||
|
.region = "us-west-2",
|
||||||
|
.service = "dynamodb", // service
|
||||||
|
.credentials = test_credential.?,
|
||||||
|
.signing_time = 1708789784, // 20240224T154944Z (https://www.unixtimestamp.com)
|
||||||
|
});
|
||||||
|
defer allocator.free(canonical_request.arr);
|
||||||
|
defer allocator.free(canonical_request.hash);
|
||||||
|
defer allocator.free(canonical_request.headers.str);
|
||||||
|
defer allocator.free(canonical_request.headers.signed_headers);
|
||||||
|
// Canonical request:
|
||||||
|
const expected =
|
||||||
|
\\POST
|
||||||
|
\\/
|
||||||
|
\\
|
||||||
|
\\content-type:application/x-amz-json-1.0
|
||||||
|
\\host:dynamodb.us-west-2.amazonaws.com
|
||||||
|
\\x-amz-date:20240224T154944Z
|
||||||
|
\\x-amz-target:DynamoDB_20120810.CreateTable
|
||||||
|
\\
|
||||||
|
\\content-type;host;x-amz-date;x-amz-target
|
||||||
|
\\ebc5118b053c75178df0aa1f10d0443f5efb527a5589df943635834016c9b3bc
|
||||||
|
;
|
||||||
|
try std.testing.expectEqualStrings(expected, canonical_request.arr);
|
||||||
|
}
|
||||||
|
|
||||||
|
{ // verification
|
||||||
|
var fis = std.io.fixedBufferStream(body[0..]);
|
||||||
|
|
||||||
|
try std.testing.expect(try verifyServerRequest(allocator, &request, fis.reader(), struct {
|
||||||
|
cred: Credentials,
|
||||||
|
|
||||||
|
const Self = @This();
|
||||||
|
fn getCreds(access: []const u8) ?Credentials {
|
||||||
|
if (std.mem.eql(u8, access, "ACCESS")) return test_credential.?;
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}.getCreds));
|
||||||
|
}
|
||||||
|
}
|
||||||
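
Aside: the test rewrite above tracks the Zig 0.12 `std.http.Server` redesign: a request is now parsed out of a caller-owned read buffer via `std.http.Server.Request.Head.parse` instead of being assembled from a `std.http.Headers` map. A minimal sketch of that parsing step; the `method` and `target` field names are assumed from the 0.12-era std, as used above:

    const std = @import("std");

    test "parse a raw request head" {
        // Head.parse expects the complete head, terminated by \r\n\r\n.
        const raw = "PUT /bucket/key?x-id=PutObject HTTP/1.1\r\n" ++
            "Host: 127.0.0.1\r\n\r\n";
        const head = try std.http.Server.Request.Head.parse(raw);
        try std.testing.expectEqual(std.http.Method.PUT, head.method);
        try std.testing.expectEqualStrings("/bucket/key?x-id=PutObject", head.target);
    }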

(deleted file)
@@ -1,155 +0,0 @@
-const std = @import("std");
-const Uri = std.Uri;
-
-///////////////////////////////////////////////////////////////////////////
-/// This function imported from:
-/// https://github.com/ziglang/zig/blob/0.11.0/lib/std/http/Client.zig#L538-L636
-///
-/// The first commit of this file will be unchanged from 0.11.0 to more
-/// clearly indicate changes moving forward. The plan is to change
-/// only the two w.print lines for req.uri 16 and 18 lines down from this comment
-///////////////////////////////////////////////////////////////////////////
-/// Send the request to the server.
-pub fn start(req: *std.http.Client.Request) std.http.Client.Request.StartError!void {
-var buffered = std.io.bufferedWriter(req.connection.?.data.writer());
-const w = buffered.writer();
-
-try w.writeAll(@tagName(req.method));
-try w.writeByte(' ');
-
-if (req.method == .CONNECT) {
-try w.writeAll(req.uri.host.?);
-try w.writeByte(':');
-try w.print("{}", .{req.uri.port.?});
-} else if (req.connection.?.data.proxied) {
-// proxied connections require the full uri
-try format(req.uri, "+/", .{}, w);
-} else {
-try format(req.uri, "/", .{}, w);
-}
-
-try w.writeByte(' ');
-try w.writeAll(@tagName(req.version));
-try w.writeAll("\r\n");
-
-if (!req.headers.contains("host")) {
-try w.writeAll("Host: ");
-try w.writeAll(req.uri.host.?);
-try w.writeAll("\r\n");
-}
-
-if (!req.headers.contains("user-agent")) {
-try w.writeAll("User-Agent: zig/");
-try w.writeAll(@import("builtin").zig_version_string);
-try w.writeAll(" (std.http)\r\n");
-}
-
-if (!req.headers.contains("connection")) {
-try w.writeAll("Connection: keep-alive\r\n");
-}
-
-if (!req.headers.contains("accept-encoding")) {
-try w.writeAll("Accept-Encoding: gzip, deflate, zstd\r\n");
-}
-
-if (!req.headers.contains("te")) {
-try w.writeAll("TE: gzip, deflate, trailers\r\n");
-}
-
-const has_transfer_encoding = req.headers.contains("transfer-encoding");
-const has_content_length = req.headers.contains("content-length");
-
-if (!has_transfer_encoding and !has_content_length) {
-switch (req.transfer_encoding) {
-.chunked => try w.writeAll("Transfer-Encoding: chunked\r\n"),
-.content_length => |content_length| try w.print("Content-Length: {d}\r\n", .{content_length}),
-.none => {},
-}
-} else {
-if (has_content_length) {
-const content_length = std.fmt.parseInt(u64, req.headers.getFirstValue("content-length").?, 10) catch return error.InvalidContentLength;
-
-req.transfer_encoding = .{ .content_length = content_length };
-} else if (has_transfer_encoding) {
-const transfer_encoding = req.headers.getFirstValue("transfer-encoding").?;
-if (std.mem.eql(u8, transfer_encoding, "chunked")) {
-req.transfer_encoding = .chunked;
-} else {
-return error.UnsupportedTransferEncoding;
-}
-} else {
-req.transfer_encoding = .none;
-}
-}
-
-try w.print("{}", .{req.headers});
-
-try w.writeAll("\r\n");
-
-try buffered.flush();
-}
-
-///////////////////////////////////////////////////////////////////////////
-/// This function imported from:
-/// https://github.com/ziglang/zig/blob/0.11.0/lib/std/Uri.zig#L209-L264
-///
-/// The first commit of this file will be unchanged from 0.11.0 to more
-/// clearly indicate changes moving forward. The plan is to change
-/// only the writeEscapedPath call 42 lines down from this comment
-///////////////////////////////////////////////////////////////////////////
-pub fn format(
-uri: Uri,
-comptime fmt: []const u8,
-options: std.fmt.FormatOptions,
-writer: anytype,
-) @TypeOf(writer).Error!void {
-_ = options;
-
-const needs_absolute = comptime std.mem.indexOf(u8, fmt, "+") != null;
-const needs_path = comptime std.mem.indexOf(u8, fmt, "/") != null or fmt.len == 0;
-const needs_fragment = comptime std.mem.indexOf(u8, fmt, "#") != null;
-
-if (needs_absolute) {
-try writer.writeAll(uri.scheme);
-try writer.writeAll(":");
-if (uri.host) |host| {
-try writer.writeAll("//");
-
-if (uri.user) |user| {
-try writer.writeAll(user);
-if (uri.password) |password| {
-try writer.writeAll(":");
-try writer.writeAll(password);
-}
-try writer.writeAll("@");
-}
-
-try writer.writeAll(host);
-
-if (uri.port) |port| {
-try writer.writeAll(":");
-try std.fmt.formatInt(port, 10, .lower, .{}, writer);
-}
-}
-}
-
-if (needs_path) {
-if (uri.path.len == 0) {
-try writer.writeAll("/");
-} else {
-try writer.writeAll(uri.path); // do not mess with our path
-}
-
-if (uri.query) |q| {
-try writer.writeAll("?");
-try Uri.writeEscapedQuery(writer, q);
-}
-
-if (needs_fragment) {
-if (uri.fragment) |f| {
-try writer.writeAll("#");
-try Uri.writeEscapedQuery(writer, f);
-}
-}
-}
-}

src/json.zig (16 lines changed)
@@ -1762,7 +1762,7 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
 var r: T = undefined;
 const source_slice = stringToken.slice(tokens.slice, tokens.i - 1);
 switch (stringToken.escapes) {
-.None => mem.copy(u8, &r, source_slice),
+.None => @memcpy(&r, source_slice),
 .Some => try unescapeValidString(&r, source_slice),
 }
 return r;
@@ -2019,7 +2019,7 @@ test "parse into tagged union" {
 }

 { // failing allocations should be bubbled up instantly without trying next member
-var fail_alloc = testing.FailingAllocator.init(testing.allocator, 0);
+var fail_alloc = testing.FailingAllocator.init(testing.allocator, .{ .fail_index = 0 });
 const options = ParseOptions{ .allocator = fail_alloc.allocator() };
 const T = union(enum) {
 // both fields here match the input
@@ -2067,7 +2067,7 @@ test "parse union bubbles up AllocatorRequired" {
 }

 test "parseFree descends into tagged union" {
-var fail_alloc = testing.FailingAllocator.init(testing.allocator, 1);
+var fail_alloc = testing.FailingAllocator.init(testing.allocator, .{ .fail_index = 1 });
 const options = ParseOptions{ .allocator = fail_alloc.allocator() };
 const T = union(enum) {
 int: i32,
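
Aside: `std.testing.FailingAllocator.init` now takes an options struct rather than a bare failing index. A minimal sketch of the new call, assuming 0.12-era semantics (the allocation at `fail_index` returns `error.OutOfMemory`):

    const std = @import("std");

    test "first allocation fails" {
        var fail_alloc = std.testing.FailingAllocator.init(std.testing.allocator, .{ .fail_index = 0 });
        const allocator = fail_alloc.allocator();
        // fail_index = 0 means the very first allocation fails.
        try std.testing.expectError(error.OutOfMemory, allocator.alloc(u8, 16));
    }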
@@ -2827,14 +2827,14 @@ pub fn stringify(
 }
 },
 .Enum => {
-if (comptime std.meta.trait.hasFn("jsonStringify")(T)) {
+if (comptime std.meta.hasFn(T, "jsonStringify")) {
 return value.jsonStringify(options, out_stream);
 }

 @compileError("Unable to stringify enum '" ++ @typeName(T) ++ "'");
 },
 .Union => {
-if (comptime std.meta.trait.hasFn("jsonStringify")(T)) {
+if (comptime std.meta.hasFn(T, "jsonStringify")) {
 return value.jsonStringify(options, out_stream);
 }

@@ -2850,7 +2850,7 @@ pub fn stringify(
 }
 },
 .Struct => |S| {
-if (comptime std.meta.trait.hasFn("jsonStringify")(T)) {
+if (comptime std.meta.hasFn(T, "jsonStringify")) {
 return value.jsonStringify(options, out_stream);
 }

@@ -2874,11 +2874,11 @@ pub fn stringify(
 try child_whitespace.outputIndent(out_stream);
 }
 var field_written = false;
-if (comptime std.meta.trait.hasFn("jsonStringifyField")(T))
+if (comptime std.meta.hasFn(T, "jsonStringifyField"))
 field_written = try value.jsonStringifyField(Field.name, child_options, out_stream);

 if (!field_written) {
-if (comptime std.meta.trait.hasFn("fieldNameFor")(T)) {
+if (comptime std.meta.hasFn(T, "fieldNameFor")) {
 const name = value.fieldNameFor(Field.name);
 try stringify(name, options, out_stream);
 } else {
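
Aside: `std.meta.trait` was dropped from the std library, so `std.meta.trait.hasFn("name")(T)` becomes `std.meta.hasFn(T, "name")` with the type first. A small sketch of the replacement; the `Widget` type is illustrative:

    const std = @import("std");

    const Widget = struct {
        pub fn jsonStringify(self: @This(), out_stream: anytype) !void {
            _ = self;
            try out_stream.writeAll("\"widget\"");
        }
    };

    comptime {
        // hasFn checks for a public function declaration by name.
        std.debug.assert(std.meta.hasFn(Widget, "jsonStringify"));
        std.debug.assert(!std.meta.hasFn(Widget, "jsonParse"));
    }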
src/main.zig (15 lines changed)
@@ -38,8 +38,8 @@ pub fn log(
 nosuspend stderr.print(prefix ++ format ++ "\n", args) catch return;
 }

-pub const std_options = struct {
-pub const logFn = log;
+pub const std_options = std.Options{
+.logFn = log,
 };
 const Tests = enum {
 query_no_input,
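
Aside: since Zig 0.12, `std_options` is a value of type `std.Options` rather than a struct of public declarations, which is what the hunk above migrates. A minimal sketch; the `log_level` field and `myLog` body are illustrative, and unset fields keep their defaults:

    const std = @import("std");

    pub const std_options = std.Options{
        .logFn = myLog,
        .log_level = .info,
    };

    fn myLog(
        comptime level: std.log.Level,
        comptime scope: @Type(.EnumLiteral),
        comptime format: []const u8,
        args: anytype,
    ) void {
        _ = level;
        _ = scope;
        std.debug.print(format ++ "\n", args);
    }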
@@ -71,7 +71,7 @@ pub fn main() anyerror!void {
 defer bw.flush() catch unreachable;
 const stdout = bw.writer();
 var arg0: ?[]const u8 = null;
-var proxy: ?std.http.Client.HttpProxy = null;
+var proxy: ?std.http.Client.Proxy = null;
 while (args.next()) |arg| {
 if (arg0 == null) arg0 = arg;
 if (std.mem.eql(u8, "-h", arg) or std.mem.eql(u8, "--help", arg)) {
@@ -353,17 +353,22 @@ pub fn main() anyerror!void {
 std.log.info("===== Tests complete =====", .{});
 }

-fn proxyFromString(string: []const u8) !std.http.Client.HttpProxy {
-var rc = std.http.Client.HttpProxy{
+fn proxyFromString(string: []const u8) !std.http.Client.Proxy {
+var rc = std.http.Client.Proxy{
 .protocol = undefined,
 .host = undefined,
+.authorization = null,
+.port = undefined,
+.supports_connect = true, // TODO: Is this a good default?
 };
 var remaining: []const u8 = string;
 if (std.mem.startsWith(u8, string, "http://")) {
 remaining = remaining["http://".len..];
 rc.protocol = .plain;
+rc.port = 80;
 } else if (std.mem.startsWith(u8, string, "https://")) {
 remaining = remaining["https://".len..];
+rc.port = 443;
 rc.protocol = .tls;
 } else return error.InvalidScheme;
 var split_iterator = std.mem.split(u8, remaining, ":");
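
Aside: `std.http.Client.HttpProxy` became `std.http.Client.Proxy` in 0.12, and the struct gained `authorization`, `port`, and `supports_connect` fields, which is why the parser above now fills in scheme-appropriate default ports. A hypothetical check against the function as shown, assuming no port appears in the URL so the scheme default survives:

    test "scheme defaults from proxyFromString" {
        const proxy = try proxyFromString("http://localhost");
        std.debug.assert(proxy.protocol == .plain);
        std.debug.assert(proxy.port == 80); // default filled in for http://
    }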

@@ -21,7 +21,7 @@ pub fn Services(comptime service_imports: anytype) type {
 // finally, generate the type
 return @Type(.{
 .Struct = .{
-.layout = .Auto,
+.layout = .Auto, // will be .auto in the future
 .fields = &fields,
 .decls = &[_]std.builtin.Type.Declaration{},
 .is_tuple = false,

@@ -1,15 +1,14 @@
 const std = @import("std");

-fn defaultTransformer(allocator: std.mem.Allocator, field_name: []const u8, options: EncodingOptions) anyerror![]const u8 {
-_ = options;
+fn defaultTransformer(allocator: std.mem.Allocator, field_name: []const u8) anyerror![]const u8 {
 _ = allocator;
 return field_name;
 }

-pub const fieldNameTransformerFn = *const fn (std.mem.Allocator, []const u8, EncodingOptions) anyerror![]const u8;
+pub const fieldNameTransformerFn = *const fn (std.mem.Allocator, []const u8) anyerror![]const u8;

 pub const EncodingOptions = struct {
-field_name_transformer: fieldNameTransformerFn = &defaultTransformer,
+field_name_transformer: fieldNameTransformerFn = defaultTransformer,
 };

 pub fn encode(allocator: std.mem.Allocator, obj: anytype, writer: anytype, comptime options: EncodingOptions) !void {
@@ -26,7 +25,7 @@ fn encodeStruct(
 ) !bool {
 var rc = first;
 inline for (@typeInfo(@TypeOf(obj)).Struct.fields) |field| {
-const field_name = try options.field_name_transformer(allocator, field.name, options);
+const field_name = try options.field_name_transformer(allocator, field.name);
 defer if (options.field_name_transformer.* != defaultTransformer)
 allocator.free(field_name);
 // @compileLog(@typeInfo(field.field_type).Pointer);
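
Aside: field-name transformers now receive only the allocator and the field name; the `EncodingOptions` argument is gone from the signature. A sketch of a custom transformer under the new shape (the uppercase transform is purely illustrative):

    const std = @import("std");

    fn upperTransformer(allocator: std.mem.Allocator, field_name: []const u8) anyerror![]const u8 {
        // Non-default transformers may allocate; encode() frees the result.
        return try std.ascii.allocUpperString(allocator, field_name);
    }

    const options = EncodingOptions{ .field_name_transformer = upperTransformer };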

@@ -4,6 +4,8 @@ const date = @import("date.zig");

 const log = std.log.scoped(.xml_shaper);

+pub const Element = xml.Element;
+
 pub fn Parsed(comptime T: type) type {
 return struct {
 // Forcing an arean allocator isn't my favorite choice here, but
@@ -70,6 +72,8 @@ fn deinitObject(allocator: std.mem.Allocator, obj: anytype) void {
 pub const ParseOptions = struct {
 allocator: ?std.mem.Allocator = null,
 match_predicate_ptr: ?*const fn (a: []const u8, b: []const u8, options: xml.PredicateOptions) anyerror!bool = null,
+/// defines a function to use to locate an element other than the root of the document for parsing
+elementToParse: ?*const fn (element: *Element, options: ParseOptions) *Element = null,
 };

 pub fn parse(comptime T: type, source: []const u8, options: ParseOptions) !Parsed(T) {
@@ -86,7 +90,8 @@ pub fn parse(comptime T: type, source: []const u8, options: ParseOptions) !Parse
 .match_predicate_ptr = options.match_predicate_ptr,
 };

-return Parsed(T).init(arena_allocator, try parseInternal(T, parsed.root, opts), parsed);
+const root = if (options.elementToParse) |e| e(parsed.root, opts) else parsed.root;
+return Parsed(T).init(arena_allocator, try parseInternal(T, root, opts), parsed);
 }

 fn parseInternal(comptime T: type, element: *xml.Element, options: ParseOptions) !T {
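
Aside: the new `elementToParse` hook lets a caller redirect parsing to a subtree before field matching begins, as the `parse` hunk above shows. A sketch of a trivial selector with the same signature (the selection logic is illustrative; the repo's `findResult` below is the real-world version):

    fn firstChild(element: *Element, options: ParseOptions) *Element {
        _ = options;
        // Descend one level when the document has a single wrapper node.
        var children = element.elements();
        return children.next() orelse element;
    }

    // const parsed = try parse(MyShape, xml_text, .{ .allocator = allocator, .elementToParse = firstChild });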
@@ -214,9 +219,9 @@ fn parseInternal(comptime T: type, element: *xml.Element, options: ParseOptions)

 log.debug("Processing fields in struct: {s}", .{@typeName(T)});
 inline for (struct_info.fields, 0..) |field, i| {
-var name = field.name;
+var name: []const u8 = field.name;
 var found_value = false;
-if (comptime std.meta.trait.hasFn("fieldNameFor")(T))
+if (comptime std.meta.hasFn(T, "fieldNameFor"))
 name = r.fieldNameFor(field.name);
 log.debug("Field name: {s}, Element: {s}, Adjusted field name: {s}", .{ field.name, element.tag, name });
 var iterator = element.findChildrenByTag(name);
@@ -244,6 +249,7 @@ fn parseInternal(comptime T: type, element: *xml.Element, options: ParseOptions)
 // Zig compiler bug circa 0.9.0. Using "and !found_value"
 // in the if statement above will trigger assertion failure
 if (!found_value) {
+log.debug("Child element not found, but field optional. Setting {s}=null", .{field.name});
 // @compileLog("Optional: Field name ", field.name, ", type ", field.type);
 @field(r, field.name) = null;
 fields_set = fields_set + 1;
@@ -625,12 +631,65 @@ test "can parse something serious" {
 \\</DescribeRegionsResponse>
 ;
 // const ServerResponse = struct { DescribeRegionsResponse: describe_regions.Response, };
-const parsed_data = try parse(describe_regions.Response, data, .{ .allocator = allocator });
+const parsed_data = try parse(describe_regions.Response, data, .{ .allocator = allocator, .elementToParse = findResult });
 defer parsed_data.deinit();
 try testing.expect(parsed_data.parsed_value.regions != null);
 try testing.expectEqualStrings("eu-north-1", parsed_data.parsed_value.regions.?[0].region_name.?);
 try testing.expectEqualStrings("ec2.eu-north-1.amazonaws.com", parsed_data.parsed_value.regions.?[0].endpoint.?);
 }
+const StsGetAccesskeyInfoResponse: type = struct {
+account: ?[]const u8 = null,
+
+pub fn fieldNameFor(_: @This(), comptime field_name: []const u8) []const u8 {
+const mappings = .{
+.account = "Account",
+};
+return @field(mappings, field_name);
+}
+};
+fn findResult(element: *xml.Element, options: ParseOptions) *xml.Element {
+_ = options;
+// We're looking for a very specific pattern here. We want only two direct
+// children. The first one must end with "Result", and the second should
+// be our ResponseMetadata node
+var children = element.elements();
+var found_metadata = false;
+var result_child: ?*xml.Element = null;
+var inx: usize = 0;
+while (children.next()) |child| : (inx += 1) {
+if (std.mem.eql(u8, child.tag, "ResponseMetadata")) {
+found_metadata = true;
+continue;
+}
+if (std.mem.endsWith(u8, child.tag, "Result")) {
+result_child = child;
+continue;
+}
+if (inx > 1) return element;
+return element; // It should only be those two
+}
+return result_child orelse element;
+}
+test "can parse a result within a response" {
+log.debug("", .{});
+
+const allocator = std.testing.allocator;
+const data =
+\\<GetAccessKeyInfoResponse xmlns="https://sts.amazonaws.com/doc/2011-06-15/">
+\\ <GetAccessKeyInfoResult>
+\\ <Account>123456789012</Account>
+\\ </GetAccessKeyInfoResult>
+\\ <ResponseMetadata>
+\\ <RequestId>ec85bf29-1ef0-459a-930e-6446dd14a286</RequestId>
+\\ </ResponseMetadata>
+\\</GetAccessKeyInfoResponse>
+;
+const parsed_data = try parse(StsGetAccesskeyInfoResponse, data, .{ .allocator = allocator, .elementToParse = findResult });
+defer parsed_data.deinit();
+// Response expectations
+try std.testing.expect(parsed_data.parsed_value.account != null);
+try std.testing.expectEqualStrings("123456789012", parsed_data.parsed_value.account.?);
+}

 test "compiler assertion failure 2" {
 // std.testing.log_level = .debug;