Compare commits

75 commits: zig-0.12.0 ... master

Commits (SHA1):

8ac7aa47f7
e194debb96
e0e09fb19e
8421fd9e55
9e8b3a6fc6
34c097e45f
ffe3941dbe
cdaf924867
6c106c1c71
f325ef4236
30d46261b7
86483ec84d
4f16553410
12e24b01ad
220d45ab20
71495a4d1d
303af8661c
8c68dd6902
96e2b7bbc1
acd6589909
78b36e2316
b369c29e84
e3bb4142d6
4313f8585b
e02fb699fc
dfda8e77d6
35fad85c13
88d7e99d6b
0cda404b0a
5aa191c415
370011eb1e
debb4dab60
6240225db2
0adebe10da
0892914c5b
97b784f8e3
4fa30a70cc
9497db373c
3d78705ea5
1e2b3a6759
908c9d2d42
1fdff0bacd
1fe39007c5
c5cb3dde29
f5663fd84d
c056dbb0ff
9e8198cee4
43238a97eb
b048b1193d
f85eb4caf1
0bd583cae0
3b35936ac6
262cdefe12
238952d127
38b51c768b
86877ca264
e5b662873a
a9f99c0205
c1c40644ac
927871c59e
7298c6d3ee
4bfd9cb7bc
3e89ec468a
d84246333c
f558b058e1
e665b94683
dd6a87a034
61592f039d
c8f625068d
ffbbf21303
a659c99350
981d5579f1
3307eb6b8f
332aa1a855
7d80f42a3e
29 changed files with 1125 additions and 436 deletions
.envrc (new file, +8)

@@ -0,0 +1,8 @@
# vi: ft=sh
# shellcheck shell=bash

if ! has zvm_direnv_version || ! zvm_direnv_version 2.0.0; then
  source_url "https://git.lerch.org/lobo/zvm-direnv/raw/tag/2.0.0/direnvrc" "sha256-8Umzxj32hFU6G0a7Wrq0KTNDQ8XEuje2A3s2ljh/hFY="
fi

use zig 0.14.0
.gitea/workflows/build.yaml

@@ -1,10 +1,9 @@
name: AWS-Zig Build
run-name: ${{ github.actor }} building AWS Zig SDK
on:
  workflow_dispatch:
  push:
    branches:
      - '*'
      - '!zig-develop*'
      - 'master'
env:
  ACTIONS_RUNTIME_TOKEN: ${{ secrets.GITHUB_TOKEN }}
  ACTIONS_RUNTIME_URL: ${{ env.GITHUB_SERVER_URL }}/api/actions_pipeline/

@@ -17,13 +16,19 @@ jobs:
      # image: alpine:3.15.0
    steps:
      - name: Check out repository code
        uses: actions/checkout@v3
        uses: actions/checkout@v4
      - name: Setup Zig
        uses: https://git.lerch.org/lobo/setup-zig@v3
        uses: mlugg/setup-zig@v1.2.1
        with:
          version: 0.12.0
          version: 0.14.0
      - name: Run tests
        run: zig build test --verbose
      # Zig build scripts don't have the ability to import depenedencies directly
      # (https://github.com/ziglang/zig/issues/18164). We can allow downstream
      # build scripts to import aws with a few tweaks, but we can't @import("src/aws.zig")
      # until we have our models built. So we have to have the build script
      # basically modified, only during packaging, to allow this use case
      #
      # Zig package manager expects everything to be inside a directory in the archive,
      # which it then strips out on download. So we need to shove everything inside a directory
      # the way GitHub/Gitea does for repo archives

@@ -33,6 +38,7 @@ jobs:
      # should be using git archive, but we need our generated code to be part of it
      - name: Package source code with generated models
        run: |
          sed -i 's#// UNCOMMENT AFTER MODEL GEN TO USE IN BUILD SCRIPTS //##' build.zig
          tar -czf ${{ runner.temp }}/${{ github.sha }}-with-models.tar.gz \
            --format ustar \
            --exclude 'zig-*' \
.gitea/workflows/zig-mach.yaml

@@ -1,16 +1,20 @@
name: aws-zig mach nominated build
run-name: ${{ github.actor }} building AWS Zig SDK
on:
  workflow_dispatch:
  schedule:
    - cron: '0 12 * * *' # noon UTC, 4AM Pacific
  push:
    branches:
      - 'zig-develop*'
      - 'zig-mach'
env:
  ACTIONS_RUNTIME_TOKEN: ${{ secrets.GITHUB_TOKEN }}
  ACTIONS_RUNTIME_URL: ${{ env.GITHUB_SERVER_URL }}/api/actions_pipeline/
  PKG_PREFIX: nominated-zig
jobs:
  build-zig-nominated-mach-latest:
    container:
      # We need CAP_SYS_PTRACE for stack traces due to a regression in 0.14.0
      # TODO: Remove this after https://github.com/ziglang/zig/issues/21815 is
      # addressed
      options: --cap-add CAP_SYS_PTRACE
    runs-on: ubuntu-latest
    # Need to use the default container with node and all that, so we can
    # use JS-based actions like actions/checkout@v3...

@@ -18,13 +22,19 @@ jobs:
      # image: alpine:3.15.0
    steps:
      - name: Check out repository code
        uses: actions/checkout@v3
        uses: actions/checkout@v4
        with:
          ref: zig-develop
          ref: zig-mach
      - name: Setup Zig
        uses: https://git.lerch.org/lobo/setup-zig@v3
        uses: mlugg/setup-zig@v1.2.1
        with:
          version: mach-latest
      - name: Run gen
        run: zig build gen --verbose
      - name: Run smoke test
        run: zig build smoke-test --verbose
      - name: Run full tests
        run: zig build test --verbose
      # Zig package manager expects everything to be inside a directory in the archive,
      # which it then strips out on download. So we need to shove everything inside a directory
      # the way GitHub/Gitea does for repo archives

@@ -34,7 +44,7 @@ jobs:
      # should be using git archive, but we need our generated code to be part of it
      - name: Package source code with generated models
        run: |
          tar -czf ${{ runner.temp }}/${{ github.sha }}-with-models.tar.gz \
          tar -czf ${{ runner.temp }}/${{ github.sha }}${{ env.PKG_PREFIX }}-with-models.tar.gz \
            --format ustar \
            --exclude 'zig-*' \
            --transform 's,^,${{ github.sha }}/,' *

@@ -58,8 +68,8 @@ jobs:
      - name: Publish source code with generated models
        run: |
          curl --user ${{ github.actor }}:${{ secrets.PACKAGE_PUSH }} \
            --upload-file ${{ runner.temp }}/${{ github.sha }}-with-models.tar.gz \
            https://git.lerch.org/api/packages/lobo/generic/aws-sdk-with-models/${{ github.sha }}/${{ github.sha }}-with-models.tar.gz
            --upload-file ${{ runner.temp }}/${{ github.sha }}${{ env.PKG_PREFIX }}-with-models.tar.gz \
            https://git.lerch.org/api/packages/lobo/generic/aws-sdk-with-models/${{ github.sha }}/${{ github.sha }}${{ env.PKG_PREFIX }}-with-models.tar.gz
      - name: Build example
        run: ( cd example && zig build ) # Make sure example builds
      - name: Notify
.gitea/workflows/zig-nightly.yaml

@@ -1,16 +1,20 @@
name: aws-zig nightly build
run-name: ${{ github.actor }} building AWS Zig SDK
on:
  workflow_dispatch:
  schedule:
    - cron: '0 12 30 * *' # 12:30 UTC, 4:30AM Pacific
    - cron: '30 12 * * *' # 12:30 UTC, 4:30AM Pacific
  push:
    branches:
      - 'zig-develop*'
      - 'zig-develop'
env:
  ACTIONS_RUNTIME_TOKEN: ${{ secrets.GITHUB_TOKEN }}
  ACTIONS_RUNTIME_URL: ${{ env.GITHUB_SERVER_URL }}/api/actions_pipeline/
  PKG_PREFIX: nightly-zig
jobs:
  build-zig-nightly:
    container:
      # We need CAP_SYS_PTRACE for stack traces due to a regression in 0.14.0
      # TODO: Remove this after https://github.com/ziglang/zig/issues/21815 is
      # addressed
      options: --cap-add CAP_SYS_PTRACE
    runs-on: ubuntu-latest
    # Need to use the default container with node and all that, so we can
    # use JS-based actions like actions/checkout@v3...

@@ -18,11 +22,11 @@ jobs:
      # image: alpine:3.15.0
    steps:
      - name: Check out repository code
        uses: actions/checkout@v3
        uses: actions/checkout@v4
        with:
          ref: zig-develop
      - name: Setup Zig
        uses: https://git.lerch.org/lobo/setup-zig@v3
        uses: mlugg/setup-zig@v1.2.1
        with:
          version: master
      - name: Run tests

@@ -36,7 +40,7 @@ jobs:
      # should be using git archive, but we need our generated code to be part of it
      - name: Package source code with generated models
        run: |
          tar -czf ${{ runner.temp }}/${{ github.sha }}-with-models.tar.gz \
          tar -czf ${{ runner.temp }}/${{ github.sha }}${{ env.PKG_PREFIX }}-with-models.tar.gz \
            --format ustar \
            --exclude 'zig-*' \
            --transform 's,^,${{ github.sha }}/,' *

@@ -60,8 +64,8 @@ jobs:
      - name: Publish source code with generated models
        run: |
          curl --user ${{ github.actor }}:${{ secrets.PACKAGE_PUSH }} \
            --upload-file ${{ runner.temp }}/${{ github.sha }}-with-models.tar.gz \
            https://git.lerch.org/api/packages/lobo/generic/aws-sdk-with-models/${{ github.sha }}/${{ github.sha }}-with-models.tar.gz
            --upload-file ${{ runner.temp }}/${{ github.sha }}${{ env.PKG_PREFIX }}-with-models.tar.gz \
            https://git.lerch.org/api/packages/lobo/generic/aws-sdk-with-models/${{ github.sha }}/${{ github.sha }}${{ env.PKG_PREFIX }}-with-models.tar.gz
      - name: Build example
        run: ( cd example && zig build ) # Make sure example builds
      - name: Notify
.gitea/workflows/zig-previous.yaml (new file, +84)

@@ -0,0 +1,84 @@
name: AWS-Zig Build
on:
  workflow_dispatch:
  push:
    branches:
      - 'zig-0.13'
env:
  ACTIONS_RUNTIME_TOKEN: ${{ secrets.GITHUB_TOKEN }}
  ACTIONS_RUNTIME_URL: ${{ env.GITHUB_SERVER_URL }}/api/actions_pipeline/
jobs:
  build-zig-amd64-host:
    runs-on: ubuntu-latest
    # Need to use the default container with node and all that, so we can
    # use JS-based actions like actions/checkout@v3...
    # container:
    #   image: alpine:3.15.0
    steps:
      - name: Check out repository code
        uses: actions/checkout@v4
        with:
          ref: zig-0.13
      - name: Setup Zig
        uses: mlugg/setup-zig@v1.2.1
        with:
          version: 0.13.0
      - name: Run tests
        run: zig build test --verbose
      # Zig build scripts don't have the ability to import depenedencies directly
      # (https://github.com/ziglang/zig/issues/18164). We can allow downstream
      # build scripts to import aws with a few tweaks, but we can't @import("src/aws.zig")
      # until we have our models built. So we have to have the build script
      # basically modified, only during packaging, to allow this use case
      #
      # Zig package manager expects everything to be inside a directory in the archive,
      # which it then strips out on download. So we need to shove everything inside a directory
      # the way GitHub/Gitea does for repo archives
      #
      # Also, zig tar process doesn't handle gnu format for long names, nor does it seam to
      # handle posix long name semantics cleanly either. ustar works. This
      # should be using git archive, but we need our generated code to be part of it
      - name: Package source code with generated models
        run: |
          sed -i 's#// UNCOMMENT AFTER MODEL GEN TO USE IN BUILD SCRIPTS //##' build.zig
          tar -czf ${{ runner.temp }}/${{ github.sha }}-with-models.tar.gz \
            --format ustar \
            --exclude 'zig-*' \
            *
          # Something in this PR broke this transform. I don't mind removing it, but
          # the PR attempts to handle situations with or without a prefix, but it
          # doesn't. I have not yet determined what the problem is, though
          # https://github.com/ziglang/zig/pull/19111/files
          # --transform 's,^,${{ github.sha }}/,' *
      # - name: Sign
      #   id: sign
      #   uses: https://git.lerch.org/lobo/action-hsm-sign@v1
      #   with:
      #     pin: ${{ secrets.HSM_USER_PIN }}
      #     files: ???
      #     public_key: 'https://emil.lerch.org/serverpublic.pem'
      # - run: |
      #     echo "Source 0 should be ./bar: ${{ steps.sign.outputs.SOURCE_0 }}"
      # - run: |
      #     echo "Signature 0 should be ./bar.sig: ${{ steps.sign.outputs.SIG_0 }}"
      # - run: echo "URL of bar (0) is ${{ steps.sign.outputs.URL_0 }}"
      # - run: |
      #     echo "Source 1 should be ./foo: ${{ steps.sign.outputs.SOURCE_1 }}"
      # - run: |
      #     echo "Signature 1 should be ./foo.sig: ${{ steps.sign.outputs.SIG_1 }}"
      # - run: echo "URL of foo (1) is ${{ steps.sign.outputs.URL_1 }}"
      - name: Publish source code with generated models
        run: |
          curl --user ${{ github.actor }}:${{ secrets.PACKAGE_PUSH }} \
            --upload-file ${{ runner.temp }}/${{ github.sha }}-with-models.tar.gz \
            https://git.lerch.org/api/packages/lobo/generic/aws-sdk-with-models/${{ github.sha }}/${{ github.sha }}-with-models.tar.gz
      - name: Build example
        run: ( cd example && zig build ) # Make sure example builds
      - name: Notify
        uses: https://git.lerch.org/lobo/action-notify-ntfy@v2
        if: always()
        with:
          host: ${{ secrets.NTFY_HOST }}
          topic: ${{ secrets.NTFY_TOPIC }}
          user: ${{ secrets.NTFY_USER }}
          password: ${{ secrets.NTFY_PASSWORD }}
.github/workflows/build.yaml (vendored, 25 lines changed)

@@ -1,30 +1,19 @@
name: AWS-Zig Build
run-name: ${{ github.actor }} building AWS Zig SDK
name: Current zig version build
on:
  push:
    branches:
      - '*'
      - '!zig-develop*'
      - 'master'
jobs:
  build-zig-0-12-0-amd64:
  build-amd64:
    runs-on: ubuntu-latest
    env:
      ZIG_VERSION: 0.12.0
      ARCH: x86_64
    if: ${{ github.env.GITEA_ACTIONS != 'true' }}
    steps:
      - name: Check out repository code
        uses: actions/checkout@v4
      # ARCH is fine, but we can't substitute directly because zig
      # uses x86_64 instead of amd64. They also use aarch64 instead of arm64.
      #
      # However, arm64/linux isn't quite fully tier 1 yet, so this is more of a
      # TODO: https://github.com/ziglang/zig/issues/2443
      - name: Install zig
        run: |
          wget -q https://ziglang.org/download/${ZIG_VERSION}/zig-linux-${ARCH}-${ZIG_VERSION}.tar.xz
          sudo tar x -C /usr/local -f zig-linux-${ARCH}-${ZIG_VERSION}.tar.xz
          sudo ln -s /usr/local/zig-linux-${ARCH}-${ZIG_VERSION}/zig /usr/local/bin/zig
      - name: Setup Zig
        uses: mlugg/setup-zig@v1.2.1
        with:
          version: 0.14.0
      - name: Run tests
        run: zig build test -Dbroken-windows --verbose # Github runners try to run the windows tests despite disabling foreign checks
      - name: Build example
.github/workflows/zig-mach.yaml (vendored, 28 lines changed)

@@ -1,35 +1,19 @@
name: aws-zig mach nominated build
run-name: ${{ github.actor }} building AWS Zig SDK
name: Latest mach nominated zig version build
on:
  push:
    branches:
      - 'zig-develop*'
      - 'zig-mach*'
jobs:
  build-zig-mach-latest:
    runs-on: ubuntu-latest
    # Need to use the default container with node and all that, so we can
    # use JS-based actions like actions/checkout@v3...
    # container:
    #   image: alpine:3.15.0
    env:
      ZIG_VERSION: mach-latest
      ARCH: x86_64
    if: ${{ github.env.GITEA_ACTIONS != 'true' }}
    steps:
      - name: Check out repository code
        uses: actions/checkout@v4
      # ARCH is fine, but we can't substitute directly because zig
      # uses x86_64 instead of amd64. They also use aarch64 instead of arm64.
      #
      # However, arm64/linux isn't quite fully tier 1 yet, so this is more of a
      # TODO: https://github.com/ziglang/zig/issues/2443
      - name: Install zig
        run: |
          apt-get update && apt-get install -y jq
          file="$(curl -Osw '%{filename_effective}' "$(curl -s https://machengine.org/zig/index.json |jq -r '."'${ZIG_VERSION}'"."x86_64-linux".tarball')")"
          sudo tar x -C /usr/local -f "${file}"
          sudo ln -s /usr/local/"${file%%.tar.xz}"/zig /usr/local/bin/zig
          zig version
      - name: Setup Zig
        uses: mlugg/setup-zig@v1.2.1
        with:
          version: mach-latest
      - name: Run tests
        run: zig build test -Dbroken-windows --verbose
      - name: Build example
.github/workflows/zig-nightly.yaml (vendored, 26 lines changed)

@@ -1,5 +1,4 @@
name: aws-zig nightly build
run-name: ${{ github.actor }} building AWS Zig SDK
name: Nightly zig version Build
on:
  push:
    branches:

@@ -7,29 +6,14 @@ on:
jobs:
  build-zig-nightly:
    runs-on: ubuntu-latest
    # Need to use the default container with node and all that, so we can
    # use JS-based actions like actions/checkout@v3...
    # container:
    #   image: alpine:3.15.0
    env:
      ZIG_VERSION: master
      ARCH: x86_64
    if: ${{ github.env.GITEA_ACTIONS != 'true' }}
    steps:
      - name: Check out repository code
        uses: actions/checkout@v4
      # ARCH is fine, but we can't substitute directly because zig
      # uses x86_64 instead of amd64. They also use aarch64 instead of arm64.
      #
      # However, arm64/linux isn't quite fully tier 1 yet, so this is more of a
      # TODO: https://github.com/ziglang/zig/issues/2443
      - name: Install zig
        run: |
          apt-get update && apt-get install -y jq
          file="$(curl -Osw '%{filename_effective}' "$(curl -s https://ziglang.org/download/index.json |jq -r '."'${ZIG_VERSION}'"."x86_64-linux".tarball')")"
          sudo tar x -C /usr/local -f "${file}"
          sudo ln -s /usr/local/"${file%%.tar.xz}"/zig /usr/local/bin/zig
          zig version
      - name: Setup Zig
        uses: mlugg/setup-zig@v1.2.1
        with:
          version: master
      - name: Run tests
        run: zig build test -Dbroken-windows --verbose
      - name: Build example
.github/workflows/zig-previous.yaml (vendored, new file, +20)

@@ -0,0 +1,20 @@
name: Previous zig version Build
on:
  push:
    branches:
      - 'zig-0.13'
jobs:
  build-amd64:
    runs-on: ubuntu-latest
    if: ${{ github.env.GITEA_ACTIONS != 'true' }}
    steps:
      - name: Check out repository code
        uses: actions/checkout@v4
      - name: Setup Zig
        uses: mlugg/setup-zig@v1.2.1
        with:
          version: 0.13.0
      - name: Run tests
        run: zig build test -Dbroken-windows --verbose # Github runners try to run the windows tests despite disabling foreign checks
      - name: Build example
        run: ( cd example && zig build ) # Make sure example builds
.gitignore (vendored, +1)

@@ -11,3 +11,4 @@ libs/
src/git_version.zig
zig-out
core
.zig-cache
README.md (75 lines changed)

@@ -1,61 +1,77 @@
AWS SDK for Zig
===============

[Zig 0.12](https://ziglang.org/download/#release-0.12.0):
[Zig 0.14](https://ziglang.org/download/#release-0.14.0):

[](https://git.lerch.org/lobo/aws-sdk-for-zig/actions?workflow=build.yaml&state=closed)
[](https://git.lerch.org/lobo/aws-sdk-for-zig/actions?workflow=build.yaml&state=closed)

[Last Mach Nominated Zig Version](https://machengine.org/about/nominated-zig/):
[Last Mach Nominated Zig Version](https://machengine.org/docs/nominated-zig/):

[](https://git.lerch.org/lobo/aws-sdk-for-zig/actions?workflow=zig-mach.yaml&state=closed)
[](https://git.lerch.org/lobo/aws-sdk-for-zig/actions?workflow=zig-mach.yaml&state=closed)

[Nightly Zig](https://ziglang.org/download/):

[](https://git.lerch.org/lobo/aws-sdk-for-zig/actions?workflow=zig-nightly.yaml&state=closed)
[](https://git.lerch.org/lobo/aws-sdk-for-zig/actions?workflow=zig-nightly.yaml&state=closed)

[Zig 0.13](https://ziglang.org/download/#release-0.13.0):

[](https://git.lerch.org/lobo/aws-sdk-for-zig/actions?workflow=zig-previous.yaml&state=closed)


Current executable size for the demo is 980k after compiling with -Doptimize=ReleaseSmall
in x86_linux, and will vary based on services used. Tested targets:
in x86_64-linux, and will vary based on services used. Tested targets:

* x86_64-linux
* riscv64-linux\*
* riscv64-linux
* aarch64-linux
* x86_64-windows\*\*
* x86_64-windows
* arm-linux
* aarch64-macos
* x86_64-macos

Tested targets are built, but not continuously tested, by CI.

\* On Zig 0.12, riscv64-linux tests take a significant time to compile (each aws.zig test takes approximately 1min, 45 seconds to compile on Intel i9 10th gen)
Branches
--------

\*\* On Zig 0.12, x86_64-windows tests have one test skipped as LLVM consumes all available RAM on the system
* **master**: This branch tracks the latest released zig version
* **zig-0.13**: This branch tracks the previous released zig version (0.13 currently).
  Support for the previous version is best effort, generally
  degrading over time. Fixes will generally appear in master, then
  backported into the previous version.
* **zig-mach**: This branch tracks the latest mach nominated version. A separate
  branch is necessary as mach nominated is usually, but not always,
  more recent than the latest production zig. Support for the mach
  version is best effort.
* **zig-develop**: This branch tracks zig nightly, and is used mainly as a canary
  for breaking changes that will need to be dealt with when
  a new mach nominated version or new zig release appear.
  Expect significant delays in any build failures.


Zig-Develop Branch
------------------

This branch is intended for use with the in-development version of Zig. This
starts with 0.12.0-dev.3180+83e578a18. I will try to keep this branch up to date
with latest, but with a special eye towards aligning with [Mach Engine's Nominated
Zig Versions](https://machengine.org/about/nominated-zig/). As nightly zig versions
disappear off the downloads page (and back end server), we can use the mirroring
that the Mach Engine participates in to pull these versions.
Other branches/tags exist but are unsupported

Building
--------

`zig build` should work. It will build the code generation project, fetch model
files from upstream AWS Go SDK v2, run the code generation, then build the main
project with the generated code. Testing can be done with `zig test`.
project with the generated code. Testing can be done with `zig build test`. Note that
this command tests on all supported architectures, so for a faster testing
process, use `zig build smoke-test` instead.

To make development even faster, a build option is provided to avoid the use of
LLVM. To use this, use the command `zig build -Dno-llvm smoke-test`. This
can reduce build/test time 300%. Note, however, native code generation in zig
is not yet complete, so you may see errors.

Using
-----

This is designed for use with the Zig 0.11 package manager, and exposes a module
This is designed for use with the Zig package manager, and exposes a module
called "aws". Set up `build.zig.zon` and add the dependency/module to your project
as normal and the package manager should do its thing. A full example can be found
in [/example](example/README.md).
in [/example](example/build.zig.zon). This can also be used at build time in
a downstream project's `build.zig`.

Configuring the module and/or Running the demo
----------------------------------------------

@@ -63,8 +79,8 @@ Configuring the module and/or Running the demo
This library mimics the aws c libraries for it's work, so it operates like most
other 'AWS things'. [/src/main.zig](src/main.zig) gives you a handful of examples
for working with services. For local testing or alternative endpoints, there's
no real standard, so there is code to look for `AWS_ENDPOINT_URL` environment
variable that will supersede all other configuration.
no real standard, so there is code to look for an environment variable
`AWS_ENDPOINT_URL` variable that will supersede all other configuration.

Limitations
-----------

@@ -85,13 +101,6 @@ TODO List:
* Implement timeouts and other TODO's in the code
* Add option to cache signature keys

Services without TLS 1.3 support
--------------------------------

All AWS services should support TLS 1.3 at this point, but there are many regions
and several partitions, and not all of them have been tested, so your mileage
may vary.

Dependency tree
---------------


@@ -100,7 +109,6 @@ No dependencies:
* aws_http_base: contains basic structures for http requests/results
* case: provides functions to change casing
* date: provides limited date manipulation functions
* http_client_17015_issue: zig 0.11 http client, with changes
* json: custom version of earlier stdlib json parser
* xml: custom xml parser library
* url: custom url encoding

@@ -109,7 +117,6 @@ aws_credentials: Allows credential handling
aws_authentication

aws_http:
  http_client_17015_issue
  aws_http_base
  aws_signing

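The Using section above leaves the downstream wiring implicit. A minimal sketch of what that might look like in a consumer's build.zig (a hypothetical consumer project; the "aws" dependency and module names match the example/build.zig changes later in this diff, but the exact std.Build API varies by zig version):

const std = @import("std");

pub fn build(b: *std.Build) void {
    const target = b.standardTargetOptions(.{});
    const optimize = b.standardOptimizeOption(.{});

    const exe = b.addExecutable(.{
        .name = "myapp", // hypothetical consumer executable
        .root_source_file = b.path("src/main.zig"),
        .target = target,
        .optimize = optimize,
    });

    // "aws" is the dependency name used in the example build.zig.zon in this
    // diff; the "aws" module is the one exposed by b.addModule in build.zig.
    const aws_dep = b.dependency("aws", .{
        .target = target,
        .optimize = optimize,
    });
    exe.root_module.addImport("aws", aws_dep.module("aws"));

    b.installArtifact(exe);
}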
build.zig (219 lines changed)

@@ -4,42 +4,18 @@ const Builder = @import("std").Build;

const models_subdir = "codegen/sdk-codegen/aws-models/"; // note will probably not work on windows

const test_targets = [_]std.zig.CrossTarget{
// UNCOMMENT AFTER MODEL GEN TO USE IN BUILD SCRIPTS //pub const aws = @import("src/aws.zig");

const test_targets = [_]std.Target.Query{
    .{}, // native
    .{
        .cpu_arch = .x86_64,
        .os_tag = .linux,
    },
    .{
        .cpu_arch = .aarch64,
        .os_tag = .linux,
    },
    // // The test executable just spins forever in LLVM using nominated zig 0.12 March 2024
    // // This is likely a LLVM problem unlikely to be fixed in zig 0.12
    // .{
    //     .cpu_arch = .riscv64,
    //     .os_tag = .linux,
    // },
    .{
        .cpu_arch = .arm,
        .os_tag = .linux,
    },
    .{
        .cpu_arch = .x86_64,
        .os_tag = .windows,
    },
    .{
        .cpu_arch = .aarch64,
        .os_tag = .macos,
    },
    .{
        .cpu_arch = .x86_64,
        .os_tag = .macos,
    },
    // .{
    //     .cpu_arch = .wasm32,
    //     .os_tag = .wasi,
    // },
    .{ .cpu_arch = .x86_64, .os_tag = .linux },
    .{ .cpu_arch = .aarch64, .os_tag = .linux },
    .{ .cpu_arch = .riscv64, .os_tag = .linux },
    .{ .cpu_arch = .arm, .os_tag = .linux },
    .{ .cpu_arch = .x86_64, .os_tag = .windows },
    .{ .cpu_arch = .aarch64, .os_tag = .macos },
    .{ .cpu_arch = .x86_64, .os_tag = .macos },
    // .{ .cpu_arch = .wasm32, .os_tag = .wasi },
};

pub fn build(b: *Builder) !void {
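The compact std.Target.Query list above feeds the per-target test loop that appears later in this diff. Reduced to its essentials, the pattern is (a sketch assembled from lines elsewhere in this build.zig; b, optimize, no_llvm, and test_step are in scope in the full file):

for (test_targets) |t| {
    const unit_tests = b.addTest(.{
        .root_source_file = b.path("src/aws.zig"),
        .target = b.resolveTargetQuery(t), // resolve each query to a concrete target
        .optimize = optimize,
    });
    unit_tests.use_llvm = !no_llvm;
    const run_unit_tests = b.addRunArtifact(unit_tests);
    run_unit_tests.skip_foreign_checks = true; // don't fail when a target can't run natively
    test_step.dependOn(&run_unit_tests.step);
}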
@@ -53,11 +29,17 @@ pub fn build(b: *Builder) !void {
    // between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall.
    const optimize = b.standardOptimizeOption(.{});

    const no_llvm = b.option(
        bool,
        "no-llvm",
        "Disable LLVM",
    ) orelse false;
    const broken_windows = b.option(
        bool,
        "broken-windows",
        "Windows is broken in this environment (do not run Windows tests)",
    ) orelse false;
    const no_bin = b.option(bool, "no-bin", "skip emitting binary") orelse false;
    // TODO: Embed the current git version in the code. We can do this
    // by looking for .git/HEAD (if it exists, follow the ref to /ref/heads/whatevs,
    // grab that commit, and use b.addOptions/exe.addOptions to generate the

@@ -72,10 +54,11 @@ pub fn build(b: *Builder) !void {
    // It relies on code gen and is all fouled up when getting imported
    const exe = b.addExecutable(.{
        .name = "demo",
        .root_source_file = .{ .path = "src/main.zig" },
        .root_source_file = b.path("src/main.zig"),
        .target = target,
        .optimize = optimize,
    });
    exe.use_llvm = !no_llvm;
    const smithy_dep = b.dependency("smithy", .{
        // These are the arguments to the dependency. It expects a target and optimization level.
        .target = target,

@@ -84,17 +67,6 @@ pub fn build(b: *Builder) !void {
    const smithy_module = smithy_dep.module("smithy");
    exe.root_module.addImport("smithy", smithy_module); // not sure this should be here...

    // Expose module to others
    _ = b.addModule("aws", .{
        .root_source_file = .{ .path = "src/aws.zig" },
        .imports = &.{.{ .name = "smithy", .module = smithy_module }},
    });

    // Expose module to others
    _ = b.addModule("aws-signing", .{
        .root_source_file = .{ .path = "src/aws_signing.zig" },
        .imports = &.{.{ .name = "smithy", .module = smithy_module }},
    });
    // TODO: This does not work correctly due to https://github.com/ziglang/zig/issues/16354
    //
    // We are working here with kind of a weird dependency though. So we can do this
@@ -117,61 +89,73 @@ pub fn build(b: *Builder) !void {
    const run_step = b.step("run", "Run the app");
    run_step.dependOn(&run_cmd.step);

    const gen_step = blk: {
        const cg = b.step("gen", "Generate zig service code from smithy models");
    const cg = b.step("gen", "Generate zig service code from smithy models");

        const cg_exe = b.addExecutable(.{
            .name = "codegen",
            .root_source_file = .{ .path = "codegen/src/main.zig" },
            // We need this generated for the host, not the real target
            .target = b.host,
            .optimize = if (b.verbose) .Debug else .ReleaseSafe,
        });
        cg_exe.root_module.addImport("smithy", smithy_dep.module("smithy"));
        var cg_cmd = b.addRunArtifact(cg_exe);
        cg_cmd.addArg("--models");
        const hash = hash_blk: {
            for (b.available_deps) |dep| {
                const dep_name = dep.@"0";
                const dep_hash = dep.@"1";
                if (std.mem.eql(u8, dep_name, "models"))
                    break :hash_blk dep_hash;
            }
            return error.DependencyNamedModelsNotFoundInBuildZigZon;
        };
        cg_cmd.addArg(try std.fs.path.join(
            b.allocator,
            &[_][]const u8{
                b.graph.global_cache_root.path.?,
                "p",
                hash,
                models_subdir,
            },
        ));
        cg_cmd.addArg("--output");
        cg_cmd.addDirectoryArg(b.path("src/models"));
        if (b.verbose)
            cg_cmd.addArg("--verbose");
        // cg_cmd.step.dependOn(&fetch_step.step);
        // TODO: this should use zig_exe from std.Build
        // codegen should store a hash in a comment
        // this would be hash of the exe that created the file
        // concatenated with hash of input json. this would
        // allow skipping generated files. May not include hash
        // of contents of output file as maybe we want to tweak
        // manually??
        //
        // All the hashes can be in service_manifest.zig, which
        // could be fun to just parse and go nuts. Top of
        // file, generator exe hash. Each import has comment
        // with both input and output hash and we can decide
        // later about warning on manual changes...
    const cg_exe = b.addExecutable(.{
        .name = "codegen",
        .root_source_file = b.path("codegen/src/main.zig"),
        // We need this generated for the host, not the real target
        .target = b.graph.host,
        .optimize = if (b.verbose) .Debug else .ReleaseSafe,
    });
    cg_exe.root_module.addImport("smithy", smithy_module);
    var cg_cmd = b.addRunArtifact(cg_exe);
    cg_cmd.addArg("--models");
    cg_cmd.addArg(try std.fs.path.join(
        b.allocator,
        &[_][]const u8{
            try b.dependency("models", .{}).path("").getPath3(b, null).toString(b.allocator),
            models_subdir,
        },
    ));
    cg_cmd.addArg("--output");
    const cg_output_dir = cg_cmd.addOutputDirectoryArg("src/models");
    if (b.verbose)
        cg_cmd.addArg("--verbose");
    // cg_cmd.step.dependOn(&fetch_step.step);
    // TODO: this should use zig_exe from std.Build
    // codegen should store a hash in a comment
    // this would be hash of the exe that created the file
    // concatenated with hash of input json. this would
    // allow skipping generated files. May not include hash
    // of contents of output file as maybe we want to tweak
    // manually??
    //
    // All the hashes can be in service_manifest.zig, which
    // could be fun to just parse and go nuts. Top of
    // file, generator exe hash. Each import has comment
    // with both input and output hash and we can decide
    // later about warning on manual changes...

        cg.dependOn(&cg_cmd.step);
        break :blk cg;
    };
    cg.dependOn(&cg_cmd.step);

    exe.step.dependOn(gen_step);
    exe.step.dependOn(cg);

    // This allows us to have each module depend on the
    // generated service manifest.
    const service_manifest_module = b.createModule(.{
        .root_source_file = cg_output_dir.path(b, "service_manifest.zig"),
        .target = target,
        .optimize = optimize,
    });
    service_manifest_module.addImport("smithy", smithy_module);

    exe.root_module.addImport("service_manifest", service_manifest_module);

    // Expose module to others
    _ = b.addModule("aws", .{
        .root_source_file = b.path("src/aws.zig"),
        .imports = &.{
            .{ .name = "smithy", .module = smithy_module },
            .{ .name = "service_manifest", .module = service_manifest_module },
        },
    });

    // Expose module to others
    _ = b.addModule("aws-signing", .{
        .root_source_file = b.path("src/aws_signing.zig"),
        .imports = &.{.{ .name = "smithy", .module = smithy_module }},
    });

    // Similar to creating the run step earlier, this exposes a `test` step to
    // the `zig build --help` menu, providing a way for the user to request
@@ -197,17 +181,46 @@ pub fn build(b: *Builder) !void {
        // Creates a step for unit testing. This only builds the test executable
        // but does not run it.
        const unit_tests = b.addTest(.{
            .root_source_file = .{ .path = "src/aws.zig" },
            .root_source_file = b.path("src/aws.zig"),
            .target = b.resolveTargetQuery(t),
            .optimize = optimize,
        });
        unit_tests.root_module.addImport("smithy", smithy_dep.module("smithy"));
        unit_tests.step.dependOn(gen_step);
        unit_tests.root_module.addImport("smithy", smithy_module);
        unit_tests.root_module.addImport("service_manifest", service_manifest_module);
        unit_tests.step.dependOn(cg);
        unit_tests.use_llvm = !no_llvm;

        const run_unit_tests = b.addRunArtifact(unit_tests);
        run_unit_tests.skip_foreign_checks = true;

        test_step.dependOn(&run_unit_tests.step);
    }
    b.installArtifact(exe);
    const check = b.step("check", "Check compilation errors");
    check.dependOn(&exe.step);

    // Similar to creating the run step earlier, this exposes a `test` step to
    // the `zig build --help` menu, providing a way for the user to request
    // running the unit tests.
    const smoke_test_step = b.step("smoke-test", "Run unit tests");

    // Creates a step for unit testing. This only builds the test executable
    // but does not run it.
    const smoke_test = b.addTest(.{
        .root_source_file = b.path("src/aws.zig"),
        .target = target,
        .optimize = optimize,
    });
    smoke_test.use_llvm = !no_llvm;
    smoke_test.root_module.addImport("smithy", smithy_module);
    smoke_test.root_module.addImport("service_manifest", service_manifest_module);
    smoke_test.step.dependOn(cg);

    const run_smoke_test = b.addRunArtifact(smoke_test);

    smoke_test_step.dependOn(&run_smoke_test.step);
    if (no_bin) {
        b.getInstallStep().dependOn(&exe.step);
    } else {
        b.installArtifact(exe);
    }
}
build.zig.zon

@@ -1,16 +1,20 @@
.{
    .name = "aws",
    .name = .aws,
    .version = "0.0.1",
    .fingerprint = 0x1f26b7b27005bb49,
    .paths = .{
        "build.zig",
        "build.zig.zon",
        "src",
        "build.zig",
        "build.zig.zon",
        "src",
        "codegen",
        "README.md",
        "LICENSE",
    },

    .dependencies = .{
        .smithy = .{
            .url = "https://git.lerch.org/lobo/smithy/archive/1e534201c4df5ea4f615faeedc69d414adbec0b1.tar.gz",
            .hash = "1220af63ae0498010004af79936cedf3fe6702f516daab77ebbd97a274eba1b42aad",
            .url = "https://git.lerch.org/lobo/smithy/archive/a4c6ec6dfe552c57bab601c7d99e8de02bbab1fe.tar.gz",
            .hash = "smithy-1.0.0-uAyBgS_MAgC4qgc9QaEy5Y5Nf7kv32buQZBYugqNQsAn",
        },
        .models = .{
            .url = "https://github.com/aws/aws-sdk-go-v2/archive/58cf6509525a12d64fd826da883bfdbacbd2f00e.tar.gz",
@@ -2,7 +2,33 @@ const std = @import("std");
// options is a json.Options, but since we're using our hacked json.zig we don't want to
// specifically call this out
pub fn serializeMap(map: anytype, key: []const u8, options: anytype, out_stream: anytype) !bool {
    if (map.len == 0) return true;
    if (@typeInfo(@TypeOf(map)) == .optional) {
        if (map == null)
            return false
        else
            return serializeMapInternal(map.?, key, options, out_stream);
    }
    return serializeMapInternal(map, key, options, out_stream);
}

fn serializeMapInternal(map: anytype, key: []const u8, options: anytype, out_stream: anytype) !bool {
    if (map.len == 0) {
        var child_options = options;
        if (child_options.whitespace) |*child_ws|
            child_ws.indent_level += 1;

        try out_stream.writeByte('"');
        try out_stream.writeAll(key);
        _ = try out_stream.write("\":");
        if (options.whitespace) |ws| {
            if (ws.separator) {
                try out_stream.writeByte(' ');
            }
        }
        try out_stream.writeByte('{');
        try out_stream.writeByte('}');
        return true;
    }
    // TODO: Map might be [][]struct{key, value} rather than []struct{key, value}
    var child_options = options;
    if (child_options.whitespace) |*child_ws|
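A hedged usage sketch of the serializer above. The options type is a stand-in for the project's hacked json.Options (only the .whitespace field is touched on the empty-map path); this change appears to make an empty map serialize as "key":{} rather than being skipped:

const FakeOptions = struct { // stand-in for the hacked json.Options
    whitespace: ?struct { indent_level: usize = 0, separator: bool = true } = null,
};

test "empty map still emits a key" {
    var buf = std.ArrayList(u8).init(std.testing.allocator);
    defer buf.deinit();
    const empty: []const struct { key: []const u8, value: []const u8 } = &.{};
    // With the new serializeMapInternal empty-map handling, this should
    // write: "tags":{}
    _ = try serializeMap(empty, "tags", FakeOptions{}, buf.writer());
}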
codegen/src/main.zig

@@ -34,7 +34,7 @@ pub fn main() anyerror!void {
        models_dir = try std.fs.cwd().openDir(args[i + 1], .{ .iterate = true });
    }
    // TODO: Seems like we should remove this in favor of a package
    try output_dir.writeFile("json.zig", json_zig);
    try output_dir.writeFile(.{ .sub_path = "json.zig", .data = json_zig });

    // TODO: We need a different way to handle this file...
    const manifest_file_started = false;

@@ -123,11 +123,11 @@ fn processDirectories(models_dir: std.fs.Dir, output_dir: std.fs.Dir) !void {
    // re-calculate so we can store the manifest
    model_digest = calculated_manifest.model_dir_hash_digest;
    calculated_manifest = try calculateDigests(models_dir, output_dir, &thread_pool);
    try output_dir.writeFile("output_manifest.json", try std.json.stringifyAlloc(
    try output_dir.writeFile(.{ .sub_path = "output_manifest.json", .data = try std.json.stringifyAlloc(
        allocator,
        calculated_manifest,
        .{ .whitespace = .indent_2 },
    ));
    ) });
}

var model_digest: ?[Hasher.hex_multihash_len]u8 = null;

@@ -435,7 +435,7 @@ fn generateServices(allocator: std.mem.Allocator, comptime _: []const u8, file:

fn generateAdditionalTypes(allocator: std.mem.Allocator, file_state: FileGenerationState, writer: anytype) !void {
    // More types may be added during processing
    while (file_state.additional_types_to_generate.popOrNull()) |t| {
    while (file_state.additional_types_to_generate.pop()) |t| {
        if (file_state.additional_types_generated.getEntry(t.name) != null) continue;
        // std.log.info("\t\t{s}", .{t.name});
        var type_stack = std.ArrayList(*const smithy.ShapeInfo).init(allocator);

@@ -716,7 +716,7 @@ fn generateTypeFor(shape_id: []const u8, writer: anytype, state: GenerationState
            // The serializer will have to deal with the idea we might be an array
            return try generateTypeFor(shape.set.member_target, writer, state, true);
        },
        .timestamp => |s| try generateSimpleTypeFor(s, "i64", writer),
        .timestamp => |s| try generateSimpleTypeFor(s, "f128", writer),
        .blob => |s| try generateSimpleTypeFor(s, "[]const u8", writer),
        .boolean => |s| try generateSimpleTypeFor(s, "bool", writer),
        .double => |s| try generateSimpleTypeFor(s, "f64", writer),
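The two writeFile hunks above adapt to the std.fs.Dir.writeFile signature change in newer zig: a single options struct instead of separate path and data arguments. A standalone sketch of the new form (paths and contents here are illustrative):

const std = @import("std");

pub fn writeGenerated() !void {
    var dir = try std.fs.cwd().openDir("src/models", .{});
    defer dir.close();
    // New API: one options struct with .sub_path and .data
    try dir.writeFile(.{ .sub_path = "json.zig", .data = "// generated\n" });
}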
example/build.zig

@@ -19,18 +19,12 @@ pub fn build(b: *std.Build) void {
        .name = "tmp",
        // In this case the main source file is merely a path, however, in more
        // complicated build scripts, this could be a generated file.
        .root_source_file = .{ .path = "src/main.zig" },
        .root_source_file = b.path("src/main.zig"),
        .target = target,
        .optimize = optimize,
    });

    // const smithy_dep = b.dependency("smithy", .{
    //     // These are the two arguments to the dependency. It expects a target and optimization level.
    //     .target = target,
    //     .optimize = optimize,
    // });
    // exe.addModule("smithy", smithy_dep.module("smithy"));
    const aws_dep = b.dependency("aws-zig", .{
    const aws_dep = b.dependency("aws", .{
        // These are the two arguments to the dependency. It expects a target and optimization level.
        .target = target,
        .optimize = optimize,

@@ -68,7 +62,7 @@ pub fn build(b: *std.Build) void {
    // Creates a step for unit testing. This only builds the test executable
    // but does not run it.
    const unit_tests = b.addTest(.{
        .root_source_file = .{ .path = "src/main.zig" },
        .root_source_file = b.path("src/main.zig"),
        .target = target,
        .optimize = optimize,
    });
example/build.zig.zon

@@ -1,16 +1,13 @@
.{
    .name = "myapp",
    .name = .myapp,
    .version = "0.0.1",
    .fingerprint = 0x8798022a511224c5,
    .paths = .{""},

    .dependencies = .{
        .smithy = .{
            .url = "https://git.lerch.org/lobo/smithy/archive/1e534201c4df5ea4f615faeedc69d414adbec0b1.tar.gz",
            .hash = "1220af63ae0498010004af79936cedf3fe6702f516daab77ebbd97a274eba1b42aad",
        },
        .@"aws-zig" = .{
            .url = "https://git.lerch.org/api/packages/lobo/generic/aws-sdk-with-models/a0773971f2f52182c8a5235582500d36afda2e81/a0773971f2f52182c8a5235582500d36afda2e81-with-models.tar.gz",
            .hash = "1220198f7b734c1cc6a683ad13246439a59be934156a2df3a734bcaf15433b33eead",
        .aws = .{
            .url = "https://git.lerch.org/api/packages/lobo/generic/aws-sdk-with-models/30d46261b791a1a916e30e60814b39c7ee994a74/30d46261b791a1a916e30e60814b39c7ee994a74-with-models.tar.gz",
            .hash = "aws-0.0.1-SbsFcLuV6gEkmY-mNp_x-V_GJ-zuJRqIljc4tAu60-g_",
        },
    },
}
example/src/main.zig

@@ -36,10 +36,8 @@ pub fn main() anyerror!void {
        .client = client,
    };

    // As of 2023-08-28, only ECS from this list supports TLS v1.3
    // AWS commitment is to enable all services by 2023-12-31
    const services = aws.Services(.{ .sts, .kms }){};
    try stdout.print("Calling KMS ListKeys, a TLS 1.3 enabled service\n", .{});
    try stdout.print("Calling KMS ListKeys\n", .{});
    try stdout.print("You likely have at least some AWS-generated keys in your account,\n", .{});
    try stdout.print("but if the account has not had many services used, this may return 0 keys\n\n", .{});
    const call_kms = try aws.Request(services.kms.list_keys).call(.{}, options);

@@ -51,8 +49,7 @@ pub fn main() anyerror!void {
    }
    defer call_kms.deinit();

    try stdout.print("\n\n\nCalling STS GetCallerIdentity. This does not have TLS 1.3 in September 2023\n", .{});
    try stdout.print("A failure may occur\n\n", .{});
    try stdout.print("\n\n\nCalling STS GetCallerIdentity\n", .{});
    const call = try aws.Request(services.sts.get_caller_identity).call(.{}, options);
    defer call.deinit();
    try stdout.print("\tarn: {s}\n", .{call.response.arn.?});
src/aws.zig (468 lines changed)

@@ -9,7 +9,72 @@ const date = @import("date.zig");
const servicemodel = @import("servicemodel.zig");
const xml_shaper = @import("xml_shaper.zig");

const log = std.log.scoped(.aws);
const scoped_log = std.log.scoped(.aws);

/// control all logs directly/indirectly used by aws sdk. Not recommended for
/// use under normal circumstances, but helpful for times when the zig logging
/// controls are insufficient (e.g. use in build script)
pub fn globalLogControl(aws_level: std.log.Level, http_level: std.log.Level, signing_level: std.log.Level, off: bool) void {
    const signing = @import("aws_signing.zig");
    const credentials = @import("aws_credentials.zig");
    logs_off = off;
    signing.logs_off = off;
    credentials.logs_off = off;
    awshttp.logs_off = off;
    log_level = aws_level;
    awshttp.log_level = http_level;
    signing.log_level = signing_level;
    credentials.log_level = signing_level;
}
/// Specifies logging level. This should not be touched unless the normal
/// zig logging capabilities are inaccessible (e.g. during a build)
pub var log_level: std.log.Level = .debug;

/// Turn off logging completely
pub var logs_off: bool = false;
const log = struct {
    /// Log an error message. This log level is intended to be used
    /// when something has gone wrong. This might be recoverable or might
    /// be followed by the program exiting.
    pub fn err(
        comptime format: []const u8,
        args: anytype,
    ) void {
        if (!logs_off and @intFromEnum(std.log.Level.err) <= @intFromEnum(log_level))
            scoped_log.err(format, args);
    }

    /// Log a warning message. This log level is intended to be used if
    /// it is uncertain whether something has gone wrong or not, but the
    /// circumstances would be worth investigating.
    pub fn warn(
        comptime format: []const u8,
        args: anytype,
    ) void {
        if (!logs_off and @intFromEnum(std.log.Level.warn) <= @intFromEnum(log_level))
            scoped_log.warn(format, args);
    }

    /// Log an info message. This log level is intended to be used for
    /// general messages about the state of the program.
    pub fn info(
        comptime format: []const u8,
        args: anytype,
    ) void {
        if (!logs_off and @intFromEnum(std.log.Level.info) <= @intFromEnum(log_level))
            scoped_log.info(format, args);
    }

    /// Log a debug message. This log level is intended to be used for
    /// messages which are only useful for debugging.
    pub fn debug(
        comptime format: []const u8,
        args: anytype,
    ) void {
        if (!logs_off and @intFromEnum(std.log.Level.debug) <= @intFromEnum(log_level))
            scoped_log.debug(format, args);
    }
};

pub const Options = struct {
    region: []const u8 = "aws-global",
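A short usage sketch of the new globalLogControl above (the call site is hypothetical; the levels are positional: aws, http, signing, then a global off switch):

const aws = @import("aws"); // module name as exposed by this repo's build.zig

pub fn quietSdkLogs() void {
    // Keep only error-level output from the aws, http, and signing scopes;
    // pass true as the final argument to silence SDK logging entirely.
    aws.globalLogControl(.err, .err, .err, false);
}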
@@ -19,6 +84,18 @@ pub const Options = struct {

    /// Used for testing to provide consistent signing. If null, will use current time
    signing_time: ?i64 = null,
    diagnostics: ?*Diagnostics = null,
};

pub const Diagnostics = struct {
    http_code: i64,
    response_body: []const u8,
    allocator: std.mem.Allocator,

    pub fn deinit(self: *Diagnostics) void {
        self.allocator.free(self.response_body);
        self.response_body = undefined;
    }
};

/// Using this constant may blow up build times. Recommed using Services()
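The new diagnostics field pairs with the error path added later in this diff, which fills in http_code and response_body before returning error.HttpFailure. A hedged caller-side sketch (client, allocator, and services setup omitted; the call itself mirrors example/src/main.zig):

var diagnostics = aws.Diagnostics{
    .http_code = undefined,
    .response_body = undefined,
    .allocator = allocator,
};
const options = aws.Options{ .client = client, .diagnostics = &diagnostics };
const result = aws.Request(services.sts.get_caller_identity).call(.{}, options) catch |err| {
    // On HttpFailure, the SDK has populated the diagnostics for us.
    defer diagnostics.deinit();
    std.log.err("call failed: {} (http {d}): {s}", .{ err, diagnostics.http_code, diagnostics.response_body });
    return err;
};
defer result.deinit();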
@@ -114,12 +191,15 @@ pub fn Request(comptime request_action: anytype) type {
            log.debug("Rest method: '{s}'", .{aws_request.method});
            log.debug("Rest success code: '{d}'", .{Action.http_config.success_code});
            log.debug("Rest raw uri: '{s}'", .{Action.http_config.uri});
            var al = std.ArrayList([]const u8).init(options.client.allocator);
            defer al.deinit();
            aws_request.path = try buildPath(
                options.client.allocator,
                Action.http_config.uri,
                ActionRequest,
                request,
                !std.mem.eql(u8, Self.service_meta.sdk_id, "S3"),
                &al,
            );
            defer options.client.allocator.free(aws_request.path);
            log.debug("Rest processed uri: '{s}'", .{aws_request.path});

@@ -151,7 +231,7 @@ pub fn Request(comptime request_action: anytype) type {
            defer nameAllocator.deinit();
            if (Self.service_meta.aws_protocol == .rest_json_1) {
                if (std.mem.eql(u8, "PUT", aws_request.method) or std.mem.eql(u8, "POST", aws_request.method)) {
                    try json.stringify(request, .{ .whitespace = .{} }, buffer.writer());
                    try json.stringify(request, .{ .whitespace = .{}, .emit_null = false, .exclude_fields = al.items }, buffer.writer());
                }
            }
            aws_request.body = buffer.items;
@@ -175,6 +255,7 @@ pub fn Request(comptime request_action: anytype) type {
                .dualstack = options.dualstack,
                .client = options.client,
                .signing_time = options.signing_time,
                .diagnostics = options.diagnostics,
            });
        }

@@ -182,9 +263,9 @@ pub fn Request(comptime request_action: anytype) type {
        fn callJson(request: ActionRequest, options: Options) !FullResponseType {
            const target =
                try std.fmt.allocPrint(options.client.allocator, "{s}.{s}", .{
                    Self.service_meta.name,
                    action.action_name,
                });
                Self.service_meta.name,
                action.action_name,
            });
            defer options.client.allocator.free(target);

            var buffer = std.ArrayList(u8).init(options.client.allocator);
@@ -245,11 +326,11 @@ pub fn Request(comptime request_action: anytype) type {
            // originally?
            const body =
                try std.fmt.allocPrint(options.client.allocator, "Action={s}&Version={s}{s}{s}", .{
                    action.action_name,
                    Self.service_meta.version,
                    continuation,
                    buffer.items,
                });
                action.action_name,
                Self.service_meta.version,
                continuation,
                buffer.items,
            });
            defer options.client.allocator.free(body);
            return try Self.callAws(.{
                .query = query,

@@ -272,6 +353,10 @@ pub fn Request(comptime request_action: anytype) type {
            defer response.deinit();
            if (response.response_code != options.success_http_code) {
                try reportTraffic(options.client.allocator, "Call Failed", aws_request, response, log.err);
                if (options.diagnostics) |d| {
                    d.http_code = response.response_code;
                    d.response_body = try d.allocator.dupe(u8, response.body);
                }
                return error.HttpFailure;
            }
@@ -353,7 +438,7 @@ pub fn Request(comptime request_action: anytype) type {
            // First, we need to determine if we care about a response at all
            // If the expected result has no fields, there's no sense in
            // doing any more work. Let's bail early
            var expected_body_field_len = std.meta.fields(action.Response).len;
            comptime var expected_body_field_len = std.meta.fields(action.Response).len;
            if (@hasDecl(action.Response, "http_header"))
                expected_body_field_len -= std.meta.fields(@TypeOf(action.Response.http_header)).len;
            if (@hasDecl(action.Response, "http_payload")) {

@@ -379,8 +464,6 @@ pub fn Request(comptime request_action: anytype) type {

            // We don't care about the body if there are no fields we expect there...
            if (std.meta.fields(action.Response).len == 0 or expected_body_field_len == 0) {
                // ^^ This should be redundant, but is necessary. I suspect it's a compiler quirk
                //
                // Do we care if an unexpected body comes in?
                return FullResponseType{
                    .response = .{},
@@ -434,9 +517,9 @@ pub fn Request(comptime request_action: anytype) type {
            // And the response property below will pull whatever is the ActionResult object
            // We can grab index [0] as structs are guaranteed by zig to be returned in the order
            // declared, and we're declaring in that order in ServerResponse().
            const real_response = @field(parsed_response, @typeInfo(response_types.NormalResponse).Struct.fields[0].name);
            const real_response = @field(parsed_response, @typeInfo(response_types.NormalResponse).@"struct".fields[0].name);
            return FullResponseType{
                .response = @field(real_response, @typeInfo(@TypeOf(real_response)).Struct.fields[0].name),
                .response = @field(real_response, @typeInfo(@TypeOf(real_response)).@"struct".fields[0].name),
                .response_metadata = .{
                    .request_id = try options.client.allocator.dupe(u8, real_response.ResponseMetadata.RequestId),
                },
@@ -626,8 +709,10 @@ pub fn Request(comptime request_action: anytype) type {

            // Extract the first json key
            const key = firstJsonKey(data);
            const found_normal_json_response = std.mem.eql(u8, key, action.action_name ++ "Response") or
                std.mem.eql(u8, key, action.action_name ++ "Result");
            const found_normal_json_response =
                std.mem.eql(u8, key, action.action_name ++ "Response") or
                std.mem.eql(u8, key, action.action_name ++ "Result") or
                isOtherNormalResponse(response_types.NormalResponse, key);
            var raw_response_parsed = false;
            var stream = json.TokenStream.init(data);
            const parsed_response_ptr = blk: {

@@ -651,22 +736,23 @@ pub fn Request(comptime request_action: anytype) type {
                log.debug("Appears server has provided a raw response", .{});
                raw_response_parsed = true;
                const ptr = try options.client.allocator.create(response_types.NormalResponse);
                errdefer options.client.allocator.destroy(ptr);
                @field(ptr.*, std.meta.fields(action.Response)[0].name) =
                    json.parse(response_types.RawResponse, &stream, parser_options) catch |e| {
                    log.err(
                        \\Call successful, but unexpected response from service.
                        \\This could be the result of a bug or a stale set of code generated
                        \\service models.
                        \\
                        \\Model Type: {}
                        \\
                        \\Response from server:
                        \\
                        \\{s}
                        \\
                    , .{ action.Response, data });
                    return e;
                };
                        log.err(
                            \\Call successful, but unexpected response from service.
                            \\This could be the result of a bug or a stale set of code generated
                            \\service models.
                            \\
                            \\Model Type: {}
                            \\
                            \\Response from server:
                            \\
                            \\{s}
                            \\
                        , .{ action.Response, data });
                        return e;
                    };
                break :blk ptr;
            };
            return ParsedJsonData(response_types.NormalResponse){
@@ -678,13 +764,25 @@ pub fn Request(comptime request_action: anytype) type {
     };
 }

+fn isOtherNormalResponse(comptime T: type, first_key: []const u8) bool {
+    const fields = std.meta.fields(T);
+    if (fields.len != 1) return false;
+    const first_field = fields[0];
+    if (!@hasDecl(T, "fieldNameFor")) return false;
+    const expected_key = T.fieldNameFor(undefined, first_field.name);
+    return std.mem.eql(u8, first_key, expected_key);
+}
 fn coerceFromString(comptime T: type, val: []const u8) anyerror!T {
-    if (@typeInfo(T) == .Optional) return try coerceFromString(@typeInfo(T).Optional.child, val);
+    if (@typeInfo(T) == .optional) return try coerceFromString(@typeInfo(T).optional.child, val);
     // TODO: This is terrible...fix it
     switch (T) {
         bool => return std.ascii.eqlIgnoreCase(val, "true"),
-        i64 => return parseInt(T, val) catch |e| {
-            log.err("Invalid string representing i64: {s}", .{val});
+        i64, i128 => return parseInt(T, val) catch |e| {
+            log.err("Invalid string representing {s}: {s}", .{ @typeName(T), val });
             return e;
         },
+        f64, f128 => return std.fmt.parseFloat(T, val) catch |e| {
+            log.err("Invalid string representing {s}: {s}", .{ @typeName(T), val });
+            return e;
+        },
         else => return val,
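
The widened arms above route coercion through std helpers: integers go to a `parseInt` wrapper and the new float arms use `std.fmt.parseFloat`. For reference, the std calls they lean on behave like this (plain std, runnable standalone):

```zig
const std = @import("std");

test "string-to-number coercions used by coerceFromString" {
    // i64/i128 arms ultimately delegate to an integer parse...
    try std.testing.expectEqual(@as(i128, 42), try std.fmt.parseInt(i128, "42", 10));
    // ...and the f64/f128 arms go through parseFloat.
    try std.testing.expectEqual(@as(f128, 1.5), try std.fmt.parseFloat(f128, "1.5"));
}
```
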
@@ -700,14 +798,20 @@ fn parseInt(comptime T: type, val: []const u8) !T {
             return e;
         };
     }
+    if (T == f128) {
+        return @as(f128, date.parseEnglishToTimestamp(val)) catch |e| {
+            log.err("Error coercing date string '{s}' to timestamp value", .{val});
+            return e;
+        };
+    }
     log.err("Error parsing string '{s}' to integer", .{val});
     return rc;
 }

 fn generalAllocPrint(allocator: std.mem.Allocator, val: anytype) !?[]const u8 {
     switch (@typeInfo(@TypeOf(val))) {
-        .Optional => if (val) |v| return generalAllocPrint(allocator, v) else return null,
-        .Array, .Pointer => return try std.fmt.allocPrint(allocator, "{s}", .{val}),
+        .optional => if (val) |v| return generalAllocPrint(allocator, v) else return null,
+        .array, .pointer => return try std.fmt.allocPrint(allocator, "{s}", .{val}),
         else => return try std.fmt.allocPrint(allocator, "{any}", .{val}),
     }
 }
@@ -826,20 +930,20 @@ fn ServerResponse(comptime action: anytype) type {
         RequestId: []u8,
     };
     const Result = @Type(.{
-        .Struct = .{
+        .@"struct" = .{
             .layout = .auto,
             .fields = &[_]std.builtin.Type.StructField{
                 .{
                     .name = action.action_name ++ "Result",
                     .type = T,
-                    .default_value = null,
+                    .default_value_ptr = null,
                     .is_comptime = false,
                     .alignment = 0,
                 },
                 .{
                     .name = "ResponseMetadata",
                     .type = ResponseMetadata,
-                    .default_value = null,
+                    .default_value_ptr = null,
                     .is_comptime = false,
                     .alignment = 0,
                 },
@@ -849,13 +953,13 @@ fn ServerResponse(comptime action: anytype) type {
         },
     });
     return @Type(.{
-        .Struct = .{
+        .@"struct" = .{
             .layout = .auto,
             .fields = &[_]std.builtin.Type.StructField{
                 .{
                     .name = action.action_name ++ "Response",
                     .type = Result,
-                    .default_value = null,
+                    .default_value_ptr = null,
                     .is_comptime = false,
                     .alignment = 0,
                 },
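
`ServerResponse` builds its result types with `@Type`; Zig 0.14 renamed `StructField.default_value` to `default_value_ptr` alongside the `.@"struct"` tag rename. A self-contained sketch of the same reification pattern (the `Counter` type and its field are invented for illustration):

```zig
const std = @import("std");

// Build `struct { count: usize }` at comptime using the 0.14 names:
// .@"struct" instead of .Struct, .default_value_ptr instead of .default_value.
fn Counter() type {
    return @Type(.{
        .@"struct" = .{
            .layout = .auto,
            .fields = &[_]std.builtin.Type.StructField{.{
                .name = "count",
                .type = usize,
                .default_value_ptr = null,
                .is_comptime = false,
                .alignment = 0, // 0 = natural alignment
            }},
            .decls = &[_]std.builtin.Type.Declaration{},
            .is_tuple = false,
        },
    });
}

test "reified struct" {
    const c = Counter(){ .count = 3 };
    try std.testing.expectEqual(@as(usize, 3), c.count);
}
```
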
@@ -915,8 +1019,8 @@ fn FullResponse(comptime action: anytype) type {
 }
 fn safeFree(allocator: std.mem.Allocator, obj: anytype) void {
     switch (@typeInfo(@TypeOf(obj))) {
-        .Pointer => allocator.free(obj),
-        .Optional => if (obj) |o| safeFree(allocator, o),
+        .pointer => allocator.free(obj),
+        .optional => if (obj) |o| safeFree(allocator, o),
         else => {},
     }
 }
@@ -930,6 +1034,7 @@ fn buildPath(
     comptime ActionRequest: type,
     request: anytype,
     encode_slash: bool,
+    replaced_fields: *std.ArrayList([]const u8),
 ) ![]const u8 {
     var buffer = try std.ArrayList(u8).initCapacity(allocator, raw_uri.len);
     // const writer = buffer.writer();
@@ -951,6 +1056,7 @@ fn buildPath(
         const replacement_label = raw_uri[start..end];
         inline for (std.meta.fields(ActionRequest)) |field| {
             if (std.mem.eql(u8, request.fieldNameFor(field.name), replacement_label)) {
+                try replaced_fields.append(replacement_label);
                 var replacement_buffer = try std.ArrayList(u8).initCapacity(allocator, raw_uri.len);
                 defer replacement_buffer.deinit();
                 var encoded_buffer = try std.ArrayList(u8).initCapacity(allocator, raw_uri.len);
@@ -1023,7 +1129,7 @@ fn buildQuery(allocator: std.mem.Allocator, request: anytype) ![]const u8 {
     var prefix = "?";
     if (@hasDecl(@TypeOf(request), "http_query")) {
         const query_arguments = @field(@TypeOf(request), "http_query");
-        inline for (@typeInfo(@TypeOf(query_arguments)).Struct.fields) |arg| {
+        inline for (@typeInfo(@TypeOf(query_arguments)).@"struct".fields) |arg| {
             const val = @field(request, arg.name);
             if (try addQueryArg(arg.type, prefix, @field(query_arguments, arg.name), val, writer))
                 prefix = "&";
@@ -1034,14 +1140,14 @@ fn buildQuery(allocator: std.mem.Allocator, request: anytype) ![]const u8 {

 fn addQueryArg(comptime ValueType: type, prefix: []const u8, key: []const u8, value: anytype, writer: anytype) !bool {
     switch (@typeInfo(@TypeOf(value))) {
-        .Optional => {
+        .optional => {
             if (value) |v|
                 return try addQueryArg(ValueType, prefix, key, v, writer);
             return false;
         },
         // if this is a pointer, we want to make sure it is more than just a string
-        .Pointer => |ptr| {
-            if (ptr.child == u8 or ptr.size != .Slice) {
+        .pointer => |ptr| {
+            if (ptr.child == u8 or ptr.size != .slice) {
                 // This is just a string
                 return try addBasicQueryArg(prefix, key, value, writer);
             }
@@ -1052,7 +1158,7 @@ fn addQueryArg(comptime ValueType: type, prefix: []const u8, key: []const u8, va
             }
             return std.mem.eql(u8, "&", p);
         },
-        .Array => |arr| {
+        .array => |arr| {
             if (arr.child == u8)
                 return try addBasicQueryArg(prefix, key, value, writer);
             var p = prefix;
@@ -1172,8 +1278,8 @@ fn reportTraffic(
 fn typeForField(comptime T: type, comptime field_name: []const u8) !type {
     const ti = @typeInfo(T);
     switch (ti) {
-        .Struct => {
-            inline for (ti.Struct.fields) |field| {
+        .@"struct" => {
+            inline for (ti.@"struct".fields) |field| {
                 if (std.mem.eql(u8, field.name, field_name))
                     return field.type;
             }
@@ -1187,7 +1293,7 @@ test "custom serialization for map objects" {
     const allocator = std.testing.allocator;
     var buffer = std.ArrayList(u8).init(allocator);
     defer buffer.deinit();
-    var tags = try std.ArrayList(@typeInfo(try typeForField(services.lambda.tag_resource.Request, "tags")).Pointer.child).initCapacity(allocator, 2);
+    var tags = try std.ArrayList(@typeInfo(try typeForField(services.lambda.tag_resource.Request, "tags")).pointer.child).initCapacity(allocator, 2);
     defer tags.deinit();
     tags.appendAssumeCapacity(.{ .key = "Foo", .value = "Bar" });
     tags.appendAssumeCapacity(.{ .key = "Baz", .value = "Qux" });
@@ -1204,6 +1310,58 @@ test "custom serialization for map objects" {
     , buffer.items);
 }

+test "proper serialization for kms" {
+    // Github issue #8
+    // https://github.com/elerch/aws-sdk-for-zig/issues/8
+    const allocator = std.testing.allocator;
+    var buffer = std.ArrayList(u8).init(allocator);
+    defer buffer.deinit();
+    const req = services.kms.encrypt.Request{
+        .encryption_algorithm = "SYMMETRIC_DEFAULT",
+        // Since encryption_context is not null, we expect "{}" to be the value
+        // here, not "[]", because this is our special AWS map pattern
+        .encryption_context = &.{},
+        .key_id = "42",
+        .plaintext = "foo",
+        .dry_run = false,
+        .grant_tokens = &[_][]const u8{},
+    };
+    try json.stringify(req, .{ .whitespace = .{} }, buffer.writer());
+    try std.testing.expectEqualStrings(
+        \\{
+        \\  "KeyId": "42",
+        \\  "Plaintext": "foo",
+        \\  "EncryptionContext": {},
+        \\  "GrantTokens": [],
+        \\  "EncryptionAlgorithm": "SYMMETRIC_DEFAULT",
+        \\  "DryRun": false
+        \\}
+    , buffer.items);
+
+    var buffer_null = std.ArrayList(u8).init(allocator);
+    defer buffer_null.deinit();
+    const req_null = services.kms.encrypt.Request{
+        .encryption_algorithm = "SYMMETRIC_DEFAULT",
+        // Since encryption_context here *IS* null, we expect simply "null" to be the value
+        .encryption_context = null,
+        .key_id = "42",
+        .plaintext = "foo",
+        .dry_run = false,
+        .grant_tokens = &[_][]const u8{},
+    };
+    try json.stringify(req_null, .{ .whitespace = .{} }, buffer_null.writer());
+    try std.testing.expectEqualStrings(
+        \\{
+        \\  "KeyId": "42",
+        \\  "Plaintext": "foo",
+        \\  "EncryptionContext": null,
+        \\  "GrantTokens": [],
+        \\  "EncryptionAlgorithm": "SYMMETRIC_DEFAULT",
+        \\  "DryRun": false
+        \\}
+    , buffer_null.items);
+}
+
 test "REST Json v1 builds proper queries" {
     const allocator = std.testing.allocator;
     const svs = Services(.{.lambda}){};
@@ -1240,23 +1398,27 @@ test "REST Json v1 serializes lists in queries" {
 }
 test "REST Json v1 buildpath substitutes" {
     const allocator = std.testing.allocator;
+    var al = std.ArrayList([]const u8).init(allocator);
+    defer al.deinit();
     const svs = Services(.{.lambda}){};
     const request = svs.lambda.list_functions.Request{
         .max_items = 1,
     };
     const input_path = "https://myhost/{MaxItems}/";
-    const output_path = try buildPath(allocator, input_path, @TypeOf(request), request, true);
+    const output_path = try buildPath(allocator, input_path, @TypeOf(request), request, true, &al);
     defer allocator.free(output_path);
     try std.testing.expectEqualStrings("https://myhost/1/", output_path);
 }
 test "REST Json v1 buildpath handles restricted characters" {
     const allocator = std.testing.allocator;
+    var al = std.ArrayList([]const u8).init(allocator);
+    defer al.deinit();
     const svs = Services(.{.lambda}){};
     const request = svs.lambda.list_functions.Request{
         .marker = ":",
     };
     const input_path = "https://myhost/{Marker}/";
-    const output_path = try buildPath(allocator, input_path, @TypeOf(request), request, true);
+    const output_path = try buildPath(allocator, input_path, @TypeOf(request), request, true, &al);
     defer allocator.free(output_path);
     try std.testing.expectEqualStrings("https://myhost/%3A/", output_path);
 }
@@ -1380,6 +1542,49 @@ const TestOptions = struct {

     const Self = @This();

+    /// Builtin hashmap for strings as keys.
+    /// Key memory is managed by the caller. Keys and values
+    /// will not automatically be freed.
+    pub fn StringCaseInsensitiveHashMap(comptime V: type) type {
+        return std.HashMap([]const u8, V, StringInsensitiveContext, std.hash_map.default_max_load_percentage);
+    }
+
+    pub const StringInsensitiveContext = struct {
+        pub fn hash(self: @This(), s: []const u8) u64 {
+            _ = self;
+            return hashString(s);
+        }
+        pub fn eql(self: @This(), a: []const u8, b: []const u8) bool {
+            _ = self;
+            return eqlString(a, b);
+        }
+    };
+
+    pub fn eqlString(a: []const u8, b: []const u8) bool {
+        return std.ascii.eqlIgnoreCase(a, b);
+    }
+
+    pub fn hashString(s: []const u8) u64 {
+        var buf: [1024]u8 = undefined;
+        if (s.len > buf.len) unreachable; // tolower has a debug assert, but we want non-debug check too
+        const lower_s = std.ascii.lowerString(buf[0..], s);
+        return std.hash.Wyhash.hash(0, lower_s);
+    }
+
+    fn expectNoDuplicateHeaders(self: *Self) !void {
+        // As header keys are case-insensitive, so must the duplicate check be
+        var hm = StringCaseInsensitiveHashMap(void).init(self.allocator);
+        try hm.ensureTotalCapacity(@intCast(self.request_headers.len));
+        defer hm.deinit();
+        for (self.request_headers) |h| {
+            if (hm.getKey(h.name)) |_| {
+                log.err("Duplicate key detected. Key name: {s}", .{h.name});
+                return error.duplicateKeyDetected;
+            }
+            try hm.put(h.name, {});
+        }
+    }
+
     fn expectHeader(self: *Self, name: []const u8, value: []const u8) !void {
         for (self.request_headers) |h|
             if (std.ascii.eqlIgnoreCase(name, h.name) and
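
The harness's `hashString` lower-cases into a fixed 1024-byte buffer before hashing, which caps key length. A streaming variant avoids the cap; a minimal standalone sketch of the same case-insensitive map idea (names here are illustrative):

```zig
const std = @import("std");

const CaseInsensitiveContext = struct {
    pub fn hash(_: @This(), s: []const u8) u64 {
        // Feed lower-cased bytes into the hasher one at a time: no scratch
        // buffer, so keys of any length work.
        var h = std.hash.Wyhash.init(0);
        for (s) |c| h.update(&[_]u8{std.ascii.toLower(c)});
        return h.final();
    }
    pub fn eql(_: @This(), a: []const u8, b: []const u8) bool {
        return std.ascii.eqlIgnoreCase(a, b);
    }
};

test "case-insensitive key lookup" {
    var map = std.HashMap([]const u8, void, CaseInsensitiveContext, std.hash_map.default_max_load_percentage).init(std.testing.allocator);
    defer map.deinit();
    try map.put("Content-Type", {});
    try std.testing.expect(map.contains("content-type"));
}
```
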
@@ -1593,6 +1798,58 @@ test "query_no_input: sts getCallerIdentity comptime" {
     try std.testing.expectEqualStrings("123456789012", call.response.account.?);
     try std.testing.expectEqualStrings("8f0d54da-1230-40f7-b4ac-95015c4b84cd", call.response_metadata.request_id);
 }
+test "query_with_input: iam getRole runtime" {
+    // sqs switched from query to json in aws sdk for go v2 commit f5a08768ef820ff5efd62a49ba50c61c9ca5dbcb
+    const allocator = std.testing.allocator;
+    var test_harness = TestSetup.init(.{
+        .allocator = allocator,
+        .server_response =
+        \\<GetRoleResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
+        \\<GetRoleResult>
+        \\  <Role>
+        \\    <Path>/application_abc/component_xyz/</Path>
+        \\    <Arn>arn:aws:iam::123456789012:role/application_abc/component_xyz/S3Access</Arn>
+        \\    <RoleName>S3Access</RoleName>
+        \\    <AssumeRolePolicyDocument>
+        \\      {"Version":"2012-10-17","Statement":[{"Effect":"Allow",
+        \\      "Principal":{"Service":["ec2.amazonaws.com"]},"Action":["sts:AssumeRole"]}]}
+        \\    </AssumeRolePolicyDocument>
+        \\    <CreateDate>2012-05-08T23:34:01Z</CreateDate>
+        \\    <RoleId>AROADBQP57FF2AEXAMPLE</RoleId>
+        \\    <RoleLastUsed>
+        \\      <LastUsedDate>2019-11-20T17:09:20Z</LastUsedDate>
+        \\      <Region>us-east-1</Region>
+        \\    </RoleLastUsed>
+        \\  </Role>
+        \\</GetRoleResult>
+        \\<ResponseMetadata>
+        \\  <RequestId>df37e965-9967-11e1-a4c3-270EXAMPLE04</RequestId>
+        \\</ResponseMetadata>
+        \\</GetRoleResponse>
+        ,
+        .server_response_headers = &.{
+            .{ .name = "Content-Type", .value = "text/xml" },
+            .{ .name = "x-amzn-RequestId", .value = "df37e965-9967-11e1-a4c3-270EXAMPLE04" },
+        },
+    });
+    defer test_harness.deinit();
+    const options = try test_harness.start();
+    const iam = (Services(.{.iam}){}).iam;
+    const call = try test_harness.client.call(iam.get_role.Request{
+        .role_name = "S3Access",
+    }, options);
+    defer call.deinit();
+    test_harness.stop();
+    // Request expectations
+    try std.testing.expectEqual(std.http.Method.POST, test_harness.request_options.request_method);
+    try std.testing.expectEqualStrings("/", test_harness.request_options.request_target);
+    try std.testing.expectEqualStrings(
+        \\Action=GetRole&Version=2010-05-08&RoleName=S3Access
+    , test_harness.request_options.request_body);
+    // Response expectations
+    try std.testing.expectEqualStrings("arn:aws:iam::123456789012:role/application_abc/component_xyz/S3Access", call.response.role.arn);
+    try std.testing.expectEqualStrings("df37e965-9967-11e1-a4c3-270EXAMPLE04", call.response_metadata.request_id);
+}
 test "query_with_input: sts getAccessKeyInfo runtime" {
     // sqs switched from query to json in aws sdk for go v2 commit f5a08768ef820ff5efd62a49ba50c61c9ca5dbcb
     const allocator = std.testing.allocator;
@@ -1850,7 +2107,7 @@ test "rest_json_1_work_with_lambda: lambda tagResource (only), to excercise zig
     defer test_harness.deinit();
     const options = try test_harness.start();
     const lambda = (Services(.{.lambda}){}).lambda;
-    var tags = try std.ArrayList(@typeInfo(try typeForField(lambda.tag_resource.Request, "tags")).Pointer.child).initCapacity(allocator, 1);
+    var tags = try std.ArrayList(@typeInfo(try typeForField(lambda.tag_resource.Request, "tags")).pointer.child).initCapacity(allocator, 1);
     defer tags.deinit();
     tags.appendAssumeCapacity(.{ .key = "Foo", .value = "Bar" });
     const req = services.lambda.tag_resource.Request{ .resource = "arn:aws:lambda:us-west-2:550620852718:function:awsome-lambda-LambdaStackawsomeLambda", .tags = tags.items };
@@ -1861,7 +2118,6 @@ test "rest_json_1_work_with_lambda: lambda tagResource (only), to excercise zig
     try std.testing.expectEqual(std.http.Method.POST, test_harness.request_options.request_method);
     try std.testing.expectEqualStrings(
         \\{
         \\  "Resource": "arn:aws:lambda:us-west-2:550620852718:function:awsome-lambda-LambdaStackawsomeLambda",
         \\  "Tags": {
         \\    "Foo": "Bar"
         \\  }
@@ -1872,6 +2128,45 @@ test "rest_json_1_work_with_lambda: lambda tagResource (only), to excercise zig
     // Response expectations
     try std.testing.expectEqualStrings("a521e152-6e32-4e67-9fb3-abc94e34551b", call.response_metadata.request_id);
 }
+test "rest_json_1_url_parameters_not_in_request: lambda update_function_code" {
+    const allocator = std.testing.allocator;
+    var test_harness = TestSetup.init(.{
+        .allocator = allocator,
+        .server_response = "{\"CodeSize\": 42}",
+        .server_response_status = .ok,
+        .server_response_headers = &.{
+            .{ .name = "Content-Type", .value = "application/json" },
+            .{ .name = "x-amzn-RequestId", .value = "a521e152-6e32-4e67-9fb3-abc94e34551b" },
+        },
+    });
+    defer test_harness.deinit();
+    const options = try test_harness.start();
+    const lambda = (Services(.{.lambda}){}).lambda;
+    const architectures = [_][]const u8{"x86_64"};
+    const arches: [][]const u8 = @constCast(architectures[0..]);
+    const req = services.lambda.update_function_code.Request{
+        .function_name = "functionname",
+        .architectures = arches,
+        .zip_file = "zipfile",
+    };
+    const call = try Request(lambda.update_function_code).call(req, options);
+    defer call.deinit();
+    test_harness.stop();
+    // Request expectations
+    try std.testing.expectEqual(std.http.Method.PUT, test_harness.request_options.request_method);
+    try std.testing.expectEqualStrings(
+        \\{
+        \\  "ZipFile": "zipfile",
+        \\  "Architectures": [
+        \\    "x86_64"
+        \\  ]
+        \\}
+    , test_harness.request_options.request_body);
+    // Due to 17015, we see %253A instead of %3A
+    try std.testing.expectEqualStrings("/2015-03-31/functions/functionname/code", test_harness.request_options.request_target);
+    // Response expectations
+    try std.testing.expectEqualStrings("a521e152-6e32-4e67-9fb3-abc94e34551b", call.response_metadata.request_id);
+}
 test "ec2_query_no_input: EC2 describe regions" {
     const allocator = std.testing.allocator;
     var test_harness = TestSetup.init(.{
@@ -1904,7 +2199,6 @@ test "ec2_query_no_input: EC2 describe regions" {
 // riscv64-linux also seems to have another problem with LLVM basically infinitely
 // doing something. My guess is the @embedFile is freaking out LLVM
 test "ec2_query_with_input: EC2 describe instances" {
-    if (builtin.cpu.arch == .x86_64 and builtin.os.tag == .windows) return error.SkipZigTest;
     if (builtin.cpu.arch == .riscv64 and builtin.os.tag == .linux) return error.SkipZigTest;
     const allocator = std.testing.allocator;
     var test_harness = TestSetup.init(.{
@@ -1993,6 +2287,9 @@ test "rest_xml_anything_but_s3: CloudFront list key groups" {
     try std.testing.expectEqual(@as(i64, 100), call.response.key_group_list.?.max_items);
 }
 test "rest_xml_with_input: S3 put object" {
+    // const old = std.testing.log_level;
+    // defer std.testing.log_level = old;
+    // std.testing.log_level = .debug;
     const allocator = std.testing.allocator;
     var test_harness = TestSetup.init(.{
         .allocator = allocator,
@@ -2019,13 +2316,14 @@ test "rest_xml_with_input: S3 put object" {
         .body = "bar",
         .storage_class = "STANDARD",
     }, s3opts);
-    defer result.deinit();
+    for (test_harness.request_options.request_headers) |header| {
+        std.log.info("Request header: {s}: {s}", .{ header.name, header.value });
+    }
+    try test_harness.request_options.expectNoDuplicateHeaders();
     std.log.info("PutObject Request id: {s}", .{result.response_metadata.request_id});
     std.log.info("PutObject etag: {s}", .{result.response.e_tag.?});
     //mysfitszj3t6webstack-hostingbucketa91a61fe-1ep3ezkgwpxr0.s3.us-west-2.amazonaws.com
+    defer result.deinit();
     test_harness.stop();
     // Request expectations
     try std.testing.expectEqual(std.http.Method.PUT, test_harness.request_options.request_method);
@@ -2039,3 +2337,59 @@ test "rest_xml_with_input: S3 put object" {
     try std.testing.expectEqualStrings("AES256", result.response.server_side_encryption.?);
     try std.testing.expectEqualStrings("37b51d194a7513e45b56f6524f2d51f2", result.response.e_tag.?);
 }
+test "raw ECR timestamps" {
+    // This is a way to test the json parsing. Ultimately the more robust tests
+    // should be preferred, but in this case we were tracking down an issue
+    // for which the root cause was the incorrect type being passed to the parse
+    // routine
+    const allocator = std.testing.allocator;
+    const ecr = (Services(.{.ecr}){}).ecr;
+    const options = json.ParseOptions{
+        .allocator = allocator,
+        .allow_camel_case_conversion = true, // new option
+        .allow_snake_case_conversion = true, // new option
+        .allow_unknown_fields = true, // new option. Cannot yet handle non-struct fields though
+        .allow_missing_fields = false, // new option. Cannot yet handle non-struct fields though
+    };
+    var stream = json.TokenStream.init(
+        \\{"authorizationData":[{"authorizationToken":"***","expiresAt":1.7385984915E9,"proxyEndpoint":"https://146325435496.dkr.ecr.us-west-2.amazonaws.com"}]}
+    );
+    const ptr = try json.parse(ecr.get_authorization_token.Response, &stream, options);
+    defer json.parseFree(ecr.get_authorization_token.Response, ptr, options);
+}
+test "json_1_1: ECR timestamps" {
+    // See: https://github.com/elerch/aws-sdk-for-zig/issues/5
+    // const old = std.testing.log_level;
+    // defer std.testing.log_level = old;
+    // std.testing.log_level = .debug;
+    const allocator = std.testing.allocator;
+    var test_harness = TestSetup.init(.{
+        .allocator = allocator,
+        .server_response =
+        \\{"authorizationData":[{"authorizationToken":"***","expiresAt":1.7385984915E9,"proxyEndpoint":"https://146325435496.dkr.ecr.us-west-2.amazonaws.com"}]}
+        // \\{"authorizationData":[{"authorizationToken":"***","expiresAt":1.738598491557E9,"proxyEndpoint":"https://146325435496.dkr.ecr.us-west-2.amazonaws.com"}]}
+        ,
+        .server_response_headers = &.{
+            .{ .name = "Content-Type", .value = "application/json" },
+            .{ .name = "x-amzn-RequestId", .value = "QBI72OUIN8U9M9AG6PCSADJL4JVV4KQNSO5AEMVJF66Q9ASUAAJG" },
+        },
+    });
+    defer test_harness.deinit();
+    const options = try test_harness.start();
+    const ecr = (Services(.{.ecr}){}).ecr;
+    std.log.debug("Typeof response {}", .{@TypeOf(ecr.get_authorization_token.Response{})});
+    const call = try test_harness.client.call(ecr.get_authorization_token.Request{}, options);
+    defer call.deinit();
+    test_harness.stop();
+    // Request expectations
+    try std.testing.expectEqual(std.http.Method.POST, test_harness.request_options.request_method);
+    try std.testing.expectEqualStrings("/", test_harness.request_options.request_target);
+    try test_harness.request_options.expectHeader("X-Amz-Target", "AmazonEC2ContainerRegistry_V20150921.GetAuthorizationToken");
+    // Response expectations
+    try std.testing.expectEqualStrings("QBI72OUIN8U9M9AG6PCSADJL4JVV4KQNSO5AEMVJF66Q9ASUAAJG", call.response_metadata.request_id);
+    try std.testing.expectEqual(@as(usize, 1), call.response.authorization_data.?.len);
+    try std.testing.expectEqualStrings("***", call.response.authorization_data.?[0].authorization_token.?);
+    try std.testing.expectEqualStrings("https://146325435496.dkr.ecr.us-west-2.amazonaws.com", call.response.authorization_data.?[0].proxy_endpoint.?);
+    // try std.testing.expectEqual(@as(i64, 1.73859841557E9), call.response.authorization_data.?[0].expires_at.?);
+    try std.testing.expectEqual(@as(f128, 1.7385984915E9), call.response.authorization_data.?[0].expires_at.?);
+}

src/aws_credentials.zig

@@ -11,7 +11,56 @@ const std = @import("std");
 const builtin = @import("builtin");
 const auth = @import("aws_authentication.zig");

-const log = std.log.scoped(.aws_credentials);
+const scoped_log = std.log.scoped(.aws_credentials);
+
+/// Specifies logging level. This should not be touched unless the normal
+/// zig logging capabilities are inaccessible (e.g. during a build)
+pub var log_level: std.log.Level = .debug;
+
+/// Turn off logging completely
+pub var logs_off: bool = false;
+const log = struct {
+    /// Log an error message. This log level is intended to be used
+    /// when something has gone wrong. This might be recoverable or might
+    /// be followed by the program exiting.
+    pub fn err(
+        comptime format: []const u8,
+        args: anytype,
+    ) void {
+        if (!logs_off and @intFromEnum(std.log.Level.err) <= @intFromEnum(log_level))
+            scoped_log.err(format, args);
+    }
+
+    /// Log a warning message. This log level is intended to be used if
+    /// it is uncertain whether something has gone wrong or not, but the
+    /// circumstances would be worth investigating.
+    pub fn warn(
+        comptime format: []const u8,
+        args: anytype,
+    ) void {
+        if (!logs_off and @intFromEnum(std.log.Level.warn) <= @intFromEnum(log_level))
+            scoped_log.warn(format, args);
+    }
+
+    /// Log an info message. This log level is intended to be used for
+    /// general messages about the state of the program.
+    pub fn info(
+        comptime format: []const u8,
+        args: anytype,
+    ) void {
+        if (!logs_off and @intFromEnum(std.log.Level.info) <= @intFromEnum(log_level))
+            scoped_log.info(format, args);
+    }
+
+    /// Log a debug message. This log level is intended to be used for
+    /// messages which are only useful for debugging.
+    pub fn debug(
+        comptime format: []const u8,
+        args: anytype,
+    ) void {
+        if (!logs_off and @intFromEnum(std.log.Level.debug) <= @intFromEnum(log_level))
+            scoped_log.debug(format, args);
+    }
+};

 pub const Profile = struct {
     /// Credential file. Defaults to AWS_SHARED_CREDENTIALS_FILE or ~/.aws/credentials
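
This `scoped_log` wrapper (repeated below for `.awshttp` and `.aws_signing`) lets a consumer mute or throttle SDK logging in contexts where `std_options` isn't reachable, such as a build script. A hypothetical usage sketch (the import path is assumed for illustration):

```zig
const credentials = @import("aws_credentials.zig"); // path assumed

fn quietSdkLogs() void {
    // Keep errors but drop warn/info/debug...
    credentials.log_level = .err;
    // ...or silence the module entirely.
    credentials.logs_off = true;
}
```
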

src/aws_http.zig

@@ -17,7 +17,57 @@ const CN_NORTHWEST_1_HASH = std.hash_map.hashString("cn-northwest-1");
 const US_ISO_EAST_1_HASH = std.hash_map.hashString("us-iso-east-1");
 const US_ISOB_EAST_1_HASH = std.hash_map.hashString("us-isob-east-1");

-const log = std.log.scoped(.awshttp);
+const scoped_log = std.log.scoped(.awshttp);
+
+/// Specifies logging level. This should not be touched unless the normal
+/// zig logging capabilities are inaccessible (e.g. during a build)
+pub var log_level: std.log.Level = .debug;
+
+/// Turn off logging completely
+pub var logs_off: bool = false;
+const log = struct {
+    /// Log an error message. This log level is intended to be used
+    /// when something has gone wrong. This might be recoverable or might
+    /// be followed by the program exiting.
+    pub fn err(
+        comptime format: []const u8,
+        args: anytype,
+    ) void {
+        if (!logs_off and @intFromEnum(std.log.Level.err) <= @intFromEnum(log_level))
+            scoped_log.err(format, args);
+    }
+
+    /// Log a warning message. This log level is intended to be used if
+    /// it is uncertain whether something has gone wrong or not, but the
+    /// circumstances would be worth investigating.
+    pub fn warn(
+        comptime format: []const u8,
+        args: anytype,
+    ) void {
+        if (!logs_off and @intFromEnum(std.log.Level.warn) <= @intFromEnum(log_level))
+            scoped_log.warn(format, args);
+    }
+
+    /// Log an info message. This log level is intended to be used for
+    /// general messages about the state of the program.
+    pub fn info(
+        comptime format: []const u8,
+        args: anytype,
+    ) void {
+        if (!logs_off and @intFromEnum(std.log.Level.info) <= @intFromEnum(log_level))
+            scoped_log.info(format, args);
+    }
+
+    /// Log a debug message. This log level is intended to be used for
+    /// messages which are only useful for debugging.
+    pub fn debug(
+        comptime format: []const u8,
+        args: anytype,
+    ) void {
+        if (!logs_off and @intFromEnum(std.log.Level.debug) <= @intFromEnum(log_level))
+            scoped_log.debug(format, args);
+    }
+};

 pub const AwsError = error{
     AddHeaderError,
@@ -190,6 +240,16 @@ pub const AwsHttp = struct {
             .response_storage = .{ .dynamic = &resp_payload },
             .raw_uri = true,
             .location = .{ .url = url },
+            // we need full control over most headers. I wish libraries would do a
+            // better job of having default headers as an opt-in...
+            .headers = .{
+                .host = .omit,
+                .authorization = .omit,
+                .user_agent = .omit,
+                .connection = .default, // we can let the client manage this...it has no impact to us
+                .accept_encoding = .default, // accept encoding (gzip, deflate) *should* be ok
+                .content_type = .omit,
+            },
             .extra_headers = headers.items,
         });
         // TODO: Need to test for payloads > 2^14. I believe one of our tests does this, but not sure
@@ -241,6 +301,7 @@ pub const AwsHttp = struct {

 fn getRegion(service: []const u8, region: []const u8) []const u8 {
     if (std.mem.eql(u8, service, "cloudfront")) return "us-east-1";
+    if (std.mem.eql(u8, service, "iam")) return "us-east-1";
     return region;
 }
@@ -328,6 +389,26 @@ fn endpointException(
     dualstack: []const u8,
     domain: []const u8,
 ) !?EndPoint {
+    // Global endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#global-endpoints):
+    //   ✓ Amazon CloudFront
+    //     AWS Global Accelerator
+    //   ✓ AWS Identity and Access Management (IAM)
+    //     AWS Network Manager
+    //     AWS Organizations
+    //     Amazon Route 53
+    //     AWS Shield Advanced
+    //     AWS WAF Classic
+
+    if (std.mem.eql(u8, service, "iam")) {
+        return EndPoint{
+            .uri = try allocator.dupe(u8, "https://iam.amazonaws.com"),
+            .host = try allocator.dupe(u8, "iam.amazonaws.com"),
+            .scheme = "https",
+            .port = 443,
+            .allocator = allocator,
+            .path = try allocator.dupe(u8, request.path),
+        };
+    }
     if (std.mem.eql(u8, service, "cloudfront")) {
         return EndPoint{
             .uri = try allocator.dupe(u8, "https://cloudfront.amazonaws.com"),

src/aws_http_base.zig

@@ -22,7 +22,7 @@ pub const Result = struct {
             self.allocator.free(h.value);
         }
         self.allocator.free(self.headers);
-        log.debug("http result deinit complete", .{});
+        //log.debug("http result deinit complete", .{});
         return;
     }
 };

src/aws_signing.zig

@@ -3,8 +3,57 @@ const base = @import("aws_http_base.zig");
 const auth = @import("aws_authentication.zig");
 const date = @import("date.zig");

-const log = std.log.scoped(.aws_signing);
+const scoped_log = std.log.scoped(.aws_signing);
+
+/// Specifies logging level. This should not be touched unless the normal
+/// zig logging capabilities are inaccessible (e.g. during a build)
+pub var log_level: std.log.Level = .debug;
+
+/// Turn off logging completely
+pub var logs_off: bool = false;
+const log = struct {
+    /// Log an error message. This log level is intended to be used
+    /// when something has gone wrong. This might be recoverable or might
+    /// be followed by the program exiting.
+    pub fn err(
+        comptime format: []const u8,
+        args: anytype,
+    ) void {
+        if (!logs_off and @intFromEnum(std.log.Level.err) <= @intFromEnum(log_level))
+            scoped_log.err(format, args);
+    }
+
+    /// Log a warning message. This log level is intended to be used if
+    /// it is uncertain whether something has gone wrong or not, but the
+    /// circumstances would be worth investigating.
+    pub fn warn(
+        comptime format: []const u8,
+        args: anytype,
+    ) void {
+        if (!logs_off and @intFromEnum(std.log.Level.warn) <= @intFromEnum(log_level))
+            scoped_log.warn(format, args);
+    }
+
+    /// Log an info message. This log level is intended to be used for
+    /// general messages about the state of the program.
+    pub fn info(
+        comptime format: []const u8,
+        args: anytype,
+    ) void {
+        if (!logs_off and @intFromEnum(std.log.Level.info) <= @intFromEnum(log_level))
+            scoped_log.info(format, args);
+    }
+
+    /// Log a debug message. This log level is intended to be used for
+    /// messages which are only useful for debugging.
+    pub fn debug(
+        comptime format: []const u8,
+        args: anytype,
+    ) void {
+        if (!logs_off and @intFromEnum(std.log.Level.debug) <= @intFromEnum(log_level))
+            scoped_log.debug(format, args);
+    }
+};
 // TODO: Remove this?! This is an aws_signing, so we should know a thing
 // or two about aws. So perhaps the right level of abstraction here
 // is to have our service signing idiosyncracies dealt with in this
@@ -392,7 +441,7 @@ fn verifyParsedAuthorization(
     // Credential=ACCESS/20230908/us-west-2/s3/aws4_request
     // SignedHeaders=accept;content-length;content-type;host;x-amz-content-sha256;x-amz-date;x-amz-storage-class
     // Signature=fcc43ce73a34c9bd1ddf17e8a435f46a859812822f944f9eeb2aabcd64b03523
-    var credential_iterator = std.mem.split(u8, credential, "/");
+    var credential_iterator = std.mem.splitScalar(u8, credential, '/');
     const access_key = credential_iterator.next().?;
     const credentials = credentials_fn(access_key) orelse return error.CredentialsNotFound;
     // TODO: https://stackoverflow.com/questions/29276609/aws-authentication-requires-a-valid-date-or-x-amz-date-header-curl
@@ -613,12 +662,12 @@ fn canonicalUri(allocator: std.mem.Allocator, path: []const u8, double_encode: b
     }
     defer allocator.free(encoded_once);
     var encoded_twice = try encodeUri(allocator, encoded_once);
+    defer allocator.free(encoded_twice);
     log.debug("encoded path (2): {s}", .{encoded_twice});
     if (std.mem.lastIndexOf(u8, encoded_twice, "?")) |i| {
-        _ = allocator.resize(encoded_twice, i);
-        return encoded_twice[0..i];
+        return try allocator.dupe(u8, encoded_twice[0..i]);
     }
-    return encoded_twice;
+    return try allocator.dupe(u8, encoded_twice);
 }

 fn encodeParamPart(allocator: std.mem.Allocator, path: []const u8) ![]const u8 {
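
Why the `dupe` change: `Allocator.resize` is best-effort and returns `false` when the block can't be shrunk in place. The old code discarded that result, so on failure the returned slice length disagreed with what the allocator had recorded, and a later `free` could be handed the wrong length. Duping the prefix always yields a slice whose length matches its own allocation. A standalone sketch of the pattern:

```zig
const std = @import("std");

test "shrink via dupe when resize cannot be trusted" {
    const a = std.testing.allocator;
    const big = try a.alloc(u8, 64);
    defer a.free(big); // big keeps its original 64-byte length
    @memcpy(big[0..5], "hello");
    // Copy out the prefix instead of resizing in place; `small` is a
    // well-formed 5-byte allocation, so free() stays correct.
    const small = try a.dupe(u8, big[0..5]);
    defer a.free(small);
    try std.testing.expectEqualStrings("hello", small);
}
```
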
@@ -750,7 +799,7 @@ fn canonicalQueryString(allocator: std.mem.Allocator, path: []const u8) ![]const
     const query = path[first_question.? + 1 ..];

     // Split this by component
-    var portions = std.mem.split(u8, query, "&");
+    var portions = std.mem.splitScalar(u8, query, '&');
     var sort_me = std.ArrayList([]const u8).init(allocator);
     defer sort_me.deinit();
     while (portions.next()) |item|
@@ -887,6 +936,7 @@ fn canonicalHeaderValue(allocator: std.mem.Allocator, value: []const u8) ![]cons
     const in_quote = false;
     var start: usize = 0;
     const rc = try allocator.alloc(u8, value.len);
+    defer allocator.free(rc);
     var rc_inx: usize = 0;
     for (value, 0..) |c, i| {
         if (!started and !std.ascii.isWhitespace(c)) {
@@ -904,8 +954,7 @@ fn canonicalHeaderValue(allocator: std.mem.Allocator, value: []const u8) ![]cons
     // Trim end
     while (std.ascii.isWhitespace(rc[rc_inx - 1]))
         rc_inx -= 1;
-    _ = allocator.resize(rc, rc_inx);
-    return rc[0..rc_inx];
+    return try allocator.dupe(u8, rc[0..rc_inx]);
 }
 fn lessThan(context: void, lhs: std.http.Header, rhs: std.http.Header) bool {
     _ = context;
@@ -937,6 +986,7 @@ test "canonical uri" {
     const path = "/documents and settings/?foo=bar";
     const expected = "/documents%2520and%2520settings/";
     const actual = try canonicalUri(allocator, path, true);

+    defer allocator.free(actual);
     try std.testing.expectEqualStrings(expected, actual);

src/json.zig (167 changes)

@@ -1560,21 +1560,21 @@ fn skipValue(tokens: *TokenStream) SkipValueError!void {

 fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options: ParseOptions) !T {
     switch (@typeInfo(T)) {
-        .Bool => {
+        .bool => {
             return switch (token) {
                 .True => true,
                 .False => false,
                 else => error.UnexpectedToken,
             };
         },
-        .Float, .ComptimeFloat => {
+        .float, .comptime_float => {
             const numberToken = switch (token) {
                 .Number => |n| n,
                 else => return error.UnexpectedToken,
             };
             return try std.fmt.parseFloat(T, numberToken.slice(tokens.slice, tokens.i - 1));
         },
-        .Int, .ComptimeInt => {
+        .int, .comptime_int => {
             const numberToken = switch (token) {
                 .Number => |n| n,
                 else => return error.UnexpectedToken,
@@ -1587,14 +1587,14 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
             if (std.math.round(float) != float) return error.InvalidNumber;
             return @as(T, @intFromFloat(float));
         },
-        .Optional => |optionalInfo| {
+        .optional => |optionalInfo| {
             if (token == .Null) {
                 return null;
             } else {
                 return try parseInternal(optionalInfo.child, token, tokens, options);
             }
         },
-        .Enum => |enumInfo| {
+        .@"enum" => |enumInfo| {
             switch (token) {
                 .Number => |numberToken| {
                     if (!numberToken.is_integer) return error.UnexpectedToken;
@@ -1618,7 +1618,7 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
                 else => return error.UnexpectedToken,
             }
         },
-        .Union => |unionInfo| {
+        .@"union" => |unionInfo| {
             if (unionInfo.tag_type) |_| {
                 // try each of the union fields until we find one that matches
                 inline for (unionInfo.fields) |u_field| {
@@ -1642,7 +1642,7 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
                 @compileError("Unable to parse into untagged union '" ++ @typeName(T) ++ "'");
             }
         },
-        .Struct => |structInfo| {
+        .@"struct" => |structInfo| {
             switch (token) {
                 .ObjectBegin => {},
                 else => return error.UnexpectedToken,
@@ -1723,7 +1723,7 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
             }
             inline for (structInfo.fields, 0..) |field, i| {
                 if (!fields_seen[i]) {
-                    if (field.default_value) |default_value_ptr| {
+                    if (field.default_value_ptr) |default_value_ptr| {
                         if (!field.is_comptime) {
                             const default_value = @as(*align(1) const field.type, @ptrCast(default_value_ptr)).*;
                             @field(r, field.name) = default_value;
@@ -1736,7 +1736,7 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
             }
             return r;
         },
-        .Array => |arrayInfo| {
+        .array => |arrayInfo| {
             switch (token) {
                 .ArrayBegin => {
                     var r: T = undefined;
@@ -1770,21 +1770,21 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
                 else => return error.UnexpectedToken,
             }
         },
-        .Pointer => |ptrInfo| {
+        .pointer => |ptrInfo| {
             const allocator = options.allocator orelse return error.AllocatorRequired;
             switch (ptrInfo.size) {
-                .One => {
+                .one => {
                     const r: T = try allocator.create(ptrInfo.child);
                     errdefer allocator.destroy(r);
                     r.* = try parseInternal(ptrInfo.child, token, tokens, options);
                     return r;
                 },
-                .Slice => {
+                .slice => {
                     switch (token) {
                         .ArrayBegin => {
                             var arraylist = std.ArrayList(ptrInfo.child).init(allocator);
                             errdefer {
-                                while (arraylist.popOrNull()) |v| {
+                                while (arraylist.pop()) |v| {
                                     parseFree(ptrInfo.child, v, options);
                                 }
                                 arraylist.deinit();
@@ -1829,7 +1829,7 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
                             if (value_type == null) return error.UnexpectedToken;
                             var arraylist = std.ArrayList(ptrInfo.child).init(allocator);
                             errdefer {
-                                while (arraylist.popOrNull()) |v| {
+                                while (arraylist.pop()) |v| {
                                     parseFree(ptrInfo.child, v, options);
                                 }
                                 arraylist.deinit();
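
The `popOrNull` → `pop` changes reflect Zig 0.14 folding the two into one: `ArrayList.pop` now returns an optional, which is why the `while (...) |v|` captures above keep working unchanged. Standalone illustration:

```zig
const std = @import("std");

test "ArrayList.pop returns an optional in Zig 0.14" {
    var list = std.ArrayList(u8).init(std.testing.allocator);
    defer list.deinit();
    try list.append(7);
    try std.testing.expectEqual(@as(?u8, 7), list.pop());
    // Popping an empty list yields null instead of asserting.
    try std.testing.expectEqual(@as(?u8, null), list.pop());
}
```
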
@@ -1863,8 +1863,8 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
 fn typeForField(comptime T: type, comptime field_name: []const u8) ?type {
     const ti = @typeInfo(T);
     switch (ti) {
-        .Struct => {
-            inline for (ti.Struct.fields) |field| {
+        .@"struct" => {
+            inline for (ti.@"struct".fields) |field| {
                 if (std.mem.eql(u8, field.name, field_name))
                     return field.type;
             }
@@ -1878,14 +1878,14 @@ fn isMapPattern(comptime T: type) bool {
     // We should be getting a type that is a pointer to a slice.
     // Let's just double check before proceeding
     const ti = @typeInfo(T);
-    if (ti != .Pointer) return false;
-    if (ti.Pointer.size != .Slice) return false;
-    const ti_child = @typeInfo(ti.Pointer.child);
-    if (ti_child != .Struct) return false;
-    if (ti_child.Struct.fields.len != 2) return false;
+    if (ti != .pointer) return false;
+    if (ti.pointer.size != .slice) return false;
+    const ti_child = @typeInfo(ti.pointer.child);
+    if (ti_child != .@"struct") return false;
+    if (ti_child.@"struct".fields.len != 2) return false;
     var key_found = false;
     var value_found = false;
-    inline for (ti_child.Struct.fields) |field| {
+    inline for (ti_child.@"struct".fields) |field| {
         if (std.mem.eql(u8, "key", field.name))
             key_found = true;
         if (std.mem.eql(u8, "value", field.name))
@@ -1895,6 +1895,7 @@ fn isMapPattern(comptime T: type) bool {
 }

 pub fn parse(comptime T: type, tokens: *TokenStream, options: ParseOptions) !T {
+    // std.log.debug("parsing {s} into type {s}", .{ tokens.slice, @typeName(T) });
     const token = (try tokens.next()) orelse return error.UnexpectedEndOfJson;
     return parseInternal(T, token, tokens, options);
 }
@@ -1903,13 +1904,13 @@ pub fn parse(comptime T: type, tokens: *TokenStream, options: ParseOptions) !T {
 /// Should be called with the same type and `ParseOptions` that were passed to `parse`
 pub fn parseFree(comptime T: type, value: T, options: ParseOptions) void {
     switch (@typeInfo(T)) {
-        .Bool, .Float, .ComptimeFloat, .Int, .ComptimeInt, .Enum => {},
-        .Optional => {
+        .bool, .float, .comptime_float, .int, .comptime_int, .@"enum" => {},
+        .optional => {
             if (value) |v| {
                 return parseFree(@TypeOf(v), v, options);
             }
         },
-        .Union => |unionInfo| {
+        .@"union" => |unionInfo| {
             if (unionInfo.tag_type) |UnionTagType| {
                 inline for (unionInfo.fields) |u_field| {
                     if (value == @field(UnionTagType, u_field.name)) {
@@ -1921,24 +1922,24 @@ pub fn parseFree(comptime T: type, value: T, options: ParseOptions) void {
                 unreachable;
             }
         },
-        .Struct => |structInfo| {
+        .@"struct" => |structInfo| {
             inline for (structInfo.fields) |field| {
                 parseFree(field.type, @field(value, field.name), options);
             }
         },
-        .Array => |arrayInfo| {
+        .array => |arrayInfo| {
             for (value) |v| {
                 parseFree(arrayInfo.child, v, options);
             }
         },
-        .Pointer => |ptrInfo| {
+        .pointer => |ptrInfo| {
             const allocator = options.allocator orelse unreachable;
             switch (ptrInfo.size) {
-                .One => {
+                .one => {
                     parseFree(ptrInfo.child, value.*, options);
                     allocator.destroy(value);
                 },
-                .Slice => {
+                .slice => {
                     for (value) |v| {
                         parseFree(ptrInfo.child, v, options);
                     }
@@ -2283,7 +2284,7 @@ pub const Parser = struct {
                     return;
                 }

-                var value = p.stack.pop();
+                var value = p.stack.pop().?;
                 try p.pushToParent(&value);
             },
             .String => |s| {
@@ -2349,7 +2350,7 @@ pub const Parser = struct {
                     return;
                 }

-                var value = p.stack.pop();
+                var value = p.stack.pop().?;
                 try p.pushToParent(&value);
             },
             .ObjectBegin => {
@@ -2510,7 +2511,7 @@ pub fn unescapeValidString(output: []u8, input: []const u8) !void {
                 mem.nativeToLittle(u16, firstCodeUnit),
                 mem.nativeToLittle(u16, secondCodeUnit),
             };
-            if (std.unicode.utf16leToUtf8(output[outIndex..], &utf16le_seq)) |byteCount| {
+            if (std.unicode.utf16LeToUtf8(output[outIndex..], &utf16le_seq)) |byteCount| {
                 outIndex += byteCount;
                 inIndex += 12;
             } else |_| {
@@ -2756,6 +2757,10 @@ pub const StringifyOptions = struct {
         }
     };

+    emit_null: bool = true,
+
+    exclude_fields: ?[][]const u8 = null,
+
     /// Controls the whitespace emitted
     whitespace: ?Whitespace = null,
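
The two new knobs: `emit_null = false` drops optional fields whose value is null, and `exclude_fields` drops fields by their serialized name. A hypothetical usage sketch against this vendored json module (not `std.json`; the import path and function name are assumed):

```zig
const json = @import("json.zig"); // path assumed

fn writeRequest(req: anytype, writer: anytype) !void {
    var exclude = [_][]const u8{"DryRun"}; // serialized (wire) name, not the Zig field name
    try json.stringify(req, .{
        .emit_null = false, // skip null optionals entirely
        .exclude_fields = exclude[0..],
    }, writer);
}
```
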
@@ -2807,38 +2812,38 @@ pub fn stringify(
 ) !void {
     const T = @TypeOf(value);
     switch (@typeInfo(T)) {
-        .Float, .ComptimeFloat => {
+        .float, .comptime_float => {
             return std.fmt.format(out_stream, "{e}", .{value});
         },
-        .Int, .ComptimeInt => {
+        .int, .comptime_int => {
             return std.fmt.formatIntValue(value, "", std.fmt.FormatOptions{}, out_stream);
         },
-        .Bool => {
+        .bool => {
             return out_stream.writeAll(if (value) "true" else "false");
         },
-        .Null => {
+        .null => {
             return out_stream.writeAll("null");
         },
-        .Optional => {
+        .optional => {
             if (value) |payload| {
                 return try stringify(payload, options, out_stream);
             } else {
                 return try stringify(null, options, out_stream);
             }
         },
-        .Enum => {
+        .@"enum" => {
             if (comptime std.meta.hasFn(T, "jsonStringify")) {
                 return value.jsonStringify(options, out_stream);
             }

             @compileError("Unable to stringify enum '" ++ @typeName(T) ++ "'");
         },
-        .Union => {
+        .@"union" => {
             if (comptime std.meta.hasFn(T, "jsonStringify")) {
                 return value.jsonStringify(options, out_stream);
             }

-            const info = @typeInfo(T).Union;
+            const info = @typeInfo(T).@"union";
             if (info.tag_type) |UnionTagType| {
                 inline for (info.fields) |u_field| {
                     if (value == @field(UnionTagType, u_field.name)) {
@@ -2849,13 +2854,13 @@ pub fn stringify(
                 @compileError("Unable to stringify untagged union '" ++ @typeName(T) ++ "'");
             }
         },
-        .Struct => |S| {
+        .@"struct" => |S| {
             if (comptime std.meta.hasFn(T, "jsonStringify")) {
                 return value.jsonStringify(options, out_stream);
             }

             try out_stream.writeByte('{');
-            comptime var field_output = false;
+            var field_output = false;
             var child_options = options;
             if (child_options.whitespace) |*child_whitespace| {
                 child_whitespace.indent_level += 1;
@@ -2864,34 +2869,46 @@ pub fn stringify(
                 // don't include void fields
                 if (Field.type == void) continue;

-                if (!field_output) {
-                    field_output = true;
-                } else {
-                    try out_stream.writeByte(',');
-                }
-                if (child_options.whitespace) |child_whitespace| {
-                    try out_stream.writeByte('\n');
-                    try child_whitespace.outputIndent(out_stream);
-                }
-                var field_written = false;
-                if (comptime std.meta.hasFn(T, "jsonStringifyField"))
-                    field_written = try value.jsonStringifyField(Field.name, child_options, out_stream);
+                var output_this_field = true;
+                if (!options.emit_null and @typeInfo(Field.type) == .optional and @field(value, Field.name) == null) output_this_field = false;

-                if (!field_written) {
-                    if (comptime std.meta.hasFn(T, "fieldNameFor")) {
-                        const name = value.fieldNameFor(Field.name);
-                        try stringify(name, options, out_stream);
-                    } else {
-                        try stringify(Field.name, options, out_stream);
-                    }
-
-                    try out_stream.writeByte(':');
-                    if (child_options.whitespace) |child_whitespace| {
-                        if (child_whitespace.separator) {
-                            try out_stream.writeByte(' ');
-                        }
-                    }
-                    try stringify(@field(value, Field.name), child_options, out_stream);
-                }
+                const final_name = if (comptime std.meta.hasFn(T, "fieldNameFor"))
+                    value.fieldNameFor(Field.name)
+                else
+                    Field.name;
+                if (options.exclude_fields) |exclude_fields| {
+                    for (exclude_fields) |exclude_field| {
+                        if (std.mem.eql(u8, final_name, exclude_field)) {
+                            output_this_field = false;
+                        }
+                    }
+                }
+
+                if (!field_output) {
+                    field_output = output_this_field;
+                } else {
+                    if (output_this_field) try out_stream.writeByte(',');
+                }
+                if (child_options.whitespace) |child_whitespace| {
+                    if (output_this_field) try out_stream.writeByte('\n');
+                    if (output_this_field) try child_whitespace.outputIndent(out_stream);
+                }
+                var field_written = false;
+                if (comptime std.meta.hasFn(T, "jsonStringifyField")) {
+                    if (output_this_field) field_written = try value.jsonStringifyField(Field.name, child_options, out_stream);
+                }
+
+                if (!field_written) {
+                    if (output_this_field) {
+                        try stringify(final_name, options, out_stream);
+                        try out_stream.writeByte(':');
+                    }
+                    if (child_options.whitespace) |child_whitespace| {
+                        if (child_whitespace.separator) {
+                            if (output_this_field) try out_stream.writeByte(' ');
+                        }
+                    }
+                    if (output_this_field) try stringify(@field(value, Field.name), child_options, out_stream);
+                }
             }
             if (field_output) {
@@ -2903,10 +2920,10 @@ pub fn stringify(
             try out_stream.writeByte('}');
             return;
         },
-        .ErrorSet => return stringify(@as([]const u8, @errorName(value)), options, out_stream),
-        .Pointer => |ptr_info| switch (ptr_info.size) {
-            .One => switch (@typeInfo(ptr_info.child)) {
-                .Array => {
+        .error_set => return stringify(@as([]const u8, @errorName(value)), options, out_stream),
+        .pointer => |ptr_info| switch (ptr_info.size) {
+            .one => switch (@typeInfo(ptr_info.child)) {
+                .array => {
                     const Slice = []const std.meta.Elem(ptr_info.child);
                     return stringify(@as(Slice, value), options, out_stream);
                 },
@@ -2916,7 +2933,7 @@ pub fn stringify(
                 },
             },
             // TODO: .Many when there is a sentinel (waiting for https://github.com/ziglang/zig/pull/3972)
-            .Slice => {
+            .slice => {
                 if (ptr_info.child == u8 and options.string == .String and std.unicode.utf8ValidateSlice(value)) {
                     try out_stream.writeByte('\"');
                     var i: usize = 0;
@@ -2985,8 +3002,8 @@ pub fn stringify(
             },
             else => @compileError("Unable to stringify type '" ++ @typeName(T) ++ "'"),
         },
-        .Array => return stringify(&value, options, out_stream),
-        .Vector => |info| {
+        .array => return stringify(&value, options, out_stream),
+        .vector => |info| {
             const array: [info.len]info.child = value;
             return stringify(&array, options, out_stream);
         },

src/main.zig (16 changes)

@@ -32,8 +32,8 @@ pub fn log(
     const prefix = "[" ++ @tagName(level) ++ "] " ++ scope_prefix;

     // Print the message to stderr, silently ignoring any errors
-    std.debug.getStderrMutex().lock();
-    defer std.debug.getStderrMutex().unlock();
+    std.debug.lockStdErr();
+    defer std.debug.unlockStdErr();
     const stderr = std.io.getStdErr().writer();
     nosuspend stderr.print(prefix ++ format ++ "\n", args) catch return;
 }
@@ -97,7 +97,7 @@ pub fn main() anyerror!void {
             }
             continue;
         }
-        inline for (@typeInfo(Tests).Enum.fields) |f| {
+        inline for (@typeInfo(Tests).@"enum".fields) |f| {
             if (std.mem.eql(u8, f.name, arg)) {
                 try tests.append(@field(Tests, f.name));
                 break;
@@ -105,7 +105,7 @@ pub fn main() anyerror!void {
         }
     }
     if (tests.items.len == 0) {
-        inline for (@typeInfo(Tests).Enum.fields) |f|
+        inline for (@typeInfo(Tests).@"enum".fields) |f|
             try tests.append(@field(Tests, f.name));
     }

@@ -192,7 +192,7 @@ pub fn main() anyerror!void {
             const func = fns[0];
             const arn = func.function_arn.?;
             // This is a bit ugly. Maybe a helper function in the library would help?
-            var tags = try std.ArrayList(@typeInfo(try typeForField(services.lambda.tag_resource.Request, "tags")).Pointer.child).initCapacity(allocator, 1);
+            var tags = try std.ArrayList(@typeInfo(try typeForField(services.lambda.tag_resource.Request, "tags")).pointer.child).initCapacity(allocator, 1);
             defer tags.deinit();
             tags.appendAssumeCapacity(.{ .key = "Foo", .value = "Bar" });
             const req = services.lambda.tag_resource.Request{ .resource = arn, .tags = tags.items };
@@ -371,7 +371,7 @@ fn proxyFromString(string: []const u8) !std.http.Client.Proxy {
         rc.port = 443;
         rc.protocol = .tls;
     } else return error.InvalidScheme;
-    var split_iterator = std.mem.split(u8, remaining, ":");
+    var split_iterator = std.mem.splitScalar(u8, remaining, ':');
    rc.host = std.mem.trimRight(u8, split_iterator.first(), "/");
     if (split_iterator.next()) |port|
         rc.port = try std.fmt.parseInt(u16, port, 10);
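
The `std.mem.split` → `splitScalar` changes throughout this diff track the std rename: `split` was removed, and single-character delimiters now take a scalar `':'` rather than the string `":"`. Standalone check:

```zig
const std = @import("std");

test "splitScalar replaces the old std.mem.split" {
    var it = std.mem.splitScalar(u8, "localhost:8080", ':');
    try std.testing.expectEqualStrings("localhost", it.first());
    try std.testing.expectEqual(@as(u16, 8080), try std.fmt.parseInt(u16, it.next().?, 10));
}
```
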
@@ -380,8 +380,8 @@ fn proxyFromString(string: []const u8) !std.http.Client.Proxy {
 fn typeForField(comptime T: type, comptime field_name: []const u8) !type {
     const ti = @typeInfo(T);
     switch (ti) {
-        .Struct => {
-            inline for (ti.Struct.fields) |field| {
+        .@"struct" => {
+            inline for (ti.@"struct".fields) |field| {
                 if (std.mem.eql(u8, field.name, field_name))
                     return field.type;
             }

src/servicemodel.zig

@@ -1,5 +1,5 @@
 const std = @import("std");
-const service_list = @import("models/service_manifest.zig");
+const service_list = @import("service_manifest");
 const expectEqualStrings = std.testing.expectEqualStrings;

 pub fn Services(comptime service_imports: anytype) type {
@@ -12,7 +12,7 @@ pub fn Services(comptime service_imports: anytype) type {
         item.* = .{
             .name = @tagName(service_imports[i]),
             .type = @TypeOf(import_field),
-            .default_value = &import_field,
+            .default_value_ptr = &import_field,
             .is_comptime = false,
             .alignment = 0,
         };
@@ -20,7 +20,7 @@ pub fn Services(comptime service_imports: anytype) type {

     // finally, generate the type
     return @Type(.{
-        .Struct = .{
+        .@"struct" = .{
             .layout = .auto,
             .fields = &fields,
             .decls = &[_]std.builtin.Type.Declaration{},

src/url.zig (19 changes)

@@ -24,10 +24,11 @@ fn encodeStruct(
     comptime options: EncodingOptions,
 ) !bool {
     var rc = first;
-    inline for (@typeInfo(@TypeOf(obj)).Struct.fields) |field| {
-        const field_name = try options.field_name_transformer(allocator, field.name);
-        defer if (options.field_name_transformer.* != defaultTransformer)
-            allocator.free(field_name);
+    var arena = std.heap.ArenaAllocator.init(allocator);
+    defer arena.deinit();
+    const arena_alloc = arena.allocator();
+    inline for (@typeInfo(@TypeOf(obj)).@"struct".fields) |field| {
+        const field_name = try options.field_name_transformer(arena_alloc, field.name);
         // @compileLog(@typeInfo(field.field_type).Pointer);
         rc = try encodeInternal(allocator, parent, field_name, rc, @field(obj, field.name), writer, options);
     }
@ -47,10 +48,10 @@ pub fn encodeInternal(
|
|||
// @compileLog(@typeInfo(@TypeOf(obj)));
|
||||
var rc = first;
|
||||
switch (@typeInfo(@TypeOf(obj))) {
|
||||
.Optional => if (obj) |o| {
|
||||
.optional => if (obj) |o| {
|
||||
rc = try encodeInternal(allocator, parent, field_name, first, o, writer, options);
|
||||
},
|
||||
.Pointer => |ti| if (ti.size == .One) {
|
||||
.pointer => |ti| if (ti.size == .one) {
|
||||
rc = try encodeInternal(allocator, parent, field_name, first, obj.*, writer, options);
|
||||
} else {
|
||||
if (!first) _ = try writer.write("&");
|
||||
|
@ -61,7 +62,7 @@ pub fn encodeInternal(
|
|||
try writer.print("{s}{s}={any}", .{ parent, field_name, obj });
|
||||
rc = false;
|
||||
},
|
||||
.Struct => if (std.mem.eql(u8, "", field_name)) {
|
||||
.@"struct" => if (std.mem.eql(u8, "", field_name)) {
|
||||
rc = try encodeStruct(allocator, parent, first, obj, writer, options);
|
||||
} else {
|
||||
// TODO: It would be lovely if we could concat at compile time or allocPrint at runtime
|
||||
|
@ -73,12 +74,12 @@ pub fn encodeInternal(
|
|||
rc = try encodeStruct(allocator, new_parent, first, obj, writer, options);
|
||||
// try encodeStruct(parent ++ field_name ++ ".", first, obj, writer, options);
|
||||
},
|
||||
.Array => {
|
||||
.array => {
|
||||
if (!first) _ = try writer.write("&");
|
||||
try writer.print("{s}{s}={s}", .{ parent, field_name, obj });
|
||||
rc = false;
|
||||
},
|
||||
.Int, .ComptimeInt, .Float, .ComptimeFloat => {
|
||||
.int, .comptime_int, .float, .comptime_float => {
|
||||
if (!first) _ = try writer.write("&");
|
||||
try writer.print("{s}{s}={d}", .{ parent, field_name, obj });
|
||||
rc = false;
|
||||
|
|
|
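The encodeStruct change is less about 0.14 naming and more about memory ownership: instead of guessing whether a field_name_transformer allocated (the old code compared the transformer against defaultTransformer before freeing), every transformed name now comes from a per-call arena that is torn down once at the end. A sketch of that pattern, with a hypothetical upperFirst transformer standing in for the real one:

const std = @import("std");

// Hypothetical transformer: always allocates, like a real name mangler would.
fn upperFirst(allocator: std.mem.Allocator, name: []const u8) ![]const u8 {
    const copy = try allocator.dupe(u8, name);
    if (copy.len > 0) copy[0] = std.ascii.toUpper(copy[0]);
    return copy;
}

test "arena owns every transformed name" {
    var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
    // One deinit frees all names; no per-name bookkeeping required.
    defer arena.deinit();
    const alloc = arena.allocator();

    for ([_][]const u8{ "first_name", "last_name" }) |n| {
        const transformed = try upperFirst(alloc, n);
        try std.testing.expect(std.ascii.isUpper(transformed[0]));
    }
}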
@@ -653,7 +653,10 @@ fn dupeAndUnescape(alloc: Allocator, text: []const u8) ![]const u8 {

     // This error is not strictly true, but we need to match one of the items
     // from the error set provided by the other stdlib calls at the calling site
-    if (!alloc.resize(str, j)) return error.OutOfMemory;
+    if (!alloc.resize(str, j)) {
+        defer alloc.free(str);
+        return alloc.dupe(u8, str[0..j]) catch return error.OutOfMemory;
+    }
     return str[0..j];
 }
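The dupeAndUnescape fix handles allocators whose resize() refuses to shrink in place: rather than reporting a misleading OutOfMemory for a shrink, it copies the used prefix and releases the original buffer. A standalone sketch of that shrink-or-copy fallback; shrinkOrCopy is a name invented for this sketch:

const std = @import("std");

fn shrinkOrCopy(alloc: std.mem.Allocator, buf: []u8, used: usize) ![]u8 {
    // Fast path: the allocator can shrink the allocation in place.
    if (alloc.resize(buf, used)) return buf[0..used];
    // Fallback: copy the used prefix, then free the oversized original.
    defer alloc.free(buf);
    return try alloc.dupe(u8, buf[0..used]);
}

test "shrinkOrCopy returns exactly the used prefix" {
    const alloc = std.testing.allocator;
    const buf = try alloc.alloc(u8, 64);
    @memcpy(buf[0..5], "hello");
    const out = try shrinkOrCopy(alloc, buf, 5);
    defer alloc.free(out);
    try std.testing.expectEqualStrings("hello", out);
}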
@@ -96,15 +96,21 @@ pub fn parse(comptime T: type, source: []const u8, options: ParseOptions) !Parse

 fn parseInternal(comptime T: type, element: *xml.Element, options: ParseOptions) !T {
     switch (@typeInfo(T)) {
-        .Bool => {
+        .bool => {
             if (std.ascii.eqlIgnoreCase("true", element.children.items[0].CharData))
                 return true;
             if (std.ascii.eqlIgnoreCase("false", element.children.items[0].CharData))
                 return false;
             return error.UnexpectedToken;
         },
-        .Float, .ComptimeFloat => {
+        .float, .comptime_float => {
             return std.fmt.parseFloat(T, element.children.items[0].CharData) catch |e| {
+                if (element.children.items[0].CharData[element.children.items[0].CharData.len - 1] == 'Z') {
+                    // We have an iso8601 in an integer field (we think)
+                    // Try to coerce this into our type
+                    const timestamp = try date.parseIso8601ToTimestamp(element.children.items[0].CharData);
+                    return @floatFromInt(timestamp);
+                }
                 if (log_parse_traces) {
                     std.log.err(
                         "Could not parse '{s}' as float in element '{s}': {any}",
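The only behavioral addition in this hunk is the float branch above: a numeric field that actually carries an ISO 8601 timestamp (detected, roughly, by the trailing 'Z') is parsed as a date and coerced with @floatFromInt. A self-contained sketch of that fallback; parseIso8601ToTimestamp here is a hard-coded stand-in for the library's date helper:

const std = @import("std");

// Stand-in: the real helper parses the string; this sketch returns the
// known epoch value for 2021-10-05T16:39:45Z.
fn parseIso8601ToTimestamp(s: []const u8) !i64 {
    _ = s;
    return 1633451985;
}

fn parseFloatOrDate(comptime T: type, s: []const u8) !T {
    return std.fmt.parseFloat(T, s) catch |e| {
        // Heuristic from the diff: ISO 8601 UTC timestamps end in 'Z'.
        if (s.len > 0 and s[s.len - 1] == 'Z')
            return @floatFromInt(try parseIso8601ToTimestamp(s));
        return e;
    };
}

test "float field tolerates a timestamp" {
    const v = try parseFloatOrDate(f64, "2021-10-05T16:39:45.000Z");
    try std.testing.expectEqual(@as(f64, 1633451985), v);
}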
@@ -121,7 +127,7 @@ fn parseInternal(comptime T: type, element: *xml.Element, options: ParseOptions)
                 return e;
             };
         },
-        .Int, .ComptimeInt => {
+        .int, .comptime_int => {
             // 2021-10-05T16:39:45.000Z
             return std.fmt.parseInt(T, element.children.items[0].CharData, 10) catch |e| {
                 if (element.children.items[0].CharData[element.children.items[0].CharData.len - 1] == 'Z') {
@@ -146,7 +152,7 @@ fn parseInternal(comptime T: type, element: *xml.Element, options: ParseOptions)
                 return e;
             };
         },
-        .Optional => |optional_info| {
+        .optional => |optional_info| {
             if (element.children.items.len == 0) {
                 // This is almost certainly incomplete. Empty strings? xsi:nil?
                 return null;
@@ -156,7 +162,7 @@ fn parseInternal(comptime T: type, element: *xml.Element, options: ParseOptions)
                 return try parseInternal(optional_info.child, element, options);
             }
         },
-        .Enum => |enum_info| {
+        .@"enum" => |enum_info| {
             _ = enum_info;
             // const numeric: ?enum_info.tag_type = std.fmt.parseInt(enum_info.tag_type, element.children.items[0].CharData, 10) catch null;
             // if (numeric) |num| {
@@ -166,7 +172,7 @@ fn parseInternal(comptime T: type, element: *xml.Element, options: ParseOptions)
             // return std.meta.stringToEnum(T, element.CharData);
             // }
         },
-        .Union => |union_info| {
+        .@"union" => |union_info| {
             if (union_info.tag_type) |_| {
                 // try each of the union fields until we find one that matches
                 // inline for (union_info.fields) |u_field| {
@@ -189,7 +195,7 @@ fn parseInternal(comptime T: type, element: *xml.Element, options: ParseOptions)
             }
             @compileError("Unable to parse into untagged union '" ++ @typeName(T) ++ "'");
         },
-        .Struct => |struct_info| {
+        .@"struct" => |struct_info| {
             var r: T = undefined;
             var fields_seen = [_]bool{false} ** struct_info.fields.len;
             var fields_set: u64 = 0;
@@ -244,7 +250,7 @@ fn parseInternal(comptime T: type, element: *xml.Element, options: ParseOptions)
                     fields_set = fields_set + 1;
                     found_value = true;
                 }
-                if (@typeInfo(field.type) == .Optional) {
+                if (@typeInfo(field.type) == .optional) {
                     // Test "compiler assertion failure 2"
                     // Zig compiler bug circa 0.9.0. Using "and !found_value"
                     // in the if statement above will trigger assertion failure
@@ -269,7 +275,7 @@ fn parseInternal(comptime T: type, element: *xml.Element, options: ParseOptions)
                 return error.FieldElementMismatch; // see fields_seen for details
             return r;
         },
-        .Array => //|array_info| {
+        .array => //|array_info| {
         return error.ArrayNotImplemented,
         // switch (token) {
         //     .ArrayBegin => {
@@ -304,16 +310,16 @@ fn parseInternal(comptime T: type, element: *xml.Element, options: ParseOptions)
         //     else => return error.UnexpectedToken,
         //     }
         // },
-        .Pointer => |ptr_info| {
+        .pointer => |ptr_info| {
             const allocator = options.allocator orelse return error.AllocatorRequired;
             switch (ptr_info.size) {
-                .One => {
+                .one => {
                     const r: T = try allocator.create(ptr_info.child);
                     errdefer allocator.free(r);
                     r.* = try parseInternal(ptr_info.child, element, options);
                     return r;
                 },
-                .Slice => {
+                .slice => {
                     // TODO: Detect and deal with arrays. This will require two
                     //       passes through the element children - one to
                     //       determine if it is an array, one to parse the elements
@@ -342,10 +348,10 @@ fn parseInternal(comptime T: type, element: *xml.Element, options: ParseOptions)
                     }
                     return try allocator.dupe(u8, element.children.items[0].CharData);
                 },
-                .Many => {
+                .many => {
                     return error.ManyPointerSizeNotImplemented;
                 },
-                .C => {
+                .c => {
                     return error.CPointerSizeNotImplemented;
                 },
             }
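Taken together, parseInternal is a single reflection switch, and this diff is the 0.14 tag rename applied to every arm, including the nested switch on ptr_info.size (.One/.Slice/.Many/.C became .one/.slice/.many/.c). A compact sketch of the same dispatch shape under the new names; describe is illustrative, not part of the library:

const std = @import("std");

// Recursive reflection dispatch using the 0.14 lowercase Type tags.
// Call at comptime: the ++ concatenation needs comptime-known strings.
fn describe(comptime T: type) []const u8 {
    return switch (@typeInfo(T)) {
        .bool => "bool",
        .int, .comptime_int => "integer",
        .float, .comptime_float => "float",
        .optional => |info| "optional " ++ describe(info.child),
        .pointer => |info| switch (info.size) {
            .one => "pointer to " ++ describe(info.child),
            .slice => "slice of " ++ describe(info.child),
            .many, .c => "unsupported pointer",
        },
        else => "unhandled",
    };
}

test "reflection dispatch with 0.14 tags" {
    try std.testing.expectEqualStrings("optional integer", comptime describe(?u32));
    try std.testing.expectEqualStrings("slice of float", comptime describe([]f64));
}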