Compare commits: master...zig-0.12.0

No commits in common. "master" and "zig-0.12.0" have entirely different histories.

29 changed files with 501 additions and 2079 deletions

The compare base is master and the head is zig-0.12.0, so in the unified diffs below, `-` lines are master's (Zig 0.14-era) content and `+` lines are what the zig-0.12.0 branch contains instead.
.envrc (deleted, 8 lines)
@@ -1,8 +0,0 @@
-# vi: ft=sh
-# shellcheck shell=bash
-
-if ! has zvm_direnv_version || ! zvm_direnv_version 2.0.0; then
-  source_url "https://git.lerch.org/lobo/zvm-direnv/raw/tag/2.0.0/direnvrc" "sha256-8Umzxj32hFU6G0a7Wrq0KTNDQ8XEuje2A3s2ljh/hFY="
-fi
-
-use zig 0.14.0
build.yaml (Gitea workflow)
@@ -1,9 +1,10 @@
 name: AWS-Zig Build
+run-name: ${{ github.actor }} building AWS Zig SDK
 on:
-  workflow_dispatch:
   push:
     branches:
-      - 'master'
+      - '*'
+      - '!zig-develop*'
 env:
   ACTIONS_RUNTIME_TOKEN: ${{ secrets.GITHUB_TOKEN }}
   ACTIONS_RUNTIME_URL: ${{ env.GITHUB_SERVER_URL }}/api/actions_pipeline/
@@ -16,27 +17,13 @@ jobs:
     #   image: alpine:3.15.0
     steps:
       - name: Check out repository code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v3
       - name: Setup Zig
-        uses: https://github.com/mlugg/setup-zig@v1.2.1
+        uses: https://git.lerch.org/lobo/setup-zig@v3
         with:
-          version: 0.14.0
+          version: 0.12.0
-      - name: Restore Zig caches
-        uses: https://github.com/Hanaasagi/zig-action-cache@3954aae427f8b05914e08dfd79f15e1f2e435929
-      - name: Ulimit
-        run: ulimit -a
-      - name: Run smoke test
-        run: zig build smoke-test --verbose
       - name: Run tests
-        run: zig build test --verbose --summary all
+        run: zig build test --verbose
-      - name: Run tests (release mode)
-        run: zig build test -Doptimize=ReleaseSafe --verbose
-      # Zig build scripts don't have the ability to import depenedencies directly
-      # (https://github.com/ziglang/zig/issues/18164). We can allow downstream
-      # build scripts to import aws with a few tweaks, but we can't @import("src/aws.zig")
-      # until we have our models built. So we have to have the build script
-      # basically modified, only during packaging, to allow this use case
-      #
       # Zig package manager expects everything to be inside a directory in the archive,
       # which it then strips out on download. So we need to shove everything inside a directory
       # the way GitHub/Gitea does for repo archives
@@ -46,7 +33,6 @@ jobs:
       # should be using git archive, but we need our generated code to be part of it
       - name: Package source code with generated models
         run: |
-          sed -i 's#// UNCOMMENT AFTER MODEL GEN TO USE IN BUILD SCRIPTS //##' build.zig
           tar -czf ${{ runner.temp }}/${{ github.sha }}-with-models.tar.gz \
             --format ustar \
             --exclude 'zig-*' \
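The `sed` step on master's side ties into the comment block removed in the previous hunk: Zig build scripts can't import dependencies directly (ziglang/zig#18164), so during packaging the marker line `// UNCOMMENT AFTER MODEL GEN TO USE IN BUILD SCRIPTS //pub const aws = @import("src/aws.zig");` in build.zig is uncommented. A minimal sketch of what that enables in a downstream `build.zig` (hypothetical consumer code; the dependency name `aws` is assumed to match the consumer's build.zig.zon entry):

    // Hypothetical downstream build.zig: once the packaged build.zig has the
    // marker line uncommented, the dependency's build script can be imported
    // by its build.zig.zon name, exposing the `aws` declaration at build time:
    const aws_build = @import("aws"); // imports the dependency's build.zig
    const aws_types = aws_build.aws; // the uncommented `pub const aws = ...`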
@@ -75,7 +61,7 @@ jobs:
       # - run: echo "URL of foo (1) is ${{ steps.sign.outputs.URL_1 }}"
       - name: Publish source code with generated models
         run: |
-          curl -s --user ${{ github.actor }}:${{ secrets.PACKAGE_PUSH }} \
+          curl --user ${{ github.actor }}:${{ secrets.PACKAGE_PUSH }} \
             --upload-file ${{ runner.temp }}/${{ github.sha }}-with-models.tar.gz \
             https://git.lerch.org/api/packages/lobo/generic/aws-sdk-with-models/${{ github.sha }}/${{ github.sha }}-with-models.tar.gz
       - name: Build example
zig-mach.yaml (Gitea workflow)
@@ -1,20 +1,16 @@
 name: aws-zig mach nominated build
+run-name: ${{ github.actor }} building AWS Zig SDK
 on:
-  workflow_dispatch:
   schedule:
     - cron: '0 12 * * *' # noon UTC, 4AM Pacific
   push:
     branches:
-      - 'zig-mach'
+      - 'zig-develop*'
 env:
-  PKG_PREFIX: nominated-zig
+  ACTIONS_RUNTIME_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+  ACTIONS_RUNTIME_URL: ${{ env.GITHUB_SERVER_URL }}/api/actions_pipeline/
 jobs:
   build-zig-nominated-mach-latest:
-    container:
-      # We need CAP_SYS_PTRACE for stack traces due to a regression in 0.14.0
-      # TODO: Remove this after https://github.com/ziglang/zig/issues/21815 is
-      # addressed
-      options: --cap-add CAP_SYS_PTRACE
     runs-on: ubuntu-latest
     # Need to use the default container with node and all that, so we can
     # use JS-based actions like actions/checkout@v3...
@@ -22,24 +18,13 @@ jobs:
     #   image: alpine:3.15.0
     steps:
       - name: Check out repository code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v3
         with:
-          ref: zig-mach
+          ref: zig-develop
      - name: Setup Zig
-        uses: https://github.com/mlugg/setup-zig@v1.2.1
+        uses: https://git.lerch.org/lobo/setup-zig@v3
         with:
           version: mach-latest
-      - name: Restore Zig caches
-        uses: https://github.com/Hanaasagi/zig-action-cache@3954aae427f8b05914e08dfd79f15e1f2e435929
-      - name: Run gen
-        run: zig build gen --verbose
-      - name: Run smoke test
-        run: zig build smoke-test --verbose
-      - name: Run full tests
-        run: zig build test --verbose --summary all
-      # TODO: Zig mach currently tracking behind zig 0.14.0 branch - enable this test after update
-      # - name: Run tests (release mode)
-      #   run: zig build test -Doptimize=ReleaseSafe --verbose
       # Zig package manager expects everything to be inside a directory in the archive,
       # which it then strips out on download. So we need to shove everything inside a directory
       # the way GitHub/Gitea does for repo archives
@@ -49,7 +34,7 @@ jobs:
       # should be using git archive, but we need our generated code to be part of it
       - name: Package source code with generated models
         run: |
-          tar -czf ${{ runner.temp }}/${{ github.sha }}${{ env.PKG_PREFIX }}-with-models.tar.gz \
+          tar -czf ${{ runner.temp }}/${{ github.sha }}-with-models.tar.gz \
            --format ustar \
            --exclude 'zig-*' \
            --transform 's,^,${{ github.sha }}/,' *
@@ -72,9 +57,9 @@ jobs:
       # - run: echo "URL of foo (1) is ${{ steps.sign.outputs.URL_1 }}"
       - name: Publish source code with generated models
         run: |
-          curl -s --user ${{ github.actor }}:${{ secrets.PACKAGE_PUSH }} \
-            --upload-file ${{ runner.temp }}/${{ github.sha }}${{ env.PKG_PREFIX }}-with-models.tar.gz \
-            https://git.lerch.org/api/packages/lobo/generic/aws-sdk-with-models/${{ github.sha }}/${{ github.sha }}${{ env.PKG_PREFIX }}-with-models.tar.gz
+          curl --user ${{ github.actor }}:${{ secrets.PACKAGE_PUSH }} \
+            --upload-file ${{ runner.temp }}/${{ github.sha }}-with-models.tar.gz \
+            https://git.lerch.org/api/packages/lobo/generic/aws-sdk-with-models/${{ github.sha }}/${{ github.sha }}-with-models.tar.gz
       - name: Build example
         run: ( cd example && zig build ) # Make sure example builds
       - name: Notify
zig-nightly.yaml (Gitea workflow)
@@ -1,20 +1,16 @@
 name: aws-zig nightly build
+run-name: ${{ github.actor }} building AWS Zig SDK
 on:
-  workflow_dispatch:
   schedule:
-    - cron: '30 12 * * *' # 12:30 UTC, 4:30AM Pacific
+    - cron: '0 12 30 * *' # 12:30 UTC, 4:30AM Pacific
   push:
     branches:
-      - 'zig-develop'
+      - 'zig-develop*'
 env:
-  PKG_PREFIX: nightly-zig
+  ACTIONS_RUNTIME_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+  ACTIONS_RUNTIME_URL: ${{ env.GITHUB_SERVER_URL }}/api/actions_pipeline/
 jobs:
   build-zig-nightly:
-    container:
-      # We need CAP_SYS_PTRACE for stack traces due to a regression in 0.14.0
-      # TODO: Remove this after https://github.com/ziglang/zig/issues/21815 is
-      # addressed
-      options: --cap-add CAP_SYS_PTRACE
     runs-on: ubuntu-latest
     # Need to use the default container with node and all that, so we can
     # use JS-based actions like actions/checkout@v3...
@@ -22,21 +18,15 @@ jobs:
     #   image: alpine:3.15.0
     steps:
       - name: Check out repository code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v3
         with:
           ref: zig-develop
       - name: Setup Zig
-        uses: https://github.com/mlugg/setup-zig@v1.2.1
+        uses: https://git.lerch.org/lobo/setup-zig@v3
         with:
           version: master
-      - name: Restore Zig caches
-        uses: https://github.com/Hanaasagi/zig-action-cache@3954aae427f8b05914e08dfd79f15e1f2e435929
-      - name: Run smoke test
-        run: zig build smoke-test --verbose
-      - name: Run full tests
-        run: zig build test --verbose --summary all
-      - name: Run tests (release mode)
-        run: zig build test -Doptimize=ReleaseSafe --verbose
+      - name: Run tests
+        run: zig build test --verbose
       # Zig package manager expects everything to be inside a directory in the archive,
       # which it then strips out on download. So we need to shove everything inside a directory
       # the way GitHub/Gitea does for repo archives
@@ -46,7 +36,7 @@ jobs:
       # should be using git archive, but we need our generated code to be part of it
       - name: Package source code with generated models
         run: |
-          tar -czf ${{ runner.temp }}/${{ github.sha }}${{ env.PKG_PREFIX }}-with-models.tar.gz \
+          tar -czf ${{ runner.temp }}/${{ github.sha }}-with-models.tar.gz \
            --format ustar \
            --exclude 'zig-*' \
            --transform 's,^,${{ github.sha }}/,' *
@@ -69,9 +59,9 @@ jobs:
       # - run: echo "URL of foo (1) is ${{ steps.sign.outputs.URL_1 }}"
       - name: Publish source code with generated models
         run: |
-          curl -s --user ${{ github.actor }}:${{ secrets.PACKAGE_PUSH }} \
-            --upload-file ${{ runner.temp }}/${{ github.sha }}${{ env.PKG_PREFIX }}-with-models.tar.gz \
-            https://git.lerch.org/api/packages/lobo/generic/aws-sdk-with-models/${{ github.sha }}/${{ github.sha }}${{ env.PKG_PREFIX }}-with-models.tar.gz
+          curl --user ${{ github.actor }}:${{ secrets.PACKAGE_PUSH }} \
+            --upload-file ${{ runner.temp }}/${{ github.sha }}-with-models.tar.gz \
+            https://git.lerch.org/api/packages/lobo/generic/aws-sdk-with-models/${{ github.sha }}/${{ github.sha }}-with-models.tar.gz
       - name: Build example
         run: ( cd example && zig build ) # Make sure example builds
       - name: Notify
zig-previous.yaml (Gitea workflow, deleted — 91 lines)
@@ -1,91 +0,0 @@
-name: AWS-Zig Build
-on:
-  workflow_dispatch:
-  push:
-    branches:
-      - 'zig-0.13'
-env:
-  ACTIONS_RUNTIME_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-  ACTIONS_RUNTIME_URL: ${{ env.GITHUB_SERVER_URL }}/api/actions_pipeline/
-jobs:
-  build-zig-amd64-host:
-    runs-on: ubuntu-latest
-    # Need to use the default container with node and all that, so we can
-    # use JS-based actions like actions/checkout@v3...
-    # container:
-    #   image: alpine:3.15.0
-    steps:
-      - name: Check out repository code
-        uses: actions/checkout@v4
-        with:
-          ref: zig-0.13
-      - name: Setup Zig
-        uses: https://github.com/mlugg/setup-zig@v1.2.1
-        with:
-          version: 0.13.0
-      - name: Restore Zig caches
-        uses: https://github.com/Hanaasagi/zig-action-cache@3954aae427f8b05914e08dfd79f15e1f2e435929
-      - name: Run smoke test
-        run: zig build smoke-test --verbose
-      - name: Run full tests
-        run: zig build test --verbose --summary all
-      # Release mode fix not backported to 0.13.0 code
-      #- name: Run tests (release mode)
-      #  run: zig build test -Doptimize=ReleaseSafe --verbose
-      # Zig build scripts don't have the ability to import depenedencies directly
-      # (https://github.com/ziglang/zig/issues/18164). We can allow downstream
-      # build scripts to import aws with a few tweaks, but we can't @import("src/aws.zig")
-      # until we have our models built. So we have to have the build script
-      # basically modified, only during packaging, to allow this use case
-      #
-      # Zig package manager expects everything to be inside a directory in the archive,
-      # which it then strips out on download. So we need to shove everything inside a directory
-      # the way GitHub/Gitea does for repo archives
-      #
-      # Also, zig tar process doesn't handle gnu format for long names, nor does it seam to
-      # handle posix long name semantics cleanly either. ustar works. This
-      # should be using git archive, but we need our generated code to be part of it
-      - name: Package source code with generated models
-        run: |
-          sed -i 's#// UNCOMMENT AFTER MODEL GEN TO USE IN BUILD SCRIPTS //##' build.zig
-          tar -czf ${{ runner.temp }}/${{ github.sha }}-with-models.tar.gz \
-            --format ustar \
-            --exclude 'zig-*' \
-            *
-      # Something in this PR broke this transform. I don't mind removing it, but
-      # the PR attempts to handle situations with or without a prefix, but it
-      # doesn't. I have not yet determined what the problem is, though
-      # https://github.com/ziglang/zig/pull/19111/files
-      # --transform 's,^,${{ github.sha }}/,' *
-      # - name: Sign
-      #   id: sign
-      #   uses: https://git.lerch.org/lobo/action-hsm-sign@v1
-      #   with:
-      #     pin: ${{ secrets.HSM_USER_PIN }}
-      #     files: ???
-      #     public_key: 'https://emil.lerch.org/serverpublic.pem'
-      # - run: |
-      #     echo "Source 0 should be ./bar: ${{ steps.sign.outputs.SOURCE_0 }}"
-      # - run: |
-      #     echo "Signature 0 should be ./bar.sig: ${{ steps.sign.outputs.SIG_0 }}"
-      # - run: echo "URL of bar (0) is ${{ steps.sign.outputs.URL_0 }}"
-      # - run: |
-      #     echo "Source 1 should be ./foo: ${{ steps.sign.outputs.SOURCE_1 }}"
-      # - run: |
-      #     echo "Signature 1 should be ./foo.sig: ${{ steps.sign.outputs.SIG_1 }}"
-      # - run: echo "URL of foo (1) is ${{ steps.sign.outputs.URL_1 }}"
-      - name: Publish source code with generated models
-        run: |
-          curl -s --user ${{ github.actor }}:${{ secrets.PACKAGE_PUSH }} \
-            --upload-file ${{ runner.temp }}/${{ github.sha }}-with-models.tar.gz \
-            https://git.lerch.org/api/packages/lobo/generic/aws-sdk-with-models/${{ github.sha }}/${{ github.sha }}-with-models.tar.gz
-      - name: Build example
-        run: ( cd example && zig build ) # Make sure example builds
-      - name: Notify
-        uses: https://git.lerch.org/lobo/action-notify-ntfy@v2
-        if: always()
-        with:
-          host: ${{ secrets.NTFY_HOST }}
-          topic: ${{ secrets.NTFY_TOPIC }}
-          user: ${{ secrets.NTFY_USER }}
-          password: ${{ secrets.NTFY_PASSWORD }}
.github/workflows/build.yaml (new file, 31 lines)
@@ -0,0 +1,31 @@
+name: AWS-Zig Build
+run-name: ${{ github.actor }} building AWS Zig SDK
+on:
+  push:
+    branches:
+      - '*'
+      - '!zig-develop*'
+jobs:
+  build-zig-0-12-0-amd64:
+    runs-on: ubuntu-latest
+    env:
+      ZIG_VERSION: 0.12.0
+      ARCH: x86_64
+    if: ${{ github.env.GITEA_ACTIONS != 'true' }}
+    steps:
+      - name: Check out repository code
+        uses: actions/checkout@v4
+      # ARCH is fine, but we can't substitute directly because zig
+      # uses x86_64 instead of amd64. They also use aarch64 instead of arm64.
+      #
+      # However, arm64/linux isn't quite fully tier 1 yet, so this is more of a
+      # TODO: https://github.com/ziglang/zig/issues/2443
+      - name: Install zig
+        run: |
+          wget -q https://ziglang.org/download/${ZIG_VERSION}/zig-linux-${ARCH}-${ZIG_VERSION}.tar.xz
+          sudo tar x -C /usr/local -f zig-linux-${ARCH}-${ZIG_VERSION}.tar.xz
+          sudo ln -s /usr/local/zig-linux-${ARCH}-${ZIG_VERSION}/zig /usr/local/bin/zig
+      - name: Run tests
+        run: zig build test -Dbroken-windows --verbose # Github runners try to run the windows tests despite disabling foreign checks
+      - name: Build example
+        run: ( cd example && zig build ) # Make sure example builds
.github/workflows/zig-mach.yaml (new file, 36 lines)
@@ -0,0 +1,36 @@
+name: aws-zig mach nominated build
+run-name: ${{ github.actor }} building AWS Zig SDK
+on:
+  push:
+    branches:
+      - 'zig-develop*'
+jobs:
+  build-zig-mach-latest:
+    runs-on: ubuntu-latest
+    # Need to use the default container with node and all that, so we can
+    # use JS-based actions like actions/checkout@v3...
+    # container:
+    #   image: alpine:3.15.0
+    env:
+      ZIG_VERSION: mach-latest
+      ARCH: x86_64
+    if: ${{ github.env.GITEA_ACTIONS != 'true' }}
+    steps:
+      - name: Check out repository code
+        uses: actions/checkout@v4
+      # ARCH is fine, but we can't substitute directly because zig
+      # uses x86_64 instead of amd64. They also use aarch64 instead of arm64.
+      #
+      # However, arm64/linux isn't quite fully tier 1 yet, so this is more of a
+      # TODO: https://github.com/ziglang/zig/issues/2443
+      - name: Install zig
+        run: |
+          apt-get update && apt-get install -y jq
+          file="$(curl -Osw '%{filename_effective}' "$(curl -s https://machengine.org/zig/index.json |jq -r '."'${ZIG_VERSION}'"."x86_64-linux".tarball')")"
+          sudo tar x -C /usr/local -f "${file}"
+          sudo ln -s /usr/local/"${file%%.tar.xz}"/zig /usr/local/bin/zig
+          zig version
+      - name: Run tests
+        run: zig build test -Dbroken-windows --verbose
+      - name: Build example
+        run: ( cd example && zig build ) # Make sure example builds
.github/workflows/zig-nightly.yaml (new file, 36 lines)
@@ -0,0 +1,36 @@
+name: aws-zig nightly build
+run-name: ${{ github.actor }} building AWS Zig SDK
+on:
+  push:
+    branches:
+      - 'zig-develop*'
+jobs:
+  build-zig-nightly:
+    runs-on: ubuntu-latest
+    # Need to use the default container with node and all that, so we can
+    # use JS-based actions like actions/checkout@v3...
+    # container:
+    #   image: alpine:3.15.0
+    env:
+      ZIG_VERSION: master
+      ARCH: x86_64
+    if: ${{ github.env.GITEA_ACTIONS != 'true' }}
+    steps:
+      - name: Check out repository code
+        uses: actions/checkout@v4
+      # ARCH is fine, but we can't substitute directly because zig
+      # uses x86_64 instead of amd64. They also use aarch64 instead of arm64.
+      #
+      # However, arm64/linux isn't quite fully tier 1 yet, so this is more of a
+      # TODO: https://github.com/ziglang/zig/issues/2443
+      - name: Install zig
+        run: |
+          apt-get update && apt-get install -y jq
+          file="$(curl -Osw '%{filename_effective}' "$(curl -s https://ziglang.org/download/index.json |jq -r '."'${ZIG_VERSION}'"."x86_64-linux".tarball')")"
+          sudo tar x -C /usr/local -f "${file}"
+          sudo ln -s /usr/local/"${file%%.tar.xz}"/zig /usr/local/bin/zig
+          zig version
+      - name: Run tests
+        run: zig build test -Dbroken-windows --verbose
+      - name: Build example
+        run: ( cd example && zig build ) # Make sure example builds
.gitignore
@@ -11,4 +11,3 @@ libs/
 src/git_version.zig
 zig-out
 core
-.zig-cache
README.md
@@ -1,77 +1,61 @@
 AWS SDK for Zig
 ===============
 
-[Zig 0.14](https://ziglang.org/download/#release-0.14.0):
+[Zig 0.12](https://ziglang.org/download/#release-0.12.0):
 
 [](https://git.lerch.org/lobo/aws-sdk-for-zig/actions?workflow=build.yaml&state=closed)
 
-[Last Mach Nominated Zig Version](https://machengine.org/docs/nominated-zig/):
+[Last Mach Nominated Zig Version](https://machengine.org/about/nominated-zig/):
 
 [](https://git.lerch.org/lobo/aws-sdk-for-zig/actions?workflow=zig-mach.yaml&state=closed)
 
 [Nightly Zig](https://ziglang.org/download/):
 
 [](https://git.lerch.org/lobo/aws-sdk-for-zig/actions?workflow=zig-nightly.yaml&state=closed)
 
-[Zig 0.13](https://ziglang.org/download/#release-0.13.0):
-
-[](https://git.lerch.org/lobo/aws-sdk-for-zig/actions?workflow=zig-previous.yaml&state=closed)
-
 Current executable size for the demo is 980k after compiling with -Doptimize=ReleaseSmall
-in x86_64-linux, and will vary based on services used. Tested targets:
+in x86_linux, and will vary based on services used. Tested targets:
 
 * x86_64-linux
-* riscv64-linux
+* riscv64-linux\*
 * aarch64-linux
-* x86_64-windows
+* x86_64-windows\*\*
 * arm-linux
 * aarch64-macos
 * x86_64-macos
 
 Tested targets are built, but not continuously tested, by CI.
 
-Branches
---------
-
-* **master**: This branch tracks the latest released zig version
-* **zig-0.13**: This branch tracks the previous released zig version (0.13 currently).
-                Support for the previous version is best effort, generally
-                degrading over time. Fixes will generally appear in master, then
-                backported into the previous version.
-* **zig-mach**: This branch tracks the latest mach nominated version. A separate
-                branch is necessary as mach nominated is usually, but not always,
-                more recent than the latest production zig. Support for the mach
-                version is best effort.
-* **zig-develop**: This branch tracks zig nightly, and is used mainly as a canary
-                   for breaking changes that will need to be dealt with when
-                   a new mach nominated version or new zig release appear.
-                   Expect significant delays in any build failures.
-
-Other branches/tags exist but are unsupported
+\* On Zig 0.12, riscv64-linux tests take a significant time to compile (each aws.zig test takes approximately 1min, 45 seconds to compile on Intel i9 10th gen)
+
+\*\* On Zig 0.12, x86_64-windows tests have one test skipped as LLVM consumes all available RAM on the system
+
+Zig-Develop Branch
+------------------
+
+This branch is intended for use with the in-development version of Zig. This
+starts with 0.12.0-dev.3180+83e578a18. I will try to keep this branch up to date
+with latest, but with a special eye towards aligning with [Mach Engine's Nominated
+Zig Versions](https://machengine.org/about/nominated-zig/). As nightly zig versions
+disappear off the downloads page (and back end server), we can use the mirroring
+that the Mach Engine participates in to pull these versions.
 
 Building
 --------
 
 `zig build` should work. It will build the code generation project, fetch model
 files from upstream AWS Go SDK v2, run the code generation, then build the main
-project with the generated code. Testing can be done with `zig build test`. Note that
-this command tests on all supported architectures, so for a faster testing
-process, use `zig build smoke-test` instead.
-
-To make development even faster, a build option is provided to avoid the use of
-LLVM. To use this, use the command `zig build -Dno-llvm smoke-test`. This
-can reduce build/test time 300%. Note, however, native code generation in zig
-is not yet complete, so you may see errors.
+project with the generated code. Testing can be done with `zig test`.
 
 Using
 -----
 
-This is designed for use with the Zig package manager, and exposes a module
+This is designed for use with the Zig 0.11 package manager, and exposes a module
 called "aws". Set up `build.zig.zon` and add the dependency/module to your project
 as normal and the package manager should do its thing. A full example can be found
-in [/example](example/build.zig.zon). This can also be used at build time in
-a downstream project's `build.zig`.
+in [/example](example/README.md).
 
 Configuring the module and/or Running the demo
 ----------------------------------------------
@@ -79,8 +63,8 @@ Configuring the module and/or Running the demo
 This library mimics the aws c libraries for it's work, so it operates like most
 other 'AWS things'. [/src/main.zig](src/main.zig) gives you a handful of examples
 for working with services. For local testing or alternative endpoints, there's
-no real standard, so there is code to look for an environment variable
-`AWS_ENDPOINT_URL` variable that will supersede all other configuration.
+no real standard, so there is code to look for `AWS_ENDPOINT_URL` environment
+variable that will supersede all other configuration.
 
 Limitations
 -----------
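Both versions describe the same behavior: an `AWS_ENDPOINT_URL` environment variable, when present, supersedes all other endpoint configuration. A minimal sketch of such a lookup (illustrative only; the SDK's actual lookup code is not part of this diff):

    const std = @import("std");

    // Illustrative sketch: return the AWS_ENDPOINT_URL override if set, else null.
    fn endpointOverride(allocator: std.mem.Allocator) !?[]u8 {
        return std.process.getEnvVarOwned(allocator, "AWS_ENDPOINT_URL") catch |err| switch (err) {
            error.EnvironmentVariableNotFound => null,
            else => return err,
        };
    }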
@@ -101,6 +85,13 @@ TODO List:
 * Implement timeouts and other TODO's in the code
 * Add option to cache signature keys
 
+Services without TLS 1.3 support
+--------------------------------
+
+All AWS services should support TLS 1.3 at this point, but there are many regions
+and several partitions, and not all of them have been tested, so your mileage
+may vary.
+
 Dependency tree
 ---------------
 
@@ -109,6 +100,7 @@ No dependencies:
 * aws_http_base: contains basic structures for http requests/results
 * case: provides functions to change casing
 * date: provides limited date manipulation functions
+* http_client_17015_issue: zig 0.11 http client, with changes
 * json: custom version of earlier stdlib json parser
 * xml: custom xml parser library
 * url: custom url encoding
@@ -117,6 +109,7 @@ aws_credentials: Allows credential handling
 aws_authentication
 
 aws_http:
+  http_client_17015_issue
   aws_http_base
   aws_signing
 
build.zig
@@ -4,18 +4,42 @@ const Builder = @import("std").Build;
 
 const models_subdir = "codegen/sdk-codegen/aws-models/"; // note will probably not work on windows
 
-// UNCOMMENT AFTER MODEL GEN TO USE IN BUILD SCRIPTS //pub const aws = @import("src/aws.zig");
-
-const test_targets = [_]std.Target.Query{
+const test_targets = [_]std.zig.CrossTarget{
     .{}, // native
-    .{ .cpu_arch = .x86_64, .os_tag = .linux },
-    .{ .cpu_arch = .aarch64, .os_tag = .linux },
-    .{ .cpu_arch = .riscv64, .os_tag = .linux },
-    .{ .cpu_arch = .arm, .os_tag = .linux },
-    .{ .cpu_arch = .x86_64, .os_tag = .windows },
-    .{ .cpu_arch = .aarch64, .os_tag = .macos },
-    .{ .cpu_arch = .x86_64, .os_tag = .macos },
-    // .{ .cpu_arch = .wasm32, .os_tag = .wasi },
+    .{
+        .cpu_arch = .x86_64,
+        .os_tag = .linux,
+    },
+    .{
+        .cpu_arch = .aarch64,
+        .os_tag = .linux,
+    },
+    // // The test executable just spins forever in LLVM using nominated zig 0.12 March 2024
+    // // This is likely a LLVM problem unlikely to be fixed in zig 0.12
+    // .{
+    //     .cpu_arch = .riscv64,
+    //     .os_tag = .linux,
+    // },
+    .{
+        .cpu_arch = .arm,
+        .os_tag = .linux,
+    },
+    .{
+        .cpu_arch = .x86_64,
+        .os_tag = .windows,
+    },
+    .{
+        .cpu_arch = .aarch64,
+        .os_tag = .macos,
+    },
+    .{
+        .cpu_arch = .x86_64,
+        .os_tag = .macos,
+    },
+    // .{
+    //     .cpu_arch = .wasm32,
+    //     .os_tag = .wasi,
+    // },
 };
 
 pub fn build(b: *Builder) !void {
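Both variants of `test_targets` feed the per-target test loop that appears later in this file (`.target = b.resolveTargetQuery(t)` with `skip_foreign_checks`). A condensed sketch of that loop, assembled from the hunks below (the `broken_windows` skip is illustrative; the exact skip logic isn't shown in this diff):

    // Condensed from the unit-test hunks later in this file:
    for (test_targets) |t| {
        if (broken_windows and t.os_tag == .windows) continue; // illustrative skip
        const unit_tests = b.addTest(.{
            .root_source_file = b.path("src/aws.zig"),
            .target = b.resolveTargetQuery(t), // resolve the query against the host
            .optimize = optimize,
        });
        const run_unit_tests = b.addRunArtifact(unit_tests);
        run_unit_tests.skip_foreign_checks = true; // don't execute foreign binaries
        test_step.dependOn(&run_unit_tests.step);
    }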
@@ -29,23 +53,11 @@ pub fn build(b: *Builder) !void {
     // between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall.
     const optimize = b.standardOptimizeOption(.{});
 
-    const no_llvm = b.option(
-        bool,
-        "no-llvm",
-        "Disable LLVM",
-    ) orelse false;
     const broken_windows = b.option(
         bool,
         "broken-windows",
         "Windows is broken in this environment (do not run Windows tests)",
     ) orelse false;
-    const no_bin = b.option(bool, "no-bin", "skip emitting binary") orelse false;
-
-    const test_filters: []const []const u8 = b.option(
-        []const []const u8,
-        "test-filter",
-        "Skip tests that do not match any of the specified filters",
-    ) orelse &.{};
     // TODO: Embed the current git version in the code. We can do this
     // by looking for .git/HEAD (if it exists, follow the ref to /ref/heads/whatevs,
     // grab that commit, and use b.addOptions/exe.addOptions to generate the
@@ -60,11 +72,10 @@ pub fn build(b: *Builder) !void {
     // It relies on code gen and is all fouled up when getting imported
     const exe = b.addExecutable(.{
         .name = "demo",
-        .root_source_file = b.path("src/main.zig"),
+        .root_source_file = .{ .path = "src/main.zig" },
         .target = target,
         .optimize = optimize,
     });
-    exe.use_llvm = !no_llvm;
     const smithy_dep = b.dependency("smithy", .{
         // These are the arguments to the dependency. It expects a target and optimization level.
         .target = target,
@@ -73,6 +84,17 @@ pub fn build(b: *Builder) !void {
     const smithy_module = smithy_dep.module("smithy");
     exe.root_module.addImport("smithy", smithy_module); // not sure this should be here...
 
+    // Expose module to others
+    _ = b.addModule("aws", .{
+        .root_source_file = .{ .path = "src/aws.zig" },
+        .imports = &.{.{ .name = "smithy", .module = smithy_module }},
+    });
+
+    // Expose module to others
+    _ = b.addModule("aws-signing", .{
+        .root_source_file = .{ .path = "src/aws_signing.zig" },
+        .imports = &.{.{ .name = "smithy", .module = smithy_module }},
+    });
     // TODO: This does not work correctly due to https://github.com/ziglang/zig/issues/16354
     //
     // We are working here with kind of a weird dependency though. So we can do this
@@ -95,27 +117,39 @@ pub fn build(b: *Builder) !void {
     const run_step = b.step("run", "Run the app");
     run_step.dependOn(&run_cmd.step);
 
+    const gen_step = blk: {
     const cg = b.step("gen", "Generate zig service code from smithy models");
 
     const cg_exe = b.addExecutable(.{
         .name = "codegen",
-        .root_source_file = b.path("codegen/src/main.zig"),
+        .root_source_file = .{ .path = "codegen/src/main.zig" },
         // We need this generated for the host, not the real target
-        .target = b.graph.host,
+        .target = b.host,
         .optimize = if (b.verbose) .Debug else .ReleaseSafe,
     });
-    cg_exe.root_module.addImport("smithy", smithy_module);
+    cg_exe.root_module.addImport("smithy", smithy_dep.module("smithy"));
     var cg_cmd = b.addRunArtifact(cg_exe);
     cg_cmd.addArg("--models");
+    const hash = hash_blk: {
+        for (b.available_deps) |dep| {
+            const dep_name = dep.@"0";
+            const dep_hash = dep.@"1";
+            if (std.mem.eql(u8, dep_name, "models"))
+                break :hash_blk dep_hash;
+        }
+        return error.DependencyNamedModelsNotFoundInBuildZigZon;
+    };
     cg_cmd.addArg(try std.fs.path.join(
         b.allocator,
         &[_][]const u8{
-            try b.dependency("models", .{}).path("").getPath3(b, null).toString(b.allocator),
+            b.graph.global_cache_root.path.?,
+            "p",
+            hash,
             models_subdir,
         },
     ));
     cg_cmd.addArg("--output");
-    const cg_output_dir = cg_cmd.addOutputDirectoryArg("src/models");
+    cg_cmd.addDirectoryArg(b.path("src/models"));
     if (b.verbose)
         cg_cmd.addArg("--verbose");
     // cg_cmd.step.dependOn(&fetch_step.step);
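The `hash_blk` on the zig-0.12.0 side digs the `models` entry's hash out of `b.available_deps` (the list of name/hash tuples generated from build.zig.zon) and hand-builds the package-cache path, whereas master asks the build system to resolve the dependency's root directly. A standalone sketch of the 0.12-style path construction (cache root and hash values are stand-ins):

    const std = @import("std");

    pub fn main() !void {
        var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
        defer arena.deinit();
        // Stand-ins for b.graph.global_cache_root and the models hash in build.zig.zon:
        const global_cache = "/home/user/.cache/zig";
        const models_hash = "1220aabbccdd";
        const models_subdir = "codegen/sdk-codegen/aws-models/";
        const models_path = try std.fs.path.join(
            arena.allocator(),
            &.{ global_cache, "p", models_hash, models_subdir },
        );
        std.debug.print("--models {s}\n", .{models_path});
    }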
@@ -134,34 +168,10 @@ pub fn build(b: *Builder) !void {
     // later about warning on manual changes...
 
     cg.dependOn(&cg_cmd.step);
+        break :blk cg;
+    };
 
-    exe.step.dependOn(cg);
+    exe.step.dependOn(gen_step);
 
-    // This allows us to have each module depend on the
-    // generated service manifest.
-    const service_manifest_module = b.createModule(.{
-        .root_source_file = cg_output_dir.path(b, "service_manifest.zig"),
-        .target = target,
-        .optimize = optimize,
-    });
-    service_manifest_module.addImport("smithy", smithy_module);
-
-    exe.root_module.addImport("service_manifest", service_manifest_module);
-
-    // Expose module to others
-    _ = b.addModule("aws", .{
-        .root_source_file = b.path("src/aws.zig"),
-        .imports = &.{
-            .{ .name = "smithy", .module = smithy_module },
-            .{ .name = "service_manifest", .module = service_manifest_module },
-        },
-    });
-
-    // Expose module to others
-    _ = b.addModule("aws-signing", .{
-        .root_source_file = b.path("src/aws_signing.zig"),
-        .imports = &.{.{ .name = "smithy", .module = smithy_module }},
-    });
-
     // Similar to creating the run step earlier, this exposes a `test` step to
     // the `zig build --help` menu, providing a way for the user to request
@@ -187,48 +197,17 @@ pub fn build(b: *Builder) !void {
         // Creates a step for unit testing. This only builds the test executable
         // but does not run it.
         const unit_tests = b.addTest(.{
-            .root_source_file = b.path("src/aws.zig"),
+            .root_source_file = .{ .path = "src/aws.zig" },
             .target = b.resolveTargetQuery(t),
             .optimize = optimize,
-            .filters = test_filters,
         });
-        unit_tests.root_module.addImport("smithy", smithy_module);
-        unit_tests.root_module.addImport("service_manifest", service_manifest_module);
-        unit_tests.step.dependOn(cg);
-        unit_tests.use_llvm = !no_llvm;
+        unit_tests.root_module.addImport("smithy", smithy_dep.module("smithy"));
+        unit_tests.step.dependOn(gen_step);
 
         const run_unit_tests = b.addRunArtifact(unit_tests);
         run_unit_tests.skip_foreign_checks = true;
 
         test_step.dependOn(&run_unit_tests.step);
     }
-    const check = b.step("check", "Check compilation errors");
-    check.dependOn(&exe.step);
-
-    // Similar to creating the run step earlier, this exposes a `test` step to
-    // the `zig build --help` menu, providing a way for the user to request
-    // running the unit tests.
-    const smoke_test_step = b.step("smoke-test", "Run unit tests");
-
-    // Creates a step for unit testing. This only builds the test executable
-    // but does not run it.
-    const smoke_test = b.addTest(.{
-        .root_source_file = b.path("src/aws.zig"),
-        .target = target,
-        .optimize = optimize,
-        .filters = test_filters,
-    });
-    smoke_test.use_llvm = !no_llvm;
-    smoke_test.root_module.addImport("smithy", smithy_module);
-    smoke_test.root_module.addImport("service_manifest", service_manifest_module);
-    smoke_test.step.dependOn(cg);
-
-    const run_smoke_test = b.addRunArtifact(smoke_test);
-
-    smoke_test_step.dependOn(&run_smoke_test.step);
-    if (no_bin) {
-        b.getInstallStep().dependOn(&exe.step);
-    } else {
-        b.installArtifact(exe);
-    }
+    b.installArtifact(exe);
 }
build.zig.zon
@@ -1,20 +1,16 @@
 .{
-    .name = .aws,
+    .name = "aws",
     .version = "0.0.1",
-    .fingerprint = 0x1f26b7b27005bb49,
     .paths = .{
         "build.zig",
         "build.zig.zon",
         "src",
-        "codegen",
-        "README.md",
-        "LICENSE",
     },
 
     .dependencies = .{
         .smithy = .{
-            .url = "https://git.lerch.org/lobo/smithy/archive/a4c6ec6dfe552c57bab601c7d99e8de02bbab1fe.tar.gz",
-            .hash = "smithy-1.0.0-uAyBgS_MAgC4qgc9QaEy5Y5Nf7kv32buQZBYugqNQsAn",
+            .url = "https://git.lerch.org/lobo/smithy/archive/1e534201c4df5ea4f615faeedc69d414adbec0b1.tar.gz",
+            .hash = "1220af63ae0498010004af79936cedf3fe6702f516daab77ebbd97a274eba1b42aad",
         },
         .models = .{
             .url = "https://github.com/aws/aws-sdk-go-v2/archive/58cf6509525a12d64fd826da883bfdbacbd2f00e.tar.gz",
@ -2,33 +2,7 @@ const std = @import("std");
|
||||||
// options is a json.Options, but since we're using our hacked json.zig we don't want to
|
// options is a json.Options, but since we're using our hacked json.zig we don't want to
|
||||||
// specifically call this out
|
// specifically call this out
|
||||||
pub fn serializeMap(map: anytype, key: []const u8, options: anytype, out_stream: anytype) !bool {
|
pub fn serializeMap(map: anytype, key: []const u8, options: anytype, out_stream: anytype) !bool {
|
||||||
if (@typeInfo(@TypeOf(map)) == .optional) {
|
if (map.len == 0) return true;
|
||||||
if (map == null)
|
|
||||||
return false
|
|
||||||
else
|
|
||||||
return serializeMapInternal(map.?, key, options, out_stream);
|
|
||||||
}
|
|
||||||
return serializeMapInternal(map, key, options, out_stream);
|
|
||||||
}
|
|
||||||
|
|
||||||
fn serializeMapInternal(map: anytype, key: []const u8, options: anytype, out_stream: anytype) !bool {
|
|
||||||
if (map.len == 0) {
|
|
||||||
var child_options = options;
|
|
||||||
if (child_options.whitespace) |*child_ws|
|
|
||||||
child_ws.indent_level += 1;
|
|
||||||
|
|
||||||
try out_stream.writeByte('"');
|
|
||||||
try out_stream.writeAll(key);
|
|
||||||
_ = try out_stream.write("\":");
|
|
||||||
if (options.whitespace) |ws| {
|
|
||||||
if (ws.separator) {
|
|
||||||
try out_stream.writeByte(' ');
|
|
||||||
}
|
|
||||||
}
|
|
||||||
try out_stream.writeByte('{');
|
|
||||||
try out_stream.writeByte('}');
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
// TODO: Map might be [][]struct{key, value} rather than []struct{key, value}
|
// TODO: Map might be [][]struct{key, value} rather than []struct{key, value}
|
||||||
var child_options = options;
|
var child_options = options;
|
||||||
if (child_options.whitespace) |*child_ws|
|
if (child_options.whitespace) |*child_ws|
|
||||||
|
|
|
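master's rewrite both handles optional maps (a null map serializes nothing and reports `false`) and emits an explicit empty object for zero-length maps, where the 0.12 code simply returned `true` without writing anything. A self-contained sketch of the empty-map output path (whitespace handling elided; `options` in the real code is the project's hacked json.zig options type):

    const std = @import("std");

    // Mirrors the writeByte/writeAll sequence in the master-side branch above.
    fn writeEmptyMap(key: []const u8, out_stream: anytype) !void {
        try out_stream.writeByte('"');
        try out_stream.writeAll(key);
        _ = try out_stream.write("\":");
        try out_stream.writeByte('{');
        try out_stream.writeByte('}');
    }

    test "empty map serializes as \"key\":{}" {
        var buf: [32]u8 = undefined;
        var fbs = std.io.fixedBufferStream(&buf);
        try writeEmptyMap("tags", fbs.writer());
        try std.testing.expectEqualStrings("\"tags\":{}", fbs.getWritten());
    }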
codegen/src/main.zig
@@ -34,7 +34,7 @@ pub fn main() anyerror!void {
             models_dir = try std.fs.cwd().openDir(args[i + 1], .{ .iterate = true });
     }
     // TODO: Seems like we should remove this in favor of a package
-    try output_dir.writeFile(.{ .sub_path = "json.zig", .data = json_zig });
+    try output_dir.writeFile("json.zig", json_zig);
 
     // TODO: We need a different way to handle this file...
     const manifest_file_started = false;
|
||||||
// re-calculate so we can store the manifest
|
// re-calculate so we can store the manifest
|
||||||
model_digest = calculated_manifest.model_dir_hash_digest;
|
model_digest = calculated_manifest.model_dir_hash_digest;
|
||||||
calculated_manifest = try calculateDigests(models_dir, output_dir, &thread_pool);
|
calculated_manifest = try calculateDigests(models_dir, output_dir, &thread_pool);
|
||||||
try output_dir.writeFile(.{ .sub_path = "output_manifest.json", .data = try std.json.stringifyAlloc(
|
try output_dir.writeFile("output_manifest.json", try std.json.stringifyAlloc(
|
||||||
allocator,
|
allocator,
|
||||||
calculated_manifest,
|
calculated_manifest,
|
||||||
.{ .whitespace = .indent_2 },
|
.{ .whitespace = .indent_2 },
|
||||||
) });
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
var model_digest: ?[Hasher.hex_multihash_len]u8 = null;
|
var model_digest: ?[Hasher.hex_multihash_len]u8 = null;
|
||||||
|
@@ -435,7 +435,7 @@ fn generateServices(allocator: std.mem.Allocator, comptime _: []const u8, file:
 
 fn generateAdditionalTypes(allocator: std.mem.Allocator, file_state: FileGenerationState, writer: anytype) !void {
     // More types may be added during processing
-    while (file_state.additional_types_to_generate.pop()) |t| {
+    while (file_state.additional_types_to_generate.popOrNull()) |t| {
         if (file_state.additional_types_generated.getEntry(t.name) != null) continue;
         // std.log.info("\t\t{s}", .{t.name});
         var type_stack = std.ArrayList(*const smithy.ShapeInfo).init(allocator);
@@ -716,7 +716,7 @@ fn generateTypeFor(shape_id: []const u8, writer: anytype, state: GenerationState
             // The serializer will have to deal with the idea we might be an array
             return try generateTypeFor(shape.set.member_target, writer, state, true);
         },
-        .timestamp => |s| try generateSimpleTypeFor(s, "f128", writer),
+        .timestamp => |s| try generateSimpleTypeFor(s, "i64", writer),
         .blob => |s| try generateSimpleTypeFor(s, "[]const u8", writer),
         .boolean => |s| try generateSimpleTypeFor(s, "bool", writer),
         .double => |s| try generateSimpleTypeFor(s, "f64", writer),
example/build.zig
@@ -19,12 +19,18 @@ pub fn build(b: *std.Build) void {
         .name = "tmp",
         // In this case the main source file is merely a path, however, in more
         // complicated build scripts, this could be a generated file.
-        .root_source_file = b.path("src/main.zig"),
+        .root_source_file = .{ .path = "src/main.zig" },
         .target = target,
         .optimize = optimize,
     });
 
-    const aws_dep = b.dependency("aws", .{
+    // const smithy_dep = b.dependency("smithy", .{
+    //     // These are the two arguments to the dependency. It expects a target and optimization level.
+    //     .target = target,
+    //     .optimize = optimize,
+    // });
+    // exe.addModule("smithy", smithy_dep.module("smithy"));
+    const aws_dep = b.dependency("aws-zig", .{
         // These are the two arguments to the dependency. It expects a target and optimization level.
         .target = target,
         .optimize = optimize,
@@ -62,7 +68,7 @@ pub fn build(b: *std.Build) void {
     // Creates a step for unit testing. This only builds the test executable
     // but does not run it.
     const unit_tests = b.addTest(.{
-        .root_source_file = b.path("src/main.zig"),
+        .root_source_file = .{ .path = "src/main.zig" },
         .target = target,
         .optimize = optimize,
     });
example/build.zig.zon
@@ -1,13 +1,16 @@
 .{
-    .name = .myapp,
+    .name = "myapp",
     .version = "0.0.1",
-    .fingerprint = 0x8798022a511224c5,
     .paths = .{""},
 
     .dependencies = .{
-        .aws = .{
-            .url = "https://git.lerch.org/api/packages/lobo/generic/aws-sdk-with-models/7a6086447c1249b0e5b5b5f3873d2f7932bea56d/7a6086447c1249b0e5b5b5f3873d2f7932bea56d-with-models.tar.gz",
-            .hash = "aws-0.0.1-SbsFcGN_CQCBjurpc2GEMw4c_qAkGu6KpuVnLBLY4L4q",
+        .smithy = .{
+            .url = "https://git.lerch.org/lobo/smithy/archive/1e534201c4df5ea4f615faeedc69d414adbec0b1.tar.gz",
+            .hash = "1220af63ae0498010004af79936cedf3fe6702f516daab77ebbd97a274eba1b42aad",
+        },
+        .@"aws-zig" = .{
+            .url = "https://git.lerch.org/api/packages/lobo/generic/aws-sdk-with-models/a0773971f2f52182c8a5235582500d36afda2e81/a0773971f2f52182c8a5235582500d36afda2e81-with-models.tar.gz",
+            .hash = "1220198f7b734c1cc6a683ad13246439a59be934156a2df3a734bcaf15433b33eead",
         },
     },
 }
example/src/main.zig
@@ -36,8 +36,10 @@ pub fn main() anyerror!void {
         .client = client,
     };
 
+    // As of 2023-08-28, only ECS from this list supports TLS v1.3
+    // AWS commitment is to enable all services by 2023-12-31
     const services = aws.Services(.{ .sts, .kms }){};
-    try stdout.print("Calling KMS ListKeys\n", .{});
+    try stdout.print("Calling KMS ListKeys, a TLS 1.3 enabled service\n", .{});
     try stdout.print("You likely have at least some AWS-generated keys in your account,\n", .{});
     try stdout.print("but if the account has not had many services used, this may return 0 keys\n\n", .{});
     const call_kms = try aws.Request(services.kms.list_keys).call(.{}, options);
|
||||||
}
|
}
|
||||||
defer call_kms.deinit();
|
defer call_kms.deinit();
|
||||||
|
|
||||||
try stdout.print("\n\n\nCalling STS GetCallerIdentity\n", .{});
|
try stdout.print("\n\n\nCalling STS GetCallerIdentity. This does not have TLS 1.3 in September 2023\n", .{});
|
||||||
|
try stdout.print("A failure may occur\n\n", .{});
|
||||||
const call = try aws.Request(services.sts.get_caller_identity).call(.{}, options);
|
const call = try aws.Request(services.sts.get_caller_identity).call(.{}, options);
|
||||||
defer call.deinit();
|
defer call.deinit();
|
||||||
try stdout.print("\tarn: {s}\n", .{call.response.arn.?});
|
try stdout.print("\tarn: {s}\n", .{call.response.arn.?});
|
||||||
|
|
618 src/aws.zig
@@ -8,74 +8,8 @@ const case = @import("case.zig");
 const date = @import("date.zig");
 const servicemodel = @import("servicemodel.zig");
 const xml_shaper = @import("xml_shaper.zig");
-const xml_serializer = @import("xml_serializer.zig");

-const scoped_log = std.log.scoped(.aws);
+const log = std.log.scoped(.aws);

-/// control all logs directly/indirectly used by aws sdk. Not recommended for
-/// use under normal circumstances, but helpful for times when the zig logging
-/// controls are insufficient (e.g. use in build script)
-pub fn globalLogControl(aws_level: std.log.Level, http_level: std.log.Level, signing_level: std.log.Level, off: bool) void {
-    const signing = @import("aws_signing.zig");
-    const credentials = @import("aws_credentials.zig");
-    logs_off = off;
-    signing.logs_off = off;
-    credentials.logs_off = off;
-    awshttp.logs_off = off;
-    log_level = aws_level;
-    awshttp.log_level = http_level;
-    signing.log_level = signing_level;
-    credentials.log_level = signing_level;
-}
-/// Specifies logging level. This should not be touched unless the normal
-/// zig logging capabilities are inaccessible (e.g. during a build)
-pub var log_level: std.log.Level = .debug;
-
-/// Turn off logging completely
-pub var logs_off: bool = false;
-const log = struct {
-    /// Log an error message. This log level is intended to be used
-    /// when something has gone wrong. This might be recoverable or might
-    /// be followed by the program exiting.
-    pub fn err(
-        comptime format: []const u8,
-        args: anytype,
-    ) void {
-        if (!logs_off and @intFromEnum(std.log.Level.err) <= @intFromEnum(log_level))
-            scoped_log.err(format, args);
-    }
-
-    /// Log a warning message. This log level is intended to be used if
-    /// it is uncertain whether something has gone wrong or not, but the
-    /// circumstances would be worth investigating.
-    pub fn warn(
-        comptime format: []const u8,
-        args: anytype,
-    ) void {
-        if (!logs_off and @intFromEnum(std.log.Level.warn) <= @intFromEnum(log_level))
-            scoped_log.warn(format, args);
-    }
-
-    /// Log an info message. This log level is intended to be used for
-    /// general messages about the state of the program.
-    pub fn info(
-        comptime format: []const u8,
-        args: anytype,
-    ) void {
-        if (!logs_off and @intFromEnum(std.log.Level.info) <= @intFromEnum(log_level))
-            scoped_log.info(format, args);
-    }
-
-    /// Log a debug message. This log level is intended to be used for
-    /// messages which are only useful for debugging.
-    pub fn debug(
-        comptime format: []const u8,
-        args: anytype,
-    ) void {
-        if (!logs_off and @intFromEnum(std.log.Level.debug) <= @intFromEnum(log_level))
-            scoped_log.debug(format, args);
-    }
-};

 pub const Options = struct {
     region: []const u8 = "aws-global",
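
The block removed above is master's escape hatch for steering SDK logging when std.log options are out of reach (for example, inside a build script). Usage is a single call; the surrounding main() is assumed for illustration:

    const aws = @import("aws");

    pub fn main() !void {
        // Levels for the aws, http, and signing scopes; pass true as the
        // final argument to silence SDK logs entirely.
        aws.globalLogControl(.warn, .warn, .warn, false);
        // ... continue using the SDK as usual ...
    }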
@@ -85,18 +19,6 @@ pub const Options = struct {

     /// Used for testing to provide consistent signing. If null, will use current time
     signing_time: ?i64 = null,
-    diagnostics: ?*Diagnostics = null,
-};
-
-pub const Diagnostics = struct {
-    http_code: i64,
-    response_body: []const u8,
-    allocator: std.mem.Allocator,
-
-    pub fn deinit(self: *Diagnostics) void {
-        self.allocator.free(self.response_body);
-        self.response_body = undefined;
-    }
 };

 /// Using this constant may blow up build times. Recommed using Services()
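
Diagnostics (master only) lets a caller recover the HTTP status and body after error.HttpFailure. A hedged usage sketch, assuming allocator, client, and services are already set up as in the example program earlier in this diff:

    var diagnostics = aws.Diagnostics{
        .http_code = undefined,
        .response_body = undefined,
        .allocator = allocator,
    };
    const options = aws.Options{
        .region = "us-west-2",
        .client = client,
        .diagnostics = &diagnostics,
    };
    const call = aws.Request(services.sts.get_caller_identity).call(.{}, options) catch |err| {
        // On failure the SDK fills http_code and dupes the response body.
        defer diagnostics.deinit();
        std.log.err("HTTP {d}: {s}", .{ diagnostics.http_code, diagnostics.response_body });
        return err;
    };
    defer call.deinit();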
@@ -192,15 +114,12 @@ pub fn Request(comptime request_action: anytype) type {
             log.debug("Rest method: '{s}'", .{aws_request.method});
             log.debug("Rest success code: '{d}'", .{Action.http_config.success_code});
             log.debug("Rest raw uri: '{s}'", .{Action.http_config.uri});
-            var al = std.ArrayList([]const u8).init(options.client.allocator);
-            defer al.deinit();
             aws_request.path = try buildPath(
                 options.client.allocator,
                 Action.http_config.uri,
                 ActionRequest,
                 request,
                 !std.mem.eql(u8, Self.service_meta.sdk_id, "S3"),
-                &al,
             );
             defer options.client.allocator.free(aws_request.path);
             log.debug("Rest processed uri: '{s}'", .{aws_request.path});
@@ -232,12 +151,10 @@ pub fn Request(comptime request_action: anytype) type {
             defer nameAllocator.deinit();
             if (Self.service_meta.aws_protocol == .rest_json_1) {
                 if (std.mem.eql(u8, "PUT", aws_request.method) or std.mem.eql(u8, "POST", aws_request.method)) {
-                    try json.stringify(request, .{ .whitespace = .{}, .emit_null = false, .exclude_fields = al.items }, buffer.writer());
+                    try json.stringify(request, .{ .whitespace = .{} }, buffer.writer());
                 }
             }
             aws_request.body = buffer.items;
-            var rest_xml_body: ?[]const u8 = null;
-            defer if (rest_xml_body) |b| options.client.allocator.free(b);
             if (Self.service_meta.aws_protocol == .rest_xml) {
                 if (std.mem.eql(u8, "PUT", aws_request.method) or std.mem.eql(u8, "POST", aws_request.method)) {
                     if (@hasDecl(ActionRequest, "http_payload")) {
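
The exclude_fields option ties this hunk to the buildPath change above: master collects every field name consumed by URI substitution in al so the JSON serializer can skip it, preventing a value from being sent both in the path and in the body. In outline (names taken from the surrounding code):

    var al = std.ArrayList([]const u8).init(options.client.allocator);
    defer al.deinit();
    // buildPath appends each substituted label (e.g. "FunctionName") to al...
    // ...and the serializer then leaves those fields out of the payload:
    try json.stringify(request, .{
        .whitespace = .{},
        .emit_null = false,
        .exclude_fields = al.items,
    }, buffer.writer());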
@@ -245,49 +162,7 @@ pub fn Request(comptime request_action: anytype) type {
                         // the http_payload declaration on the request type.
                         // Hopefully these will always be ?[]const u8, otherwise
                         // we should see a compile error on this line
-                        const payload = @field(request, ActionRequest.http_payload);
-                        const T = @TypeOf(payload);
-                        var body_assigned = false;
-                        if (T == ?[]const u8) {
-                            aws_request.body = payload.?;
-                            body_assigned = true;
-                        }
-                        if (T == []const u8) {
-                            aws_request.body = payload;
-                            body_assigned = true;
-                        }
-
-                        if (!body_assigned) {
-                            const sm = ActionRequest.metaInfo().service_metadata;
-                            if (!std.mem.eql(u8, sm.endpoint_prefix, "s3"))
-                                // Because the attributes below are most likely only
-                                // applicable to s3, we are better off to fail
-                                // early. This portion of the code base should
-                                // only be executed for s3 as no other known
-                                // service uses this protocol
-                                return error.NotImplemented;
-
-                            const attrs = try std.fmt.allocPrint(
-                                options.client.allocator,
-                                "xmlns=\"http://{s}.amazonaws.com/doc/{s}/\"",
-                                .{ sm.endpoint_prefix, sm.version },
-                            );
-                            defer options.client.allocator.free(attrs); // once serialized, the value should be copied over
-
-                            // Need to serialize this
-                            rest_xml_body = try xml_serializer.stringifyAlloc(
-                                options.client.allocator,
-                                payload,
-                                .{
-                                    .whitespace = .indent_2,
-                                    .root_name = request.fieldNameFor(ActionRequest.http_payload),
-                                    .root_attributes = attrs,
-                                    .emit_null_optional_fields = false,
-                                    .include_declaration = false,
-                                },
-                            );
-                            aws_request.body = rest_xml_body.?;
-                        }
+                        aws_request.body = @field(request, ActionRequest.http_payload).?;
                     } else {
                         return error.NotImplemented;
                     }
@@ -300,7 +175,6 @@ pub fn Request(comptime request_action: anytype) type {
                 .dualstack = options.dualstack,
                 .client = options.client,
                 .signing_time = options.signing_time,
-                .diagnostics = options.diagnostics,
             });
         }

@@ -398,10 +272,6 @@ pub fn Request(comptime request_action: anytype) type {
             defer response.deinit();
             if (response.response_code != options.success_http_code) {
                 try reportTraffic(options.client.allocator, "Call Failed", aws_request, response, log.err);
-                if (options.diagnostics) |d| {
-                    d.http_code = response.response_code;
-                    d.response_body = try d.allocator.dupe(u8, response.body);
-                }
                 return error.HttpFailure;
             }

@@ -483,7 +353,7 @@ pub fn Request(comptime request_action: anytype) type {
             // First, we need to determine if we care about a response at all
             // If the expected result has no fields, there's no sense in
             // doing any more work. Let's bail early
-            comptime var expected_body_field_len = std.meta.fields(action.Response).len;
+            var expected_body_field_len = std.meta.fields(action.Response).len;
             if (@hasDecl(action.Response, "http_header"))
                 expected_body_field_len -= std.meta.fields(@TypeOf(action.Response.http_header)).len;
             if (@hasDecl(action.Response, "http_payload")) {
@@ -509,6 +379,8 @@ pub fn Request(comptime request_action: anytype) type {

             // We don't care about the body if there are no fields we expect there...
             if (std.meta.fields(action.Response).len == 0 or expected_body_field_len == 0) {
+                // ^^ This should be redundant, but is necessary. I suspect it's a compiler quirk
+                //
                 // Do we care if an unexpected body comes in?
                 return FullResponseType{
                     .response = .{},
|
@ -562,9 +434,9 @@ pub fn Request(comptime request_action: anytype) type {
|
||||||
// And the response property below will pull whatever is the ActionResult object
|
// And the response property below will pull whatever is the ActionResult object
|
||||||
// We can grab index [0] as structs are guaranteed by zig to be returned in the order
|
// We can grab index [0] as structs are guaranteed by zig to be returned in the order
|
||||||
// declared, and we're declaring in that order in ServerResponse().
|
// declared, and we're declaring in that order in ServerResponse().
|
||||||
const real_response = @field(parsed_response, @typeInfo(response_types.NormalResponse).@"struct".fields[0].name);
|
const real_response = @field(parsed_response, @typeInfo(response_types.NormalResponse).Struct.fields[0].name);
|
||||||
return FullResponseType{
|
return FullResponseType{
|
||||||
.response = @field(real_response, @typeInfo(@TypeOf(real_response)).@"struct".fields[0].name),
|
.response = @field(real_response, @typeInfo(@TypeOf(real_response)).Struct.fields[0].name),
|
||||||
.response_metadata = .{
|
.response_metadata = .{
|
||||||
.request_id = try options.client.allocator.dupe(u8, real_response.ResponseMetadata.RequestId),
|
.request_id = try options.client.allocator.dupe(u8, real_response.ResponseMetadata.RequestId),
|
||||||
},
|
},
|
||||||
|
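
This hunk is purely the std.builtin.Type rename: Zig 0.12 spells the union tags capitalized, while newer Zig lowercases them (quoting where the name is a keyword). Side by side:

    fn firstFieldName(comptime T: type) []const u8 {
        // Zig 0.12, as on this branch:
        return @typeInfo(T).Struct.fields[0].name;
        // Newer Zig, as on master:
        // return @typeInfo(T).@"struct".fields[0].name;
    }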
@@ -733,6 +605,7 @@ pub fn Request(comptime request_action: anytype) type {

     fn ParsedJsonData(comptime T: type) type {
         return struct {
+            raw_response_parsed: bool,
             parsed_response_ptr: *T,
             allocator: std.mem.Allocator,

@@ -741,6 +614,7 @@ pub fn Request(comptime request_action: anytype) type {
             pub fn deinit(self: MySelf) void {
                 // This feels like it should result in a use after free, but it
                 // seems to be working?
+                if (self.raw_response_parsed)
                     self.allocator.destroy(self.parsed_response_ptr);
             }
         };
@@ -752,17 +626,13 @@ pub fn Request(comptime request_action: anytype) type {

             // Extract the first json key
             const key = firstJsonKey(data);
-            const found_normal_json_response =
-                std.mem.eql(u8, key, action.action_name ++ "Response") or
-                std.mem.eql(u8, key, action.action_name ++ "Result") or
-                isOtherNormalResponse(response_types.NormalResponse, key);
+            const found_normal_json_response = std.mem.eql(u8, key, action.action_name ++ "Response") or
+                std.mem.eql(u8, key, action.action_name ++ "Result");
+            var raw_response_parsed = false;
             var stream = json.TokenStream.init(data);
             const parsed_response_ptr = blk: {
-                const ptr = try options.client.allocator.create(response_types.NormalResponse);
-                errdefer options.client.allocator.destroy(ptr);
-
-                if (!response_types.isRawPossible or found_normal_json_response) {
-                    ptr.* = (json.parse(response_types.NormalResponse, &stream, parser_options) catch |e| {
+                if (!response_types.isRawPossible or found_normal_json_response)
+                    break :blk &(json.parse(response_types.NormalResponse, &stream, parser_options) catch |e| {
                         log.err(
                             \\Call successful, but unexpected response from service.
                             \\This could be the result of a bug or a stale set of code generated
@@ -778,10 +648,9 @@ pub fn Request(comptime request_action: anytype) type {
                         return e;
                     });

-                    break :blk ptr;
-                }
-
                 log.debug("Appears server has provided a raw response", .{});
+                raw_response_parsed = true;
+                const ptr = try options.client.allocator.create(response_types.NormalResponse);
                 @field(ptr.*, std.meta.fields(action.Response)[0].name) =
                     json.parse(response_types.RawResponse, &stream, parser_options) catch |e| {
                         log.err(
@@ -801,32 +670,21 @@ pub fn Request(comptime request_action: anytype) type {
                 break :blk ptr;
             };
             return ParsedJsonData(response_types.NormalResponse){
-                .parsed_response_ptr = parsed_response_ptr,
+                .raw_response_parsed = raw_response_parsed,
+                .parsed_response_ptr = @constCast(parsed_response_ptr), //TODO: why doesn't changing const->var above fix this?
                 .allocator = options.client.allocator,
             };
         }
     };
 }

-fn isOtherNormalResponse(comptime T: type, first_key: []const u8) bool {
-    const fields = std.meta.fields(T);
-    if (fields.len != 1) return false;
-    const first_field = fields[0];
-    if (!@hasDecl(T, "fieldNameFor")) return false;
-    const expected_key = T.fieldNameFor(undefined, first_field.name);
-    return std.mem.eql(u8, first_key, expected_key);
-}
-
 fn coerceFromString(comptime T: type, val: []const u8) anyerror!T {
-    if (@typeInfo(T) == .optional) return try coerceFromString(@typeInfo(T).optional.child, val);
+    if (@typeInfo(T) == .Optional) return try coerceFromString(@typeInfo(T).Optional.child, val);
     // TODO: This is terrible...fix it
     switch (T) {
         bool => return std.ascii.eqlIgnoreCase(val, "true"),
-        i64, i128 => return parseInt(T, val) catch |e| {
-            log.err("Invalid string representing {s}: {s}", .{ @typeName(T), val });
-            return e;
-        },
-        f64, f128 => return std.fmt.parseFloat(T, val) catch |e| {
-            log.err("Invalid string representing {s}: {s}", .{ @typeName(T), val });
+        i64 => return parseInt(T, val) catch |e| {
+            log.err("Invalid string representing i64: {s}", .{val});
             return e;
         },
         else => return val,
@@ -842,20 +700,14 @@ fn parseInt(comptime T: type, val: []const u8) !T {
             return e;
         };
     }
-    if (T == f128) {
-        return @as(f128, date.parseEnglishToTimestamp(val)) catch |e| {
-            log.err("Error coercing date string '{s}' to timestamp value", .{val});
-            return e;
-        };
-    }
     log.err("Error parsing string '{s}' to integer", .{val});
     return rc;
 }

 fn generalAllocPrint(allocator: std.mem.Allocator, val: anytype) !?[]const u8 {
     switch (@typeInfo(@TypeOf(val))) {
-        .optional => if (val) |v| return generalAllocPrint(allocator, v) else return null,
-        .array, .pointer => return try std.fmt.allocPrint(allocator, "{s}", .{val}),
+        .Optional => if (val) |v| return generalAllocPrint(allocator, v) else return null,
+        .Array, .Pointer => return try std.fmt.allocPrint(allocator, "{s}", .{val}),
         else => return try std.fmt.allocPrint(allocator, "{any}", .{val}),
     }
 }
@@ -974,20 +826,20 @@ fn ServerResponse(comptime action: anytype) type {
         RequestId: []u8,
     };
     const Result = @Type(.{
-        .@"struct" = .{
+        .Struct = .{
             .layout = .auto,
             .fields = &[_]std.builtin.Type.StructField{
                 .{
                     .name = action.action_name ++ "Result",
                     .type = T,
-                    .default_value_ptr = null,
+                    .default_value = null,
                     .is_comptime = false,
                     .alignment = 0,
                 },
                 .{
                     .name = "ResponseMetadata",
                     .type = ResponseMetadata,
-                    .default_value_ptr = null,
+                    .default_value = null,
                     .is_comptime = false,
                     .alignment = 0,
                 },
@@ -997,13 +849,13 @@ fn ServerResponse(comptime action: anytype) type {
         },
     });
     return @Type(.{
-        .@"struct" = .{
+        .Struct = .{
             .layout = .auto,
             .fields = &[_]std.builtin.Type.StructField{
                 .{
                     .name = action.action_name ++ "Response",
                     .type = Result,
-                    .default_value_ptr = null,
+                    .default_value = null,
                     .is_comptime = false,
                     .alignment = 0,
                 },
@@ -1063,8 +915,8 @@ fn FullResponse(comptime action: anytype) type {
 }
 fn safeFree(allocator: std.mem.Allocator, obj: anytype) void {
     switch (@typeInfo(@TypeOf(obj))) {
-        .pointer => allocator.free(obj),
-        .optional => if (obj) |o| safeFree(allocator, o),
+        .Pointer => allocator.free(obj),
+        .Optional => if (obj) |o| safeFree(allocator, o),
         else => {},
     }
 }
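
ServerResponse() builds these wrapper types with @Type reification; only the field-metadata name differs between branches (.default_value on 0.12, .default_value_ptr on newer Zig). A standalone sketch of the pattern in 0.12 spelling (Wrapper is a hypothetical name for illustration):

    fn Wrapper(comptime name: [:0]const u8, comptime T: type) type {
        return @Type(.{
            .Struct = .{
                .layout = .auto,
                .fields = &[_]std.builtin.Type.StructField{.{
                    .name = name,
                    .type = T,
                    .default_value = null,
                    .is_comptime = false,
                    .alignment = 0,
                }},
                .decls = &.{},
                .is_tuple = false,
            },
        });
    }
    // Wrapper("GetRoleResult", T) yields struct { GetRoleResult: T }.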
@@ -1078,7 +930,6 @@ fn buildPath(
     comptime ActionRequest: type,
     request: anytype,
     encode_slash: bool,
-    replaced_fields: *std.ArrayList([]const u8),
 ) ![]const u8 {
     var buffer = try std.ArrayList(u8).initCapacity(allocator, raw_uri.len);
     // const writer = buffer.writer();
@@ -1100,7 +951,6 @@ fn buildPath(
     const replacement_label = raw_uri[start..end];
     inline for (std.meta.fields(ActionRequest)) |field| {
         if (std.mem.eql(u8, request.fieldNameFor(field.name), replacement_label)) {
-            try replaced_fields.append(replacement_label);
             var replacement_buffer = try std.ArrayList(u8).initCapacity(allocator, raw_uri.len);
             defer replacement_buffer.deinit();
             var encoded_buffer = try std.ArrayList(u8).initCapacity(allocator, raw_uri.len);
@@ -1173,7 +1023,7 @@ fn buildQuery(allocator: std.mem.Allocator, request: anytype) ![]const u8 {
     var prefix = "?";
     if (@hasDecl(@TypeOf(request), "http_query")) {
         const query_arguments = @field(@TypeOf(request), "http_query");
-        inline for (@typeInfo(@TypeOf(query_arguments)).@"struct".fields) |arg| {
+        inline for (@typeInfo(@TypeOf(query_arguments)).Struct.fields) |arg| {
             const val = @field(request, arg.name);
             if (try addQueryArg(arg.type, prefix, @field(query_arguments, arg.name), val, writer))
                 prefix = "&";
@@ -1184,14 +1034,14 @@ fn buildQuery(allocator: std.mem.Allocator, request: anytype) ![]const u8 {

 fn addQueryArg(comptime ValueType: type, prefix: []const u8, key: []const u8, value: anytype, writer: anytype) !bool {
     switch (@typeInfo(@TypeOf(value))) {
-        .optional => {
+        .Optional => {
             if (value) |v|
                 return try addQueryArg(ValueType, prefix, key, v, writer);
             return false;
         },
         // if this is a pointer, we want to make sure it is more than just a string
-        .pointer => |ptr| {
-            if (ptr.child == u8 or ptr.size != .slice) {
+        .Pointer => |ptr| {
+            if (ptr.child == u8 or ptr.size != .Slice) {
                 // This is just a string
                 return try addBasicQueryArg(prefix, key, value, writer);
             }
@@ -1202,7 +1052,7 @@ fn addQueryArg(comptime ValueType: type, prefix: []const u8, key: []const u8, value: anytype, writer: anytype) !bool {
             }
             return std.mem.eql(u8, "&", p);
         },
-        .array => |arr| {
+        .Array => |arr| {
             if (arr.child == u8)
                 return try addBasicQueryArg(prefix, key, value, writer);
             var p = prefix;
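
Taken together: buildQuery walks the http_query declaration, addQueryArg drops null optionals, and each emitted pair flips the prefix from "?" to "&". A hedged sketch of the observable behavior (field names follow the lambda tests later in this file):

    const request = svs.lambda.list_functions.Request{
        .max_items = 1, // mapped via http_query to "MaxItems"
        .marker = null, // null optionals contribute nothing
    };
    const query = try buildQuery(allocator, request);
    defer allocator.free(query);
    // Expected shape: "?MaxItems=1"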
@@ -1322,8 +1172,8 @@ fn reportTraffic(
 fn typeForField(comptime T: type, comptime field_name: []const u8) !type {
     const ti = @typeInfo(T);
     switch (ti) {
-        .@"struct" => {
-            inline for (ti.@"struct".fields) |field| {
+        .Struct => {
+            inline for (ti.Struct.fields) |field| {
                 if (std.mem.eql(u8, field.name, field_name))
                     return field.type;
             }
@@ -1337,7 +1187,7 @@ test "custom serialization for map objects" {
     const allocator = std.testing.allocator;
     var buffer = std.ArrayList(u8).init(allocator);
     defer buffer.deinit();
-    var tags = try std.ArrayList(@typeInfo(try typeForField(services.lambda.tag_resource.Request, "tags")).pointer.child).initCapacity(allocator, 2);
+    var tags = try std.ArrayList(@typeInfo(try typeForField(services.lambda.tag_resource.Request, "tags")).Pointer.child).initCapacity(allocator, 2);
     defer tags.deinit();
     tags.appendAssumeCapacity(.{ .key = "Foo", .value = "Bar" });
     tags.appendAssumeCapacity(.{ .key = "Baz", .value = "Qux" });
@@ -1354,58 +1204,6 @@ test "custom serialization for map objects" {
     , buffer.items);
 }

-test "proper serialization for kms" {
-    // Github issue #8
-    // https://github.com/elerch/aws-sdk-for-zig/issues/8
-    const allocator = std.testing.allocator;
-    var buffer = std.ArrayList(u8).init(allocator);
-    defer buffer.deinit();
-    const req = services.kms.encrypt.Request{
-        .encryption_algorithm = "SYMMETRIC_DEFAULT",
-        // Since encryption_context is not null, we expect "{}" to be the value
-        // here, not "[]", because this is our special AWS map pattern
-        .encryption_context = &.{},
-        .key_id = "42",
-        .plaintext = "foo",
-        .dry_run = false,
-        .grant_tokens = &[_][]const u8{},
-    };
-    try json.stringify(req, .{ .whitespace = .{} }, buffer.writer());
-    try std.testing.expectEqualStrings(
-        \\{
-        \\ "KeyId": "42",
-        \\ "Plaintext": "foo",
-        \\ "EncryptionContext": {},
-        \\ "GrantTokens": [],
-        \\ "EncryptionAlgorithm": "SYMMETRIC_DEFAULT",
-        \\ "DryRun": false
-        \\}
-    , buffer.items);
-
-    var buffer_null = std.ArrayList(u8).init(allocator);
-    defer buffer_null.deinit();
-    const req_null = services.kms.encrypt.Request{
-        .encryption_algorithm = "SYMMETRIC_DEFAULT",
-        // Since encryption_context here *IS* null, we expect simply "null" to be the value
-        .encryption_context = null,
-        .key_id = "42",
-        .plaintext = "foo",
-        .dry_run = false,
-        .grant_tokens = &[_][]const u8{},
-    };
-    try json.stringify(req_null, .{ .whitespace = .{} }, buffer_null.writer());
-    try std.testing.expectEqualStrings(
-        \\{
-        \\ "KeyId": "42",
-        \\ "Plaintext": "foo",
-        \\ "EncryptionContext": null,
-        \\ "GrantTokens": [],
-        \\ "EncryptionAlgorithm": "SYMMETRIC_DEFAULT",
-        \\ "DryRun": false
-        \\}
-    , buffer_null.items);
-}
-
 test "REST Json v1 builds proper queries" {
     const allocator = std.testing.allocator;
     const svs = Services(.{.lambda}){};
@@ -1442,27 +1240,23 @@ test "REST Json v1 serializes lists in queries" {
 }
 test "REST Json v1 buildpath substitutes" {
     const allocator = std.testing.allocator;
-    var al = std.ArrayList([]const u8).init(allocator);
-    defer al.deinit();
     const svs = Services(.{.lambda}){};
     const request = svs.lambda.list_functions.Request{
         .max_items = 1,
     };
     const input_path = "https://myhost/{MaxItems}/";
-    const output_path = try buildPath(allocator, input_path, @TypeOf(request), request, true, &al);
+    const output_path = try buildPath(allocator, input_path, @TypeOf(request), request, true);
     defer allocator.free(output_path);
     try std.testing.expectEqualStrings("https://myhost/1/", output_path);
 }
 test "REST Json v1 buildpath handles restricted characters" {
     const allocator = std.testing.allocator;
-    var al = std.ArrayList([]const u8).init(allocator);
-    defer al.deinit();
     const svs = Services(.{.lambda}){};
     const request = svs.lambda.list_functions.Request{
         .marker = ":",
     };
     const input_path = "https://myhost/{Marker}/";
-    const output_path = try buildPath(allocator, input_path, @TypeOf(request), request, true, &al);
+    const output_path = try buildPath(allocator, input_path, @TypeOf(request), request, true);
     defer allocator.free(output_path);
     try std.testing.expectEqualStrings("https://myhost/%3A/", output_path);
 }
@@ -1581,54 +1375,11 @@ const TestOptions = struct {
     request_target: []const u8 = undefined,
     request_headers: []std.http.Header = undefined,
     test_server_runtime_uri: ?[]u8 = null,
-    server_ready: std.Thread.Semaphore = .{},
+    server_ready: bool = false,
     requests_processed: usize = 0,

     const Self = @This();

-    /// Builtin hashmap for strings as keys.
-    /// Key memory is managed by the caller. Keys and values
-    /// will not automatically be freed.
-    pub fn StringCaseInsensitiveHashMap(comptime V: type) type {
-        return std.HashMap([]const u8, V, StringInsensitiveContext, std.hash_map.default_max_load_percentage);
-    }
-
-    pub const StringInsensitiveContext = struct {
-        pub fn hash(self: @This(), s: []const u8) u64 {
-            _ = self;
-            return hashString(s);
-        }
-        pub fn eql(self: @This(), a: []const u8, b: []const u8) bool {
-            _ = self;
-            return eqlString(a, b);
-        }
-    };
-
-    pub fn eqlString(a: []const u8, b: []const u8) bool {
-        return std.ascii.eqlIgnoreCase(a, b);
-    }
-
-    pub fn hashString(s: []const u8) u64 {
-        var buf: [1024]u8 = undefined;
-        if (s.len > buf.len) unreachable; // tolower has a debug assert, but we want non-debug check too
-        const lower_s = std.ascii.lowerString(buf[0..], s);
-        return std.hash.Wyhash.hash(0, lower_s);
-    }
-
-    fn expectNoDuplicateHeaders(self: *Self) !void {
-        // As header keys are
-        var hm = StringCaseInsensitiveHashMap(void).init(self.allocator);
-        try hm.ensureTotalCapacity(@intCast(self.request_headers.len));
-        defer hm.deinit();
-        for (self.request_headers) |h| {
-            if (hm.getKey(h.name)) |_| {
-                log.err("Duplicate key detected. Key name: {s}", .{h.name});
-                return error.duplicateKeyDetected;
-            }
-            try hm.put(h.name, {});
-        }
-    }
-
     fn expectHeader(self: *Self, name: []const u8, value: []const u8) !void {
         for (self.request_headers) |h|
             if (std.ascii.eqlIgnoreCase(name, h.name) and
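
The removed map above exists so duplicate-header detection treats "ETag" and "etag" as the same key: hash the lowercased bytes, compare case-insensitively. A self-contained sketch of the same pattern:

    const std = @import("std");

    const StringInsensitiveContext = struct {
        pub fn hash(_: @This(), s: []const u8) u64 {
            var buf: [1024]u8 = undefined;
            const lower = std.ascii.lowerString(buf[0..], s);
            return std.hash.Wyhash.hash(0, lower);
        }
        pub fn eql(_: @This(), a: []const u8, b: []const u8) bool {
            return std.ascii.eqlIgnoreCase(a, b);
        }
    };

    fn StringCaseInsensitiveHashMap(comptime V: type) type {
        return std.HashMap([]const u8, V, StringInsensitiveContext, std.hash_map.default_max_load_percentage);
    }

    test "case-insensitive keys collide" {
        var hm = StringCaseInsensitiveHashMap(void).init(std.testing.allocator);
        defer hm.deinit();
        try hm.put("Content-Type", {});
        try std.testing.expect(hm.getKey("content-type") != null);
    }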
@@ -1636,18 +1387,10 @@ const TestOptions = struct {
                 return error.HeaderOrValueNotFound;
     }
     fn waitForReady(self: *Self) !void {
-        // Set 10s timeout...this is way longer than necessary
-        log.debug("waiting for ready", .{});
-        try self.server_ready.timedWait(1000 * std.time.ns_per_ms);
-        // var deadline = std.Thread.Futex.Deadline.init(1000 * std.time.ns_per_ms);
-        // if (self.futex_word.load(.acquire) != 0) return;
-        // log.debug("futex zero", .{});
-        // // note that this seems backwards from the documentation...
-        // deadline.wait(self.futex_word, 1) catch {
-        //     log.err("futex value {d}", .{self.futex_word.load(.acquire)});
-        //     return error.TestServerTimeoutWaitingForReady;
-        // };
-        log.debug("the wait is over!", .{});
+        // While this doesn't return an error, we can use !void
+        // to prepare for addition of timeout
+        while (!self.server_ready)
+            std.time.sleep(100);
     }
 };

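
The waitForReady change swaps a semaphore handshake (master) for a polled bool (this branch). The semaphore form blocks without spinning and fails loudly on timeout; reduced to its essentials:

    const std = @import("std");

    var server_ready: std.Thread.Semaphore = .{};

    fn serverThread() void {
        // ... socket is bound and listening ...
        server_ready.post(); // wakes a thread blocked in waitForReady()
    }

    fn waitForReady() !void {
        // error.Timeout if the server never posts within one second.
        try server_ready.timedWait(1000 * std.time.ns_per_ms);
    }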
@@ -1675,9 +1418,8 @@ fn threadMain(options: *TestOptions) !void {
     // var aa = arena.allocator();
     // We're in control of all requests/responses, so this flag will tell us
     // when it's time to shut down
-    if (options.server_remaining_requests == 0)
-        options.server_ready.post(); // This will cause the wait for server to return
-    while (options.server_remaining_requests > 0) : (options.server_remaining_requests -= 1) {
+    while (options.server_remaining_requests > 0) {
+        options.server_remaining_requests -= 1;
         processRequest(options, &http_server) catch |e| {
             log.err("Unexpected error processing request: {any}", .{e});
             if (@errorReturnTrace()) |trace| {
@@ -1688,13 +1430,12 @@ fn threadMain(options: *TestOptions) !void {
 }

 fn processRequest(options: *TestOptions, net_server: *std.net.Server) !void {
+    options.server_ready = true;
+    errdefer options.server_ready = false;
     log.debug(
         "tid {d} (server): server waiting to accept. requests remaining: {d}",
-        .{ std.Thread.getCurrentId(), options.server_remaining_requests },
+        .{ std.Thread.getCurrentId(), options.server_remaining_requests + 1 },
     );
-    // options.futex_word.store(1, .release);
-    // errdefer options.futex_word.store(0, .release);
-    options.server_ready.post();
     var connection = try net_server.accept();
     defer connection.stream.close();
     var read_buffer: [1024 * 16]u8 = undefined;
@@ -1713,6 +1454,8 @@ fn processRequest(options: *TestOptions, net_server: *std.net.Server) !void {
 }

 fn serveRequest(options: *TestOptions, request: *std.http.Server.Request) !void {
+    options.server_ready = false;
+
     options.requests_processed += 1;
     options.request_body = try (try request.reader()).readAllAlloc(options.allocator, std.math.maxInt(usize));
     options.request_method = request.head.method;
@@ -1754,7 +1497,7 @@ const TestSetup = struct {
     request_options: TestOptions,
     server_thread: std.Thread = undefined,
     creds: aws_auth.Credentials = undefined,
-    client: Client = undefined,
+    client: *Client = undefined,
     started: bool = false,

     const Self = @This();
@@ -1782,8 +1525,7 @@ const TestSetup = struct {
         // Not sure why we're getting sprayed here, but we have an arena allocator, and this
         // is testing, so yolo
         awshttp.endpoint_override = self.request_options.test_server_runtime_uri;
-        if (awshttp.endpoint_override == null) return error.TestSetupStartFailure;
-        std.log.debug("endpoint override set to {?s}", .{awshttp.endpoint_override});
+        log.debug("endpoint override set to {?s}", .{awshttp.endpoint_override});
         self.creds = aws_auth.Credentials.init(
             self.allocator,
             try self.allocator.dupe(u8, "ACCESS"),
@@ -1791,8 +1533,8 @@ const TestSetup = struct {
             null,
         );
         aws_creds.static_credentials = self.creds;
-        const client = Client.init(self.allocator, .{});
-        self.client = client;
+        var client = Client.init(self.allocator, .{});
+        self.client = &client;
         return .{
             .region = "us-west-2",
             .client = client,
@@ -1801,27 +1543,6 @@ const TestSetup = struct {
     }

     fn stop(self: *Self) void {
-        if (self.request_options.server_remaining_requests > 0)
-            if (test_error_log_enabled)
-                std.log.err(
-                    "Test server has {d} request(s) remaining to issue! Draining",
-                    .{self.request_options.server_remaining_requests},
-                )
-            else
-                std.log.info(
-                    "Test server has {d} request(s) remaining to issue! Draining",
-                    .{self.request_options.server_remaining_requests},
-                );
-
-        var rr = self.request_options.server_remaining_requests;
-        while (rr > 0) : (rr -= 1) {
-            std.log.debug("rr: {d}", .{self.request_options.server_remaining_requests});
-            // We need to drain all remaining requests, otherwise the server
-            // will hang indefinitely
-            var client = std.http.Client{ .allocator = self.allocator };
-            defer client.deinit();
-            _ = client.fetch(.{ .location = .{ .url = self.request_options.test_server_runtime_uri.? } }) catch unreachable;
-        }
         self.server_thread.join();
     }

@@ -1872,58 +1593,6 @@ test "query_no_input: sts getCallerIdentity comptime" {
     try std.testing.expectEqualStrings("123456789012", call.response.account.?);
     try std.testing.expectEqualStrings("8f0d54da-1230-40f7-b4ac-95015c4b84cd", call.response_metadata.request_id);
 }
-test "query_with_input: iam getRole runtime" {
-    // sqs switched from query to json in aws sdk for go v2 commit f5a08768ef820ff5efd62a49ba50c61c9ca5dbcb
-    const allocator = std.testing.allocator;
-    var test_harness = TestSetup.init(.{
-        .allocator = allocator,
-        .server_response =
-        \\<GetRoleResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
-        \\<GetRoleResult>
-        \\ <Role>
-        \\ <Path>/application_abc/component_xyz/</Path>
-        \\ <Arn>arn:aws:iam::123456789012:role/application_abc/component_xyz/S3Access</Arn>
-        \\ <RoleName>S3Access</RoleName>
-        \\ <AssumeRolePolicyDocument>
-        \\ {"Version":"2012-10-17","Statement":[{"Effect":"Allow",
-        \\ "Principal":{"Service":["ec2.amazonaws.com"]},"Action":["sts:AssumeRole"]}]}
-        \\ </AssumeRolePolicyDocument>
-        \\ <CreateDate>2012-05-08T23:34:01Z</CreateDate>
-        \\ <RoleId>AROADBQP57FF2AEXAMPLE</RoleId>
-        \\ <RoleLastUsed>
-        \\ <LastUsedDate>2019-11-20T17:09:20Z</LastUsedDate>
-        \\ <Region>us-east-1</Region>
-        \\ </RoleLastUsed>
-        \\ </Role>
-        \\</GetRoleResult>
-        \\<ResponseMetadata>
-        \\ <RequestId>df37e965-9967-11e1-a4c3-270EXAMPLE04</RequestId>
-        \\</ResponseMetadata>
-        \\</GetRoleResponse>
-        ,
-        .server_response_headers = &.{
-            .{ .name = "Content-Type", .value = "text/xml" },
-            .{ .name = "x-amzn-RequestId", .value = "df37e965-9967-11e1-a4c3-270EXAMPLE04" },
-        },
-    });
-    defer test_harness.deinit();
-    const options = try test_harness.start();
-    const iam = (Services(.{.iam}){}).iam;
-    const call = try test_harness.client.call(iam.get_role.Request{
-        .role_name = "S3Access",
-    }, options);
-    defer call.deinit();
-    test_harness.stop();
-    // Request expectations
-    try std.testing.expectEqual(std.http.Method.POST, test_harness.request_options.request_method);
-    try std.testing.expectEqualStrings("/", test_harness.request_options.request_target);
-    try std.testing.expectEqualStrings(
-        \\Action=GetRole&Version=2010-05-08&RoleName=S3Access
-    , test_harness.request_options.request_body);
-    // Response expectations
-    try std.testing.expectEqualStrings("arn:aws:iam::123456789012:role/application_abc/component_xyz/S3Access", call.response.role.arn);
-    try std.testing.expectEqualStrings("df37e965-9967-11e1-a4c3-270EXAMPLE04", call.response_metadata.request_id);
-}
 test "query_with_input: sts getAccessKeyInfo runtime" {
     // sqs switched from query to json in aws sdk for go v2 commit f5a08768ef820ff5efd62a49ba50c61c9ca5dbcb
     const allocator = std.testing.allocator;
@@ -2181,7 +1850,7 @@ test "rest_json_1_work_with_lambda: lambda tagResource (only), to excercise zig
     defer test_harness.deinit();
     const options = try test_harness.start();
     const lambda = (Services(.{.lambda}){}).lambda;
-    var tags = try std.ArrayList(@typeInfo(try typeForField(lambda.tag_resource.Request, "tags")).pointer.child).initCapacity(allocator, 1);
+    var tags = try std.ArrayList(@typeInfo(try typeForField(lambda.tag_resource.Request, "tags")).Pointer.child).initCapacity(allocator, 1);
     defer tags.deinit();
     tags.appendAssumeCapacity(.{ .key = "Foo", .value = "Bar" });
     const req = services.lambda.tag_resource.Request{ .resource = "arn:aws:lambda:us-west-2:550620852718:function:awsome-lambda-LambdaStackawsomeLambda", .tags = tags.items };
@@ -2192,6 +1861,7 @@ test "rest_json_1_work_with_lambda: lambda tagResource (only), to excercise zig
     try std.testing.expectEqual(std.http.Method.POST, test_harness.request_options.request_method);
     try std.testing.expectEqualStrings(
         \\{
+        \\ "Resource": "arn:aws:lambda:us-west-2:550620852718:function:awsome-lambda-LambdaStackawsomeLambda",
         \\ "Tags": {
         \\ "Foo": "Bar"
         \\ }
@@ -2202,45 +1872,6 @@ test "rest_json_1_work_with_lambda: lambda tagResource (only), to excercise zig
     // Response expectations
     try std.testing.expectEqualStrings("a521e152-6e32-4e67-9fb3-abc94e34551b", call.response_metadata.request_id);
 }
-test "rest_json_1_url_parameters_not_in_request: lambda update_function_code" {
-    const allocator = std.testing.allocator;
-    var test_harness = TestSetup.init(.{
-        .allocator = allocator,
-        .server_response = "{\"CodeSize\": 42}",
-        .server_response_status = .ok,
-        .server_response_headers = &.{
-            .{ .name = "Content-Type", .value = "application/json" },
-            .{ .name = "x-amzn-RequestId", .value = "a521e152-6e32-4e67-9fb3-abc94e34551b" },
-        },
-    });
-    defer test_harness.deinit();
-    const options = try test_harness.start();
-    const lambda = (Services(.{.lambda}){}).lambda;
-    const architectures = [_][]const u8{"x86_64"};
-    const arches: [][]const u8 = @constCast(architectures[0..]);
-    const req = services.lambda.update_function_code.Request{
-        .function_name = "functionname",
-        .architectures = arches,
-        .zip_file = "zipfile",
-    };
-    const call = try Request(lambda.update_function_code).call(req, options);
-    defer call.deinit();
-    test_harness.stop();
-    // Request expectations
-    try std.testing.expectEqual(std.http.Method.PUT, test_harness.request_options.request_method);
-    try std.testing.expectEqualStrings(
-        \\{
-        \\ "ZipFile": "zipfile",
-        \\ "Architectures": [
-        \\ "x86_64"
-        \\ ]
-        \\}
-    , test_harness.request_options.request_body);
-    // Due to 17015, we see %253A instead of %3A
-    try std.testing.expectEqualStrings("/2015-03-31/functions/functionname/code", test_harness.request_options.request_target);
-    // Response expectations
-    try std.testing.expectEqualStrings("a521e152-6e32-4e67-9fb3-abc94e34551b", call.response_metadata.request_id);
-}
 test "ec2_query_no_input: EC2 describe regions" {
     const allocator = std.testing.allocator;
     var test_harness = TestSetup.init(.{
@@ -2273,6 +1904,7 @@ test "ec2_query_no_input: EC2 describe regions" {
 // riscv64-linux also seems to have another problem with LLVM basically infinitely
 // doing something. My guess is the @embedFile is freaking out LLVM
 test "ec2_query_with_input: EC2 describe instances" {
+    if (builtin.cpu.arch == .x86_64 and builtin.os.tag == .windows) return error.SkipZigTest;
     if (builtin.cpu.arch == .riscv64 and builtin.os.tag == .linux) return error.SkipZigTest;
     const allocator = std.testing.allocator;
     var test_harness = TestSetup.init(.{
@@ -2303,44 +1935,6 @@ test "ec2_query_with_input: EC2 describe instances" {
     try std.testing.expectEqualStrings("i-0212d7d1f62b96676", call.response.reservations.?[1].instances.?[0].instance_id.?);
     try std.testing.expectEqualStrings("123456789012:found-me", call.response.reservations.?[1].instances.?[0].tags.?[0].value.?);
 }
-test "rest_xml_with_input_s3: S3 create bucket" {
-    const allocator = std.testing.allocator;
-    var test_harness = TestSetup.init(.{
-        .allocator = allocator,
-        .server_response =
-        \\
-        ,
-        .server_response_headers = &.{ // I don't see content type coming back in actual S3 requests
-            .{ .name = "x-amzn-RequestId", .value = "9PEYBAZ9J7TPRX43" },
-            .{ .name = "x-amz-id-2", .value = "u7lzgW0tIyRP15vSUsVOXxJ37OfVCO8lZmLIVuqeq5EE4tNp9qebb5fy+/kendlZpR4YQE+y4Xg=" },
-        },
-    });
-    defer test_harness.deinit();
-    errdefer test_harness.creds.deinit();
-    const options = try test_harness.start();
-    const s3 = (Services(.{.s3}){}).s3;
-    const call = try test_harness.client.call(s3.create_bucket.Request{
-        .bucket = "",
-        .create_bucket_configuration = .{
-            .location_constraint = "us-west-2",
-        },
-    }, options);
-    defer call.deinit();
-    test_harness.stop();
-    // Request expectations
-    try std.testing.expectEqual(std.http.Method.PUT, test_harness.request_options.request_method);
-    try std.testing.expectEqualStrings("/", test_harness.request_options.request_target);
-    try std.testing.expectEqualStrings(
-        \\<CreateBucketConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
-        \\ <LocationConstraint>us-west-2</LocationConstraint>
-        \\</CreateBucketConfiguration>
-    , test_harness.request_options.request_body);
-    // Response expectations
-    try std.testing.expectEqualStrings(
-        "9PEYBAZ9J7TPRX43, host_id: u7lzgW0tIyRP15vSUsVOXxJ37OfVCO8lZmLIVuqeq5EE4tNp9qebb5fy+/kendlZpR4YQE+y4Xg=",
-        call.response_metadata.request_id,
-    );
-}
 test "rest_xml_no_input: S3 list buckets" {
     const allocator = std.testing.allocator;
     var test_harness = TestSetup.init(.{
@@ -2399,9 +1993,6 @@ test "rest_xml_anything_but_s3: CloudFront list key groups" {
     try std.testing.expectEqual(@as(i64, 100), call.response.key_group_list.?.max_items);
 }
 test "rest_xml_with_input: S3 put object" {
-    // const old = std.testing.log_level;
-    // defer std.testing.log_level = old;
-    // std.testing.log_level = .debug;
     const allocator = std.testing.allocator;
     var test_harness = TestSetup.init(.{
         .allocator = allocator,
@@ -2428,14 +2019,13 @@ test "rest_xml_with_input: S3 put object" {
         .body = "bar",
         .storage_class = "STANDARD",
     }, s3opts);
-    defer result.deinit();
     for (test_harness.request_options.request_headers) |header| {
         std.log.info("Request header: {s}: {s}", .{ header.name, header.value });
     }
-    try test_harness.request_options.expectNoDuplicateHeaders();
     std.log.info("PutObject Request id: {s}", .{result.response_metadata.request_id});
     std.log.info("PutObject etag: {s}", .{result.response.e_tag.?});
     //mysfitszj3t6webstack-hostingbucketa91a61fe-1ep3ezkgwpxr0.s3.us-west-2.amazonaws.com
+    defer result.deinit();
     test_harness.stop();
     // Request expectations
     try std.testing.expectEqual(std.http.Method.PUT, test_harness.request_options.request_method);
@ -2449,89 +2039,3 @@ test "rest_xml_with_input: S3 put object" {
|
||||||
try std.testing.expectEqualStrings("AES256", result.response.server_side_encryption.?);
|
try std.testing.expectEqualStrings("AES256", result.response.server_side_encryption.?);
|
||||||
try std.testing.expectEqualStrings("37b51d194a7513e45b56f6524f2d51f2", result.response.e_tag.?);
|
try std.testing.expectEqualStrings("37b51d194a7513e45b56f6524f2d51f2", result.response.e_tag.?);
|
||||||
}
|
}
|
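The master side adds a duplicate-header assertion after dumping the request headers. A minimal sketch of what such a check might look like — the helper name comes from the hunk above; the body is illustrative, not the project's actual implementation:

const std = @import("std");

// Hypothetical stand-in for the harness helper above: header names are
// compared case-insensitively, since HTTP header names are case-insensitive.
fn expectNoDuplicateHeaders(headers: []const std.http.Header) !void {
    for (headers, 0..) |h, i| {
        for (headers[i + 1 ..]) |other| {
            if (std.ascii.eqlIgnoreCase(h.name, other.name))
                return error.DuplicateHeader;
        }
    }
}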
-test "raw ECR timestamps" {
-    // This is a way to test the json parsing. Ultimately the more robust tests
-    // should be preferred, but in this case we were tracking down an issue
-    // for which the root cause was the incorrect type being passed to the parse
-    // routine
-    const allocator = std.testing.allocator;
-    const ecr = (Services(.{.ecr}){}).ecr;
-    const options = json.ParseOptions{
-        .allocator = allocator,
-        .allow_camel_case_conversion = true, // new option
-        .allow_snake_case_conversion = true, // new option
-        .allow_unknown_fields = true, // new option. Cannot yet handle non-struct fields though
-        .allow_missing_fields = false, // new option. Cannot yet handle non-struct fields though
-    };
-    var stream = json.TokenStream.init(
-        \\{"authorizationData":[{"authorizationToken":"***","expiresAt":1.7385984915E9,"proxyEndpoint":"https://146325435496.dkr.ecr.us-west-2.amazonaws.com"}]}
-    );
-    const ptr = try json.parse(ecr.get_authorization_token.Response, &stream, options);
-    defer json.parseFree(ecr.get_authorization_token.Response, ptr, options);
-}
-test "json_1_1: ECR timestamps" {
-    // See: https://github.com/elerch/aws-sdk-for-zig/issues/5
-    // const old = std.testing.log_level;
-    // defer std.testing.log_level = old;
-    // std.testing.log_level = .debug;
-    const allocator = std.testing.allocator;
-    var test_harness = TestSetup.init(.{
-        .allocator = allocator,
-        .server_response =
-        \\{"authorizationData":[{"authorizationToken":"***","expiresAt":1.7385984915E9,"proxyEndpoint":"https://146325435496.dkr.ecr.us-west-2.amazonaws.com"}]}
-        // \\{"authorizationData":[{"authorizationToken":"***","expiresAt":1.738598491557E9,"proxyEndpoint":"https://146325435496.dkr.ecr.us-west-2.amazonaws.com"}]}
-        ,
-        .server_response_headers = &.{
-            .{ .name = "Content-Type", .value = "application/json" },
-            .{ .name = "x-amzn-RequestId", .value = "QBI72OUIN8U9M9AG6PCSADJL4JVV4KQNSO5AEMVJF66Q9ASUAAJG" },
-        },
-    });
-    defer test_harness.deinit();
-    const options = try test_harness.start();
-    const ecr = (Services(.{.ecr}){}).ecr;
-    std.log.debug("Typeof response {}", .{@TypeOf(ecr.get_authorization_token.Response{})});
-    const call = try test_harness.client.call(ecr.get_authorization_token.Request{}, options);
-    defer call.deinit();
-    test_harness.stop();
-    // Request expectations
-    try std.testing.expectEqual(std.http.Method.POST, test_harness.request_options.request_method);
-    try std.testing.expectEqualStrings("/", test_harness.request_options.request_target);
-    try test_harness.request_options.expectHeader("X-Amz-Target", "AmazonEC2ContainerRegistry_V20150921.GetAuthorizationToken");
-    // Response expectations
-    try std.testing.expectEqualStrings("QBI72OUIN8U9M9AG6PCSADJL4JVV4KQNSO5AEMVJF66Q9ASUAAJG", call.response_metadata.request_id);
-    try std.testing.expectEqual(@as(usize, 1), call.response.authorization_data.?.len);
-    try std.testing.expectEqualStrings("***", call.response.authorization_data.?[0].authorization_token.?);
-    try std.testing.expectEqualStrings("https://146325435496.dkr.ecr.us-west-2.amazonaws.com", call.response.authorization_data.?[0].proxy_endpoint.?);
-    // try std.testing.expectEqual(@as(i64, 1.73859841557E9), call.response.authorization_data.?[0].expires_at.?);
-    try std.testing.expectEqual(@as(f128, 1.7385984915E9), call.response.authorization_data.?[0].expires_at.?);
-}
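The removed ECR tests pin `expiresAt` to an `f128` because the service returns the expiry as epoch seconds in scientific notation (`1.7385984915E9`) rather than as an integer. A self-contained sketch of converting such a float into integer timestamps — values taken from the test payload; this is not the SDK's date handling:

const std = @import("std");

test "ECR-style float epoch" {
    const expires_at: f64 = 1.7385984915e9; // seconds, as ECR returns it
    const secs: i64 = @intFromFloat(expires_at); // truncate fractional seconds
    const millis: i64 = @intFromFloat(expires_at * std.time.ms_per_s);
    try std.testing.expectEqual(@as(i64, 1_738_598_491), secs);
    try std.testing.expectEqual(@as(i64, 1_738_598_491_500), millis);
}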
-var test_error_log_enabled = true;
-test "test server timeout works" {
-    // const old = std.testing.log_level;
-    // defer std.testing.log_level = old;
-    // std.testing.log_level = .debug;
-    // defer std.testing.log_level = old;
-    // std.testing.log_level = .debug;
-    test_error_log_enabled = false;
-    defer test_error_log_enabled = true;
-    std.log.debug("test start", .{});
-    const allocator = std.testing.allocator;
-    var test_harness = TestSetup.init(.{
-        .allocator = allocator,
-        .server_response =
-        \\{}
-        ,
-        .server_response_headers = &.{
-            .{ .name = "Content-Type", .value = "application/json" },
-            .{ .name = "x-amzn-RequestId", .value = "QBI72OUIN8U9M9AG6PCSADJL4JVV4KQNSO5AEMVJF66Q9ASUAAJG" },
-        },
-    });
-    defer test_harness.deinit();
-    defer test_harness.creds.deinit(); // Usually this gets done during the call,
-    // but we're purposely not making a call
-    // here, so we have to deinit() manually
-    _ = try test_harness.start();
-    std.log.debug("harness started", .{});
-    test_harness.stop();
-    std.log.debug("test complete", .{});
-}
@ -11,56 +11,7 @@ const std = @import("std");
 const builtin = @import("builtin");
 const auth = @import("aws_authentication.zig");

-const scoped_log = std.log.scoped(.aws_credentials);
+const log = std.log.scoped(.aws_credentials);
-/// Specifies logging level. This should not be touched unless the normal
-/// zig logging capabilities are inaccessible (e.g. during a build)
-pub var log_level: std.log.Level = .debug;
-
-/// Turn off logging completely
-pub var logs_off: bool = false;
-const log = struct {
-    /// Log an error message. This log level is intended to be used
-    /// when something has gone wrong. This might be recoverable or might
-    /// be followed by the program exiting.
-    pub fn err(
-        comptime format: []const u8,
-        args: anytype,
-    ) void {
-        if (!logs_off and @intFromEnum(std.log.Level.err) <= @intFromEnum(log_level))
-            scoped_log.err(format, args);
-    }
-
-    /// Log a warning message. This log level is intended to be used if
-    /// it is uncertain whether something has gone wrong or not, but the
-    /// circumstances would be worth investigating.
-    pub fn warn(
-        comptime format: []const u8,
-        args: anytype,
-    ) void {
-        if (!logs_off and @intFromEnum(std.log.Level.warn) <= @intFromEnum(log_level))
-            scoped_log.warn(format, args);
-    }
-
-    /// Log an info message. This log level is intended to be used for
-    /// general messages about the state of the program.
-    pub fn info(
-        comptime format: []const u8,
-        args: anytype,
-    ) void {
-        if (!logs_off and @intFromEnum(std.log.Level.info) <= @intFromEnum(log_level))
-            scoped_log.info(format, args);
-    }
-
-    /// Log a debug message. This log level is intended to be used for
-    /// messages which are only useful for debugging.
-    pub fn debug(
-        comptime format: []const u8,
-        args: anytype,
-    ) void {
-        if (!logs_off and @intFromEnum(std.log.Level.debug) <= @intFromEnum(log_level))
-            scoped_log.debug(format, args);
-    }
-};
-
 pub const Profile = struct {
     /// Credential file. Defaults to AWS_SHARED_CREDENTIALS_FILE or ~/.aws/credentials
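The removed wrapper gives each module `log_level` and `logs_off` knobs that work even when the normal zig logging options are out of reach (e.g. inside a build script). Consumer-side usage would look roughly like this — the import path is an assumption for illustration:

const aws_credentials = @import("aws_credentials.zig"); // hypothetical import path

pub fn quietSdkLogging() void {
    // Emit only warn and above from this module...
    aws_credentials.log_level = .warn;
    // ...or silence it entirely:
    aws_credentials.logs_off = true;
}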
@ -17,57 +17,7 @@ const CN_NORTHWEST_1_HASH = std.hash_map.hashString("cn-northwest-1");
 const US_ISO_EAST_1_HASH = std.hash_map.hashString("us-iso-east-1");
 const US_ISOB_EAST_1_HASH = std.hash_map.hashString("us-isob-east-1");

-const scoped_log = std.log.scoped(.awshttp);
+const log = std.log.scoped(.awshttp);
-
-/// Specifies logging level. This should not be touched unless the normal
-/// zig logging capabilities are inaccessible (e.g. during a build)
-pub var log_level: std.log.Level = .debug;
-
-/// Turn off logging completely
-pub var logs_off: bool = false;
-const log = struct {
-    /// Log an error message. This log level is intended to be used
-    /// when something has gone wrong. This might be recoverable or might
-    /// be followed by the program exiting.
-    pub fn err(
-        comptime format: []const u8,
-        args: anytype,
-    ) void {
-        if (!logs_off and @intFromEnum(std.log.Level.err) <= @intFromEnum(log_level))
-            scoped_log.err(format, args);
-    }
-
-    /// Log a warning message. This log level is intended to be used if
-    /// it is uncertain whether something has gone wrong or not, but the
-    /// circumstances would be worth investigating.
-    pub fn warn(
-        comptime format: []const u8,
-        args: anytype,
-    ) void {
-        if (!logs_off and @intFromEnum(std.log.Level.warn) <= @intFromEnum(log_level))
-            scoped_log.warn(format, args);
-    }
-
-    /// Log an info message. This log level is intended to be used for
-    /// general messages about the state of the program.
-    pub fn info(
-        comptime format: []const u8,
-        args: anytype,
-    ) void {
-        if (!logs_off and @intFromEnum(std.log.Level.info) <= @intFromEnum(log_level))
-            scoped_log.info(format, args);
-    }
-
-    /// Log a debug message. This log level is intended to be used for
-    /// messages which are only useful for debugging.
-    pub fn debug(
-        comptime format: []const u8,
-        args: anytype,
-    ) void {
-        if (!logs_off and @intFromEnum(std.log.Level.debug) <= @intFromEnum(log_level))
-            scoped_log.debug(format, args);
-    }
-};
-
 pub const AwsError = error{
     AddHeaderError,
@ -240,16 +190,6 @@ pub const AwsHttp = struct {
             .response_storage = .{ .dynamic = &resp_payload },
             .raw_uri = true,
             .location = .{ .url = url },
-            // we need full control over most headers. I wish libraries would do a
-            // better job of having default headers as an opt-in...
-            .headers = .{
-                .host = .omit,
-                .authorization = .omit,
-                .user_agent = .omit,
-                .connection = .default, // we can let the client manage this...it has no impact to us
-                .accept_encoding = .default, // accept encoding (gzip, deflate) *should* be ok
-                .content_type = .omit,
-            },
             .extra_headers = headers.items,
         });
         // TODO: Need to test for payloads > 2^14. I believe one of our tests does this, but not sure
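The removed block disables the default headers std.http.Client would otherwise inject, since a SigV4 signer must own Host, Authorization, and Content-Type itself. A minimal sketch of the same fetch call shape — the URL and header values are placeholders:

const std = @import("std");

pub fn fetchWithOwnHeaders(allocator: std.mem.Allocator) !void {
    var client = std.http.Client{ .allocator = allocator };
    defer client.deinit();
    var body = std.ArrayList(u8).init(allocator);
    defer body.deinit();
    _ = try client.fetch(.{
        .location = .{ .url = "https://example.com/" }, // placeholder
        .response_storage = .{ .dynamic = &body },
        // Suppress defaults that the signer must control; field values
        // mirror the hunk above.
        .headers = .{
            .host = .omit,
            .authorization = .omit,
            .user_agent = .omit,
            .content_type = .omit,
        },
        .extra_headers = &.{.{ .name = "x-amz-date", .value = "20230908T170252Z" }},
    });
}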
@ -301,7 +241,6 @@ pub const AwsHttp = struct {

 fn getRegion(service: []const u8, region: []const u8) []const u8 {
     if (std.mem.eql(u8, service, "cloudfront")) return "us-east-1";
-    if (std.mem.eql(u8, service, "iam")) return "us-east-1";
     return region;
 }
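On the master side IAM joins CloudFront as a global service pinned to us-east-1; zig-0.12.0 keeps only the CloudFront exception. A sketch of tests pinning that behavior, assuming it lives next to getRegion in the same file:

test "getRegion: global services pin to us-east-1" {
    try std.testing.expectEqualStrings("us-east-1", getRegion("cloudfront", "us-west-2"));
    try std.testing.expectEqualStrings("us-west-2", getRegion("s3", "us-west-2"));
    // master side only:
    // try std.testing.expectEqualStrings("us-east-1", getRegion("iam", "us-west-2"));
}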
@ -389,26 +328,6 @@ fn endpointException(
     dualstack: []const u8,
     domain: []const u8,
 ) !?EndPoint {
-    // Global endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#global-endpoints):
-    // ✓ Amazon CloudFront
-    //   AWS Global Accelerator
-    // ✓ AWS Identity and Access Management (IAM)
-    //   AWS Network Manager
-    //   AWS Organizations
-    //   Amazon Route 53
-    //   AWS Shield Advanced
-    //   AWS WAF Classic
-
-    if (std.mem.eql(u8, service, "iam")) {
-        return EndPoint{
-            .uri = try allocator.dupe(u8, "https://iam.amazonaws.com"),
-            .host = try allocator.dupe(u8, "iam.amazonaws.com"),
-            .scheme = "https",
-            .port = 443,
-            .allocator = allocator,
-            .path = try allocator.dupe(u8, request.path),
-        };
-    }
     if (std.mem.eql(u8, service, "cloudfront")) {
         return EndPoint{
             .uri = try allocator.dupe(u8, "https://cloudfront.amazonaws.com"),
@ -22,7 +22,7 @@ pub const Result = struct {
             self.allocator.free(h.value);
         }
         self.allocator.free(self.headers);
-        //log.debug("http result deinit complete", .{});
+        log.debug("http result deinit complete", .{});
         return;
     }
 };
@ -3,57 +3,8 @@ const base = @import("aws_http_base.zig");
 const auth = @import("aws_authentication.zig");
 const date = @import("date.zig");

-const scoped_log = std.log.scoped(.aws_signing);
+const log = std.log.scoped(.aws_signing);
-
-/// Specifies logging level. This should not be touched unless the normal
-/// zig logging capabilities are inaccessible (e.g. during a build)
-pub var log_level: std.log.Level = .debug;
-
-/// Turn off logging completely
-pub var logs_off: bool = false;
-const log = struct {
-    /// Log an error message. This log level is intended to be used
-    /// when something has gone wrong. This might be recoverable or might
-    /// be followed by the program exiting.
-    pub fn err(
-        comptime format: []const u8,
-        args: anytype,
-    ) void {
-        if (!logs_off and @intFromEnum(std.log.Level.err) <= @intFromEnum(log_level))
-            scoped_log.err(format, args);
-    }
-
-    /// Log a warning message. This log level is intended to be used if
-    /// it is uncertain whether something has gone wrong or not, but the
-    /// circumstances would be worth investigating.
-    pub fn warn(
-        comptime format: []const u8,
-        args: anytype,
-    ) void {
-        if (!logs_off and @intFromEnum(std.log.Level.warn) <= @intFromEnum(log_level))
-            scoped_log.warn(format, args);
-    }
-
-    /// Log an info message. This log level is intended to be used for
-    /// general messages about the state of the program.
-    pub fn info(
-        comptime format: []const u8,
-        args: anytype,
-    ) void {
-        if (!logs_off and @intFromEnum(std.log.Level.info) <= @intFromEnum(log_level))
-            scoped_log.info(format, args);
-    }
-
-    /// Log a debug message. This log level is intended to be used for
-    /// messages which are only useful for debugging.
-    pub fn debug(
-        comptime format: []const u8,
-        args: anytype,
-    ) void {
-        if (!logs_off and @intFromEnum(std.log.Level.debug) <= @intFromEnum(log_level))
-            scoped_log.debug(format, args);
-    }
-};
 // TODO: Remove this?! This is an aws_signing, so we should know a thing
 // or two about aws. So perhaps the right level of abstraction here
 // is to have our service signing idiosyncrasies dealt with in this
@ -441,7 +392,7 @@ fn verifyParsedAuthorization(
     // Credential=ACCESS/20230908/us-west-2/s3/aws4_request
     // SignedHeaders=accept;content-length;content-type;host;x-amz-content-sha256;x-amz-date;x-amz-storage-class
     // Signature=fcc43ce73a34c9bd1ddf17e8a435f46a859812822f944f9eeb2aabcd64b03523
-    var credential_iterator = std.mem.splitScalar(u8, credential, '/');
+    var credential_iterator = std.mem.split(u8, credential, "/");
     const access_key = credential_iterator.next().?;
     const credentials = credentials_fn(access_key) orelse return error.CredentialsNotFound;
     // TODO: https://stackoverflow.com/questions/29276609/aws-authentication-requires-a-valid-date-or-x-amz-date-header-curl
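verifyParsedAuthorization peels the access key off a SigV4 credential scope; master uses splitScalar (the newer std.mem API), zig-0.12.0 the older split. A self-contained sketch of the same parse, using the example scope from the comment above:

const std = @import("std");

test "split a SigV4 credential scope" {
    const credential = "ACCESS/20230908/us-west-2/s3/aws4_request";
    var it = std.mem.splitScalar(u8, credential, '/');
    try std.testing.expectEqualStrings("ACCESS", it.next().?); // access key
    try std.testing.expectEqualStrings("20230908", it.next().?); // date
    try std.testing.expectEqualStrings("us-west-2", it.next().?); // region
    try std.testing.expectEqualStrings("s3", it.next().?); // service
    try std.testing.expectEqualStrings("aws4_request", it.next().?);
}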
@ -662,12 +613,12 @@ fn canonicalUri(allocator: std.mem.Allocator, path: []const u8, double_encode: b
     }
     defer allocator.free(encoded_once);
     var encoded_twice = try encodeUri(allocator, encoded_once);
-    defer allocator.free(encoded_twice);
     log.debug("encoded path (2): {s}", .{encoded_twice});
     if (std.mem.lastIndexOf(u8, encoded_twice, "?")) |i| {
-        return try allocator.dupe(u8, encoded_twice[0..i]);
+        _ = allocator.resize(encoded_twice, i);
+        return encoded_twice[0..i];
     }
-    return try allocator.dupe(u8, encoded_twice);
+    return encoded_twice;
 }

 fn encodeParamPart(allocator: std.mem.Allocator, path: []const u8) ![]const u8 {
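The canonicalUri change swaps an in-place Allocator.resize (which is best-effort, and whose result the old code ignored) for dupe-and-free, so the caller always receives an exact-length allocation it can free normally. The pattern in isolation — a sketch, not the project's helper:

const std = @import("std");

/// Return an exact-length copy of buf[0..len] and release the original,
/// instead of relying on Allocator.resize, which may fail.
fn shrinkToOwned(allocator: std.mem.Allocator, buf: []u8, len: usize) ![]const u8 {
    defer allocator.free(buf);
    return try allocator.dupe(u8, buf[0..len]);
}

test "shrinkToOwned" {
    const big = try std.testing.allocator.alloc(u8, 16);
    @memcpy(big[0..5], "hello");
    const small = try shrinkToOwned(std.testing.allocator, big, 5);
    defer std.testing.allocator.free(small);
    try std.testing.expectEqualStrings("hello", small);
}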
@ -799,7 +750,7 @@ fn canonicalQueryString(allocator: std.mem.Allocator, path: []const u8) ![]const
     const query = path[first_question.? + 1 ..];

     // Split this by component
-    var portions = std.mem.splitScalar(u8, query, '&');
+    var portions = std.mem.split(u8, query, "&");
     var sort_me = std.ArrayList([]const u8).init(allocator);
     defer sort_me.deinit();
     while (portions.next()) |item|
@ -936,7 +887,6 @@ fn canonicalHeaderValue(allocator: std.mem.Allocator, value: []const u8) ![]cons
     const in_quote = false;
     var start: usize = 0;
     const rc = try allocator.alloc(u8, value.len);
-    defer allocator.free(rc);
     var rc_inx: usize = 0;
     for (value, 0..) |c, i| {
         if (!started and !std.ascii.isWhitespace(c)) {
@ -954,7 +904,8 @@ fn canonicalHeaderValue(allocator: std.mem.Allocator, value: []const u8) ![]cons
     // Trim end
     while (std.ascii.isWhitespace(rc[rc_inx - 1]))
         rc_inx -= 1;
-    return try allocator.dupe(u8, rc[0..rc_inx]);
+    _ = allocator.resize(rc, rc_inx);
+    return rc[0..rc_inx];
 }
 fn lessThan(context: void, lhs: std.http.Header, rhs: std.http.Header) bool {
     _ = context;
@ -986,7 +937,6 @@ test "canonical uri" {
     const path = "/documents and settings/?foo=bar";
     const expected = "/documents%2520and%2520settings/";
     const actual = try canonicalUri(allocator, path, true);
-
     defer allocator.free(actual);
     try std.testing.expectEqualStrings(expected, actual);

147 src/json.zig
@ -1560,21 +1560,21 @@ fn skipValue(tokens: *TokenStream) SkipValueError!void {

 fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options: ParseOptions) !T {
     switch (@typeInfo(T)) {
-        .bool => {
+        .Bool => {
             return switch (token) {
                 .True => true,
                 .False => false,
                 else => error.UnexpectedToken,
             };
         },
-        .float, .comptime_float => {
+        .Float, .ComptimeFloat => {
             const numberToken = switch (token) {
                 .Number => |n| n,
                 else => return error.UnexpectedToken,
             };
             return try std.fmt.parseFloat(T, numberToken.slice(tokens.slice, tokens.i - 1));
         },
-        .int, .comptime_int => {
+        .Int, .ComptimeInt => {
             const numberToken = switch (token) {
                 .Number => |n| n,
                 else => return error.UnexpectedToken,
@ -1587,14 +1587,14 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
             if (std.math.round(float) != float) return error.InvalidNumber;
             return @as(T, @intFromFloat(float));
         },
-        .optional => |optionalInfo| {
+        .Optional => |optionalInfo| {
             if (token == .Null) {
                 return null;
             } else {
                 return try parseInternal(optionalInfo.child, token, tokens, options);
             }
         },
-        .@"enum" => |enumInfo| {
+        .Enum => |enumInfo| {
             switch (token) {
                 .Number => |numberToken| {
                     if (!numberToken.is_integer) return error.UnexpectedToken;
@ -1618,7 +1618,7 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
                 else => return error.UnexpectedToken,
             }
         },
-        .@"union" => |unionInfo| {
+        .Union => |unionInfo| {
             if (unionInfo.tag_type) |_| {
                 // try each of the union fields until we find one that matches
                 inline for (unionInfo.fields) |u_field| {
@ -1642,7 +1642,7 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
                 @compileError("Unable to parse into untagged union '" ++ @typeName(T) ++ "'");
             }
         },
-        .@"struct" => |structInfo| {
+        .Struct => |structInfo| {
             switch (token) {
                 .ObjectBegin => {},
                 else => return error.UnexpectedToken,
@ -1723,7 +1723,7 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
             }
             inline for (structInfo.fields, 0..) |field, i| {
                 if (!fields_seen[i]) {
-                    if (field.default_value_ptr) |default_value_ptr| {
+                    if (field.default_value) |default_value_ptr| {
                         if (!field.is_comptime) {
                             const default_value = @as(*align(1) const field.type, @ptrCast(default_value_ptr)).*;
                             @field(r, field.name) = default_value;
@ -1736,7 +1736,7 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
             }
             return r;
         },
-        .array => |arrayInfo| {
+        .Array => |arrayInfo| {
             switch (token) {
                 .ArrayBegin => {
                     var r: T = undefined;
@ -1770,21 +1770,21 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
                 else => return error.UnexpectedToken,
             }
         },
-        .pointer => |ptrInfo| {
+        .Pointer => |ptrInfo| {
             const allocator = options.allocator orelse return error.AllocatorRequired;
             switch (ptrInfo.size) {
-                .one => {
+                .One => {
                     const r: T = try allocator.create(ptrInfo.child);
                     errdefer allocator.destroy(r);
                     r.* = try parseInternal(ptrInfo.child, token, tokens, options);
                     return r;
                 },
-                .slice => {
+                .Slice => {
                     switch (token) {
                         .ArrayBegin => {
                             var arraylist = std.ArrayList(ptrInfo.child).init(allocator);
                             errdefer {
-                                while (arraylist.pop()) |v| {
+                                while (arraylist.popOrNull()) |v| {
                                     parseFree(ptrInfo.child, v, options);
                                 }
                                 arraylist.deinit();
@ -1829,7 +1829,7 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
                             if (value_type == null) return error.UnexpectedToken;
                             var arraylist = std.ArrayList(ptrInfo.child).init(allocator);
                             errdefer {
-                                while (arraylist.pop()) |v| {
+                                while (arraylist.popOrNull()) |v| {
                                     parseFree(ptrInfo.child, v, options);
                                 }
                                 arraylist.deinit();
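Both errdefer cleanup loops change because std.ArrayList.pop now returns an optional (it replaces the older popOrNull; pop previously asserted non-emptiness). The drain idiom from the master side, in isolation:

const std = @import("std");

test "drain an ArrayList with optional pop" {
    var list = std.ArrayList(u32).init(std.testing.allocator);
    defer list.deinit();
    try list.appendSlice(&.{ 1, 2, 3 });
    var sum: u32 = 0;
    while (list.pop()) |v| sum += v; // pop returns null once empty
    try std.testing.expectEqual(@as(u32, 6), sum);
}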
@ -1863,8 +1863,8 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
 fn typeForField(comptime T: type, comptime field_name: []const u8) ?type {
     const ti = @typeInfo(T);
     switch (ti) {
-        .@"struct" => {
-            inline for (ti.@"struct".fields) |field| {
+        .Struct => {
+            inline for (ti.Struct.fields) |field| {
                 if (std.mem.eql(u8, field.name, field_name))
                     return field.type;
             }
@ -1878,14 +1878,14 @@ fn isMapPattern(comptime T: type) bool {
     // We should be getting a type that is a pointer to a slice.
     // Let's just double check before proceeding
     const ti = @typeInfo(T);
-    if (ti != .pointer) return false;
-    if (ti.pointer.size != .slice) return false;
-    const ti_child = @typeInfo(ti.pointer.child);
-    if (ti_child != .@"struct") return false;
-    if (ti_child.@"struct".fields.len != 2) return false;
+    if (ti != .Pointer) return false;
+    if (ti.Pointer.size != .Slice) return false;
+    const ti_child = @typeInfo(ti.Pointer.child);
+    if (ti_child != .Struct) return false;
+    if (ti_child.Struct.fields.len != 2) return false;
     var key_found = false;
     var value_found = false;
-    inline for (ti_child.@"struct".fields) |field| {
+    inline for (ti_child.Struct.fields) |field| {
         if (std.mem.eql(u8, "key", field.name))
             key_found = true;
         if (std.mem.eql(u8, "value", field.name))
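Most of this file's churn is mechanical: Zig renamed the std.builtin.Type union fields from capitalized (.Bool, .Struct, .Pointer) to lowercase, with keyword names quoted (.@"struct", .@"union", .@"enum"). A compact sketch of the newer spelling the master side uses:

const std = @import("std");

fn kindName(comptime T: type) []const u8 {
    return switch (@typeInfo(T)) {
        .bool => "bool",
        .int, .comptime_int => "integer",
        .pointer => "pointer",
        .@"struct" => "struct", // keyword tags must be quoted
        else => "other",
    };
}

test "lowercase type-info tags" {
    try std.testing.expectEqualStrings("struct", kindName(struct { a: u8 }));
    try std.testing.expectEqualStrings("pointer", kindName([]const u8));
}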
@ -1895,7 +1895,6 @@ fn isMapPattern(comptime T: type) bool {
 }

 pub fn parse(comptime T: type, tokens: *TokenStream, options: ParseOptions) !T {
-    // std.log.debug("parsing {s} into type {s}", .{ tokens.slice, @typeName(T) });
     const token = (try tokens.next()) orelse return error.UnexpectedEndOfJson;
     return parseInternal(T, token, tokens, options);
 }
@ -1904,13 +1903,13 @@ pub fn parse(comptime T: type, tokens: *TokenStream, options: ParseOptions) !T {
 /// Should be called with the same type and `ParseOptions` that were passed to `parse`
 pub fn parseFree(comptime T: type, value: T, options: ParseOptions) void {
     switch (@typeInfo(T)) {
-        .bool, .float, .comptime_float, .int, .comptime_int, .@"enum" => {},
-        .optional => {
+        .Bool, .Float, .ComptimeFloat, .Int, .ComptimeInt, .Enum => {},
+        .Optional => {
             if (value) |v| {
                 return parseFree(@TypeOf(v), v, options);
             }
         },
-        .@"union" => |unionInfo| {
+        .Union => |unionInfo| {
             if (unionInfo.tag_type) |UnionTagType| {
                 inline for (unionInfo.fields) |u_field| {
                     if (value == @field(UnionTagType, u_field.name)) {
@ -1922,24 +1921,24 @@ pub fn parseFree(comptime T: type, value: T, options: ParseOptions) void {
                 unreachable;
             }
         },
-        .@"struct" => |structInfo| {
+        .Struct => |structInfo| {
             inline for (structInfo.fields) |field| {
                 parseFree(field.type, @field(value, field.name), options);
             }
         },
-        .array => |arrayInfo| {
+        .Array => |arrayInfo| {
             for (value) |v| {
                 parseFree(arrayInfo.child, v, options);
             }
         },
-        .pointer => |ptrInfo| {
+        .Pointer => |ptrInfo| {
             const allocator = options.allocator orelse unreachable;
             switch (ptrInfo.size) {
-                .one => {
+                .One => {
                     parseFree(ptrInfo.child, value.*, options);
                     allocator.destroy(value);
                 },
-                .slice => {
+                .Slice => {
                     for (value) |v| {
                         parseFree(ptrInfo.child, v, options);
                     }
@ -2284,7 +2283,7 @@ pub const Parser = struct {
                 return;
             }

-            var value = p.stack.pop().?;
+            var value = p.stack.pop();
             try p.pushToParent(&value);
         },
         .String => |s| {
@ -2350,7 +2349,7 @@ pub const Parser = struct {
                 return;
             }

-            var value = p.stack.pop().?;
+            var value = p.stack.pop();
             try p.pushToParent(&value);
         },
         .ObjectBegin => {
@ -2511,7 +2510,7 @@ pub fn unescapeValidString(output: []u8, input: []const u8) !void {
                 mem.nativeToLittle(u16, firstCodeUnit),
                 mem.nativeToLittle(u16, secondCodeUnit),
             };
-            if (std.unicode.utf16LeToUtf8(output[outIndex..], &utf16le_seq)) |byteCount| {
+            if (std.unicode.utf16leToUtf8(output[outIndex..], &utf16le_seq)) |byteCount| {
                 outIndex += byteCount;
                 inIndex += 12;
             } else |_| {
@ -2757,10 +2756,6 @@ pub const StringifyOptions = struct {
         }
     };

-    emit_null: bool = true,
-
-    exclude_fields: ?[][]const u8 = null,
-
     /// Controls the whitespace emitted
     whitespace: ?Whitespace = null,

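Master's StringifyOptions grows emit_null and exclude_fields, which the struct-serialization loop below uses to skip null optionals and to drop fields by their serialized name. A hedged usage sketch — the option field names come from the hunk; the struct and call are illustrative:

// Assuming a Credentials struct and the json stringify declared in this file:
const Credentials = struct { access_key: []const u8, session_token: ?[]const u8 = null };

pub fn dumpRedacted(creds: Credentials, writer: anytype) !void {
    var exclude = [_][]const u8{"access_key"}; // never serialize the key itself
    try stringify(creds, .{
        .emit_null = false, // drop session_token when it is null
        .exclude_fields = &exclude,
    }, writer);
}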
@ -2812,38 +2807,38 @@ pub fn stringify(
 ) !void {
     const T = @TypeOf(value);
     switch (@typeInfo(T)) {
-        .float, .comptime_float => {
+        .Float, .ComptimeFloat => {
             return std.fmt.format(out_stream, "{e}", .{value});
         },
-        .int, .comptime_int => {
+        .Int, .ComptimeInt => {
             return std.fmt.formatIntValue(value, "", std.fmt.FormatOptions{}, out_stream);
         },
-        .bool => {
+        .Bool => {
             return out_stream.writeAll(if (value) "true" else "false");
         },
-        .null => {
+        .Null => {
             return out_stream.writeAll("null");
         },
-        .optional => {
+        .Optional => {
             if (value) |payload| {
                 return try stringify(payload, options, out_stream);
             } else {
                 return try stringify(null, options, out_stream);
             }
         },
-        .@"enum" => {
+        .Enum => {
             if (comptime std.meta.hasFn(T, "jsonStringify")) {
                 return value.jsonStringify(options, out_stream);
             }

             @compileError("Unable to stringify enum '" ++ @typeName(T) ++ "'");
         },
-        .@"union" => {
+        .Union => {
             if (comptime std.meta.hasFn(T, "jsonStringify")) {
                 return value.jsonStringify(options, out_stream);
             }

-            const info = @typeInfo(T).@"union";
+            const info = @typeInfo(T).Union;
             if (info.tag_type) |UnionTagType| {
                 inline for (info.fields) |u_field| {
                     if (value == @field(UnionTagType, u_field.name)) {
@ -2854,13 +2849,13 @@ pub fn stringify(
             @compileError("Unable to stringify untagged union '" ++ @typeName(T) ++ "'");
             }
         },
-        .@"struct" => |S| {
+        .Struct => |S| {
             if (comptime std.meta.hasFn(T, "jsonStringify")) {
                 return value.jsonStringify(options, out_stream);
             }

             try out_stream.writeByte('{');
-            var field_output = false;
+            comptime var field_output = false;
             var child_options = options;
             if (child_options.whitespace) |*child_whitespace| {
                 child_whitespace.indent_level += 1;
@ -2869,46 +2864,34 @@ pub fn stringify(
                 // don't include void fields
                 if (Field.type == void) continue;

-                var output_this_field = true;
-                if (!options.emit_null and @typeInfo(Field.type) == .optional and @field(value, Field.name) == null) output_this_field = false;
-
-                const final_name = if (comptime std.meta.hasFn(T, "fieldNameFor"))
-                    value.fieldNameFor(Field.name)
-                else
-                    Field.name;
-                if (options.exclude_fields) |exclude_fields| {
-                    for (exclude_fields) |exclude_field| {
-                        if (std.mem.eql(u8, final_name, exclude_field)) {
-                            output_this_field = false;
-                        }
-                    }
-                }
-
                 if (!field_output) {
-                    field_output = output_this_field;
+                    field_output = true;
                 } else {
-                    if (output_this_field) try out_stream.writeByte(',');
+                    try out_stream.writeByte(',');
                 }
                 if (child_options.whitespace) |child_whitespace| {
-                    if (output_this_field) try out_stream.writeByte('\n');
-                    if (output_this_field) try child_whitespace.outputIndent(out_stream);
+                    try out_stream.writeByte('\n');
+                    try child_whitespace.outputIndent(out_stream);
                 }
                 var field_written = false;
-                if (comptime std.meta.hasFn(T, "jsonStringifyField")) {
-                    if (output_this_field) field_written = try value.jsonStringifyField(Field.name, child_options, out_stream);
-                }
+                if (comptime std.meta.hasFn(T, "jsonStringifyField"))
+                    field_written = try value.jsonStringifyField(Field.name, child_options, out_stream);

                 if (!field_written) {
-                    if (output_this_field) {
-                        try stringify(final_name, options, out_stream);
-                        try out_stream.writeByte(':');
+                    if (comptime std.meta.hasFn(T, "fieldNameFor")) {
+                        const name = value.fieldNameFor(Field.name);
+                        try stringify(name, options, out_stream);
+                    } else {
+                        try stringify(Field.name, options, out_stream);
                     }

+                    try out_stream.writeByte(':');
                     if (child_options.whitespace) |child_whitespace| {
                         if (child_whitespace.separator) {
-                            if (output_this_field) try out_stream.writeByte(' ');
+                            try out_stream.writeByte(' ');
                         }
                     }
-                    if (output_this_field) try stringify(@field(value, Field.name), child_options, out_stream);
+                    try stringify(@field(value, Field.name), child_options, out_stream);
                 }
             }
             if (field_output) {
@ -2920,10 +2903,10 @@ pub fn stringify(
             try out_stream.writeByte('}');
             return;
         },
-        .error_set => return stringify(@as([]const u8, @errorName(value)), options, out_stream),
-        .pointer => |ptr_info| switch (ptr_info.size) {
-            .one => switch (@typeInfo(ptr_info.child)) {
-                .array => {
+        .ErrorSet => return stringify(@as([]const u8, @errorName(value)), options, out_stream),
+        .Pointer => |ptr_info| switch (ptr_info.size) {
+            .One => switch (@typeInfo(ptr_info.child)) {
+                .Array => {
                     const Slice = []const std.meta.Elem(ptr_info.child);
                     return stringify(@as(Slice, value), options, out_stream);
                 },
@ -2933,7 +2916,7 @@ pub fn stringify(
             },
         },
         // TODO: .Many when there is a sentinel (waiting for https://github.com/ziglang/zig/pull/3972)
-        .slice => {
+        .Slice => {
             if (ptr_info.child == u8 and options.string == .String and std.unicode.utf8ValidateSlice(value)) {
                 try out_stream.writeByte('\"');
                 var i: usize = 0;
@ -3002,8 +2985,8 @@ pub fn stringify(
             },
             else => @compileError("Unable to stringify type '" ++ @typeName(T) ++ "'"),
         },
-        .array => return stringify(&value, options, out_stream),
-        .vector => |info| {
+        .Array => return stringify(&value, options, out_stream),
+        .Vector => |info| {
            const array: [info.len]info.child = value;
            return stringify(&array, options, out_stream);
        },

16 src/main.zig
@ -32,8 +32,8 @@ pub fn log(
     const prefix = "[" ++ @tagName(level) ++ "] " ++ scope_prefix;

     // Print the message to stderr, silently ignoring any errors
-    std.debug.lockStdErr();
-    defer std.debug.unlockStdErr();
+    std.debug.getStderrMutex().lock();
+    defer std.debug.getStderrMutex().unlock();
     const stderr = std.io.getStdErr().writer();
     nosuspend stderr.print(prefix ++ format ++ "\n", args) catch return;
 }
@ -97,7 +97,7 @@ pub fn main() anyerror!void {
         }
         continue;
     }
-    inline for (@typeInfo(Tests).@"enum".fields) |f| {
+    inline for (@typeInfo(Tests).Enum.fields) |f| {
         if (std.mem.eql(u8, f.name, arg)) {
             try tests.append(@field(Tests, f.name));
             break;
@ -105,7 +105,7 @@ pub fn main() anyerror!void {
         }
     }
     if (tests.items.len == 0) {
-        inline for (@typeInfo(Tests).@"enum".fields) |f|
+        inline for (@typeInfo(Tests).Enum.fields) |f|
             try tests.append(@field(Tests, f.name));
     }

@ -192,7 +192,7 @@ pub fn main() anyerror!void {
     const func = fns[0];
     const arn = func.function_arn.?;
     // This is a bit ugly. Maybe a helper function in the library would help?
-    var tags = try std.ArrayList(@typeInfo(try typeForField(services.lambda.tag_resource.Request, "tags")).pointer.child).initCapacity(allocator, 1);
+    var tags = try std.ArrayList(@typeInfo(try typeForField(services.lambda.tag_resource.Request, "tags")).Pointer.child).initCapacity(allocator, 1);
     defer tags.deinit();
     tags.appendAssumeCapacity(.{ .key = "Foo", .value = "Bar" });
     const req = services.lambda.tag_resource.Request{ .resource = arn, .tags = tags.items };
@ -371,7 +371,7 @@ fn proxyFromString(string: []const u8) !std.http.Client.Proxy {
         rc.port = 443;
         rc.protocol = .tls;
     } else return error.InvalidScheme;
-    var split_iterator = std.mem.splitScalar(u8, remaining, ':');
+    var split_iterator = std.mem.split(u8, remaining, ":");
     rc.host = std.mem.trimRight(u8, split_iterator.first(), "/");
     if (split_iterator.next()) |port|
         rc.port = try std.fmt.parseInt(u16, port, 10);
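A behavior sketch for proxyFromString as reconstructed from the hunk — the function is file-private, so this would sit in the same file; the expectations are inferred from the parsing above rather than taken from the repo's tests:

test "proxyFromString sketch" {
    const proxy = try proxyFromString("http://localhost:8080");
    try std.testing.expectEqualStrings("localhost", proxy.host);
    try std.testing.expectEqual(@as(u16, 8080), proxy.port);
}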
@ -380,8 +380,8 @@ fn proxyFromString(string: []const u8) !std.http.Client.Proxy {
 fn typeForField(comptime T: type, comptime field_name: []const u8) !type {
     const ti = @typeInfo(T);
     switch (ti) {
-        .@"struct" => {
-            inline for (ti.@"struct".fields) |field| {
+        .Struct => {
+            inline for (ti.Struct.fields) |field| {
                 if (std.mem.eql(u8, field.name, field_name))
                     return field.type;
             }
@ -1,5 +1,5 @@
 const std = @import("std");
-const service_list = @import("service_manifest");
+const service_list = @import("models/service_manifest.zig");
 const expectEqualStrings = std.testing.expectEqualStrings;

 pub fn Services(comptime service_imports: anytype) type {
@ -12,7 +12,7 @@ pub fn Services(comptime service_imports: anytype) type {
         item.* = .{
             .name = @tagName(service_imports[i]),
             .type = @TypeOf(import_field),
-            .default_value_ptr = &import_field,
+            .default_value = &import_field,
             .is_comptime = false,
             .alignment = 0,
         };
@ -20,7 +20,7 @@ pub fn Services(comptime service_imports: anytype) type {

     // finally, generate the type
     return @Type(.{
-        .@"struct" = .{
+        .Struct = .{
             .layout = .auto,
             .fields = &fields,
             .decls = &[_]std.builtin.Type.Declaration{},
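Services builds a struct type at comptime from std.builtin.Type.StructField entries; the only rename here is default_value (a type-erased ?*const anyopaque) becoming default_value_ptr. A freestanding sketch of the same technique using the newer field names from the master side:

const std = @import("std");

// Build `struct { answer: u32 = 42 }` at comptime, as Services() does.
const Answer = @Type(.{ .@"struct" = .{
    .layout = .auto,
    .fields = &[_]std.builtin.Type.StructField{.{
        .name = "answer",
        .type = u32,
        .default_value_ptr = &@as(u32, 42), // zig-0.12.0 side: .default_value
        .is_comptime = false,
        .alignment = 0,
    }},
    .decls = &.{},
    .is_tuple = false,
} });

test "comptime-built struct" {
    const a = Answer{};
    try std.testing.expectEqual(@as(u32, 42), a.answer);
}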
19 src/url.zig
@ -24,11 +24,10 @@ fn encodeStruct(
     comptime options: EncodingOptions,
 ) !bool {
     var rc = first;
-    var arena = std.heap.ArenaAllocator.init(allocator);
-    defer arena.deinit();
-    const arena_alloc = arena.allocator();
-    inline for (@typeInfo(@TypeOf(obj)).@"struct".fields) |field| {
-        const field_name = try options.field_name_transformer(arena_alloc, field.name);
+    inline for (@typeInfo(@TypeOf(obj)).Struct.fields) |field| {
+        const field_name = try options.field_name_transformer(allocator, field.name);
+        defer if (options.field_name_transformer.* != defaultTransformer)
+            allocator.free(field_name);
         // @compileLog(@typeInfo(field.field_type).Pointer);
         rc = try encodeInternal(allocator, parent, field_name, rc, @field(obj, field.name), writer, options);
     }
@ -48,10 +47,10 @@ pub fn encodeInternal(
     // @compileLog(@typeInfo(@TypeOf(obj)));
     var rc = first;
     switch (@typeInfo(@TypeOf(obj))) {
-        .optional => if (obj) |o| {
+        .Optional => if (obj) |o| {
             rc = try encodeInternal(allocator, parent, field_name, first, o, writer, options);
         },
-        .pointer => |ti| if (ti.size == .one) {
+        .Pointer => |ti| if (ti.size == .One) {
             rc = try encodeInternal(allocator, parent, field_name, first, obj.*, writer, options);
         } else {
             if (!first) _ = try writer.write("&");
@ -62,7 +61,7 @@ pub fn encodeInternal(
             try writer.print("{s}{s}={any}", .{ parent, field_name, obj });
             rc = false;
         },
-        .@"struct" => if (std.mem.eql(u8, "", field_name)) {
+        .Struct => if (std.mem.eql(u8, "", field_name)) {
             rc = try encodeStruct(allocator, parent, first, obj, writer, options);
         } else {
             // TODO: It would be lovely if we could concat at compile time or allocPrint at runtime
@ -74,12 +73,12 @@ pub fn encodeInternal(
             rc = try encodeStruct(allocator, new_parent, first, obj, writer, options);
             // try encodeStruct(parent ++ field_name ++ ".", first, obj, writer, options);
         },
-        .array => {
+        .Array => {
             if (!first) _ = try writer.write("&");
             try writer.print("{s}{s}={s}", .{ parent, field_name, obj });
             rc = false;
         },
-        .int, .comptime_int, .float, .comptime_float => {
+        .Int, .ComptimeInt, .Float, .ComptimeFloat => {
             if (!first) _ = try writer.write("&");
             try writer.print("{s}{s}={d}", .{ parent, field_name, obj });
             rc = false;
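The master rewrite of encodeStruct drops the conditional free of transformed field names in favor of an ArenaAllocator: every transformed name is allocated in the arena and released in one deinit, removing the defaultTransformer special case. The pattern by itself:

const std = @import("std");

test "arena frees per-field scratch in one shot" {
    var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
    defer arena.deinit(); // all field-name copies released together
    const alloc = arena.allocator();
    const names = [_][]const u8{ "member_name", "other_field" };
    for (names) |n| {
        const copy = try alloc.dupe(u8, n); // stand-in for field_name_transformer
        try std.testing.expect(copy.len == n.len);
    }
}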
@ -653,10 +653,7 @@ fn dupeAndUnescape(alloc: Allocator, text: []const u8) ![]const u8 {

     // This error is not strictly true, but we need to match one of the items
     // from the error set provided by the other stdlib calls at the calling site
-    if (!alloc.resize(str, j)) {
-        defer alloc.free(str);
-        return alloc.dupe(u8, str[0..j]) catch return error.OutOfMemory;
-    }
+    if (!alloc.resize(str, j)) return error.OutOfMemory;
     return str[0..j];
 }

@ -1,793 +0,0 @@
const std = @import("std");
|
|
||||||
const mem = std.mem;
|
|
||||||
const Allocator = mem.Allocator;
|
|
||||||
|
|
||||||
/// Options for controlling XML serialization behavior
|
|
||||||
pub const StringifyOptions = struct {
|
|
||||||
/// Controls whitespace insertion for easier human readability
|
|
||||||
whitespace: Whitespace = .minified,
|
|
||||||
|
|
||||||
/// Should optional fields with null value be written?
|
|
||||||
emit_null_optional_fields: bool = true,
|
|
||||||
|
|
||||||
// TODO: Implement
|
|
||||||
/// Arrays/slices of u8 are typically encoded as strings. This option emits them as arrays of numbers instead. Does not affect calls to objectField*().
|
|
||||||
emit_strings_as_arrays: bool = false,
|
|
||||||
|
|
||||||
/// Controls whether to include XML declaration at the beginning
|
|
||||||
include_declaration: bool = true,
|
|
||||||
|
|
||||||
/// Root element name to use when serializing a value that doesn't have a natural name
|
|
||||||
root_name: ?[]const u8 = "root",
|
|
||||||
|
|
||||||
/// Root attributes (e.g. xmlns="...") that will be added to the root element node only
|
|
||||||
root_attributes: []const u8 = "",
|
|
||||||
|
|
||||||
/// Function to determine the element name for an array item based on the element
|
|
||||||
/// name of the array containing the elements. See arrayElementPluralToSingluarTransformation
|
|
||||||
/// and arrayElementNoopTransformation functions for examples
|
|
||||||
arrayElementNameConversion: *const fn (allocator: std.mem.Allocator, name: ?[]const u8) error{OutOfMemory}!?[]const u8 = arrayElementPluralToSingluarTransformation,
|
|
||||||
|
|
||||||
pub const Whitespace = enum {
|
|
||||||
minified,
|
|
||||||
indent_1,
|
|
||||||
indent_2,
|
|
||||||
indent_3,
|
|
||||||
indent_4,
|
|
||||||
indent_8,
|
|
||||||
indent_tab,
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
/// Error set for XML serialization
|
|
||||||
pub const XmlSerializeError = error{
|
|
||||||
/// Unsupported type for XML serialization
|
|
||||||
UnsupportedType,
|
|
||||||
/// Out of memory
|
|
||||||
OutOfMemory,
|
|
||||||
/// Write error
|
|
||||||
WriteError,
|
|
||||||
};
|
|
||||||
|
|
||||||
/// Serializes a value to XML and writes it to the provided writer
|
|
||||||
pub fn stringify(
|
|
||||||
value: anytype,
|
|
||||||
options: StringifyOptions,
|
|
||||||
writer: anytype,
|
|
||||||
) !void {
|
|
||||||
// Write XML declaration if requested
|
|
||||||
if (options.include_declaration)
|
|
||||||
try writer.writeAll("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n");
|
|
||||||
|
|
||||||
// Start serialization with the root element
|
|
||||||
const root_name = options.root_name;
|
|
||||||
if (@typeInfo(@TypeOf(value)) != .optional or value == null)
|
|
||||||
try serializeValue(value, root_name, options, writer.any(), 0)
|
|
||||||
else
|
|
||||||
try serializeValue(value.?, root_name, options, writer.any(), 0);
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Serializes a value to XML and returns an allocated string
|
|
||||||
pub fn stringifyAlloc(
|
|
||||||
allocator: Allocator,
|
|
||||||
value: anytype,
|
|
||||||
options: StringifyOptions,
|
|
||||||
) ![]u8 {
|
|
||||||
var list = std.ArrayList(u8).init(allocator);
|
|
||||||
errdefer list.deinit();
|
|
||||||
|
|
||||||
try stringify(value, options, list.writer());
|
|
||||||
return list.toOwnedSlice();
|
|
||||||
}
|
|
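For orientation, a usage sketch of the two entry points above; the import path xml_serializer.zig is an assumption about where this deleted file lived on master:

const std = @import("std");
const xml_serializer = @import("xml_serializer.zig"); // assumed module path

test "stringifyAlloc usage sketch" {
    const Point = struct { x: i32, y: i32 };
    const doc = try xml_serializer.stringifyAlloc(
        std.testing.allocator,
        Point{ .x = 1, .y = 2 },
        .{ .root_name = "point" },
    );
    defer std.testing.allocator.free(doc);
    try std.testing.expectEqualStrings(
        "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<point><x>1</x><y>2</y></point>",
        doc,
    );
}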
/// Internal function to serialize a value with proper indentation
fn serializeValue(
    value: anytype,
    element_name: ?[]const u8,
    options: StringifyOptions,
    writer: anytype,
    depth: usize,
) !void {
    const T = @TypeOf(value);

    // const output_indent = !(!options.emit_null_optional_fields and @typeInfo(@TypeOf(value)) == .optional and value == null);
    const output_indent = options.emit_null_optional_fields or @typeInfo(@TypeOf(value)) != .optional or value != null;

    if (output_indent and element_name != null)
        try writeIndent(writer, depth, options.whitespace);

    // Start element tag
    if (@typeInfo(T) != .optional and @typeInfo(T) != .array) {
        if (element_name) |n| {
            try writer.writeAll("<");
            try writer.writeAll(n);
            if (depth == 0 and options.root_attributes.len > 0) {
                try writer.writeByte(' ');
                try writer.writeAll(options.root_attributes);
            }
            try writer.writeAll(">");
        }
    }

    // Handle different types
    switch (@typeInfo(T)) {
        .bool => try writer.writeAll(if (value) "true" else "false"),
        .int, .comptime_int, .float, .comptime_float => try writer.print("{}", .{value}),
        .pointer => |ptr_info| {
            switch (ptr_info.size) {
                .one => {
                    // We don't want to write the opening tag a second time, so
                    // we will pass null, then come back and close before returning
                    //
                    // ...but...in the event of a *[]const u8, we do want to pass that in,
                    // but only if emit_strings_as_arrays is true
                    const child_ti = @typeInfo(ptr_info.child);
                    const el_name = if (options.emit_strings_as_arrays and child_ti == .array and child_ti.array.child == u8)
                        element_name
                    else
                        null;
                    try serializeValue(value.*, el_name, options, writer, depth);
                    try writeClose(writer, element_name);
                    return;
                },
                .slice => {
                    if (ptr_info.child == u8) {
                        // String type
                        try serializeString(writer, element_name, value, options, depth);
                    } else {
                        // Array of values
                        if (options.whitespace != .minified) {
                            try writer.writeByte('\n');
                        }

                        var buf: [256]u8 = undefined;
                        var fba = std.heap.FixedBufferAllocator.init(&buf);
                        const alloc = fba.allocator();
                        const item_name = try options.arrayElementNameConversion(alloc, element_name);

                        for (value) |item| {
                            try serializeValue(item, item_name, options, writer, depth + 1);
                            if (options.whitespace != .minified) {
                                try writer.writeByte('\n');
                            }
                        }

                        try writeIndent(writer, depth, options.whitespace);
                    }
                },
                else => return error.UnsupportedType,
            }
        },
        .array => |array_info| {
            if (!options.emit_strings_as_arrays or array_info.child != u8) {
                if (element_name) |n| {
                    try writer.writeAll("<");
                    try writer.writeAll(n);
                    try writer.writeAll(">");
                }
            }
            if (array_info.child == u8) {
                // Fixed-size string
                const slice = &value;
                try serializeString(writer, element_name, slice, options, depth);
            } else {
                // Fixed-size array
                if (options.whitespace != .minified) {
                    try writer.writeByte('\n');
                }

                var buf: [256]u8 = undefined;
                var fba = std.heap.FixedBufferAllocator.init(&buf);
                const alloc = fba.allocator();
                const item_name = try options.arrayElementNameConversion(alloc, element_name);

                for (value) |item| {
                    try serializeValue(item, item_name, options, writer, depth + 1);
                    if (options.whitespace != .minified) {
                        try writer.writeByte('\n');
                    }
                }

                try writeIndent(writer, depth, options.whitespace);
            }
            if (!options.emit_strings_as_arrays or array_info.child != u8)
                try writeClose(writer, element_name);
            return;
        },
        .@"struct" => |struct_info| {
            if (options.whitespace != .minified) {
                try writer.writeByte('\n');
            }

            inline for (struct_info.fields) |field| {
                const field_name =
                    if (std.meta.hasFn(T, "fieldNameFor"))
                        value.fieldNameFor(field.name)
                    else
                        field.name; // TODO: field mapping

                const field_value = @field(value, field.name);
                try serializeValue(
                    field_value,
                    field_name,
                    options,
                    writer,
                    depth + 1,
                );

                if (options.whitespace != .minified) {
                    if (!options.emit_null_optional_fields and @typeInfo(@TypeOf(field_value)) == .optional and field_value == null) {
                        // Skip writing anything
                    } else {
                        try writer.writeByte('\n');
                    }
                }
            }

            try writeIndent(writer, depth, options.whitespace);
        },
        .optional => {
            if (options.emit_null_optional_fields or value != null) {
                if (element_name) |n| {
                    try writer.writeAll("<");
                    try writer.writeAll(n);
                    try writer.writeAll(">");
                }
            }
            if (value) |payload| {
                try serializeValue(payload, null, options, writer, depth);
            } else {
                // For null values, we'll write an empty element
                // We've already written the opening tag, so just close it immediately
                if (options.emit_null_optional_fields)
                    try writeClose(writer, element_name);
                return;
            }
        },
        .null => {
            // Empty element
        },
        .@"enum" => {
            try std.fmt.format(writer, "{s}", .{@tagName(value)});
        },
        .@"union" => |union_info| {
            if (union_info.tag_type) |_| {
                inline for (union_info.fields) |field| {
                    if (@field(std.meta.Tag(T), field.name) == std.meta.activeTag(value)) {
                        try serializeValue(
                            @field(value, field.name),
                            field.name,
                            options,
                            writer,
                            depth,
                        );
                        break;
                    }
                }
            } else {
                return error.UnsupportedType;
            }
        },
        else => return error.UnsupportedType,
    }

    try writeClose(writer, element_name);
}
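Tagged unions are the one serializeValue branch the tests below never exercise. Reading the dispatch above literally, the active field's name becomes the inner element; a sketch of the expected shape (not a verified output):

const Value = union(enum) { count: i64, label: []const u8 };

// stringifyAlloc(alloc, Value{ .count = 42 }, .{}) should yield:
//   <?xml version="1.0" encoding="UTF-8"?>
//   <root><count>42</count></root>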
fn writeClose(writer: anytype, element_name: ?[]const u8) !void {
    // Close element tag
    if (element_name) |n| {
        try writer.writeAll("</");
        try writer.writeAll(n);
        try writer.writeAll(">");
    }
}

/// Writes indentation based on depth and indent level
fn writeIndent(writer: anytype, depth: usize, whitespace: StringifyOptions.Whitespace) @TypeOf(writer).Error!void {
    var char: u8 = ' ';
    const n_chars = switch (whitespace) {
        .minified => return,
        .indent_1 => 1 * depth,
        .indent_2 => 2 * depth,
        .indent_3 => 3 * depth,
        .indent_4 => 4 * depth,
        .indent_8 => 8 * depth,
        .indent_tab => blk: {
            char = '\t';
            break :blk depth;
        },
    };
    try writer.writeByteNTimes(char, n_chars);
}

fn serializeString(
    writer: anytype,
    element_name: ?[]const u8,
    value: []const u8,
    options: StringifyOptions,
    depth: usize,
) @TypeOf(writer).Error!void {
    if (options.emit_strings_as_arrays) {
        // if (true) return error.seestackrun;
        for (value) |c| {
            try writeIndent(writer, depth + 1, options.whitespace);

            var buf: [256]u8 = undefined;
            var fba = std.heap.FixedBufferAllocator.init(&buf);
            const alloc = fba.allocator();
            const item_name = try options.arrayElementNameConversion(alloc, element_name);
            if (item_name) |n| {
                try writer.writeAll("<");
                try writer.writeAll(n);
                try writer.writeAll(">");
            }
            try writer.print("{d}", .{c});
            try writeClose(writer, item_name);
            if (options.whitespace != .minified) {
                try writer.writeByte('\n');
            }
        }
        return;
    }
    try escapeString(writer, value);
}
/// Escapes special characters in XML strings
fn escapeString(writer: anytype, value: []const u8) @TypeOf(writer).Error!void {
    for (value) |c| {
        switch (c) {
            '&' => try writer.writeAll("&amp;"),
            '<' => try writer.writeAll("&lt;"),
            '>' => try writer.writeAll("&gt;"),
            '"' => try writer.writeAll("&quot;"),
            '\'' => try writer.writeAll("&apos;"),
            else => try writer.writeByte(c),
        }
    }
}
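escapeString covers exactly the five predefined XML entities and passes every other byte through untouched. A quick sketch of the mapping (escapeString is file-private, so this would sit in the same file):

test "escapeString mapping sketch" {
    var out = std.ArrayList(u8).init(std.testing.allocator);
    defer out.deinit();
    try escapeString(out.writer(), "a < b & \"c's\"");
    try std.testing.expectEqualStrings("a &lt; b &amp; &quot;c&apos;s&quot;", out.items);
}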
/// Does no transformation on the input array
pub fn arrayElementNoopTransformation(allocator: std.mem.Allocator, name: ?[]const u8) !?[]const u8 {
    _ = allocator;
    return name;
}

/// Attempts to convert a plural name to singular for array items
pub fn arrayElementPluralToSingluarTransformation(allocator: std.mem.Allocator, name: ?[]const u8) !?[]const u8 {
    if (name == null or name.?.len < 3) return name;

    const n = name.?;
    // There are a ton of these words, I'm just adding two for now
    // https://wordmom.com/nouns/end-e
    const es_exceptions = &[_][]const u8{
        "types",
        "bytes",
    };
    for (es_exceptions) |exception| {
        if (std.mem.eql(u8, exception, n)) {
            return n[0 .. n.len - 1];
        }
    }
    // Very basic English pluralization rules
    if (std.mem.endsWith(u8, n, "s")) {
        if (std.mem.endsWith(u8, n, "ies")) {
            // e.g., "entries" -> "entry"
            return try std.mem.concat(allocator, u8, &[_][]const u8{ n[0 .. n.len - 3], "y" });
        } else if (std.mem.endsWith(u8, n, "es")) {
            return n[0 .. n.len - 2]; // e.g., "boxes" -> "box"
        } else {
            return n[0 .. n.len - 1]; // e.g., "items" -> "item"
        }
    }

    return name; // Not recognized as plural
}
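Under the rules above, the conversion behaves as follows; a sketch exercising the exception list and each suffix rule:

test "plural to singular sketch" {
    var buf: [64]u8 = undefined;
    var fba = std.heap.FixedBufferAllocator.init(&buf);
    const alloc = fba.allocator();
    try std.testing.expectEqualStrings("item", (try arrayElementPluralToSingluarTransformation(alloc, "items")).?);
    try std.testing.expectEqualStrings("entry", (try arrayElementPluralToSingluarTransformation(alloc, "entries")).?);
    try std.testing.expectEqualStrings("box", (try arrayElementPluralToSingluarTransformation(alloc, "boxes")).?);
    // "types" is on the exception list, so only the trailing "s" is dropped
    try std.testing.expectEqualStrings("type", (try arrayElementPluralToSingluarTransformation(alloc, "types")).?);
}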
// Tests
test "stringify basic types" {
    const testing = std.testing;
    const allocator = testing.allocator;

    // Test boolean
    {
        const result = try stringifyAlloc(allocator, true, .{});
        defer allocator.free(result);
        try testing.expectEqualStrings("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<root>true</root>", result);
    }

    // Test comptime integer
    {
        const result = try stringifyAlloc(allocator, 42, .{});
        defer allocator.free(result);
        try testing.expectEqualStrings("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<root>42</root>", result);
    }

    // Test integer
    {
        const result = try stringifyAlloc(allocator, @as(usize, 42), .{});
        defer allocator.free(result);
        try testing.expectEqualStrings("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<root>42</root>", result);
    }

    // Test float
    {
        const result = try stringifyAlloc(allocator, 3.14, .{});
        defer allocator.free(result);
        try testing.expectEqualStrings("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<root>3.14e0</root>", result);
    }

    // Test string
    {
        const result = try stringifyAlloc(allocator, "hello", .{});
        // @compileLog(@typeInfo(@TypeOf("hello")).pointer.size);
        // @compileLog(@typeName(@typeInfo(@TypeOf("hello")).pointer.child));
        defer allocator.free(result);
        try testing.expectEqualStrings("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<root>hello</root>", result);
    }

    // Test string with special characters
    {
        const result = try stringifyAlloc(allocator, "hello & world < > \" '", .{});
        defer allocator.free(result);
        try testing.expectEqualStrings("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<root>hello &amp; world &lt; &gt; &quot; &apos;</root>", result);
    }
}

test "stringify arrays" {
    const testing = std.testing;
    const allocator = testing.allocator;

    // Test array of integers
    {
        const arr = [_]i32{ 1, 2, 3 };
        const result = try stringifyAlloc(allocator, arr, .{});
        defer allocator.free(result);
        try testing.expectEqualStrings("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<root><root>1</root><root>2</root><root>3</root></root>", result);
    }

    // Test array of strings
    {
        const arr = [_][]const u8{ "one", "two", "three" };
        const result = try stringifyAlloc(allocator, arr, .{});
        defer allocator.free(result);
        try testing.expectEqualStrings("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<root><root>one</root><root>two</root><root>three</root></root>", result);
    }

    // Test array with custom root name
    {
        const arr = [_]i32{ 1, 2, 3 };
        const result = try stringifyAlloc(allocator, arr, .{ .root_name = "items" });
        defer allocator.free(result);
        try testing.expectEqualStrings("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<items><item>1</item><item>2</item><item>3</item></items>", result);
    }
}

test "stringify structs" {
    const testing = std.testing;
    const allocator = testing.allocator;

    const Person = struct {
        name: []const u8,
        age: u32,
        is_active: bool,
    };

    // Test basic struct
    {
        const person = Person{
            .name = "John",
            .age = 30,
            .is_active = true,
        };

        const result = try stringifyAlloc(allocator, person, .{});
        defer allocator.free(result);
        try testing.expectEqualStrings("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<root><name>John</name><age>30</age><is_active>true</is_active></root>", result);
    }

    // Test struct with pretty printing
    {
        const person = Person{
            .name = "John",
            .age = 30,
            .is_active = true,
        };

        const result = try stringifyAlloc(allocator, person, .{ .whitespace = .indent_4 });
        defer allocator.free(result);
        try testing.expectEqualStrings("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<root>\n    <name>John</name>\n    <age>30</age>\n    <is_active>true</is_active>\n</root>", result);
    }

    // Test nested struct
    {
        const Address = struct {
            street: []const u8,
            city: []const u8,
        };

        const PersonWithAddress = struct {
            name: []const u8,
            address: Address,
        };

        const person = PersonWithAddress{
            .name = "John",
            .address = Address{
                .street = "123 Main St",
                .city = "Anytown",
            },
        };

        const result = try stringifyAlloc(allocator, person, .{ .whitespace = .indent_4 });
        defer allocator.free(result);
        try testing.expectEqualStrings("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<root>\n    <name>John</name>\n    <address>\n        <street>123 Main St</street>\n        <city>Anytown</city>\n    </address>\n</root>", result);
    }
}

test "stringify optional values" {
    const testing = std.testing;
    const allocator = testing.allocator;

    const Person = struct {
        name: []const u8,
        middle_name: ?[]const u8,
    };

    // Test with present optional
    {
        const person = Person{
            .name = "John",
            .middle_name = "Robert",
        };

        const result = try stringifyAlloc(allocator, person, .{});
        defer allocator.free(result);
        try testing.expectEqualStrings("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<root><name>John</name><middle_name>Robert</middle_name></root>", result);
    }

    // Test with null optional
    {
        const person = Person{
            .name = "John",
            .middle_name = null,
        };

        const result = try stringifyAlloc(allocator, person, .{});
        defer allocator.free(result);
        try testing.expectEqualStrings("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<root><name>John</name><middle_name></middle_name></root>", result);
    }
}

test "stringify optional values with emit_null_optional_fields == false" {
    const testing = std.testing;
    const allocator = testing.allocator;

    const Person = struct {
        name: []const u8,
        middle_name: ?[]const u8,
    };

    // Test with present optional
    {
        const person = Person{
            .name = "John",
            .middle_name = "Robert",
        };

        const result = try stringifyAlloc(allocator, person, .{ .emit_null_optional_fields = false });
        defer allocator.free(result);
        try testing.expectEqualStrings("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<root><name>John</name><middle_name>Robert</middle_name></root>", result);
    }

    // Test with null optional
    {
        const person = Person{
            .name = "John",
            .middle_name = null,
        };

        const result = try stringifyAlloc(allocator, person, .{ .emit_null_optional_fields = false });
        defer allocator.free(result);
        try testing.expectEqualStrings("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<root><name>John</name></root>", result);
    }
}

test "stringify with custom options" {
    const testing = std.testing;
    const allocator = testing.allocator;

    const Person = struct {
        first_name: []const u8,
        last_name: []const u8,
    };

    const person = Person{
        .first_name = "John",
        .last_name = "Doe",
    };

    // Test without XML declaration
    {
        const result = try stringifyAlloc(allocator, person, .{ .include_declaration = false });
        defer allocator.free(result);
        try testing.expectEqualStrings("<root><first_name>John</first_name><last_name>Doe</last_name></root>", result);
    }

    // Test with custom root name
    {
        const result = try stringifyAlloc(allocator, person, .{ .root_name = "person" });
        defer allocator.free(result);
        try testing.expectEqualStrings("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<person><first_name>John</first_name><last_name>Doe</last_name></person>", result);
    }

    // Test with custom indent level
    {
        const result = try stringifyAlloc(allocator, person, .{ .whitespace = .indent_2 });
        defer allocator.free(result);
        try testing.expectEqualStrings(
            \\<?xml version="1.0" encoding="UTF-8"?>
            \\<root>
            \\  <first_name>John</first_name>
            \\  <last_name>Doe</last_name>
            \\</root>
        , result);
    }

    // Test with output []u8 as array
    {
        // pointer, size 1, child == .array, child.array.child == u8
        // @compileLog(@typeInfo(@typeInfo(@TypeOf("foo")).pointer.child));
        const result = try stringifyAlloc(allocator, "foo", .{ .emit_strings_as_arrays = true, .root_name = "bytes" });
        defer allocator.free(result);
        try testing.expectEqualStrings("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<bytes><byte>102</byte><byte>111</byte><byte>111</byte></bytes>", result);
    }
}

test "structs with custom field names" {
    const testing = std.testing;
    const allocator = testing.allocator;

    const Person = struct {
        first_name: []const u8,
        last_name: []const u8,

        pub fn fieldNameFor(_: @This(), comptime field_name: []const u8) []const u8 {
            if (std.mem.eql(u8, field_name, "first_name")) return "GivenName";
            if (std.mem.eql(u8, field_name, "last_name")) return "FamilyName";
            unreachable;
        }
    };

    const person = Person{
        .first_name = "John",
        .last_name = "Doe",
    };

    {
        const result = try stringifyAlloc(allocator, person, .{ .whitespace = .indent_2 });
        defer allocator.free(result);
        try testing.expectEqualStrings(
            \\<?xml version="1.0" encoding="UTF-8"?>
            \\<root>
            \\  <GivenName>John</GivenName>
            \\  <FamilyName>Doe</FamilyName>
            \\</root>
        , result);
    }
}

test "structs with optional values" {
    const testing = std.testing;
    const allocator = testing.allocator;

    const Person = struct {
        first_name: []const u8,
        middle_name: ?[]const u8 = null,
        last_name: []const u8,
    };

    const person = Person{
        .first_name = "John",
        .last_name = "Doe",
    };

    {
        const result = try stringifyAlloc(
            allocator,
            person,
            .{
                .whitespace = .indent_2,
                .emit_null_optional_fields = false,
                .root_attributes = "xmlns=\"http://example.com/blah/xxxx/\"",
            },
        );
        defer allocator.free(result);
        try testing.expectEqualStrings(
            \\<?xml version="1.0" encoding="UTF-8"?>
            \\<root xmlns="http://example.com/blah/xxxx/">
            \\  <first_name>John</first_name>
            \\  <last_name>Doe</last_name>
            \\</root>
        , result);
    }
}

test "optional structs with value" {
    const testing = std.testing;
    const allocator = testing.allocator;

    const Person = struct {
        first_name: []const u8,
        middle_name: ?[]const u8 = null,
        last_name: []const u8,
    };

    const person: ?Person = Person{
        .first_name = "John",
        .last_name = "Doe",
    };

    {
        const result = try stringifyAlloc(
            allocator,
            person,
            .{
                .whitespace = .indent_2,
                .emit_null_optional_fields = false,
                .root_attributes = "xmlns=\"http://example.com/blah/xxxx/\"",
            },
        );
        defer allocator.free(result);
        try testing.expectEqualStrings(
            \\<?xml version="1.0" encoding="UTF-8"?>
            \\<root xmlns="http://example.com/blah/xxxx/">
            \\  <first_name>John</first_name>
            \\  <last_name>Doe</last_name>
            \\</root>
        , result);
    }
}

test "nested optional structs with value" {
    const testing = std.testing;
    const allocator = testing.allocator;

    const Name = struct {
        first_name: []const u8,
        middle_name: ?[]const u8 = null,
        last_name: []const u8,
    };

    const Person = struct {
        name: ?Name,
    };

    const person: ?Person = Person{
        .name = .{
            .first_name = "John",
            .last_name = "Doe",
        },
    };

    {
        const result = try stringifyAlloc(
            allocator,
            person,
            .{
                .whitespace = .indent_2,
                .emit_null_optional_fields = false,
                .root_attributes = "xmlns=\"http://example.com/blah/xxxx/\"",
            },
        );
        defer allocator.free(result);
        try testing.expectEqualStrings(
            \\<?xml version="1.0" encoding="UTF-8"?>
            \\<root xmlns="http://example.com/blah/xxxx/">
            \\  <name>
            \\    <first_name>John</first_name>
            \\    <last_name>Doe</last_name>
            \\  </name>
            \\</root>
        , result);
    }
}
@@ -96,21 +96,15 @@ pub fn parse(comptime T: type, source: []const u8, options: ParseOptions) !Parse

 fn parseInternal(comptime T: type, element: *xml.Element, options: ParseOptions) !T {
     switch (@typeInfo(T)) {
-        .bool => {
+        .Bool => {
             if (std.ascii.eqlIgnoreCase("true", element.children.items[0].CharData))
                 return true;
             if (std.ascii.eqlIgnoreCase("false", element.children.items[0].CharData))
                 return false;
             return error.UnexpectedToken;
         },
-        .float, .comptime_float => {
+        .Float, .ComptimeFloat => {
             return std.fmt.parseFloat(T, element.children.items[0].CharData) catch |e| {
-                if (element.children.items[0].CharData[element.children.items[0].CharData.len - 1] == 'Z') {
-                    // We have an iso8601 in an integer field (we think)
-                    // Try to coerce this into our type
-                    const timestamp = try date.parseIso8601ToTimestamp(element.children.items[0].CharData);
-                    return @floatFromInt(timestamp);
-                }
                 if (log_parse_traces) {
                     std.log.err(
                         "Could not parse '{s}' as float in element '{s}': {any}",

@@ -127,7 +121,7 @@ fn parseInternal(comptime T: type, element: *xml.Element, options: ParseOptions)
                 return e;
             };
         },
-        .int, .comptime_int => {
+        .Int, .ComptimeInt => {
             // 2021-10-05T16:39:45.000Z
             return std.fmt.parseInt(T, element.children.items[0].CharData, 10) catch |e| {
                 if (element.children.items[0].CharData[element.children.items[0].CharData.len - 1] == 'Z') {

@@ -152,7 +146,7 @@ fn parseInternal(comptime T: type, element: *xml.Element, options: ParseOptions)
                 return e;
             };
         },
-        .optional => |optional_info| {
+        .Optional => |optional_info| {
             if (element.children.items.len == 0) {
                 // This is almost certainly incomplete. Empty strings? xsi:nil?
                 return null;

@@ -162,7 +156,7 @@ fn parseInternal(comptime T: type, element: *xml.Element, options: ParseOptions)
                 return try parseInternal(optional_info.child, element, options);
             }
         },
-        .@"enum" => |enum_info| {
+        .Enum => |enum_info| {
             _ = enum_info;
             // const numeric: ?enum_info.tag_type = std.fmt.parseInt(enum_info.tag_type, element.children.items[0].CharData, 10) catch null;
             // if (numeric) |num| {

@@ -172,7 +166,7 @@ fn parseInternal(comptime T: type, element: *xml.Element, options: ParseOptions)
             // return std.meta.stringToEnum(T, element.CharData);
             // }
         },
-        .@"union" => |union_info| {
+        .Union => |union_info| {
             if (union_info.tag_type) |_| {
                 // try each of the union fields until we find one that matches
                 // inline for (union_info.fields) |u_field| {

@@ -195,7 +189,7 @@ fn parseInternal(comptime T: type, element: *xml.Element, options: ParseOptions)
             }
             @compileError("Unable to parse into untagged union '" ++ @typeName(T) ++ "'");
         },
-        .@"struct" => |struct_info| {
+        .Struct => |struct_info| {
             var r: T = undefined;
             var fields_seen = [_]bool{false} ** struct_info.fields.len;
             var fields_set: u64 = 0;

@@ -250,7 +244,7 @@ fn parseInternal(comptime T: type, element: *xml.Element, options: ParseOptions)
                 fields_set = fields_set + 1;
                 found_value = true;
             }
-            if (@typeInfo(field.type) == .optional) {
+            if (@typeInfo(field.type) == .Optional) {
                 // Test "compiler assertion failure 2"
                 // Zig compiler bug circa 0.9.0. Using "and !found_value"
                 // in the if statement above will trigger assertion failure

@@ -275,7 +269,7 @@ fn parseInternal(comptime T: type, element: *xml.Element, options: ParseOptions)
                 return error.FieldElementMismatch; // see fields_seen for details
             return r;
         },
-        .array => //|array_info| {
+        .Array => //|array_info| {
             return error.ArrayNotImplemented,
             // switch (token) {
             // .ArrayBegin => {

@@ -310,16 +304,16 @@ fn parseInternal(comptime T: type, element: *xml.Element, options: ParseOptions)
             // else => return error.UnexpectedToken,
             // }
             // },
-        .pointer => |ptr_info| {
+        .Pointer => |ptr_info| {
             const allocator = options.allocator orelse return error.AllocatorRequired;
             switch (ptr_info.size) {
-                .one => {
+                .One => {
                     const r: T = try allocator.create(ptr_info.child);
                     errdefer allocator.free(r);
                     r.* = try parseInternal(ptr_info.child, element, options);
                     return r;
                 },
-                .slice => {
+                .Slice => {
                     // TODO: Detect and deal with arrays. This will require two
                     // passes through the element children - one to
                     // determine if it is an array, one to parse the elements

@@ -348,10 +342,10 @@ fn parseInternal(comptime T: type, element: *xml.Element, options: ParseOptions)
                 }
                 return try allocator.dupe(u8, element.children.items[0].CharData);
             },
-            .many => {
+            .Many => {
                 return error.ManyPointerSizeNotImplemented;
             },
-            .c => {
+            .C => {
                 return error.CPointerSizeNotImplemented;
             },
         }