Compare commits

...
Sign in to create a new pull request.

5 commits

Author SHA1 Message Date
ab47cb9deb
better test web server management
Some checks failed
aws-zig mach nominated build / build-zig-nominated-mach-latest (push) Failing after 1m13s
2025-04-17 17:42:26 -07:00
ae8298b18c
update CI based on master
Some checks failed
aws-zig mach nominated build / build-zig-nominated-mach-latest (push) Failing after 3h10m4s
2025-04-16 19:38:49 -07:00
5cb0c3cc88
add test server timeout
Some checks failed
aws-zig mach nominated build / build-zig-nominated-mach-latest (push) Failing after 7s
2025-04-16 19:36:03 -07:00
3e146f143c
sync workflows from master branch to zig-mach branch 2025-03-21 12:48:55 -07:00
838f0ffb96
fix json serialization for null/empty maps 2025-03-21 12:43:07 -07:00
6 changed files with 247 additions and 32 deletions

View file

@ -1,10 +1,8 @@
name: AWS-Zig Build
run-name: ${{ github.actor }} building AWS Zig SDK
on:
push:
branches:
- '*'
- '!zig-develop*'
- 'master'
env:
ACTIONS_RUNTIME_TOKEN: ${{ secrets.GITHUB_TOKEN }}
ACTIONS_RUNTIME_URL: ${{ env.GITHUB_SERVER_URL }}/api/actions_pipeline/
@ -17,11 +15,11 @@ jobs:
# image: alpine:3.15.0
steps:
- name: Check out repository code
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Setup Zig
uses: https://git.lerch.org/lobo/setup-zig@v3
uses: mlugg/setup-zig@v1.2.1
with:
version: 0.13.0
version: 0.14.0
- name: Run tests
run: zig build test --verbose
# Zig build scripts don't have the ability to import dependencies directly

View file

@ -1,11 +1,11 @@
name: aws-zig mach nominated build
run-name: ${{ github.actor }} building AWS Zig SDK
on:
workflow_dispatch:
schedule:
- cron: '0 12 * * *' # noon UTC, 4AM Pacific
push:
branches:
- 'zig-develop*'
- 'zig-mach'
env:
PKG_PREFIX: nominated-zig
jobs:
@ -22,19 +22,24 @@ jobs:
# image: alpine:3.15.0
steps:
- name: Check out repository code
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
ref: zig-develop
ref: zig-mach
- name: Setup Zig
uses: mlugg/setup-zig@v1.2.1
uses: https://github.com/mlugg/setup-zig@v1.2.1
with:
version: mach-latest
- name: Restore Zig caches
uses: https://github.com/Hanaasagi/zig-action-cache@3954aae427f8b05914e08dfd79f15e1f2e435929
- name: Run gen
run: zig build gen --verbose
- name: Run smoke test
run: zig build smoke-test --verbose
- name: Run full tests
run: zig build test --verbose
run: zig build test --verbose --summary all
# TODO: Zig mach currently tracking behind zig 0.14.0 branch - enable this test after update
# - name: Run tests (release mode)
# run: zig build test -Doptimize=ReleaseSafe --verbose
# Zig package manager expects everything to be inside a directory in the archive,
# which it then strips out on download. So we need to shove everything inside a directory
# the way GitHub/Gitea does for repo archives
@ -67,7 +72,7 @@ jobs:
# - run: echo "URL of foo (1) is ${{ steps.sign.outputs.URL_1 }}"
- name: Publish source code with generated models
run: |
curl --user ${{ github.actor }}:${{ secrets.PACKAGE_PUSH }} \
curl -s --user ${{ github.actor }}:${{ secrets.PACKAGE_PUSH }} \
--upload-file ${{ runner.temp }}/${{ github.sha }}${{ env.PKG_PREFIX }}-with-models.tar.gz \
https://git.lerch.org/api/packages/lobo/generic/aws-sdk-with-models/${{ github.sha }}/${{ github.sha }}${{ env.PKG_PREFIX }}-with-models.tar.gz
- name: Build example

View file

@ -1,11 +1,11 @@
name: aws-zig nightly build
run-name: ${{ github.actor }} building AWS Zig SDK
on:
workflow_dispatch:
schedule:
- cron: '30 12 * * *' # 12:30 UTC, 4:30AM Pacific
push:
branches:
- 'zig-develop*'
- 'zig-develop'
env:
PKG_PREFIX: nightly-zig
jobs:
@ -22,11 +22,11 @@ jobs:
# image: alpine:3.15.0
steps:
- name: Check out repository code
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
ref: zig-develop
- name: Setup Zig
uses: https://git.lerch.org/lobo/setup-zig@v3
uses: mlugg/setup-zig@v1.2.1
with:
version: master
- name: Run tests

View file

@ -0,0 +1,84 @@
name: AWS-Zig Build
on:
workflow_dispatch:
push:
branches:
- 'zig-0.13'
env:
ACTIONS_RUNTIME_TOKEN: ${{ secrets.GITHUB_TOKEN }}
ACTIONS_RUNTIME_URL: ${{ env.GITHUB_SERVER_URL }}/api/actions_pipeline/
jobs:
build-zig-amd64-host:
runs-on: ubuntu-latest
# Need to use the default container with node and all that, so we can
# use JS-based actions like actions/checkout@v3...
# container:
# image: alpine:3.15.0
steps:
- name: Check out repository code
uses: actions/checkout@v4
with:
ref: zig-0.13
- name: Setup Zig
uses: mlugg/setup-zig@v1.2.1
with:
version: 0.13.0
- name: Run tests
run: zig build test --verbose
# Zig build scripts don't have the ability to import dependencies directly
# (https://github.com/ziglang/zig/issues/18164). We can allow downstream
# build scripts to import aws with a few tweaks, but we can't @import("src/aws.zig")
# until we have our models built. So we have to have the build script
# basically modified, only during packaging, to allow this use case
#
# Zig package manager expects everything to be inside a directory in the archive,
# which it then strips out on download. So we need to shove everything inside a directory
# the way GitHub/Gitea does for repo archives
#
# Also, zig tar process doesn't handle gnu format for long names, nor does it seem to
# handle posix long name semantics cleanly either. ustar works. This
# should be using git archive, but we need our generated code to be part of it
- name: Package source code with generated models
run: |
sed -i 's#// UNCOMMENT AFTER MODEL GEN TO USE IN BUILD SCRIPTS //##' build.zig
tar -czf ${{ runner.temp }}/${{ github.sha }}-with-models.tar.gz \
--format ustar \
--exclude 'zig-*' \
*
# Something in this PR broke this transform. I don't mind removing it, but
# the PR attempts to handle situations with or without a prefix, but it
# doesn't. I have not yet determined what the problem is, though
# https://github.com/ziglang/zig/pull/19111/files
# --transform 's,^,${{ github.sha }}/,' *
# - name: Sign
# id: sign
# uses: https://git.lerch.org/lobo/action-hsm-sign@v1
# with:
# pin: ${{ secrets.HSM_USER_PIN }}
# files: ???
# public_key: 'https://emil.lerch.org/serverpublic.pem'
# - run: |
# echo "Source 0 should be ./bar: ${{ steps.sign.outputs.SOURCE_0 }}"
# - run: |
# echo "Signature 0 should be ./bar.sig: ${{ steps.sign.outputs.SIG_0 }}"
# - run: echo "URL of bar (0) is ${{ steps.sign.outputs.URL_0 }}"
# - run: |
# echo "Source 1 should be ./foo: ${{ steps.sign.outputs.SOURCE_1 }}"
# - run: |
# echo "Signature 1 should be ./foo.sig: ${{ steps.sign.outputs.SIG_1 }}"
# - run: echo "URL of foo (1) is ${{ steps.sign.outputs.URL_1 }}"
- name: Publish source code with generated models
run: |
curl --user ${{ github.actor }}:${{ secrets.PACKAGE_PUSH }} \
--upload-file ${{ runner.temp }}/${{ github.sha }}-with-models.tar.gz \
https://git.lerch.org/api/packages/lobo/generic/aws-sdk-with-models/${{ github.sha }}/${{ github.sha }}-with-models.tar.gz
- name: Build example
run: ( cd example && zig build ) # Make sure example builds
- name: Notify
uses: https://git.lerch.org/lobo/action-notify-ntfy@v2
if: always()
with:
host: ${{ secrets.NTFY_HOST }}
topic: ${{ secrets.NTFY_TOPIC }}
user: ${{ secrets.NTFY_USER }}
password: ${{ secrets.NTFY_PASSWORD }}

View file

@ -4,7 +4,7 @@ const std = @import("std");
pub fn serializeMap(map: anytype, key: []const u8, options: anytype, out_stream: anytype) !bool {
if (@typeInfo(@TypeOf(map)) == .optional) {
if (map == null)
return true
return false
else
return serializeMapInternal(map.?, key, options, out_stream);
}
@ -12,7 +12,23 @@ pub fn serializeMap(map: anytype, key: []const u8, options: anytype, out_stream:
}
fn serializeMapInternal(map: anytype, key: []const u8, options: anytype, out_stream: anytype) !bool {
if (map.len == 0) return true;
if (map.len == 0) {
var child_options = options;
if (child_options.whitespace) |*child_ws|
child_ws.indent_level += 1;
try out_stream.writeByte('"');
try out_stream.writeAll(key);
_ = try out_stream.write("\":");
if (options.whitespace) |ws| {
if (ws.separator) {
try out_stream.writeByte(' ');
}
}
try out_stream.writeByte('{');
try out_stream.writeByte('}');
return true;
}
// TODO: Map might be [][]struct{key, value} rather than []struct{key, value}
var child_options = options;
if (child_options.whitespace) |*child_ws|

View file

@ -1306,6 +1306,58 @@ test "custom serialization for map objects" {
, buffer.items);
}
test "proper serialization for kms" {
// Github issue #8
// https://github.com/elerch/aws-sdk-for-zig/issues/8
const allocator = std.testing.allocator;
var buffer = std.ArrayList(u8).init(allocator);
defer buffer.deinit();
const req = services.kms.encrypt.Request{
.encryption_algorithm = "SYMMETRIC_DEFAULT",
// Since encryption_context is not null, we expect "{}" to be the value
// here, not "[]", because this is our special AWS map pattern
.encryption_context = &.{},
.key_id = "42",
.plaintext = "foo",
.dry_run = false,
.grant_tokens = &[_][]const u8{},
};
try json.stringify(req, .{ .whitespace = .{} }, buffer.writer());
try std.testing.expectEqualStrings(
\\{
\\ "KeyId": "42",
\\ "Plaintext": "foo",
\\ "EncryptionContext": {},
\\ "GrantTokens": [],
\\ "EncryptionAlgorithm": "SYMMETRIC_DEFAULT",
\\ "DryRun": false
\\}
, buffer.items);
var buffer_null = std.ArrayList(u8).init(allocator);
defer buffer_null.deinit();
const req_null = services.kms.encrypt.Request{
.encryption_algorithm = "SYMMETRIC_DEFAULT",
// Since encryption_context here *IS* null, we expect simply "null" to be the value
.encryption_context = null,
.key_id = "42",
.plaintext = "foo",
.dry_run = false,
.grant_tokens = &[_][]const u8{},
};
try json.stringify(req_null, .{ .whitespace = .{} }, buffer_null.writer());
try std.testing.expectEqualStrings(
\\{
\\ "KeyId": "42",
\\ "Plaintext": "foo",
\\ "EncryptionContext": null,
\\ "GrantTokens": [],
\\ "EncryptionAlgorithm": "SYMMETRIC_DEFAULT",
\\ "DryRun": false
\\}
, buffer_null.items);
}
test "REST Json v1 builds proper queries" {
const allocator = std.testing.allocator;
const svs = Services(.{.lambda}){};
@ -1481,7 +1533,7 @@ const TestOptions = struct {
request_target: []const u8 = undefined,
request_headers: []std.http.Header = undefined,
test_server_runtime_uri: ?[]u8 = null,
server_ready: bool = false,
server_ready: std.Thread.Semaphore = .{},
requests_processed: usize = 0,
const Self = @This();
@ -1536,10 +1588,18 @@ const TestOptions = struct {
return error.HeaderOrValueNotFound;
}
fn waitForReady(self: *Self) !void {
// While this doesn't return an error, we can use !void
// to prepare for addition of timeout
while (!self.server_ready)
std.time.sleep(100);
// Set 10s timeout...this is way longer than necessary
log.debug("waiting for ready", .{});
try self.server_ready.timedWait(1000 * std.time.ns_per_ms);
// var deadline = std.Thread.Futex.Deadline.init(1000 * std.time.ns_per_ms);
// if (self.futex_word.load(.acquire) != 0) return;
// log.debug("futex zero", .{});
// // note that this seems backwards from the documentation...
// deadline.wait(self.futex_word, 1) catch {
// log.err("futex value {d}", .{self.futex_word.load(.acquire)});
// return error.TestServerTimeoutWaitingForReady;
// };
log.debug("the wait is over!", .{});
}
};
@ -1567,8 +1627,9 @@ fn threadMain(options: *TestOptions) !void {
// var aa = arena.allocator();
// We're in control of all requests/responses, so this flag will tell us
// when it's time to shut down
while (options.server_remaining_requests > 0) {
options.server_remaining_requests -= 1;
if (options.server_remaining_requests == 0)
options.server_ready.post(); // This will cause the wait for server to return
while (options.server_remaining_requests > 0) : (options.server_remaining_requests -= 1) {
processRequest(options, &http_server) catch |e| {
log.err("Unexpected error processing request: {any}", .{e});
if (@errorReturnTrace()) |trace| {
@ -1579,12 +1640,13 @@ fn threadMain(options: *TestOptions) !void {
}
fn processRequest(options: *TestOptions, net_server: *std.net.Server) !void {
options.server_ready = true;
errdefer options.server_ready = false;
log.debug(
"tid {d} (server): server waiting to accept. requests remaining: {d}",
.{ std.Thread.getCurrentId(), options.server_remaining_requests + 1 },
.{ std.Thread.getCurrentId(), options.server_remaining_requests },
);
// options.futex_word.store(1, .release);
// errdefer options.futex_word.store(0, .release);
options.server_ready.post();
var connection = try net_server.accept();
defer connection.stream.close();
var read_buffer: [1024 * 16]u8 = undefined;
@ -1603,8 +1665,6 @@ fn processRequest(options: *TestOptions, net_server: *std.net.Server) !void {
}
fn serveRequest(options: *TestOptions, request: *std.http.Server.Request) !void {
options.server_ready = false;
options.requests_processed += 1;
options.request_body = try (try request.reader()).readAllAlloc(options.allocator, std.math.maxInt(usize));
options.request_method = request.head.method;
@ -1674,7 +1734,8 @@ const TestSetup = struct {
// Not sure why we're getting sprayed here, but we have an arena allocator, and this
// is testing, so yolo
awshttp.endpoint_override = self.request_options.test_server_runtime_uri;
log.debug("endpoint override set to {?s}", .{awshttp.endpoint_override});
if (awshttp.endpoint_override == null) return error.TestSetupStartFailure;
std.log.debug("endpoint override set to {?s}", .{awshttp.endpoint_override});
self.creds = aws_auth.Credentials.init(
self.allocator,
try self.allocator.dupe(u8, "ACCESS"),
@ -1692,6 +1753,27 @@ const TestSetup = struct {
}
fn stop(self: *Self) void {
if (self.request_options.server_remaining_requests > 0)
if (test_error_log_enabled)
std.log.err(
"Test server has {d} request(s) remaining to issue! Draining",
.{self.request_options.server_remaining_requests},
)
else
std.log.info(
"Test server has {d} request(s) remaining to issue! Draining",
.{self.request_options.server_remaining_requests},
);
var rr = self.request_options.server_remaining_requests;
while (rr > 0) : (rr -= 1) {
std.log.debug("rr: {d}", .{self.request_options.server_remaining_requests});
// We need to drain all remaining requests, otherwise the server
// will hang indefinitely
var client = std.http.Client{ .allocator = self.allocator };
defer client.deinit();
_ = client.fetch(.{ .location = .{ .url = self.request_options.test_server_runtime_uri.? } }) catch unreachable;
}
self.server_thread.join();
}
@ -2337,3 +2419,33 @@ test "json_1_1: ECR timestamps" {
// try std.testing.expectEqual(@as(i64, 1.73859841557E9), call.response.authorization_data.?[0].expires_at.?);
try std.testing.expectEqual(@as(f128, 1.7385984915E9), call.response.authorization_data.?[0].expires_at.?);
}
var test_error_log_enabled = true;
test "test server timeout works" {
// const old = std.testing.log_level;
// defer std.testing.log_level = old;
// std.testing.log_level = .debug;
// defer std.testing.log_level = old;
// std.testing.log_level = .debug;
test_error_log_enabled = false;
defer test_error_log_enabled = true;
std.log.debug("test start", .{});
const allocator = std.testing.allocator;
var test_harness = TestSetup.init(.{
.allocator = allocator,
.server_response =
\\{}
,
.server_response_headers = &.{
.{ .name = "Content-Type", .value = "application/json" },
.{ .name = "x-amzn-RequestId", .value = "QBI72OUIN8U9M9AG6PCSADJL4JVV4KQNSO5AEMVJF66Q9ASUAAJG" },
},
});
defer test_harness.deinit();
defer test_harness.creds.deinit(); // Usually this gets done during the call,
// but we're purposely not making a call
// here, so we have to deinit() manually
_ = try test_harness.start();
std.log.debug("harness started", .{});
test_harness.stop();
std.log.debug("test complete", .{});
}