dig out aws crt. project no longer builds
Some checks failed
continuous-integration/drone/push Build is failing

This commit is contained in:
Emil Lerch 2022-01-10 21:49:59 -08:00
parent 1e8ed763ce
commit eb449eabb0
Signed by: lobo
GPG Key ID: A7B62D657EF764F8
9 changed files with 356 additions and 971 deletions

1
.gitignore vendored
View File

@ -7,3 +7,4 @@ service_manifest.zig
demo
src/models/
smithy/zig-out/
libs/

36
CopyStep.zig Normal file
View File

@ -0,0 +1,36 @@
//! CopyStep: a custom build step that copies a single file from one
//! build-root-relative location to another at build time.
const std = @import("std");
const CopyStep = @This();

step: std.build.Step,
builder: *std.build.Builder,
// Absolute paths, resolved against the build root in create().
// NOTE: these were previously declared with `= null` defaults, which is a
// compile error for a non-optional slice; they are always set in create().
from_path: []const u8,
to_path: []const u8,

/// Allocates and initializes a CopyStep. Both paths are interpreted
/// relative to the build root and resolved to absolute paths up front.
/// Panics (rather than erroring) on allocation failure, matching the
/// convention of other build steps in this project.
pub fn create(
    b: *std.build.Builder,
    from_path_relative: []const u8,
    to_path_relative: []const u8,
) *CopyStep {
    var result = b.allocator.create(CopyStep) catch @panic("memory");
    result.* = CopyStep{
        .step = std.build.Step.init(.custom, "copy a file", b.allocator, make),
        .builder = b,
        .from_path = std.fs.path.resolve(b.allocator, &[_][]const u8{
            b.build_root,
            from_path_relative,
        }) catch @panic("memory"),
        .to_path = std.fs.path.resolve(b.allocator, &[_][]const u8{
            b.build_root,
            to_path_relative,
        }) catch @panic("memory"),
    };
    return result;
}

/// Step callback: performs the copy, logging and exiting the build process
/// on failure. `{s}` requires a string, so the error is rendered via
/// @errorName (the error value itself cannot be formatted with `{s}`).
fn make(step: *std.build.Step) !void {
    const self = @fieldParentPtr(CopyStep, "step", step);
    std.fs.copyFileAbsolute(self.from_path, self.to_path, .{}) catch |e| {
        std.log.err("Error copying {s} to {s}: {s}", .{ self.from_path, self.to_path, @errorName(e) });
        std.os.exit(1);
    };
}

View File

@ -1,110 +0,0 @@
# We are looking for a static build, so we need to be on a musl system
# Zig uses clang, so for best compatibility, everything should be built
# using that compiler
# Establish a base container with build tools common to most projects
FROM alpine:3.13 AS base
# gcc gets us libgcc.a, even though the build should be using clang
RUN apk add --no-cache clang git cmake make lld musl-dev gcc && \
rm /usr/bin/ld && \
ln -s /usr/bin/ld.lld /usr/bin/ld && rm /usr/bin/gcc # just to be sure
# Stage: build aws-c-common and archive the /usr/local install tree so
# downstream stages can unpack it rather than rebuild it
FROM base AS common
RUN git clone --depth 1 -b v0.5.2 https://github.com/awslabs/aws-c-common && \
mkdir aws-c-common-build && cd aws-c-common-build && \
cmake ../aws-c-common && \
make -j12 && make test && make install
RUN tar -czf aws-c-common-clang.tgz /usr/local/*
# The only tags currently on the repo are from 9/2020 and don't install
# anything, so we'll use current head of main branch (d60b60e)
FROM base AS awslc
RUN apk add --no-cache perl go g++ linux-headers && rm /usr/bin/g++ && rm /usr/bin/c++ && \
git clone --depth 1000 https://github.com/awslabs/aws-lc && cd aws-lc && \
git reset d60b60e --hard && cd .. && \
cmake -S aws-lc -B aws-lc/build -DCMAKE_BUILD_TYPE=RelWithDebInfo -DCMAKE_PREFIX_PATH=/usr/local -DCMAKE_INSTALL_PREFIX=/usr/local && \
cmake --build aws-lc/build --config RelWithDebInfo --target install
RUN tar -czf aws-lc-clang.tgz /usr/local/*
# Stage: build s2n-tls, using aws-lc as the libcrypto implementation
FROM base AS s2n
ENV S2N_LIBCRYPTO=awslc
COPY --from=awslc /aws-lc-clang.tgz /
RUN git clone --depth 1 -b v1.0.5 https://github.com/aws/s2n-tls && \
tar -xzf aws-lc-clang.tgz && \
mkdir s2n-build && cd s2n-build && \
cmake ../s2n-tls && \
make -j12 && make install
RUN tar -czf s2n-clang.tgz /usr/local/*
# Stage: build aws-c-cal (crypto abstraction layer) on top of common + aws-lc
FROM base AS cal
COPY --from=awslc /aws-lc-clang.tgz /
COPY --from=common /aws-c-common-clang.tgz /
# RUN git clone --depth 1 -b v0.5.5 https://github.com/awslabs/aws-c-cal && \
RUN git clone --depth 1 -b static-musl-builds https://github.com/elerch/aws-c-cal && \
tar -xzf aws-c-common-clang.tgz && \
tar -xzf aws-lc-clang.tgz && \
mkdir cal-build && cd cal-build && \
cmake -DCMAKE_MODULE_PATH=/usr/local/lib64/cmake ../aws-c-cal && \
make -j12 && make install
# No make test:
# 40 - ecdsa_p384_test_key_gen_export (Failed)
RUN tar -czf aws-c-cal-clang.tgz /usr/local/*
# Stage: build aws-c-compression (needs only aws-c-common)
FROM base AS compression
COPY --from=common /aws-c-common-clang.tgz /
RUN git clone --depth 1 -b v0.2.10 https://github.com/awslabs/aws-c-compression && \
tar -xzf aws-c-common-clang.tgz && \
mkdir compression-build && cd compression-build && \
cmake -DCMAKE_MODULE_PATH=/usr/local/lib64/cmake ../aws-c-compression && \
make -j12 && make test && make install
RUN tar -czf aws-c-compression-clang.tgz /usr/local/*
# Stage: build aws-c-io (event loops, sockets, TLS channel handler)
FROM base AS io
# Cal includes common and openssl
COPY --from=cal /aws-c-cal-clang.tgz /
COPY --from=s2n /s2n-clang.tgz /
RUN git clone --depth 1 -b v0.9.1 https://github.com/awslabs/aws-c-io && \
tar -xzf s2n-clang.tgz && \
tar -xzf aws-c-cal-clang.tgz && \
mkdir io-build && cd io-build && \
cmake -DCMAKE_MODULE_PATH=/usr/local/lib64/cmake ../aws-c-io && \
make -j12 && make install
RUN tar -czf aws-c-io-clang.tgz /usr/local/*
# Stage: build aws-c-http on top of io + compression
FROM base AS http
# Cal includes common and openssl
# 2 test failures on musl - both "download medium file"
COPY --from=io /aws-c-io-clang.tgz /
COPY --from=compression /aws-c-compression-clang.tgz /
# RUN git clone --depth 1 -b v0.5.19 https://github.com/awslabs/aws-c-http && \
RUN git clone --depth 1 -b v0.6.1 https://github.com/awslabs/aws-c-http && \
tar -xzf aws-c-io-clang.tgz && \
tar -xzf aws-c-compression-clang.tgz && \
mkdir http-build && cd http-build && \
cmake -DCMAKE_MODULE_PATH=/usr/local/lib64/cmake ../aws-c-http && \
make -j12 && make install
RUN tar -czf aws-c-http-clang.tgz /usr/local/*
# Stage: build aws-c-auth (sigv4 signing / credentials), the top of the stack
FROM base AS auth
# http should have all other dependencies
COPY --from=http /aws-c-http-clang.tgz /
RUN git clone --depth 1 -b v0.5.0 https://github.com/awslabs/aws-c-auth && \
tar -xzf aws-c-http-clang.tgz && \
mkdir auth-build && cd auth-build && \
cmake -DCMAKE_MODULE_PATH=/usr/local/lib64/cmake ../aws-c-auth && \
make -j12 && make install # chunked_signing_test fails
RUN tar -czf aws-c-auth-clang.tgz /usr/local/*
# Final image: unpack the accumulated /usr/local tree (auth archive contains
# the whole dependency chain) and install the zig toolchain
FROM alpine:3.13 as final
COPY --from=auth /aws-c-auth-clang.tgz /
ADD https://ziglang.org/download/0.9.0/zig-linux-x86_64-0.9.0.tar.xz /
RUN tar -xzf /aws-c-auth-clang.tgz && mkdir /src && tar -C /usr/local -xf zig-linux* && \
ln -s /usr/local/zig-linux*/zig /usr/local/bin/zig

206
GitRepoStep.zig Normal file
View File

@ -0,0 +1,206 @@
//! Publish Date: 2021_10_17
//! This file is hosted at github.com/marler8997/zig-build-repos and is meant to be copied
//! to projects that use it.
const std = @import("std");
const GitRepoStep = @This();
/// Policy controlling how a sha-verification failure is handled.
pub const ShaCheck = enum {
    none,
    warn,
    err,

    /// Report a verification failure according to the policy: .warn logs a
    /// warning and continues, .err logs an error and aborts the process.
    /// Must never be called when the policy is .none.
    pub fn reportFail(self: ShaCheck, comptime fmt: []const u8, args: anytype) void {
        if (self == .none) unreachable;
        if (self == .warn) {
            std.log.warn(fmt, args);
            return;
        }
        std.log.err(fmt, args);
        std.os.exit(0xff);
    }
};
// Step state. `path` is the absolute checkout directory, always assigned in
// create(). NOTE: `path` previously defaulted to `null`, which is a compile
// error for a non-optional slice; the default is removed since create()
// unconditionally sets it.
step: std.build.Step,
builder: *std.build.Builder,
url: []const u8,
name: []const u8,
branch: ?[]const u8 = null,
sha: []const u8,
path: []const u8,
sha_check: ShaCheck = .warn,
fetch_enabled: bool,

// Process-wide cache for the -Dfetch option so it is registered and queried
// only once, even when multiple GitRepoSteps are created.
var cached_default_fetch_option: ?bool = null;

/// Returns the value of the -Dfetch build option (default false), caching
/// the result on first use.
pub fn defaultFetchOption(b: *std.build.Builder) bool {
    if (cached_default_fetch_option == null) {
        cached_default_fetch_option = b.option(bool, "fetch", "automatically fetch network resources") orelse false;
    }
    return cached_default_fetch_option.?;
}
/// Allocates and initializes a GitRepoStep. The repository name is derived
/// from the last path component of `url`; unless an explicit `path` is
/// given, the checkout lands in <build_root>/libs/<name>. Panics on
/// allocation failure, like other steps in this project.
pub fn create(b: *std.build.Builder, opt: struct {
    url: []const u8,
    branch: ?[]const u8 = null,
    sha: []const u8,
    path: ?[]const u8 = null,
    sha_check: ShaCheck = .warn,
    fetch_enabled: ?bool = null,
}) *GitRepoStep {
    const repo_name = std.fs.path.basename(opt.url);
    // Resolve the checkout directory up front: either a duplicate of the
    // caller-supplied path or build_root/libs/<name>.
    const checkout_path = if (opt.path) |p|
        b.allocator.dupe(u8, p) catch @panic("memory")
    else
        std.fs.path.resolve(b.allocator, &[_][]const u8{
            b.build_root,
            "libs",
            repo_name,
        }) catch @panic("memory");
    var self = b.allocator.create(GitRepoStep) catch @panic("memory");
    self.* = GitRepoStep{
        .step = std.build.Step.init(.custom, "clone a git repository", b.allocator, make),
        .builder = b,
        .url = opt.url,
        .name = repo_name,
        .branch = opt.branch,
        .sha = opt.sha,
        .path = checkout_path,
        .sha_check = opt.sha_check,
        .fetch_enabled = opt.fetch_enabled orelse defaultFetchOption(b),
    };
    return self;
}
// TODO: this should be included in std.build, it helps find bugs in build files
// TODO: this should be included in std.build, it helps find bugs in build files
/// Returns true when `dep_candidate` appears anywhere in `step`'s
/// (transitive) dependency graph.
fn hasDependency(step: *const std.build.Step, dep_candidate: *const std.build.Step) bool {
    // TODO: should probably use step.loop_flag to prevent infinite recursion
    // when a circular reference is encountered, or maybe keep track of
    // the steps encountered with a hash set
    for (step.dependencies.items) |dep| {
        if (dep == dep_candidate) return true;
        if (hasDependency(dep, dep_candidate)) return true;
    }
    return false;
}
/// Step callback: if the checkout directory is missing, clone the repository
/// (only when fetching is enabled) and check out the pinned sha onto a local
/// branch named "fordep"; then verify the working tree matches that sha.
fn make(step: *std.build.Step) !void {
    const self = @fieldParentPtr(GitRepoStep, "step", step);
    std.fs.accessAbsolute(self.path, std.fs.File.OpenFlags{ .read = true }) catch {
        const branch_args = if (self.branch) |b| &[2][]const u8{ " -b ", b } else &[2][]const u8{ "", "" };
        if (!self.fetch_enabled) {
            std.debug.print("Error: git repository '{s}' does not exist\n", .{self.path});
            std.debug.print(" Use -Dfetch to download it automatically, or run the following to clone it:\n", .{});
            // The suggested branch name must match the "fordep" branch used by
            // the checkout below (message previously said "for_ziget").
            std.debug.print(" git clone {s}{s}{s} {s} && git -C {3s} checkout {s} -b fordep\n", .{ self.url, branch_args[0], branch_args[1], self.path, self.sha });
            std.os.exit(1);
        }
        {
            var args = std.ArrayList([]const u8).init(self.builder.allocator);
            defer args.deinit();
            try args.append("git");
            try args.append("clone");
            try args.append("--recurse-submodules");
            try args.append(self.url);
            // TODO: clone it to a temporary location in case of failure
            // also, remove that temporary location before running
            try args.append(self.path);
            if (self.branch) |branch| {
                try args.append("-b");
                try args.append(branch);
            }
            try run(self.builder, args.items);
        }
        // Pin the requested sha on a local branch so later sha checks pass.
        try run(self.builder, &[_][]const u8{
            "git",
            "-C",
            self.path,
            "checkout",
            self.sha,
            "-b",
            "fordep",
        });
    };
    try self.checkSha();
}
/// Verifies that HEAD of the checked-out repository matches the pinned sha,
/// reporting any git failure or mismatch according to the sha_check policy.
fn checkSha(self: GitRepoStep) !void {
    if (self.sha_check == .none)
        return;
    // Either the git error or the rev-parse output, so the failure path and
    // the mismatch path can be handled in one switch below.
    const result: union(enum) { failed: anyerror, output: []const u8 } = blk: {
        const result = std.ChildProcess.exec(.{
            .allocator = self.builder.allocator,
            .argv = &[_][]const u8{
                "git",
                "-C",
                self.path,
                "rev-parse",
                "HEAD",
            },
            .cwd = self.builder.build_root,
            .env_map = self.builder.env_map,
        }) catch |e| break :blk .{ .failed = e };
        try std.io.getStdErr().writer().writeAll(result.stderr);
        switch (result.term) {
            .Exited => |code| {
                if (code == 0) break :blk .{ .output = result.stdout };
                break :blk .{ .failed = error.GitProcessNonZeroExit };
            },
            .Signal => break :blk .{ .failed = error.GitProcessFailedWithSignal },
            .Stopped => break :blk .{ .failed = error.GitProcessWasStopped },
            .Unknown => break :blk .{ .failed = error.GitProcessFailed },
        }
    };
    switch (result) {
        .failed => |err| {
            // typo fix: "retreive" -> "retrieve"
            return self.sha_check.reportFail("failed to retrieve sha for repository '{s}': {s}", .{ self.name, @errorName(err) });
        },
        .output => |output| {
            if (!std.mem.eql(u8, std.mem.trimRight(u8, output, "\n\r"), self.sha)) {
                return self.sha_check.reportFail("repository '{s}' sha does not match\nexpected: {s}\nactual : {s}\n", .{ self.name, self.sha, output });
            }
        },
    }
}
/// Logs and executes `argv` as a child process (stdout/stderr inherited),
/// exiting the build with a nonzero status if the command fails.
fn run(builder: *std.build.Builder, argv: []const []const u8) !void {
    {
        // Echo the command being run, quoting each argument.
        var msg = std.ArrayList(u8).init(builder.allocator);
        defer msg.deinit();
        const writer = msg.writer();
        var prefix: []const u8 = "";
        for (argv) |arg| {
            try writer.print("{s}\"{s}\"", .{ prefix, arg });
            prefix = " ";
        }
        std.log.info("[RUN] {s}", .{msg.items});
    }
    const child = try std.ChildProcess.init(argv, builder.allocator);
    defer child.deinit();
    child.stdin_behavior = .Ignore;
    child.stdout_behavior = .Inherit;
    child.stderr_behavior = .Inherit;
    child.cwd = builder.build_root;
    child.env_map = builder.env_map;
    try child.spawn();
    const result = try child.wait();
    switch (result) {
        .Exited => |code| if (code != 0) {
            // This helper runs several git subcommands (clone, checkout, ...),
            // so report generically instead of claiming "git clone" failed.
            std.log.err("git command failed with exit code {}", .{code});
            std.os.exit(0xff);
        },
        else => {
            std.log.err("git command failed with: {}", .{result});
            std.os.exit(0xff);
        },
    }
}
/// Gets the repository checkout path, first verifying that the requesting
/// step has declared a dependency on this GitRepoStep (otherwise the path
/// might be used before the clone has happened).
pub fn getPath(self: *const GitRepoStep, who_wants_to_know: *const std.build.Step) []const u8 {
    const is_dependent = hasDependency(who_wants_to_know, &self.step);
    if (!is_dependent)
        @panic("a step called GitRepoStep.getPath but has not added it as a dependency");
    return self.path;
}

100
build.zig
View File

@ -1,8 +1,16 @@
const std = @import("std");
const builtin = @import("builtin");
const Builder = @import("std").build.Builder;
const GitRepoStep = @import("GitRepoStep.zig");
const CopyStep = @import("CopyStep.zig");
pub fn build(b: *Builder) !void {
const zfetch_repo = GitRepoStep.create(b, .{
.url = "https://github.com/truemedian/zfetch",
// .branch = "0.1.10", // branch also takes tags. Tag 0.1.10 isn't quite new enough
.sha = "271cab5da4d12c8f08e67aa0cd5268da100e52f1",
});
// Standard target options allows the person running `zig build` to choose
// what target to build for. Here we do not override the defaults, which
// means any target is allowed, and the default is native. Other options
@ -17,46 +25,30 @@ pub fn build(b: *Builder) !void {
// https://github.com/ziglang/zig/issues/855
exe.addPackagePath("smithy", "smithy/src/smithy.zig");
// This bitfield workaround will end up requiring a bunch of headers that
// currently mean building in the docker container is the best way to build
// TODO: Determine if it's a good idea to copy these files out of our
// docker container to the local fs so we can just build even outside
// the container. And maybe, just maybe these even get committed to
// source control?
exe.addCSourceFile("src/bitfield-workaround.c", &[_][]const u8{"-std=c99"});
const c_include_dirs = .{
"./src/",
"/usr/local/include",
};
inline for (c_include_dirs) |dir|
exe.addIncludeDir(dir);
const dependent_objects = .{
"/usr/local/lib64/libs2n.a",
"/usr/local/lib64/libcrypto.a",
"/usr/local/lib64/libssl.a",
"/usr/local/lib64/libaws-c-auth.a",
"/usr/local/lib64/libaws-c-cal.a",
"/usr/local/lib64/libaws-c-common.a",
"/usr/local/lib64/libaws-c-compression.a",
"/usr/local/lib64/libaws-c-http.a",
"/usr/local/lib64/libaws-c-io.a",
};
inline for (dependent_objects) |obj|
exe.addObjectFile(obj);
exe.linkSystemLibrary("c");
exe.setTarget(target);
exe.setBuildMode(mode);
exe.override_dest_dir = .{ .custom = ".." };
exe.linkage = .static;
// TODO: Strip doesn't actually fully strip the executable. If we're on
// linux we can run strip on the result, probably at the expense
// of busting cache logic
const is_strip = b.option(bool, "strip", "strip exe [true]") orelse true;
exe.strip = is_strip;
exe.strip = b.option(bool, "strip", "strip exe [true]") orelse true;
const copy_deps = CopyStep.create(
b,
"zfetch_deps.zig",
"libs/zfetch/deps.zig",
);
copy_deps.step.dependOn(&zfetch_repo.step);
exe.step.dependOn(&copy_deps.step);
// This import won't work unless we're already cloned. The way around
// this is to have a multi-stage build process, but that's a lot of work.
// Instead, I've copied the addPackage and tweaked it for the build prefix
// so we'll have to keep that in sync with upstream
// const zfetch = @import("libs/zfetch/build.zig");
exe.addPackage(getZfetchPackage(b, "libs/zfetch") catch unreachable);
const run_cmd = exe.run();
run_cmd.step.dependOn(b.getInstallStep());
@ -84,10 +76,14 @@ pub fn build(b: *Builder) !void {
}
}
// TODO: Support > linux
if (builtin.os.tag == .linux) {
if (target.getOs().tag == .linux) {
// TODO: Support > linux with RunStep
// std.build.RunStep.create(null,null).cwd(std.fs.path.resolve(b.build_root, "codegen")).addArgs(...)
const codegen = b.step("gen", "Generate zig service code from smithy models");
codegen.dependOn(&b.addSystemCommand(&.{ "/bin/sh", "-c", "cd codegen && zig build" }).step);
// This can probably be triggered instead by GitRepoStep cloning the repo
// with models
// Since codegen binary is built every time, if it's newer than our
// service manifest we know it needs to be regenerated. So this step
// will remove the service manifest if codegen has been touched, thereby
@ -110,3 +106,39 @@ pub fn build(b: *Builder) !void {
exe.install();
}
/// Builds a Pkg for one of zfetch's vendored dependencies, rooted at
/// <lib_prefix>/libs/<name>/<root>. We deliberately do not verify that the
/// dependency has been checked out: GitRepoStep handles fetching, and the
/// compiler reports a clear error if the path is missing. (The previous
/// commented-out access() check and its contradictory comments are removed.)
/// The error-union return is kept for interface compatibility with callers
/// using `try`.
fn getDependency(comptime lib_prefix: []const u8, comptime name: []const u8, comptime root: []const u8) !std.build.Pkg {
    const path = lib_prefix ++ "/libs/" ++ name ++ "/" ++ root;
    return std.build.Pkg{
        .name = name,
        .path = .{ .path = path },
    };
}
/// Assembles the zfetch Pkg, wiring up its four vendored dependencies.
/// The dependency slice is allocated from the builder's allocator and is
/// never freed (it lives for the duration of the build).
pub fn getZfetchPackage(b: *std.build.Builder, comptime lib_prefix: []const u8) !std.build.Pkg {
    const deps = b.allocator.alloc(std.build.Pkg, 4) catch unreachable;
    deps[0] = try getDependency(lib_prefix, "iguanaTLS", "src/main.zig");
    deps[1] = try getDependency(lib_prefix, "network", "network.zig");
    deps[2] = try getDependency(lib_prefix, "uri", "uri.zig");
    deps[3] = try getDependency(lib_prefix, "hzzp", "src/main.zig");
    return std.build.Pkg{
        .name = "zfetch",
        .path = .{ .path = lib_prefix ++ "/src/main.zig" },
        .dependencies = deps,
    };
}

View File

@ -8,25 +8,6 @@
//! const result = client.callApi (or client.makeRequest)
//! defer result.deinit();
const std = @import("std");
const c = @cImport({
@cInclude("bitfield-workaround.h");
@cInclude("aws/common/allocator.h");
@cInclude("aws/common/error.h");
@cInclude("aws/common/string.h");
@cInclude("aws/auth/auth.h");
@cInclude("aws/auth/credentials.h");
@cInclude("aws/auth/signable.h");
@cInclude("aws/auth/signing_config.h");
@cInclude("aws/auth/signing_result.h");
@cInclude("aws/auth/signing.h");
@cInclude("aws/http/connection.h");
@cInclude("aws/http/request_response.h");
@cInclude("aws/io/channel_bootstrap.h");
@cInclude("aws/io/tls_channel_handler.h");
@cInclude("aws/io/event_loop.h");
@cInclude("aws/io/socket.h");
@cInclude("aws/io/stream.h");
});
const CN_NORTH_1_HASH = std.hash_map.hashString("cn-north-1");
const CN_NORTHWEST_1_HASH = std.hash_map.hashString("cn-northwest-1");
@ -35,18 +16,6 @@ const US_ISOB_EAST_1_HASH = std.hash_map.hashString("us-isob-east-1");
const httplog = std.log.scoped(.awshttp);
// Variables that can be re-used globally
var reference_count: u32 = 0;
var c_allocator: ?*c.aws_allocator = null;
var c_logger: c.aws_logger = .{
.vtable = null,
.allocator = null,
.p_impl = null,
};
// tls stuff initialized on demand, then destroyed in cDeinit
var tls_ctx_options: ?*c.aws_tls_ctx_options = null;
var tls_ctx: ?*c.aws_tls_ctx = null;
pub const AwsError = error{
AddHeaderError,
AlpnError,
@ -117,133 +86,21 @@ const EndPoint = struct {
}
};
/// One-time initialization of the AWS C runtime: grabs the default C
/// allocator, configures aws-c-common logging, then initializes the http and
/// auth libraries (auth may use http, so http goes first). Called from
/// AwsHttp.init when the global reference count is zero. The Zig allocator
/// parameter is currently ignored; the AWS default allocator is used.
fn cInit(_: std.mem.Allocator) void {
// TODO: what happens if we actually get an allocator?
httplog.debug("auth init", .{});
c_allocator = c.aws_default_allocator();
// TODO: Grab logging level from environment
// See levels here:
// https://github.com/awslabs/aws-c-common/blob/ce964ca459759e685547e8aa95cada50fd078eeb/include/aws/common/logging.h#L13-L19
// We set this to FATAL mostly because we're handling errors for the most
// part here in zig-land. We would therefore set up for something like
// AWS_LL_WARN, but the auth library is bubbling up an AWS_LL_ERROR
// level message about not being able to open an aws config file. This
// could be an error, but we don't need to panic people if configuration
// is done via environment variables
var logger_options = c.aws_logger_standard_options{
// .level = .AWS_LL_WARN,
// .level = .AWS_LL_INFO,
// .level = .AWS_LL_DEBUG,
// .level = .AWS_LL_TRACE,
.level = 1, //.AWS_LL_FATAL, // https://github.com/awslabs/aws-c-common/blob/057746b2e094f4b7a31743d8ba5a9fd0155f69f3/include/aws/common/logging.h#L33
.file = c.get_std_err(),
.filename = null,
};
// Logging failures are fatal: without a logger the rest of the C stack
// cannot report problems usefully.
const rc = c.aws_logger_init_standard(&c_logger, c_allocator, &logger_options);
if (rc != c.AWS_OP_SUCCESS) {
std.debug.panic("Could not configure logging: {s}", .{c.aws_error_debug_str(c.aws_last_error())});
}
c.aws_logger_set(&c_logger);
// auth could use http library, so we'll init http, then auth
// TODO: determine deallocation of ca_path
c.aws_http_library_init(c_allocator);
c.aws_auth_library_init(c_allocator);
}
/// Tears down the global C state created by cInit and makeRequest: releases
/// the on-demand TLS context (if one was created), then cleans up the http
/// and auth libraries. Called from AwsHttp.deinit when the reference count
/// drops to zero. tls_ctx_options is intentionally NOT cleaned up — see the
/// comment below.
fn cDeinit() void { // probably the wrong name
if (tls_ctx) |ctx| {
httplog.debug("tls_ctx deinit start", .{});
c.aws_tls_ctx_release(ctx);
httplog.debug("tls_ctx deinit end", .{});
}
if (tls_ctx_options != null) {
// See:
// https://github.com/awslabs/aws-c-io/blob/6c7bae503961545c5e99c6c836c4b37749cfc4ad/source/tls_channel_handler.c#L25
//
// The way this structure is constructed (setupTls/makeRequest), the only
// thing we need to clean up here is the alpn_list, which is set by
// aws_tls_ctx_options_set_alpn_list to a constant value. My guess here
// is that memory is not allocated - the pointer is looking at the program data.
// So the pointer is non-zero, but cannot be deallocated, and we segfault
httplog.debug("tls_ctx_options deinit unnecessary - skipping", .{});
// log.debug("tls_ctx_options deinit start. alpn_list: {*}", .{opts.alpn_list});
// c.aws_string_destroy(opts.alpn_list);
// c.aws_tls_ctx_options_clean_up(opts);
// log.debug("tls_ctx_options deinit end", .{});
}
c.aws_http_library_clean_up();
httplog.debug("auth clean up start", .{});
c.aws_auth_library_clean_up();
httplog.debug("auth clean up complete", .{});
}
pub const AwsHttp = struct {
allocator: std.mem.Allocator,
bootstrap: *c.aws_client_bootstrap,
resolver: *c.aws_host_resolver,
eventLoopGroup: *c.aws_event_loop_group,
credentialsProvider: *c.aws_credentials_provider,
const Self = @This();
/// Initializes an AwsHttp client: bumps the global reference count (running
/// cInit on first use), then builds the event loop group, host resolver,
/// client bootstrap, and default credentials provider chain from the AWS C
/// libraries. NOTE(review): not thread-safe — reference_count is a plain
/// global with no synchronization visible here; confirm single-threaded use.
pub fn init(allocator: std.mem.Allocator) Self {
if (reference_count == 0) cInit(allocator);
reference_count += 1;
httplog.debug("auth ref count: {}", .{reference_count});
// TODO; determine appropriate lifetime for the bootstrap and credentials'
// provider
// Mostly stolen from aws_c_auth/credentials_tests.c
const el_group = c.aws_event_loop_group_new_default(c_allocator, 1, null);
var resolver_options = c.aws_host_resolver_default_options{
.el_group = el_group,
.max_entries = 8,
.shutdown_options = null, // not set in test
.system_clock_override_fn = null, // not set in test
};
const resolver = c.aws_host_resolver_new_default(c_allocator, &resolver_options);
const bootstrap_options = c.aws_client_bootstrap_options{
.host_resolver = resolver,
.on_shutdown_complete = null, // was set in test
.host_resolution_config = null,
.user_data = null,
.event_loop_group = el_group,
};
const bootstrap = c.aws_client_bootstrap_new(c_allocator, &bootstrap_options);
const provider_chain_options = c.aws_credentials_provider_chain_default_options{
.bootstrap = bootstrap,
.shutdown_options = c.aws_credentials_provider_shutdown_options{
.shutdown_callback = null, // was set on test
.shutdown_user_data = null,
},
};
return .{
.allocator = allocator,
.bootstrap = bootstrap,
.resolver = resolver,
.eventLoopGroup = el_group,
.credentialsProvider = c.aws_credentials_provider_new_chain_default(c_allocator, &provider_chain_options),
// .credentialsProvider = // creds provider could be useful
};
}
/// Releases this client's C resources (credentials provider, bootstrap,
/// resolver, event loop group) and decrements the global reference count,
/// running the global cDeinit once the last client is gone. Releases happen
/// in reverse order of acquisition in init.
pub fn deinit(self: *AwsHttp) void {
if (reference_count > 0)
reference_count -= 1;
httplog.debug("deinit: auth ref count: {}", .{reference_count});
c.aws_credentials_provider_release(self.credentialsProvider);
// TODO: Wait for provider shutdown? https://github.com/awslabs/aws-c-auth/blob/c394e30808816a8edaab712e77f79f480c911d3a/tests/credentials_tests.c#L197
c.aws_client_bootstrap_release(self.bootstrap);
c.aws_host_resolver_release(self.resolver);
c.aws_event_loop_group_release(self.eventLoopGroup);
if (reference_count == 0) {
cDeinit();
httplog.debug("Deinit complete", .{});
}
}
/// callApi allows the calling of AWS APIs through a higher-level interface.
/// It will calculate the appropriate endpoint and action parameters for the
@ -279,185 +136,28 @@ pub const AwsHttp = struct {
/// HttpResult currently contains the body only. The addition of Headers
/// and return code would be a relatively minor change
pub fn makeRequest(self: Self, endpoint: EndPoint, request: HttpRequest, signing_options: ?SigningOptions) !HttpResult {
// Since we're going to pass these into C-land, we need to make sure
// our inputs have sentinals
const method_z = try self.allocator.dupeZ(u8, request.method);
defer self.allocator.free(method_z);
// Path contains both path and query
const path_z = try std.fmt.allocPrintZ(self.allocator, "{s}{s}", .{ request.path, request.query });
defer self.allocator.free(path_z);
const body_z = try self.allocator.dupeZ(u8, request.body);
defer self.allocator.free(body_z);
httplog.debug("Path: {s}", .{path_z});
httplog.debug("Path: {s}", .{request.path});
httplog.debug("Query: {s}", .{request.query});
httplog.debug("Method: {s}", .{request.method});
httplog.debug("body length: {d}", .{request.body.len});
httplog.debug("Body\n====\n{s}\n====", .{request.body});
// TODO: Try to re-encapsulate this
// var http_request = try createRequest(method, path, body);
// TODO: Likely this should be encapsulated more
var http_request = c.aws_http_message_new_request(c_allocator);
defer c.aws_http_message_release(http_request);
if (c.aws_http_message_set_request_method(http_request, c.aws_byte_cursor_from_c_str(@ptrCast([*c]const u8, method_z))) != c.AWS_OP_SUCCESS)
return AwsError.SetRequestMethodError;
if (c.aws_http_message_set_request_path(http_request, c.aws_byte_cursor_from_c_str(@ptrCast([*c]const u8, path_z))) != c.AWS_OP_SUCCESS)
return AwsError.SetRequestPathError;
const body_cursor = c.aws_byte_cursor_from_c_str(@ptrCast([*c]const u8, body_z));
const request_body = c.aws_input_stream_new_from_cursor(c_allocator, &body_cursor);
defer c.aws_input_stream_destroy(request_body);
if (request.body.len > 0)
c.aws_http_message_set_body_stream(http_request, request_body);
// End CreateRequest. This should return a struct with a deinit function that can do
// destroys, etc
var context = RequestContext{
.allocator = self.allocator,
};
defer context.deinit();
var tls_connection_options: ?*c.aws_tls_connection_options = null;
const host = try self.allocator.dupeZ(u8, endpoint.host);
defer self.allocator.free(host);
try self.addHeaders(http_request.?, host, request.body, request.content_type, request.headers);
if (std.mem.eql(u8, endpoint.scheme, "https")) {
// TODO: Figure out why this needs to be inline vs function call
// tls_connection_options = try self.setupTls(host);
if (tls_ctx_options == null) {
httplog.debug("Setting up tls options", .{});
// Language change - translate_c no longer translates c enums
// to zig enums as there were too many edge cases:
// https://github.com/ziglang/zig/issues/2115#issuecomment-827968279
var opts: c.aws_tls_ctx_options = .{
.allocator = c_allocator,
.minimum_tls_version = 128, // @intToEnum(c.aws_tls_versions, c.AWS_IO_TLS_VER_SYS_DEFAULTS), // https://github.com/awslabs/aws-c-io/blob/6c7bae503961545c5e99c6c836c4b37749cfc4ad/include/aws/io/tls_channel_handler.h#L21
.cipher_pref = 0, // @intToEnum(c.aws_tls_cipher_pref, c.AWS_IO_TLS_CIPHER_PREF_SYSTEM_DEFAULT), // https://github.com/awslabs/aws-c-io/blob/6c7bae503961545c5e99c6c836c4b37749cfc4ad/include/aws/io/tls_channel_handler.h#L25
.ca_file = c.aws_byte_buf_from_c_str(""),
.ca_path = c.aws_string_new_from_c_str(c_allocator, ""),
.alpn_list = null,
.certificate = c.aws_byte_buf_from_c_str(""),
.private_key = c.aws_byte_buf_from_c_str(""),
.max_fragment_size = 0,
.verify_peer = true,
};
tls_ctx_options = &opts;
c.aws_tls_ctx_options_init_default_client(tls_ctx_options.?, c_allocator);
// h2;http/1.1
if (c.aws_tls_ctx_options_set_alpn_list(tls_ctx_options, "http/1.1") != c.AWS_OP_SUCCESS) {
httplog.err("Failed to load alpn list with error {s}.", .{c.aws_error_debug_str(c.aws_last_error())});
return AwsError.AlpnError;
}
tls_ctx = c.aws_tls_client_ctx_new(c_allocator, tls_ctx_options.?);
if (tls_ctx == null) {
std.debug.panic("Failed to initialize TLS context with error {s}.", .{c.aws_error_debug_str(c.aws_last_error())});
}
httplog.debug("tls options setup applied", .{});
}
var conn_opts = c.aws_tls_connection_options{
.alpn_list = null,
.server_name = null,
.on_negotiation_result = null,
.on_data_read = null,
.on_error = null,
.user_data = null,
.ctx = null,
.advertise_alpn_message = false,
.timeout_ms = 0,
};
tls_connection_options = &conn_opts;
c.aws_tls_connection_options_init_from_ctx(tls_connection_options, tls_ctx);
var host_var = host;
var host_cur = c.aws_byte_cursor_from_c_str(@ptrCast([*c]const u8, host_var));
if (c.aws_tls_connection_options_set_server_name(tls_connection_options, c_allocator, &host_cur) != c.AWS_OP_SUCCESS) {
httplog.err("Failed to set servername with error {s}.", .{c.aws_error_debug_str(c.aws_last_error())});
return AwsError.TlsError;
}
}
if (signing_options) |opts| try self.signRequest(http_request.?, opts);
const socket_options = c.aws_socket_options{
.type = 0, // @intToEnum(c.aws_socket_type, c.AWS_SOCKET_STREAM), // https://github.com/awslabs/aws-c-io/blob/6c7bae503961545c5e99c6c836c4b37749cfc4ad/include/aws/io/socket.h#L24
.domain = 0, // @intToEnum(c.aws_socket_domain, c.AWS_SOCKET_IPV4), // https://github.com/awslabs/aws-c-io/blob/6c7bae503961545c5e99c6c836c4b37749cfc4ad/include/aws/io/socket.h#L12
.connect_timeout_ms = 3000, // TODO: change hardcoded 3s value
.keep_alive_timeout_sec = 0,
.keepalive = false,
.keep_alive_interval_sec = 0,
// If set, sets the number of keep alive probes allowed to fail before the connection is considered
// lost. If zero OS defaults are used. On Windows, this option is meaningless until Windows 10 1703.
.keep_alive_max_failed_probes = 0,
};
const http_client_options = c.aws_http_client_connection_options{
.self_size = @sizeOf(c.aws_http_client_connection_options),
.socket_options = &socket_options,
.allocator = c_allocator,
.port = endpoint.port,
.host_name = c.aws_byte_cursor_from_c_str(@ptrCast([*c]const u8, host)),
.bootstrap = self.bootstrap,
.initial_window_size = c.SIZE_MAX,
.tls_options = tls_connection_options,
.user_data = &context,
.proxy_options = null,
.monitoring_options = null,
.http1_options = null,
.http2_options = null,
.manual_window_management = false,
.on_setup = connectionSetupCallback,
.on_shutdown = connectionShutdownCallback,
};
if (c.aws_http_client_connect(&http_client_options) != c.AWS_OP_SUCCESS) {
httplog.err("HTTP client connect failed with {s}.", .{c.aws_error_debug_str(c.aws_last_error())});
return AwsError.HttpClientConnectError;
}
// TODO: Timeout
// Wait for connection to setup
while (!context.connection_complete.load(.SeqCst)) {
std.time.sleep(1 * std.time.ns_per_ms);
}
if (context.return_error) |e| return e;
const request_options = c.aws_http_make_request_options{
.self_size = @sizeOf(c.aws_http_make_request_options),
.on_response_headers = incomingHeadersCallback,
.on_response_header_block_done = null,
.on_response_body = incomingBodyCallback,
.on_complete = requestCompleteCallback,
.user_data = @ptrCast(*anyopaque, &context),
.request = http_request,
};
const stream = c.aws_http_connection_make_request(context.connection, &request_options);
if (stream == null) {
httplog.err("failed to create request.", .{});
return AwsError.RequestCreateError;
}
if (c.aws_http_stream_activate(stream) != c.AWS_OP_SUCCESS) {
httplog.err("HTTP request failed with {s}.", .{c.aws_error_debug_str(c.aws_last_error())});
return AwsError.HttpRequestError;
}
// TODO: make req
// TODO: Timeout
while (!context.request_complete.load(.SeqCst)) {
std.time.sleep(1 * std.time.ns_per_ms);
}
httplog.debug("request_complete. Response code {d}", .{context.response_code.?});
httplog.debug("headers:", .{});
for (context.headers.?.items) |h| {
httplog.debug(" {s}: {s}", .{ h.name, h.value });
}
httplog.debug("raw response body:\n{s}", .{context.body});
// Connection will stay alive until stream completes
c.aws_http_connection_release(context.connection);
context.connection = null;
if (tls_connection_options) |opts| {
c.aws_tls_connection_options_clean_up(opts);
}
var final_body: []const u8 = "";
if (context.body) |b| {
final_body = b;
}
// Headers would need to be allocated/copied into HttpResult similar
// to RequestContext, so we'll leave this as a later excercise
@ -471,204 +171,57 @@ pub const AwsHttp = struct {
return rc;
}
// TODO: Re-encapsulate or delete this function. It is not currently
// used and will not be touched by the compiler
// Builds a new aws_http_message with the given method, path and body.
// NOTE(review): aws_http_message_new_request can fail; `http_request.?` at
// the bottom would then panic at runtime instead of returning an error —
// confirm and convert to an error return if this code is revived.
// NOTE(review): the body input stream is destroyed by `defer` when this
// function returns, while the returned message still references it via
// set_body_stream. Per aws-c-http, the message does not take ownership of
// the stream, so this looks like a use-after-free for any caller that later
// sends the message — TODO confirm before reuse.
fn createRequest(method: []const u8, path: []const u8, body: []const u8) !*c.aws_http_message {
    // TODO: Likely this should be encapsulated more
    var http_request = c.aws_http_message_new_request(c_allocator);
    if (c.aws_http_message_set_request_method(http_request, c.aws_byte_cursor_from_c_str(@ptrCast([*c]const u8, method))) != c.AWS_OP_SUCCESS)
        return AwsError.SetRequestMethodError;
    if (c.aws_http_message_set_request_path(http_request, c.aws_byte_cursor_from_c_str(@ptrCast([*c]const u8, path))) != c.AWS_OP_SUCCESS)
        return AwsError.SetRequestPathError;
    // NOTE(review): aws_byte_cursor_from_c_str expects a null-terminated
    // string, but `method`/`path`/`body` are plain slices — confirm callers
    // always pass sentinel-terminated data.
    const body_cursor = c.aws_byte_cursor_from_c_str(@ptrCast([*c]const u8, body));
    const request_body = c.aws_input_stream_new_from_cursor(c_allocator, &body_cursor);
    defer c.aws_input_stream_destroy(request_body);
    c.aws_http_message_set_body_stream(http_request, request_body);
    return http_request.?;
}
// TODO: Re-encapsulate or delete this function. It is not currently
// used and will not be touched by the compiler
// Lazily initializes the module-level TLS context (once), then builds
// per-connection TLS options with the server name set for SNI.
// NOTE(review): `tls_ctx_options = &opts;` stores the address of a stack
// local (`opts`) in what appears to be a module-level pointer; that pointer
// dangles as soon as this function returns. The options should live in
// module-level storage (or be heap-allocated) before this code is revived.
// NOTE(review): `return &tls_connection_options;` returns the address of a
// stack local — the returned pointer is dangling for every caller.
fn setupTls(_: Self, host: []const u8) !*c.aws_tls_connection_options {
    if (tls_ctx_options == null) {
        httplog.debug("Setting up tls options", .{});
        // Field values here are overwritten immediately below by
        // aws_tls_ctx_options_init_default_client; they exist only so the
        // struct literal is fully initialized.
        var opts: c.aws_tls_ctx_options = .{
            .allocator = c_allocator,
            .minimum_tls_version = 128, // @intToEnum(c.aws_tls_versions, c.AWS_IO_TLS_VER_SYS_DEFAULTS), // https://github.com/awslabs/aws-c-io/blob/6c7bae503961545c5e99c6c836c4b37749cfc4ad/include/aws/io/tls_channel_handler.h#L21
            .cipher_pref = 0, // @intToEnum(c.aws_tls_cipher_pref, c.AWS_IO_TLS_CIPHER_PREF_SYSTEM_DEFAULT), // https://github.com/awslabs/aws-c-io/blob/6c7bae503961545c5e99c6c836c4b37749cfc4ad/include/aws/io/tls_channel_handler.h#L25
            .ca_file = c.aws_byte_buf_from_c_str(""),
            .ca_path = c.aws_string_new_from_c_str(c_allocator, ""),
            .alpn_list = null,
            .certificate = c.aws_byte_buf_from_c_str(""),
            .private_key = c.aws_byte_buf_from_c_str(""),
            .max_fragment_size = 0,
            .verify_peer = true,
        };
        tls_ctx_options = &opts;
        c.aws_tls_ctx_options_init_default_client(tls_ctx_options.?, c_allocator);
        // h2;http/1.1
        if (c.aws_tls_ctx_options_set_alpn_list(tls_ctx_options, "http/1.1") != c.AWS_OP_SUCCESS) {
            httplog.alert("Failed to load alpn list with error {s}.", .{c.aws_error_debug_str(c.aws_last_error())});
            return AwsError.AlpnError;
        }
        tls_ctx = c.aws_tls_client_ctx_new(c_allocator, tls_ctx_options.?);
        if (tls_ctx == null) {
            std.debug.panic("Failed to initialize TLS context with error {s}.", .{c.aws_error_debug_str(c.aws_last_error())});
        }
        httplog.debug("tls options setup applied", .{});
    }
    var tls_connection_options = c.aws_tls_connection_options{
        .alpn_list = null,
        .server_name = null,
        .on_negotiation_result = null,
        .on_data_read = null,
        .on_error = null,
        .user_data = null,
        .ctx = null,
        .advertise_alpn_message = false,
        .timeout_ms = 0,
    };
    c.aws_tls_connection_options_init_from_ctx(&tls_connection_options, tls_ctx);
    // NOTE(review): aws_byte_cursor_from_c_str expects null termination;
    // `host` is a plain slice — confirm callers pass sentinel-terminated data.
    var host_var = host;
    var host_cur = c.aws_byte_cursor_from_c_str(@ptrCast([*c]const u8, host_var));
    if (c.aws_tls_connection_options_set_server_name(&tls_connection_options, c_allocator, &host_cur) != c.AWS_OP_SUCCESS) {
        httplog.alert("Failed to set servername with error {s}.", .{c.aws_error_debug_str(c.aws_last_error())});
        return AwsError.TlsError;
    }
    return &tls_connection_options;
    // if (app_ctx.uri.port) {
    //     port = app_ctx.uri.port;
    // }
}
/// Signs `http_request` in place with AWS Signature V4 using credentials
/// from the configured provider. Blocks until the CRT's asynchronous signing
/// callback (signComplete) reports completion.
/// Errors: CredentialsError (from getCredentials), SignableError,
/// SigningInitiationError, or allocation failure from allocPrintZ.
fn signRequest(self: Self, http_request: *c.aws_http_message, options: SigningOptions) !void {
    const creds = try self.getCredentials();
    defer c.aws_credentials_release(creds);
    // Credentials are an opaque C type. To log the access key we pull an
    // aws_byte_cursor via aws_credentials_get_access_key_id, copy it into a
    // new aws_string, and read that as a C string.
    const access_key = c.aws_string_new_from_cursor(c_allocator, &c.aws_credentials_get_access_key_id(creds));
    defer c.aws_mem_release(c_allocator, access_key);
    httplog.debug("Signing with access key: {s}", .{c.aws_string_c_str(access_key)});
    const signable = c.aws_signable_new_http_request(c_allocator, http_request);
    if (signable == null) {
        httplog.warn("Could not create signable request", .{});
        return AwsError.SignableError;
    }
    defer c.aws_signable_destroy(signable);
    // The C signing API needs null-terminated region/service strings;
    // options carries plain slices.
    const signing_region = try std.fmt.allocPrintZ(self.allocator, "{s}", .{options.region});
    defer self.allocator.free(signing_region);
    const signing_service = try std.fmt.allocPrintZ(self.allocator, "{s}", .{options.service});
    defer self.allocator.free(signing_service);
    // Built via the bitfield workaround struct because Zig cannot express
    // the C bitfields in aws_signing_config_aws directly.
    const temp_signing_config = c.bitfield_workaround_aws_signing_config_aws{
        .algorithm = 0, // .AWS_SIGNING_ALGORITHM_V4, // https://github.com/awslabs/aws-c-auth/blob/ace1311f8ef6ea890b26dd376031bed2721648eb/include/aws/auth/signing_config.h#L38
        .config_type = 1, // .AWS_SIGNING_CONFIG_AWS, // https://github.com/awslabs/aws-c-auth/blob/ace1311f8ef6ea890b26dd376031bed2721648eb/include/aws/auth/signing_config.h#L24
        .signature_type = 0, // .AWS_ST_HTTP_REQUEST_HEADERS, // https://github.com/awslabs/aws-c-auth/blob/ace1311f8ef6ea890b26dd376031bed2721648eb/include/aws/auth/signing_config.h#L49
        .region = c.aws_byte_cursor_from_c_str(@ptrCast([*c]const u8, signing_region)),
        .service = c.aws_byte_cursor_from_c_str(@ptrCast([*c]const u8, signing_service)),
        .should_sign_header = null,
        .should_sign_header_ud = null,
        // TODO: S3 does not double uri encode. Also not sure why normalizing
        // the path here is a flag - seems like it should always do this?
        .flags = c.bitfield_workaround_aws_signing_config_aws_flags{
            .use_double_uri_encode = 1,
            .should_normalize_uri_path = 1,
            .omit_session_token = 1,
        },
        .signed_body_value = c.aws_byte_cursor_from_c_str(""),
        .signed_body_header = 1, // .AWS_SBHT_X_AMZ_CONTENT_SHA256, //or 0 = AWS_SBHT_NONE // https://github.com/awslabs/aws-c-auth/blob/ace1311f8ef6ea890b26dd376031bed2721648eb/include/aws/auth/signing_config.h#L131
        .credentials = creds,
        .credentials_provider = self.credentialsProvider,
        .expiration_in_seconds = 0,
    };
    var signing_config = c.new_aws_signing_config(c_allocator, &temp_signing_config);
    defer c.aws_mem_release(c_allocator, signing_config);
    var signing_result = AwsAsyncCallbackResult(c.aws_http_message){ .result = http_request };
    var sign_result_request = AsyncResult(AwsAsyncCallbackResult(c.aws_http_message)){ .result = &signing_result };
    if (c.aws_sign_request_aws(c_allocator, signable, fullCast([*c]const c.aws_signing_config_base, signing_config), signComplete, &sign_result_request) != c.AWS_OP_SUCCESS) {
        const error_code = c.aws_last_error();
        httplog.err("Could not initiate signing request: {s}:{s}", .{ c.aws_error_name(error_code), c.aws_error_str(error_code) });
        return AwsError.SigningInitiationError;
    }
    // Wait for callback. Note that execution, including real work of signing
    // the http request, will continue in signComplete (below),
    // then continue beyond this line
    waitOnCallback(c.aws_http_message, &sign_result_request);
    if (sign_result_request.result.error_code != c.AWS_ERROR_SUCCESS) {
        return AwsError.SignableError;
    }
}
/// CRT callback fired when asynchronous signing finishes. Applies the
/// signing result to the pending HTTP request immediately — it appears the
/// AWS event loop may corrupt the signing result after this callback
/// returns, so "assign now, use later" is not an option here.
fn signComplete(result: ?*c.aws_signing_result, error_code: c_int, user_data: ?*anyopaque) callconv(.C) void {
    var state = userDataTo(AsyncResult(AwsAsyncCallbackResult(c.aws_http_message)), user_data);
    const request = state.result.result;
    // Hold the sync flag while mutating shared state; waitOnCallback spins
    // until it clears.
    state.sync.store(true, .SeqCst);
    state.count += 1;
    state.result.error_code = error_code;
    if (result) |signing_result| {
        if (c.aws_apply_signing_result_to_http_request(request, c_allocator, signing_result) != c.AWS_OP_SUCCESS) {
            httplog.err("Could not apply signing request to http request: {s}", .{c.aws_error_debug_str(c.aws_last_error())});
        }
        httplog.debug("signing result applied", .{});
    } else {
        httplog.err("Did not receive signing result: {s}", .{c.aws_error_debug_str(c.aws_last_error())});
    }
    state.sync.store(false, .SeqCst);
}
fn addHeaders(self: Self, request: *c.aws_http_message, host: []const u8, body: []const u8, content_type: []const u8, additional_headers: []Header) !void {
const accept_header = c.aws_http_header{
.name = c.aws_byte_cursor_from_c_str("Accept"),
.value = c.aws_byte_cursor_from_c_str("application/json"),
.compression = 0, // .AWS_HTTP_HEADER_COMPRESSION_USE_CACHE, // https://github.com/awslabs/aws-c-http/blob/ec42882310900f2b414b279fc24636ba4653f285/include/aws/http/request_response.h#L37
};
if (c.aws_http_message_add_header(request, accept_header) != c.AWS_OP_SUCCESS)
return AwsError.AddHeaderError;
const host_header = c.aws_http_header{
.name = c.aws_byte_cursor_from_c_str("Host"),
.value = c.aws_byte_cursor_from_c_str(@ptrCast([*c]const u8, host)),
.compression = 0, // .AWS_HTTP_HEADER_COMPRESSION_USE_CACHE,
};
if (c.aws_http_message_add_header(request, host_header) != c.AWS_OP_SUCCESS)
return AwsError.AddHeaderError;
const user_agent_header = c.aws_http_header{
.name = c.aws_byte_cursor_from_c_str("User-Agent"),
.value = c.aws_byte_cursor_from_c_str("zig-aws 1.0, Powered by the AWS Common Runtime."),
.compression = 0, // .AWS_HTTP_HEADER_COMPRESSION_USE_CACHE,
};
if (c.aws_http_message_add_header(request, user_agent_header) != c.AWS_OP_SUCCESS)
return AwsError.AddHeaderError;
// AWS does not seem to care about Accept-Encoding
// Accept-Encoding: identity
// Content-Type: application/x-www-form-urlencoded
// const accept_encoding_header = c.aws_http_header{
// .name = c.aws_byte_cursor_from_c_str("Accept-Encoding"),
// .value = c.aws_byte_cursor_from_c_str("identity"),
// .compression = 0, //.AWS_HTTP_HEADER_COMPRESSION_USE_CACHE,
// const accept_header = c.aws_http_header{
// .name = c.aws_byte_cursor_from_c_str("Accept"),
// .value = c.aws_byte_cursor_from_c_str("application/json"),
// .compression = 0, // .AWS_HTTP_HEADER_COMPRESSION_USE_CACHE, // https://github.com/awslabs/aws-c-http/blob/ec42882310900f2b414b279fc24636ba4653f285/include/aws/http/request_response.h#L37
// };
// const host_header = c.aws_http_header{
// .name = c.aws_byte_cursor_from_c_str("Host"),
// .value = c.aws_byte_cursor_from_c_str(@ptrCast([*c]const u8, host)),
// .compression = 0, // .AWS_HTTP_HEADER_COMPRESSION_USE_CACHE,
// };
// const user_agent_header = c.aws_http_header{
// .name = c.aws_byte_cursor_from_c_str("User-Agent"),
// .value = c.aws_byte_cursor_from_c_str("zig-aws 1.0, Powered by the AWS Common Runtime."),
// .compression = 0, // .AWS_HTTP_HEADER_COMPRESSION_USE_CACHE,
// };
// if (c.aws_http_message_add_header(request, accept_encoding_header) != c.AWS_OP_SUCCESS)
// return AwsError.AddHeaderError;
// AWS *does* seem to care about Content-Type. I don't think this header
// will hold for all APIs
@ -679,8 +232,6 @@ pub const AwsHttp = struct {
.value = c.aws_byte_cursor_from_c_str(c_type),
.compression = 0, // .AWS_HTTP_HEADER_COMPRESSION_USE_CACHE,
};
if (c.aws_http_message_add_header(request, content_type_header) != c.AWS_OP_SUCCESS)
return AwsError.AddHeaderError;
for (additional_headers) |h| {
const name = try std.fmt.allocPrintZ(self.allocator, "{s}", .{h.name});
@ -710,175 +261,19 @@ pub const AwsHttp = struct {
}
}
/// CRT callback fired when the HTTP connection attempt resolves. Records
/// the connection (or the error) on the request context, then flips
/// connection_complete to unblock the waiting caller.
fn connectionSetupCallback(connection: ?*c.aws_http_connection, error_code: c_int, user_data: ?*anyopaque) callconv(.C) void {
    httplog.debug("connection setup callback start", .{});
    var ctx = userDataTo(RequestContext, user_data);
    if (error_code != c.AWS_OP_SUCCESS) {
        httplog.err("Failed to setup connection: {s}.", .{c.aws_error_debug_str(c.aws_last_error())});
        ctx.return_error = AwsError.SetupConnectionError;
    }
    // Publish the connection before flipping the completion flag; the waiter
    // checks return_error after the flag flips.
    ctx.connection = connection;
    ctx.connection_complete.store(true, .SeqCst);
    httplog.debug("connection setup callback end", .{});
}
/// CRT callback fired when the HTTP connection shuts down. Purely
/// informational — logs and returns.
fn connectionShutdownCallback(connection: ?*c.aws_http_connection, error_code: c_int, _: ?*anyopaque) callconv(.C) void {
    // third parameter is the (ignored) user_data
    httplog.debug("connection shutdown callback start ({*}). error_code: {d}", .{ connection, error_code });
    httplog.debug("connection shutdown callback end", .{});
}
/// CRT callback invoked with each batch of incoming response headers.
/// Captures the response status on first invocation and copies every header
/// into the request context (the cursors are only valid during the call).
fn incomingHeadersCallback(stream: ?*c.aws_http_stream, _: c.aws_http_header_block, headers: [*c]const c.aws_http_header, num_headers: usize, user_data: ?*anyopaque) callconv(.C) c_int {
    var ctx = userDataTo(RequestContext, user_data);
    if (ctx.response_code == null) {
        var status: c_int = 0;
        if (c.aws_http_stream_get_incoming_response_status(stream, &status) != c.AWS_OP_SUCCESS) {
            httplog.err("could not get status code", .{});
            ctx.return_error = AwsError.StatusCodeError;
        } else {
            // RFC says this is a 3 digit number, so c_int is silly
            ctx.response_code = @intCast(u16, status);
            httplog.debug("response status code from callback: {d}", .{status});
        }
    }
    var i: usize = 0;
    while (i < num_headers) : (i += 1) {
        const hdr = headers[i];
        const name = hdr.name.ptr[0..hdr.name.len];
        const value = hdr.value.ptr[0..hdr.value.len];
        httplog.debug("header from callback: {s}: {s}", .{ name, value });
        ctx.addHeader(name, value) catch
            httplog.err("could not append header to request context", .{});
    }
    return c.AWS_OP_SUCCESS;
}
/// CRT callback invoked with each chunk of the incoming response body.
/// Copies the chunk into the request context's body buffer.
fn incomingBodyCallback(_: ?*c.aws_http_stream, data: [*c]const c.aws_byte_cursor, user_data: ?*anyopaque) callconv(.C) c_int {
    var context = userDataTo(RequestContext, user_data);
    httplog.debug("inbound body, len {d}", .{data.*.len});
    // Slice directly from the cursor's pointer and length. (The previous
    // `@ptrCast(*const []u8, &data.*.ptr).*` reinterpreted the C struct's
    // memory as a Zig slice, which silently depends on aws_byte_cursor's
    // field layout matching a slice — fragile and unnecessary.)
    // Need a slice because the data does not necessarily have a \0 sentinel.
    const body_chunk = data.*.ptr[0..data.*.len];
    context.appendToBody(body_chunk) catch
        httplog.err("could not append to body!", .{});
    return c.AWS_OP_SUCCESS;
}
/// CRT callback fired when the HTTP stream finishes. Marks the context
/// complete (unblocking the waiting caller) and releases the stream.
fn requestCompleteCallback(stream: ?*c.aws_http_stream, _: c_int, user_data: ?*anyopaque) callconv(.C) void {
    // second parameter is the (ignored) error_code
    var ctx = userDataTo(RequestContext, user_data);
    ctx.request_complete.store(true, .SeqCst);
    c.aws_http_stream_release(stream);
    httplog.debug("request complete", .{});
}
/// Fetches AWS credentials from the configured provider, blocking until the
/// provider's async callback fires. On success the returned credentials have
/// been acquired on our behalf (see assignCredentialsOnCallback); the caller
/// must eventually aws_credentials_release them.
fn getCredentials(self: Self) !*c.aws_credentials {
    var cred_result = AwsAsyncCallbackResult(c.aws_credentials){};
    var async_state = AsyncResult(AwsAsyncCallbackResult(c.aws_credentials)){ .result = &cred_result };
    const on_credentials = awsAsyncCallbackResult(c.aws_credentials, "got credentials", assignCredentialsOnCallback);
    // TODO: do we care about the return value from get_creds?
    _ = c.aws_credentials_provider_get_credentials(self.credentialsProvider, on_credentials, &async_state);
    waitOnCallback(c.aws_credentials, &async_state);
    if (cred_result.error_code != c.AWS_ERROR_SUCCESS) {
        httplog.err("Could not acquire credentials: {s}:{s}", .{ c.aws_error_name(cred_result.error_code), c.aws_error_str(cred_result.error_code) });
        return AwsError.CredentialsError;
    }
    return cred_result.result orelse unreachable;
}
/// Generic blocking wait used by the async CRT calls above. Spins (1 ms
/// sleeps) until the callback has fired `requiredCount` times and is not in
/// the middle of updating `results`.
/// TODO: add a timeout (e.g. std.time.Timer) so a lost callback cannot hang
/// the caller forever.
fn waitOnCallback(comptime T: type, results: *AsyncResult(AwsAsyncCallbackResult(T))) void {
    var finished = false;
    while (!finished) {
        // While the sync flag is held, a callback is mutating the result;
        // don't read the counters until it drops.
        while (results.sync.load(.SeqCst)) {
            std.time.sleep(1 * std.time.ns_per_ms);
        }
        finished = results.count >= results.requiredCount;
        std.time.sleep(1 * std.time.ns_per_ms);
    }
}
// Generic function that generates a type-specific function for callback use,
// with the default assignment behavior: store the API's result pointer
// directly into the async result. For custom assignment (e.g. refcounting),
// use awsAsyncCallbackResult directly.
fn awsAsyncCallback(comptime T: type, comptime message: []const u8) (fn (result: ?*T, error_code: c_int, user_data: ?*anyopaque) callconv(.C) void) {
    const assigner = struct {
        fn assign(user_data: *AsyncResult(AwsAsyncCallbackResult(T)), api_data: ?*T) void {
            user_data.result.result = api_data;
        }
    };
    return awsAsyncCallbackResult(T, message, assigner.assign);
}
// used by awsAsyncCallbackResult to cast our generic userdata void *
// into a type known to zig (the type the callback was registered with).
// NOTE(review): a null `userData` here would be a bug in the registering
// caller; this cast does not check for it.
fn userDataTo(comptime T: type, userData: ?*anyopaque) *T {
    return @ptrCast(*T, @alignCast(@alignOf(T), userData));
}
// Generic callback factory. Produces a C-callconv callback for type T that
// logs `message`, records the error code, and delegates result storage to
// `resultAssignment`. If you only need the standard "store the pointer"
// assignment, use awsAsyncCallback instead.
fn awsAsyncCallbackResult(comptime T: type, comptime message: []const u8, comptime resultAssignment: (fn (user: *AsyncResult(AwsAsyncCallbackResult(T)), apiData: ?*T) void)) (fn (result: ?*T, error_code: c_int, user_data: ?*anyopaque) callconv(.C) void) {
    const wrapper = struct {
        fn callback(result: ?*T, error_code: c_int, user_data: ?*anyopaque) callconv(.C) void {
            httplog.debug(message, .{});
            var state = userDataTo(AsyncResult(AwsAsyncCallbackResult(T)), user_data);
            // Hold the sync flag while mutating shared state; waitOnCallback
            // spins until it clears.
            state.sync.store(true, .SeqCst);
            state.count += 1;
            state.result.error_code = error_code;
            resultAssignment(state, result);
            state.sync.store(false, .SeqCst);
        }
    };
    return wrapper.callback;
}
/// Credential-specific assignment for awsAsyncCallbackResult: swaps the
/// stored credentials for the newly delivered ones, maintaining refcounts
/// so the result stays valid after the provider's callback returns.
fn assignCredentialsOnCallback(asyncResult: *AsyncResult(AwsAsyncCallbackResult(c.aws_credentials)), credentials: ?*c.aws_credentials) void {
    // Acquire the incoming credentials BEFORE releasing any previously held
    // ones: the original release-then-acquire order could, if both pointers
    // referenced the same object, drop its refcount to zero (freeing it)
    // before re-acquiring.
    if (credentials) |cred| {
        c.aws_credentials_acquire(cred);
    }
    if (asyncResult.result.result) |previous| {
        c.aws_credentials_release(previous);
    }
    asyncResult.result.result = credentials;
}
};
// Synchronization envelope for a single asynchronous CRT call: a callback
// holds `sync` true while mutating `result`, and `count` tracks callback
// firings. waitOnCallback spins until `count` reaches `requiredCount`.
fn AsyncResult(comptime T: type) type {
    return struct {
        // Pointer to the caller-owned result payload the callback fills in.
        result: *T,
        // Number of callback invocations to wait for before completion.
        requiredCount: u32 = 1,
        // True while a callback is writing `result`; readers must wait.
        sync: std.atomic.Atomic(bool) = std.atomic.Atomic(bool).init(false),
        // Callback invocations observed so far.
        count: u8 = 0,
    };
}
// Result payload delivered by an AWS CRT async callback: the API's result
// pointer (null until the callback fires, or on failure) plus the CRT
// error code.
fn AwsAsyncCallbackResult(comptime T: type) type {
    return struct {
        result: ?*T = null,
        error_code: i32 = c.AWS_ERROR_SUCCESS,
    };
}
// Convenience wrapper for the double cast (@alignCast then @ptrCast) needed
// when converting between unrelated C pointer types, e.g. passing a
// type-specific signing config where the base-config pointer is expected.
fn fullCast(comptime T: type, val: anytype) T {
    return @ptrCast(T, @alignCast(@alignOf(T), val));
}

View File

@ -1,34 +0,0 @@
#include <aws/auth/signing_config.h>
#include <aws/common/date_time.h>
#include "bitfield-workaround.h"
extern void *new_aws_signing_config(
struct aws_allocator *allocator,
const struct bitfield_workaround_aws_signing_config_aws *config) {
struct aws_signing_config_aws *new_config = aws_mem_acquire(allocator, sizeof(struct aws_signing_config_aws));
new_config->algorithm = config->algorithm;
new_config->config_type = config->config_type;
new_config->signature_type = config->signature_type;
new_config->region = config->region;
new_config->service = config->service;
new_config->should_sign_header = config->should_sign_header;
new_config->should_sign_header_ud = config->should_sign_header_ud;
new_config->flags.use_double_uri_encode = config->flags.use_double_uri_encode;
new_config->flags.should_normalize_uri_path = config->flags.should_normalize_uri_path;
new_config->flags.omit_session_token = config->flags.omit_session_token;
new_config->signed_body_value = config->signed_body_value;
new_config->signed_body_header = config->signed_body_header;
new_config->credentials = config->credentials;
new_config->credentials_provider = config->credentials_provider;
new_config->expiration_in_seconds = config->expiration_in_seconds;
aws_date_time_init_now(&new_config->date);
return new_config;
}
/* Exposes the C `stderr` stream to Zig callers; `stderr` is a macro and so
 * cannot be referenced directly through Zig's C import. */
extern FILE *get_std_err() {
    return stderr;
}

View File

@ -1,142 +0,0 @@
#ifndef ZIG_AWS_BITFIELD_WORKAROUND_H
#define ZIG_AWS_BITFIELD_WORKAROUND_H
#include <aws/auth/auth.h>
#include <aws/auth/signing_config.h>
// Copied verbatim from https://github.com/awslabs/aws-c-auth/blob/main/include/aws/auth/signing_config.h#L127-L241
// However, the flags has changed to uint32_t without bitfield annotations
// as Zig does not support them yet. See https://github.com/ziglang/zig/issues/1499
// We've renamed as well to make clear what's going on
//
// Signing date is also somewhat problematic, so we removed it and it is
// part of the c code
/*
 * Put all flags in here at the end. If this grows, stay aware of bit-space overflow and ABI compatibility.
*/
struct bitfield_workaround_aws_signing_config_aws_flags {
/**
* We assume the uri will be encoded once in preparation for transmission. Certain services
* do not decode before checking signature, requiring us to actually double-encode the uri in the canonical
* request in order to pass a signature check.
*/
uint32_t use_double_uri_encode;
/**
* Controls whether or not the uri paths should be normalized when building the canonical request
*/
uint32_t should_normalize_uri_path;
/**
* Controls whether "X-Amz-Security-Token" is omitted from the canonical request.
* "X-Amz-Security-Token" is added during signing, as a header or
* query param, when credentials have a session token.
* If false (the default), this parameter is included in the canonical request.
* If true, this parameter is still added, but omitted from the canonical request.
*/
uint32_t omit_session_token;
};
/**
* A configuration structure for use in AWS-related signing. Currently covers sigv4 only, but is not required to.
*/
struct bitfield_workaround_aws_signing_config_aws {
/**
* What kind of config structure is this?
*/
enum aws_signing_config_type config_type;
/**
* What signing algorithm to use.
*/
enum aws_signing_algorithm algorithm;
/**
* What sort of signature should be computed?
*/
enum aws_signature_type signature_type;
/**
* The region to sign against
*/
struct aws_byte_cursor region;
/**
* name of service to sign a request for
*/
struct aws_byte_cursor service;
/**
* Raw date to use during the signing process.
*/
// struct aws_date_time date;
/**
* Optional function to control which headers are a part of the canonical request.
* Skipping auth-required headers will result in an unusable signature. Headers injected by the signing process
* are not skippable.
*
* This function does not override the internal check function (x-amzn-trace-id, user-agent), but rather
* supplements it. In particular, a header will get signed if and only if it returns true to both
* the internal check (skips x-amzn-trace-id, user-agent) and this function (if defined).
*/
aws_should_sign_header_fn *should_sign_header;
void *should_sign_header_ud;
/*
 * Put all flags in here at the end. If this grows, stay aware of bit-space overflow and ABI compatibility.
*/
struct bitfield_workaround_aws_signing_config_aws_flags flags;
/**
* Optional string to use as the canonical request's body value.
* If string is empty, a value will be calculated from the payload during signing.
* Typically, this is the SHA-256 of the (request/chunk/event) payload, written as lowercase hex.
* If this has been precalculated, it can be set here. Special values used by certain services can also be set
* (e.g. "UNSIGNED-PAYLOAD" "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" "STREAMING-AWS4-HMAC-SHA256-EVENTS").
*/
struct aws_byte_cursor signed_body_value;
/**
* Controls what body "hash" header, if any, should be added to the canonical request and the signed request:
* AWS_SBHT_NONE - no header should be added
* AWS_SBHT_X_AMZ_CONTENT_SHA256 - the body "hash" should be added in the X-Amz-Content-Sha256 header
*/
enum aws_signed_body_header_type signed_body_header;
/*
* Signing key control:
*
* (1) If "credentials" is valid, use it
* (2) Else if "credentials_provider" is valid, query credentials from the provider and use the result
* (3) Else fail
*
*/
/**
* AWS Credentials to sign with.
*/
const struct aws_credentials *credentials;
/**
* AWS credentials provider to fetch credentials from.
*/
struct aws_credentials_provider *credentials_provider;
/**
* If non-zero and the signing transform is query param, then signing will add X-Amz-Expires to the query
* string, equal to the value specified here. If this value is zero or if header signing is being used then
* this parameter has no effect.
*/
uint64_t expiration_in_seconds;
};
extern void *new_aws_signing_config(struct aws_allocator *allocator, const struct bitfield_workaround_aws_signing_config_aws *config);
extern FILE *get_std_err();
#endif

1
zfetch_deps.zig Normal file
View File

@ -0,0 +1 @@
// Read by zfetch's dependency build logic: a nonzero value tells it to
// resolve dependencies from git submodules rather than fetching them —
// TODO(review): confirm against zfetch's deps documentation.
const use_submodules = 1;