Compare commits: 5b1a6a6e01 ... 9fd6755684

6 commits (SHA1):
9fd6755684
ea93542da8
3bf6adc13e
87323ecb71
a5b78384f5
042dfad64b
.gitignore (vendored, 1 line changed)

@@ -8,3 +8,4 @@ demo
 src/models/
 smithy/zig-out/
 libs/
+src/git_version.zig
VersionStep.zig (new file, 208 lines)

@@ -0,0 +1,208 @@
//! Publish Date: 2022-01-12
//! This file is hosted at ??? and is meant to be copied
//! to projects that use it. Sample usage:
//!
//! const version = VersionStep.create(b, null);
//! exe.step.dependOn(&version.step);

const std = @import("std");
const Step = @This();

step: std.build.Step,
builder: *std.build.Builder,
version_path: []const u8,

// Creates a step that will add the git version info in a file in src/
// so it can be consumed by additional code. If version_path is not specified,
// it will default to "git_version.zig". This should be part of .gitignore
pub fn create(b: *std.build.Builder, version_path: ?[]const u8) *Step {
    var result = b.allocator.create(Step) catch @panic("memory");
    result.* = Step{
        .step = std.build.Step.init(.custom, "create version file", b.allocator, make),
        .builder = b,
        .version_path = std.fs.path.resolve(b.allocator, &[_][]const u8{
            b.build_root,
            "src",
            version_path orelse "git_version.zig",
        }) catch @panic("memory"),
    };
    return result;
}

fn make(step: *std.build.Step) !void {
    const self = @fieldParentPtr(Step, "step", step);
    const file = try std.fs.createFileAbsolute(self.version_path, .{});
    defer file.close();
    const version = try getGitVersion(
        self.builder.allocator,
        self.builder.build_root,
        self.builder.env_map,
    );
    defer version.deinit();
    try file.writer().print(
        \\pub const hash = "{s}";
        \\pub const abbreviated_hash = "{s}";
        \\pub const commit_date = "{s}";
        \\pub const branch = "{s}";
        \\pub const dirty = {b};
        \\pub const pretty_version = "{s}";
    , .{
        version.hash,
        version.abbreviated_hash,
        version.commit_date,
        version.branch,
        version.dirty,
        version.pretty_version,
    });
}

const GitVersion = struct {
    hash: []const u8,
    abbreviated_hash: []const u8,
    commit_date: []const u8,
    branch: []const u8,
    dirty: bool,
    pretty_version: []const u8,

    allocator: std.mem.Allocator,

    const Self = @This();

    pub fn deinit(self: Self) void {
        self.allocator.free(self.hash);
        self.allocator.free(self.abbreviated_hash);
        self.allocator.free(self.commit_date);
        self.allocator.free(self.branch);
        self.allocator.free(self.pretty_version);
    }
};

fn getGitVersion(allocator: std.mem.Allocator, git_working_root: ?[]const u8, env: anytype) !GitVersion {
    // git log -1 --pretty="%H%n%h%n%ci%n%D"
    // 3bf6adc13e4aa653a7b75b1b5e9c9db5215df8e1
    // 3bf6adc
    // 2022-01-12 12:21:28 -0800
    // HEAD -> zig-native

    const log_output = try run(
        allocator,
        &[_][]const u8{
            "git",
            "log",
            "-1",
            "--pretty=%H%n%h%n%ci%n%D",
        },
        git_working_root,
        env,
    );
    defer allocator.free(log_output);
    const line_data = try getLines(allocator, 4, log_output);
    const hash = line_data[0];
    const abbrev_hash = line_data[1];
    const date = line_data[2];
    const branch = line_data[3];

    // git status --porcelain
    const status_output = try run(
        allocator,
        &[_][]const u8{
            "git",
            "status",
            "--porcelain",
        },
        git_working_root,
        env,
    );
    const dirty = blk: {
        if (status_output.len > 0) {
            allocator.free(status_output);
            break :blk true;
        }
        break :blk false;
    };
    const dirty_str = blk: {
        if (dirty) {
            break :blk " (dirty)";
        }
        break :blk "";
    };
    const pretty_version: []const u8 = try std.fmt.allocPrint(
        allocator,
        "version {s}, committed at {s}{s}",
        .{
            abbrev_hash,
            date,
            dirty_str,
        },
    );

    return GitVersion{
        .hash = hash,
        .abbreviated_hash = abbrev_hash,
        .commit_date = date,
        .branch = branch,
        .allocator = allocator,
        .dirty = dirty,
        .pretty_version = pretty_version,
    };
}
fn getLines(allocator: std.mem.Allocator, comptime line_count: u32, data: []const u8) ![line_count][]u8 {
    var line: u32 = 0;
    var start: u32 = 0;
    var current: u32 = 0;
    var line_data: [line_count][]u8 = undefined;
    errdefer {
        while (line > 0) {
            allocator.free(line_data[line]);
            line -= 1;
        }
    }
    for (data) |c| {
        // try std.io.getStdErr().writer().print("line: {d}, c: {c}, cur: {d}, strt: {d}\n", .{ line, c, current, start });
        if (c == '\n') {
            line_data[line] = try allocator.dupe(u8, data[start..current]);
            // try std.io.getStdErr().writer().print("c: {d}, s: {d}, data: '{s}'\n", .{ current, start, line_data[line] });
            start = current + 1;
            line += 1;
        }
        current += 1;
    }
    return line_data;
}

// env is a std.process.BufMap, but that's private, which is a little weird tbh
fn run(allocator: std.mem.Allocator, argv: []const []const u8, cwd: ?[]const u8, env: anytype) ![]const u8 {
    {
        var msg = std.ArrayList(u8).init(allocator);
        defer msg.deinit();
        const writer = msg.writer();
        var prefix: []const u8 = "";
        for (argv) |arg| {
            try writer.print("{s}\"{s}\"", .{ prefix, arg });
            prefix = " ";
        }
        std.log.info("[RUN] {s}", .{msg.items});
    }

    const result = try std.ChildProcess.exec(.{
        .allocator = allocator,
        .argv = argv,
        .cwd = cwd,
        .env_map = env,
    });
    defer if (result.stderr.len > 0) allocator.free(result.stderr);
    try std.io.getStdErr().writer().writeAll(result.stderr);

    switch (result.term) {
        .Exited => |code| if (code != 0) {
            std.log.err("process failed with exit code: {}", .{code});

            std.os.exit(0xff);
        },
        else => {
            std.log.err("process failed due to exception: {}", .{result});
            std.os.exit(0xff);
        },
    }
    return result.stdout;
}
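For reference, the src/git_version.zig file emitted by the print call above would look roughly like this; the hash, date, and branch values are the illustrative ones from the comment in getGitVersion, not real repository state:

pub const hash = "3bf6adc13e4aa653a7b75b1b5e9c9db5215df8e1";
pub const abbreviated_hash = "3bf6adc";
pub const commit_date = "2022-01-12 12:21:28 -0800";
pub const branch = "HEAD -> zig-native";
pub const dirty = false;
pub const pretty_version = "version 3bf6adc, committed at 2022-01-12 12:21:28 -0800";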
build.zig (22 lines changed)

@@ -3,6 +3,8 @@ const builtin = @import("builtin");
 const Builder = @import("std").build.Builder;
 const GitRepoStep = @import("GitRepoStep.zig");
 const CopyStep = @import("CopyStep.zig");
+const tst = @import("build_test.zig");
+const VersionStep = @import("VersionStep.zig");

 pub fn build(b: *Builder) !void {
     const zfetch_repo = GitRepoStep.create(b, .{

@@ -41,6 +43,8 @@ pub fn build(b: *Builder) !void {
     );
     copy_deps.step.dependOn(&zfetch_repo.step);

+    const version = VersionStep.create(b, null);
+    exe.step.dependOn(&version.step);
     exe.step.dependOn(&copy_deps.step);

     // This import won't work unless we're already cloned. The way around

@@ -59,22 +63,8 @@ pub fn build(b: *Builder) !void {
     const run_step = b.step("run", "Run the app");
     run_step.dependOn(&run_cmd.step);

-    const test_step = b.step("test", "Run library tests");
-    var build_dir = try std.fs.openDirAbsolute(b.build_root, .{});
-    defer build_dir.close();
-    var src_dir = try build_dir.openDir("src", .{ .iterate = true });
-    defer src_dir.close();
-    var iterator = src_dir.iterate();
-    while (try iterator.next()) |entry| {
-        if (std.mem.endsWith(u8, entry.name, ".zig")) {
-            const name = try std.fmt.allocPrint(b.allocator, "src/{s}", .{entry.name});
-            defer b.allocator.free(name);
-            const t = b.addTest(name);
-            t.addPackagePath("smithy", "smithy/src/smithy.zig");
-            t.setBuildMode(mode);
-            test_step.dependOn(&t.step);
-        }
-    }
+    var test_step = try tst.addTestStep(b, mode, exe.packages.items);
+    test_step.dependOn(&version.step);

     if (target.getOs().tag == .linux) {
         // TODO: Support > linux with RunStep
build_test.zig (new file, 28 lines)

@@ -0,0 +1,28 @@
//! Publish Date: 2022-01-12
//! This file is hosted at ??? and is meant to be copied
//! to projects that use it. Sample usage:
//!
//! const @"test" = @import("build_test.zig");
//! var test_step = try @"test".addTestStep(b, mode, exe.packages.items);
const std = @import("std");

pub fn addTestStep(b: *std.build.Builder, mode: std.builtin.Mode, packages: []std.build.Pkg) !*std.build.Step {
    const test_step = b.step("test", "Run all tests");
    var src_dir = try std.fs.openDirAbsolute(try std.fs.path.resolve(b.allocator, &[_][]const u8{
        b.build_root,
        "src",
    }), .{ .iterate = true });
    defer src_dir.close();
    var iterator = src_dir.iterate();
    while (try iterator.next()) |entry| {
        if (std.mem.endsWith(u8, entry.name, ".zig")) {
            const name = try std.fmt.allocPrint(b.allocator, "src/{s}", .{entry.name});
            defer b.allocator.free(name);
            const t = b.addTest(name);
            for (packages) |package| t.addPackage(package);
            t.setBuildMode(mode);
            test_step.dependOn(&t.step);
        }
    }
    return test_step;
}
src/aws.zig

@@ -1,6 +1,6 @@
 const std = @import("std");

-const awshttp = @import("awshttp.zig");
+const awshttp = @import("aws_http.zig");
 const json = @import("json.zig");
 const url = @import("url.zig");
 const case = @import("case.zig");
src/aws_authentication.zig (new file, 6 lines)

@@ -0,0 +1,6 @@
pub const Credentials = struct {
    access_key: []const u8,
    secret_key: []const u8,
    session_token: ?[]const u8,
    // uint64_t expiration_timepoint_seconds);
};
src/aws_credentials.zig (new file, 23 lines)

@@ -0,0 +1,23 @@
//! Implements the standard credential chain:
//! 1. Environment variables
//! 2. Web identity token from STS
//! 3. Credentials/config files
//! 4. ECS Container credentials, using AWS_CONTAINER_CREDENTIALS_RELATIVE_URI
//! 5. EC2 instance profile credentials
const std = @import("std");
const auth = @import("aws_authentication.zig");

pub fn getCredentials(allocator: std.mem.Allocator) !auth.Credentials {
    _ = allocator;
    if (getEnvironmentCredentials()) |cred| return cred;
    // TODO: 2-5
    return error.NotImplemented;
}

fn getEnvironmentCredentials() ?auth.Credentials {
    return auth.Credentials{
        .access_key = std.os.getenv("AWS_ACCESS_KEY_ID") orelse return null,
        .secret_key = std.os.getenv("AWS_SECRET_ACCESS_KEY") orelse return null,
        .session_token = std.os.getenv("AWS_SESSION_TOKEN"),
    };
}
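A minimal sketch of how this credential chain is meant to be consumed; the calling program below is hypothetical, and at this commit only step 1 (environment variables) is implemented, so any other source fails with error.NotImplemented:

const std = @import("std");
const credentials = @import("aws_credentials.zig");

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();

    // Requires AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY in the environment
    const creds = try credentials.getCredentials(gpa.allocator());
    std.log.info("access key: {s}", .{creds.access_key});
}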
src/aws_http.zig (new file, 315 lines)

@@ -0,0 +1,315 @@
//! This module provides a low level http interface for working with AWS
//! It also provides an option to operate outside the AWS ecosystem through
//! the makeRequest call with a null signingOptions.
//!
//! Typical usage:
//! const client = awshttp.AwsHttp.init(allocator);
//! defer client.deinit()
//! const result = client.callApi (or client.makeRequest)
//! defer result.deinit();
const std = @import("std");
const base = @import("aws_http_base.zig");
const signing = @import("aws_signing.zig");
const credentials = @import("aws_credentials.zig");
const zfetch = @import("zfetch");

const CN_NORTH_1_HASH = std.hash_map.hashString("cn-north-1");
const CN_NORTHWEST_1_HASH = std.hash_map.hashString("cn-northwest-1");
const US_ISO_EAST_1_HASH = std.hash_map.hashString("us-iso-east-1");
const US_ISOB_EAST_1_HASH = std.hash_map.hashString("us-isob-east-1");

const log = std.log.scoped(.awshttp);

pub const AwsError = error{
    AddHeaderError,
    AlpnError,
    CredentialsError,
    HttpClientConnectError,
    HttpRequestError,
    SignableError,
    SigningInitiationError,
    TlsError,
    RequestCreateError,
    SetupConnectionError,
    StatusCodeError,
    SetRequestMethodError,
    SetRequestPathError,
};

pub const Options = struct {
    region: []const u8 = "aws-global",
    dualstack: bool = false,
    sigv4_service_name: ?[]const u8 = null,
};

pub const Header = base.Header;
pub const HttpRequest = base.Request;
pub const HttpResult = base.Result;

const EndPoint = struct {
    uri: []const u8,
    host: []const u8,
    scheme: []const u8,
    port: u16,
    allocator: std.mem.Allocator,

    fn deinit(self: EndPoint) void {
        self.allocator.free(self.uri);
    }
};

pub const AwsHttp = struct {
    allocator: std.mem.Allocator,

    const Self = @This();

    pub fn init(allocator: std.mem.Allocator) Self {
        return .{
            .allocator = allocator,
            // .credentialsProvider = // creds provider could be useful
        };
    }

    pub fn deinit(self: *AwsHttp) void {
        _ = self;
        log.debug("Deinit complete", .{});
    }

    /// callApi allows the calling of AWS APIs through a higher-level interface.
    /// It will calculate the appropriate endpoint and action parameters for the
    /// service called, and will set up the signing options. The return
    /// value is simply a raw HttpResult
    pub fn callApi(self: Self, service: []const u8, request: HttpRequest, options: Options) !HttpResult {
        const endpoint = try regionSubDomain(self.allocator, service, options.region, options.dualstack);
        defer endpoint.deinit();
        log.debug("Calling endpoint {s}", .{endpoint.uri});
        const creds = try credentials.getCredentials(self.allocator);
        // defer allocator.free(), except sometimes we don't need freeing...
        const signing_config: signing.Config = .{
            .region = options.region,
            .service = options.sigv4_service_name orelse service,
            .credentials = creds,
        };
        return try self.makeRequest(endpoint, request, signing_config);
    }

    /// makeRequest is a low level http/https function that can be used inside
    /// or outside the context of AWS services. To use it outside AWS, simply
    /// pass a null value in for signing_options.
    ///
    /// Otherwise, it will simply take a URL endpoint (without path information),
    /// HTTP method (e.g. GET, POST, etc.), and request body.
    ///
    /// At the moment this does not allow the controlling of headers
    /// This is likely to change. Current headers are:
    ///
    /// Accept: application/json
    /// User-Agent: zig-aws 1.0, Powered by the AWS Common Runtime.
    /// Content-Type: application/x-www-form-urlencoded
    /// Content-Length: (length of body)
    ///
    /// Return value is an HttpResult, which will need the caller to deinit().
    /// HttpResult currently contains the body only. The addition of Headers
    /// and return code would be a relatively minor change
    pub fn makeRequest(self: Self, endpoint: EndPoint, request: HttpRequest, signing_config: ?signing.Config) !HttpResult {
        log.debug("Path: {s}", .{request.path});
        log.debug("Query: {s}", .{request.query});
        log.debug("Method: {s}", .{request.method});
        log.debug("body length: {d}", .{request.body.len});
        log.debug("Body\n====\n{s}\n====", .{request.body});
        // End CreateRequest. This should return a struct with a deinit function that can do
        // destroys, etc

        // TODO: Add headers
        _ = endpoint;
        //try self.addHeaders(endpoint.host, request.body, request.content_type, request.headers);
        if (signing_config) |opts| try signing.signRequest(self.allocator, request, opts);

        // TODO: make req
        try zfetch.init(); // This only does anything on Windows. Not sure how performant it is to do this on every request
        defer zfetch.deinit();
        var headers = zfetch.Headers.init(self.allocator);
        defer headers.deinit();
        for (request.headers) |header|
            try headers.appendValue(header.name, header.value);

        // TODO: Construct URL with endpoint and request info
        var req = try zfetch.Request.init(self.allocator, "https://www.lerch.org", null);

        // TODO: http method as requested
        // TODO: payload
        try req.do(.GET, headers, null);

        // TODO: Timeout - is this now above us?
        log.debug("request_complete. Response code {d}: {s}", .{ req.status.code, req.status.reason });
        log.debug("headers:", .{});
        var resp_headers = try std.ArrayList(Header).initCapacity(self.allocator, req.headers.list.items.len);
        for (req.headers.list.items) |h| {
            log.debug(" {s}: {s}", .{ h.name, h.value });
            resp_headers.appendAssumeCapacity(.{ .name = h.name, .value = h.value });
        }
        const reader = req.reader();
        // TODO: Get content length and use that to allocate the buffer
        var buf: [65535]u8 = undefined;
        while (true) {
            const read = try reader.read(&buf);
            if (read == 0) break;
        }
        log.debug("raw response body:\n{s}", .{buf});

        // Headers would need to be allocated/copied into HttpResult similar
        // to RequestContext, so we'll leave this as a later excercise
        // if it becomes necessary
        const rc = HttpResult{
            .response_code = req.status.code,
            .body = "change me", // TODO: work this all out
            .headers = resp_headers.toOwnedSlice(),
            .allocator = self.allocator,
        };
        return rc;
    }

    fn addHeaders(self: Self, host: []const u8, body: []const u8, content_type: []const u8, additional_headers: []Header) !void {
        _ = self;
        _ = host;
        _ = body;
        _ = content_type;
        _ = additional_headers;
        // const accept_header = c.aws_http_header{
        //     .name = c.aws_byte_cursor_from_c_str("Accept"),
        //     .value = c.aws_byte_cursor_from_c_str("application/json"),
        //     .compression = 0, // .AWS_HTTP_HEADER_COMPRESSION_USE_CACHE, // https://github.com/awslabs/aws-c-http/blob/ec42882310900f2b414b279fc24636ba4653f285/include/aws/http/request_response.h#L37
        // };

        // const host_header = c.aws_http_header{
        //     .name = c.aws_byte_cursor_from_c_str("Host"),
        //     .value = c.aws_byte_cursor_from_c_str(@ptrCast([*c]const u8, host)),
        //     .compression = 0, // .AWS_HTTP_HEADER_COMPRESSION_USE_CACHE,
        // };

        // const user_agent_header = c.aws_http_header{
        //     .name = c.aws_byte_cursor_from_c_str("User-Agent"),
        //     .value = c.aws_byte_cursor_from_c_str("zig-aws 1.0, Powered by the AWS Common Runtime."),
        //     .compression = 0, // .AWS_HTTP_HEADER_COMPRESSION_USE_CACHE,
        // };

        // AWS *does* seem to care about Content-Type. I don't think this header
        // will hold for all APIs
        // const c_type = try std.fmt.allocPrintZ(self.allocator, "{s}", .{content_type});
        // defer self.allocator.free(c_type);
        // const content_type_header = c.aws_http_header{
        //     .name = c.aws_byte_cursor_from_c_str("Content-Type"),
        //     .value = c.aws_byte_cursor_from_c_str(c_type),
        //     .compression = 0, // .AWS_HTTP_HEADER_COMPRESSION_USE_CACHE,
        // };
        //
        // for (additional_headers) |h| {
        //     const name = try std.fmt.allocPrintZ(self.allocator, "{s}", .{h.name});
        //     defer self.allocator.free(name);
        //     const value = try std.fmt.allocPrintZ(self.allocator, "{s}", .{h.value});
        //     defer self.allocator.free(value);
        //     const c_header = c.aws_http_header{
        //         .name = c.aws_byte_cursor_from_c_str(name),
        //         .value = c.aws_byte_cursor_from_c_str(value),
        //         .compression = 0, // .AWS_HTTP_HEADER_COMPRESSION_USE_CACHE,
        //     };
        //     if (c.aws_http_message_add_header(request, c_header) != c.AWS_OP_SUCCESS)
        //         return AwsError.AddHeaderError;
        // }
        //
        // if (body.len > 0) {
        //     const len = try std.fmt.allocPrintZ(self.allocator, "{d}", .{body.len});
        //     // This defer seems to work ok, but I'm a bit concerned about why
        //     defer self.allocator.free(len);
        //     const content_length_header = c.aws_http_header{
        //         .name = c.aws_byte_cursor_from_c_str("Content-Length"),
        //         .value = c.aws_byte_cursor_from_c_str(@ptrCast([*c]const u8, len)),
        //         .compression = 0, // .AWS_HTTP_HEADER_COMPRESSION_USE_CACHE,
        //     };
        //     if (c.aws_http_message_add_header(request, content_length_header) != c.AWS_OP_SUCCESS)
        //         return AwsError.AddHeaderError;
        // }
    }
};

fn fullCast(comptime T: type, val: anytype) T {
    return @ptrCast(T, @alignCast(@alignOf(T), val));
}

fn regionSubDomain(allocator: std.mem.Allocator, service: []const u8, region: []const u8, useDualStack: bool) !EndPoint {
    const environment_override = std.os.getenv("AWS_ENDPOINT_URL");
    if (environment_override) |override| {
        const uri = try allocator.dupeZ(u8, override);
        return endPointFromUri(allocator, uri);
    }
    // Fallback to us-east-1 if global endpoint does not exist.
    const realregion = if (std.mem.eql(u8, region, "aws-global")) "us-east-1" else region;
    const dualstack = if (useDualStack) ".dualstack" else "";

    const domain = switch (std.hash_map.hashString(region)) {
        US_ISO_EAST_1_HASH => "c2s.ic.gov",
        CN_NORTH_1_HASH, CN_NORTHWEST_1_HASH => "amazonaws.com.cn",
        US_ISOB_EAST_1_HASH => "sc2s.sgov.gov",
        else => "amazonaws.com",
    };

    const uri = try std.fmt.allocPrintZ(allocator, "https://{s}{s}.{s}.{s}", .{ service, dualstack, realregion, domain });
    const host = uri["https://".len..];
    log.debug("host: {s}, scheme: {s}, port: {}", .{ host, "https", 443 });
    return EndPoint{
        .uri = uri,
        .host = host,
        .scheme = "https",
        .port = 443,
        .allocator = allocator,
    };
}

/// creates an endpoint from a uri string.
///
/// allocator: Will be used only to construct the EndPoint struct
/// uri: string constructed in such a way that deallocation is needed
fn endPointFromUri(allocator: std.mem.Allocator, uri: []const u8) !EndPoint {
    var scheme: []const u8 = "";
    var host: []const u8 = "";
    var port: u16 = 443;
    var host_start: usize = 0;
    var host_end: usize = 0;
    for (uri) |ch, i| {
        switch (ch) {
            ':' => {
                if (!std.mem.eql(u8, scheme, "")) {
                    // here to end is port - this is likely a bug if ipv6 address used
                    const rest_of_uri = uri[i + 1 ..];
                    port = try std.fmt.parseUnsigned(u16, rest_of_uri, 10);
                    host_end = i;
                }
            },
            '/' => {
                if (host_start == 0) {
                    host_start = i + 2;
                    scheme = uri[0 .. i - 1];
                    if (std.mem.eql(u8, scheme, "http")) {
                        port = 80;
                    } else {
                        port = 443;
                    }
                }
            },
            else => continue,
        }
    }
    if (host_end == 0) {
        host_end = uri.len;
    }
    host = uri[host_start..host_end];

    log.debug("host: {s}, scheme: {s}, port: {}", .{ host, scheme, port });
    return EndPoint{
        .uri = uri,
        .host = host,
        .scheme = scheme,
        .allocator = allocator,
        .port = port,
    };
}
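Expanding the "Typical usage" comment at the top of the file into a rough sketch of a caller; the service name, region, and request body below are placeholder values, and at this commit signing and the response body are still stubbed out, so the call is illustrative rather than functional:

const std = @import("std");
const awshttp = @import("aws_http.zig");

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();

    var client = awshttp.AwsHttp.init(gpa.allocator());
    defer client.deinit();

    // Request defaults (POST, path "/", JSON content type) come from aws_http_base.zig
    const result = try client.callApi("sts", .{
        .body = "Action=GetCallerIdentity&Version=2011-06-15",
        .content_type = "application/x-www-form-urlencoded",
    }, .{ .region = "us-east-1" });
    defer result.deinit();

    std.log.info("response code: {d}", .{result.response_code});
}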
src/aws_http_base.zig (new file, 33 lines)

@@ -0,0 +1,33 @@
//! This module provides base data structures for aws http requests
const std = @import("std");
const log = std.log.scoped(.aws_base);
pub const Request = struct {
    path: []const u8 = "/",
    query: []const u8 = "",
    body: []const u8 = "",
    method: []const u8 = "POST",
    content_type: []const u8 = "application/json", // Can we get away with this?
    headers: []Header = &[_]Header{},
};
pub const Result = struct {
    response_code: u16, // actually 3 digits can fit in u10
    body: []const u8,
    headers: []Header,
    allocator: std.mem.Allocator,

    pub fn deinit(self: Result) void {
        self.allocator.free(self.body);
        for (self.headers) |h| {
            self.allocator.free(h.name);
            self.allocator.free(h.value);
        }
        self.allocator.free(self.headers);
        log.debug("http result deinit complete", .{});
        return;
    }
};

pub const Header = struct {
    name: []const u8,
    value: []const u8,
};
src/aws_signing.zig (new file, 80 lines)

@@ -0,0 +1,80 @@
const std = @import("std");
const base = @import("aws_http_base.zig");
const auth = @import("aws_authentication.zig");

const log = std.log.scoped(.aws_signing);

// see https://github.com/awslabs/aws-c-auth/blob/ace1311f8ef6ea890b26dd376031bed2721648eb/include/aws/auth/signing_config.h#L186-L207
const ConfigFlags = packed struct {
    // We assume the uri will be encoded once in preparation for transmission. Certain services
    // do not decode before checking signature, requiring us to actually double-encode the uri in the canonical
    // request in order to pass a signature check.

    use_double_uri_encode: bool = true,

    // Controls whether or not the uri paths should be normalized when building the canonical request
    should_normalize_uri_path: bool = true,

    // Controls whether "X-Amz-Security-Token" is omitted from the canonical request.
    // "X-Amz-Security-Token" is added during signing, as a header or
    // query param, when credentials have a session token.
    // If false (the default), this parameter is included in the canonical request.
    // If true, this parameter is still added, but omitted from the canonical request.
    omit_session_token: bool = true,
};

pub const Config = struct {
    // These two should be all you need to set most of the time
    service: []const u8,
    credentials: auth.Credentials,

    region: []const u8 = "aws-global",
    // https://github.com/awslabs/aws-c-auth/blob/ace1311f8ef6ea890b26dd376031bed2721648eb/include/aws/auth/signing_config.h#L38
    algorithm: enum { v4, v4a } = .v4,
    // https://github.com/awslabs/aws-c-auth/blob/ace1311f8ef6ea890b26dd376031bed2721648eb/include/aws/auth/signing_config.h#L24
    // config_type: ?? // CRT only has one value. We'll ignore for now
    // https://github.com/awslabs/aws-c-auth/blob/ace1311f8ef6ea890b26dd376031bed2721648eb/include/aws/auth/signing_config.h#L49
    signature_type: enum {
        headers, // we only support this
        query_params,
        request_chunk,
        request_event, // not implemented by CRT
        canonical_request_headers,
        canonical_request_query_params,
    } = .headers,
    signing_time: ?i64 = null, // Used for testing. If null, will use current time

    // In the CRT, should_sign_header is a function to allow header filtering.
    // The _ud would be a anyopaque user defined data for the function to use
    // .should_sign_header = null,
    // .should_sign_header_ud = null,

    // In the CRT, this is only used if the body has been precalculated. We don't have
    // this use case, and we'll ignore
    // .signed_body_value = c.aws_byte_cursor_from_c_str(""),
    signed_body_header: enum { sha256, none } = .sha256, // https://github.com/awslabs/aws-c-auth/blob/ace1311f8ef6ea890b26dd376031bed2721648eb/include/aws/auth/signing_config.h#L131

    // This is more complex in the CRT. We'll just take the creds. Someone
    // else can use a provider and get them in advance
    // https://github.com/awslabs/aws-c-auth/blob/ace1311f8ef6ea890b26dd376031bed2721648eb/include/aws/auth/signing_config.h#L225-L251
    // If non-zero and the signing transform is query param, then signing will add X-Amz-Expires to the query
    // string, equal to the value specified here. If this value is zero or if header signing is being used then
    // this parameter has no effect.
    expiration_in_seconds: u64 = 0,
};

pub const SigningError = error{
    NotImplemented,
};

pub fn signRequest(allocator: std.mem.Allocator, http_request: base.Request, config: Config) SigningError!void {
    _ = allocator;
    _ = http_request;
    try validateConfig(config);
    log.debug("Signing with access key: {s}", .{config.credentials.access_key});
}

fn validateConfig(config: Config) SigningError!void {
    _ = config;
    return SigningError.NotImplemented;
}
src/awshttp.zig (deleted, 408 lines)

@@ -1,408 +0,0 @@
//! This module provides a low level http interface for working with AWS
//! It also provides an option to operate outside the AWS ecosystem through
//! the makeRequest call with a null signingOptions.
//!
//! Typical usage:
//! const client = awshttp.AwsHttp.init(allocator);
//! defer client.deinit()
//! const result = client.callApi (or client.makeRequest)
//! defer result.deinit();
const std = @import("std");

const CN_NORTH_1_HASH = std.hash_map.hashString("cn-north-1");
const CN_NORTHWEST_1_HASH = std.hash_map.hashString("cn-northwest-1");
const US_ISO_EAST_1_HASH = std.hash_map.hashString("us-iso-east-1");
const US_ISOB_EAST_1_HASH = std.hash_map.hashString("us-isob-east-1");

const httplog = std.log.scoped(.awshttp);

pub const AwsError = error{
    AddHeaderError,
    AlpnError,
    CredentialsError,
    HttpClientConnectError,
    HttpRequestError,
    SignableError,
    SigningInitiationError,
    TlsError,
    RequestCreateError,
    SetupConnectionError,
    StatusCodeError,
    SetRequestMethodError,
    SetRequestPathError,
};

pub const Options = struct {
    region: []const u8 = "aws-global",
    dualstack: bool = false,
    sigv4_service_name: ?[]const u8 = null,
};

const SigningOptions = struct {
    region: []const u8 = "aws-global",
    service: []const u8,
};

pub const HttpRequest = struct {
    path: []const u8 = "/",
    query: []const u8 = "",
    body: []const u8 = "",
    method: []const u8 = "POST",
    content_type: []const u8 = "application/json", // Can we get away with this?
    headers: []Header = &[_]Header{},
};
pub const HttpResult = struct {
    response_code: u16, // actually 3 digits can fit in u10
    body: []const u8,
    headers: []Header,
    allocator: std.mem.Allocator,

    pub fn deinit(self: HttpResult) void {
        self.allocator.free(self.body);
        for (self.headers) |h| {
            self.allocator.free(h.name);
            self.allocator.free(h.value);
        }
        self.allocator.free(self.headers);
        httplog.debug("http result deinit complete", .{});
        return;
    }
};

pub const Header = struct {
    name: []const u8,
    value: []const u8,
};

const EndPoint = struct {
    uri: []const u8,
    host: []const u8,
    scheme: []const u8,
    port: u16,
    allocator: std.mem.Allocator,

    fn deinit(self: EndPoint) void {
        self.allocator.free(self.uri);
    }
};

pub const AwsHttp = struct {
    allocator: std.mem.Allocator,

    const Self = @This();

    pub fn init(allocator: std.mem.Allocator) Self {
        return .{
            .allocator = allocator,
            // .credentialsProvider = // creds provider could be useful
        };
    }

    pub fn deinit(self: *AwsHttp) void {
        httplog.debug("Deinit complete", .{});
    }

    /// callApi allows the calling of AWS APIs through a higher-level interface.
    /// It will calculate the appropriate endpoint and action parameters for the
    /// service called, and will set up the signing options. The return
    /// value is simply a raw HttpResult
    pub fn callApi(self: Self, service: []const u8, request: HttpRequest, options: Options) !HttpResult {
        const endpoint = try regionSubDomain(self.allocator, service, options.region, options.dualstack);
        defer endpoint.deinit();
        httplog.debug("Calling endpoint {s}", .{endpoint.uri});
        const signing_options: SigningOptions = .{
            .region = options.region,
            .service = if (options.sigv4_service_name) |name| name else service,
        };
        return try self.makeRequest(endpoint, request, signing_options);
    }

    /// makeRequest is a low level http/https function that can be used inside
    /// or outside the context of AWS services. To use it outside AWS, simply
    /// pass a null value in for signing_options.
    ///
    /// Otherwise, it will simply take a URL endpoint (without path information),
    /// HTTP method (e.g. GET, POST, etc.), and request body.
    ///
    /// At the moment this does not allow the controlling of headers
    /// This is likely to change. Current headers are:
    ///
    /// Accept: application/json
    /// User-Agent: zig-aws 1.0, Powered by the AWS Common Runtime.
    /// Content-Type: application/x-www-form-urlencoded
    /// Content-Length: (length of body)
    ///
    /// Return value is an HttpResult, which will need the caller to deinit().
    /// HttpResult currently contains the body only. The addition of Headers
    /// and return code would be a relatively minor change
    pub fn makeRequest(self: Self, endpoint: EndPoint, request: HttpRequest, signing_options: ?SigningOptions) !HttpResult {
        httplog.debug("Path: {s}", .{request.path});
        httplog.debug("Query: {s}", .{request.query});
        httplog.debug("Method: {s}", .{request.method});
        httplog.debug("body length: {d}", .{request.body.len});
        httplog.debug("Body\n====\n{s}\n====", .{request.body});
        // End CreateRequest. This should return a struct with a deinit function that can do
        // destroys, etc

        var context = RequestContext{
            .allocator = self.allocator,
        };
        try self.addHeaders(http_request.?, host, request.body, request.content_type, request.headers);
        if (signing_options) |opts| try self.signRequest(http_request.?, opts);

        // TODO: make req
        // TODO: Timeout
        httplog.debug("request_complete. Response code {d}", .{context.response_code.?});
        httplog.debug("headers:", .{});
        for (context.headers.?.items) |h| {
            httplog.debug(" {s}: {s}", .{ h.name, h.value });
        }
        httplog.debug("raw response body:\n{s}", .{context.body});

        // Headers would need to be allocated/copied into HttpResult similar
        // to RequestContext, so we'll leave this as a later excercise
        // if it becomes necessary
        const rc = HttpResult{
            .response_code = context.response_code.?,
            .body = final_body,
            .headers = context.headers.?.toOwnedSlice(),
            .allocator = self.allocator,
        };
        return rc;
    }

    fn signRequest(self: Self, http_request: *c.aws_http_message, options: SigningOptions) !void {
        const creds = try self.getCredentials();
        httplog.debug("Signing with access key: {s}", .{c.aws_string_c_str(access_key)});

        // const signing_region = try std.fmt.allocPrintZ(self.allocator, "{s}", .{options.region});
        // defer self.allocator.free(signing_region);
        // const signing_service = try std.fmt.allocPrintZ(self.allocator, "{s}", .{options.service});
        // defer self.allocator.free(signing_service);
        // const temp_signing_config = c.bitfield_workaround_aws_signing_config_aws{
        //     .algorithm = 0, // .AWS_SIGNING_ALGORITHM_V4, // https://github.com/awslabs/aws-c-auth/blob/ace1311f8ef6ea890b26dd376031bed2721648eb/include/aws/auth/signing_config.h#L38
        //     .config_type = 1, // .AWS_SIGNING_CONFIG_AWS, // https://github.com/awslabs/aws-c-auth/blob/ace1311f8ef6ea890b26dd376031bed2721648eb/include/aws/auth/signing_config.h#L24
        //     .signature_type = 0, // .AWS_ST_HTTP_REQUEST_HEADERS, // https://github.com/awslabs/aws-c-auth/blob/ace1311f8ef6ea890b26dd376031bed2721648eb/include/aws/auth/signing_config.h#L49
        //     .region = c.aws_byte_cursor_from_c_str(@ptrCast([*c]const u8, signing_region)),
        //     .service = c.aws_byte_cursor_from_c_str(@ptrCast([*c]const u8, signing_service)),
        //     .should_sign_header = null,
        //     .should_sign_header_ud = null,
        //     // TODO: S3 does not double uri encode. Also not sure why normalizing
        //     // the path here is a flag - seems like it should always do this?
        //     .flags = c.bitfield_workaround_aws_signing_config_aws_flags{
        //         .use_double_uri_encode = 1,
        //         .should_normalize_uri_path = 1,
        //         .omit_session_token = 1,
        //     },
        //     .signed_body_value = c.aws_byte_cursor_from_c_str(""),
        //     .signed_body_header = 1, // .AWS_SBHT_X_AMZ_CONTENT_SHA256, //or 0 = AWS_SBHT_NONE // https://github.com/awslabs/aws-c-auth/blob/ace1311f8ef6ea890b26dd376031bed2721648eb/include/aws/auth/signing_config.h#L131
        //     .credentials = creds,
        //     .credentials_provider = self.credentialsProvider,
        //     .expiration_in_seconds = 0,
        // };
        // return AwsError.SignableError;
    }


    fn addHeaders(self: Self, request: *c.aws_http_message, host: []const u8, body: []const u8, content_type: []const u8, additional_headers: []Header) !void {
        // const accept_header = c.aws_http_header{
        //     .name = c.aws_byte_cursor_from_c_str("Accept"),
        //     .value = c.aws_byte_cursor_from_c_str("application/json"),
        //     .compression = 0, // .AWS_HTTP_HEADER_COMPRESSION_USE_CACHE, // https://github.com/awslabs/aws-c-http/blob/ec42882310900f2b414b279fc24636ba4653f285/include/aws/http/request_response.h#L37
        // };

        // const host_header = c.aws_http_header{
        //     .name = c.aws_byte_cursor_from_c_str("Host"),
        //     .value = c.aws_byte_cursor_from_c_str(@ptrCast([*c]const u8, host)),
        //     .compression = 0, // .AWS_HTTP_HEADER_COMPRESSION_USE_CACHE,
        // };

        // const user_agent_header = c.aws_http_header{
        //     .name = c.aws_byte_cursor_from_c_str("User-Agent"),
        //     .value = c.aws_byte_cursor_from_c_str("zig-aws 1.0, Powered by the AWS Common Runtime."),
        //     .compression = 0, // .AWS_HTTP_HEADER_COMPRESSION_USE_CACHE,
        // };

        // AWS *does* seem to care about Content-Type. I don't think this header
        // will hold for all APIs
        const c_type = try std.fmt.allocPrintZ(self.allocator, "{s}", .{content_type});
        defer self.allocator.free(c_type);
        const content_type_header = c.aws_http_header{
            .name = c.aws_byte_cursor_from_c_str("Content-Type"),
            .value = c.aws_byte_cursor_from_c_str(c_type),
            .compression = 0, // .AWS_HTTP_HEADER_COMPRESSION_USE_CACHE,
        };

        for (additional_headers) |h| {
            const name = try std.fmt.allocPrintZ(self.allocator, "{s}", .{h.name});
            defer self.allocator.free(name);
            const value = try std.fmt.allocPrintZ(self.allocator, "{s}", .{h.value});
            defer self.allocator.free(value);
            const c_header = c.aws_http_header{
                .name = c.aws_byte_cursor_from_c_str(name),
                .value = c.aws_byte_cursor_from_c_str(value),
                .compression = 0, // .AWS_HTTP_HEADER_COMPRESSION_USE_CACHE,
            };
            if (c.aws_http_message_add_header(request, c_header) != c.AWS_OP_SUCCESS)
                return AwsError.AddHeaderError;
        }

        if (body.len > 0) {
            const len = try std.fmt.allocPrintZ(self.allocator, "{d}", .{body.len});
            // This defer seems to work ok, but I'm a bit concerned about why
            defer self.allocator.free(len);
            const content_length_header = c.aws_http_header{
                .name = c.aws_byte_cursor_from_c_str("Content-Length"),
                .value = c.aws_byte_cursor_from_c_str(@ptrCast([*c]const u8, len)),
                .compression = 0, // .AWS_HTTP_HEADER_COMPRESSION_USE_CACHE,
            };
            if (c.aws_http_message_add_header(request, content_length_header) != c.AWS_OP_SUCCESS)
                return AwsError.AddHeaderError;
        }
    }


    fn getCredentials(self: Self) !*c.aws_credentials {
        // const get_async_result =
        _ = c.aws_credentials_provider_get_credentials(self.credentialsProvider, callback, &callback_results);

        if (credential_result.error_code != c.AWS_ERROR_SUCCESS) {
            httplog.err("Could not acquire credentials: {s}:{s}", .{ c.aws_error_name(credential_result.error_code), c.aws_error_str(credential_result.error_code) });
            return AwsError.CredentialsError;
        }
        return credential_result.result orelse unreachable;
    }
};

fn fullCast(comptime T: type, val: anytype) T {
    return @ptrCast(T, @alignCast(@alignOf(T), val));
}

fn regionSubDomain(allocator: std.mem.Allocator, service: []const u8, region: []const u8, useDualStack: bool) !EndPoint {
    const environment_override = std.os.getenv("AWS_ENDPOINT_URL");
    if (environment_override) |override| {
        const uri = try allocator.dupeZ(u8, override);
        return endPointFromUri(allocator, uri);
    }
    // Fallback to us-east-1 if global endpoint does not exist.
    const realregion = if (std.mem.eql(u8, region, "aws-global")) "us-east-1" else region;
    const dualstack = if (useDualStack) ".dualstack" else "";

    const domain = switch (std.hash_map.hashString(region)) {
        US_ISO_EAST_1_HASH => "c2s.ic.gov",
        CN_NORTH_1_HASH, CN_NORTHWEST_1_HASH => "amazonaws.com.cn",
        US_ISOB_EAST_1_HASH => "sc2s.sgov.gov",
        else => "amazonaws.com",
    };

    const uri = try std.fmt.allocPrintZ(allocator, "https://{s}{s}.{s}.{s}", .{ service, dualstack, realregion, domain });
    const host = uri["https://".len..];
    httplog.debug("host: {s}, scheme: {s}, port: {}", .{ host, "https", 443 });
    return EndPoint{
        .uri = uri,
        .host = host,
        .scheme = "https",
        .port = 443,
        .allocator = allocator,
    };
}

/// creates an endpoint from a uri string.
///
/// allocator: Will be used only to construct the EndPoint struct
/// uri: string constructed in such a way that deallocation is needed
fn endPointFromUri(allocator: std.mem.Allocator, uri: []const u8) !EndPoint {
    var scheme: []const u8 = "";
    var host: []const u8 = "";
    var port: u16 = 443;
    var host_start: usize = 0;
    var host_end: usize = 0;
    for (uri) |ch, i| {
        switch (ch) {
            ':' => {
                if (!std.mem.eql(u8, scheme, "")) {
                    // here to end is port - this is likely a bug if ipv6 address used
                    const rest_of_uri = uri[i + 1 ..];
                    port = try std.fmt.parseUnsigned(u16, rest_of_uri, 10);
                    host_end = i;
                }
            },
            '/' => {
                if (host_start == 0) {
                    host_start = i + 2;
                    scheme = uri[0 .. i - 1];
                    if (std.mem.eql(u8, scheme, "http")) {
                        port = 80;
                    } else {
                        port = 443;
                    }
                }
            },
            else => continue,
        }
    }
    if (host_end == 0) {
        host_end = uri.len;
    }
    host = uri[host_start..host_end];

    httplog.debug("host: {s}, scheme: {s}, port: {}", .{ host, scheme, port });
    return EndPoint{
        .uri = uri,
        .host = host,
        .scheme = scheme,
        .allocator = allocator,
        .port = port,
    };
}

const RequestContext = struct {
    connection: ?*c.aws_http_connection = null,
    connection_complete: std.atomic.Atomic(bool) = std.atomic.Atomic(bool).init(false),
    request_complete: std.atomic.Atomic(bool) = std.atomic.Atomic(bool).init(false),
    return_error: ?AwsError = null,
    allocator: std.mem.Allocator,
    body: ?[]const u8 = null,
    response_code: ?u16 = null,
    headers: ?std.ArrayList(Header) = null,

    const Self = @This();

    pub fn deinit(self: Self) void {
        // We're going to leave it to the caller to free the body
        // if (self.body) |b| self.allocator.free(b);
        if (self.headers) |hs| {
            for (hs.items) |h| {
                // deallocate the copied values
                self.allocator.free(h.name);
                self.allocator.free(h.value);
            }
            // deallocate the structure itself
            hs.deinit();
        }
    }

    pub fn appendToBody(self: *Self, fragment: []const u8) !void {
        var orig_body: []const u8 = "";
        if (self.body) |b| {
            orig_body = try self.allocator.dupe(u8, b);
            self.allocator.free(b);
            self.body = null;
        }
        defer self.allocator.free(orig_body);
        self.body = try std.fmt.allocPrint(self.allocator, "{s}{s}", .{ orig_body, fragment });
    }

    pub fn addHeader(self: *Self, name: []const u8, value: []const u8) !void {
        if (self.headers == null)
            self.headers = std.ArrayList(Header).init(self.allocator);

        const name_copy = try self.allocator.dupeZ(u8, name);
        const value_copy = try self.allocator.dupeZ(u8, value);

        try self.headers.?.append(.{
            .name = name_copy,
            .value = value_copy,
        });
    }
};
src/main.zig (10 lines changed)

@@ -1,6 +1,7 @@
 const std = @import("std");
 const aws = @import("aws.zig");
 const json = @import("json.zig");
+const version = @import("git_version.zig");

 var verbose = false;

@@ -37,18 +38,19 @@ const Tests = enum {
 };

 pub fn main() anyerror!void {
-    const c_allocator = std.heap.c_allocator;
-    var gpa = std.heap.GeneralPurposeAllocator(.{}){
-        .backing_allocator = c_allocator,
-    };
+    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
     defer _ = gpa.deinit();
     const allocator = gpa.allocator();
     var tests = std.ArrayList(Tests).init(allocator);
     defer tests.deinit();
     var args = std.process.args();
+    var first = true;
     while (args.next(allocator)) |arg_or_error| {
         const arg = try arg_or_error;
         defer allocator.free(arg);
+        if (first)
+            std.log.info("{s} {s}", .{ arg, version.pretty_version });
+        first = false;
         if (std.mem.eql(u8, "-v", arg)) {
             verbose = true;
             continue;