Compare commits

...

10 Commits

Author SHA1 Message Date
9e02196dd0
clean compile on 0.11, not yet fully functional
Some checks failed
AWS-Zig Build / build-zig-0.9-amd64-host (push) Failing after 1m41s
2023-08-14 10:06:28 -07:00
64dc7b5772
add thoughts on adding git rev 2023-08-14 08:49:23 -07:00
3834fe6a49
add thoughts on code gen 2023-08-14 08:48:01 -07:00
4bc8889d32
fix more compile errors - leaving 2 nasty ones 2023-08-05 16:26:09 -07:00
63ff325068
find/fix more fmt errors 2023-08-05 13:29:23 -07:00
c18de40edd
remove git version info 2023-08-05 13:29:07 -07:00
d04e7b22e4
remove root pem - should no longer need this 2023-08-05 13:24:48 -07:00
f95de3457c
need to address testing 2023-08-05 13:11:30 -07:00
1950cdaba0
delete unneeded files 2023-08-05 13:11:03 -07:00
e49ed1b7ad
found fix for comptime decl stuff, avoid more codegen changes 2023-08-05 13:00:55 -07:00
15 changed files with 91 additions and 618 deletions

View File

@ -1,36 +0,0 @@
const std = @import("std");

/// Build step that copies a single file from one build-root-relative path
/// to another when the step runs. Uses the pre-0.11 std.build Step API
/// (custom step id + make callback).
const CopyStep = @This();

step: std.build.Step,
builder: *std.build.Builder,
// Absolute source path; resolved against the build root in create().
from_path: ?[]const u8 = null,
// Absolute destination path; resolved against the build root in create().
to_path: ?[]const u8 = null,

/// Allocates and initializes a CopyStep on the builder's allocator.
/// Both paths are given relative to the build root and are resolved to
/// absolute paths here. Panics on allocation failure (conventional in
/// build scripts, where OOM is unrecoverable).
pub fn create(
    b: *std.build.Builder,
    from_path_relative: []const u8,
    to_path_relative: []const u8,
) *CopyStep {
    var result = b.allocator.create(CopyStep) catch @panic("memory");
    result.* = CopyStep{
        .step = std.build.Step.init(.custom, "copy a file", b.allocator, make),
        .builder = b,
        .from_path = std.fs.path.resolve(b.allocator, &[_][]const u8{
            b.build_root,
            from_path_relative,
        }) catch @panic("memory"),
        .to_path = std.fs.path.resolve(b.allocator, &[_][]const u8{
            b.build_root,
            to_path_relative,
        }) catch @panic("memory"),
    };
    return result;
}

/// Step callback: performs the copy. Logs the failure and terminates the
/// build process with exit code 1 if the copy cannot be completed.
fn make(step: *std.build.Step) !void {
    const self = @fieldParentPtr(CopyStep, "step", step);
    std.fs.copyFileAbsolute(self.from_path.?, self.to_path.?, .{}) catch |e| {
        std.log.err("Error copying {s} to {s}: {}", .{ self.from_path.?, self.to_path.?, e });
        std.os.exit(1);
    };
}

View File

@ -1,218 +0,0 @@
//! Publish Date: 2021_10_17
//! This file is hosted at github.com/marler8997/zig-build-repos and is meant to be copied
//! to projects that use it.
const std = @import("std");
const GitRepoStep = @This();
/// Policy for how a mismatched or unobtainable commit SHA is reported.
pub const ShaCheck = enum {
    none,
    warn,
    err,

    /// Reports a SHA-check failure according to the selected policy:
    /// .warn logs a warning and continues; .err logs an error and
    /// terminates the build process (exit 0xff). Must not be called when
    /// the policy is .none — callers are expected to skip the check.
    pub fn reportFail(self: ShaCheck, comptime fmt: []const u8, args: anytype) void {
        switch (self) {
            .none => unreachable,
            .warn => std.log.warn(fmt, args),
            .err => {
                std.log.err(fmt, args);
                std.os.exit(0xff);
            },
        }
    }
};
step: std.build.Step,
builder: *std.build.Builder,
// Clone URL of the repository.
url: []const u8,
// Repository name; derived from the URL's basename in create().
name: []const u8,
// Optional branch passed to `git clone -b`.
branch: ?[]const u8 = null,
// Commit SHA the checkout is pinned to (and verified against).
sha: []const u8,
// Absolute path the repository is cloned into; create() defaults this to
// <build_root>/libs/<name> when not supplied.
path: ?[]const u8 = null,
// How to report SHA verification failures.
sha_check: ShaCheck = .warn,
// When false, a missing repository is an error instead of being cloned.
fetch_enabled: bool,
// Lazily-computed cache for the -Dfetch build option, so the option is
// only registered with the builder once.
var cached_default_fetch_option: ?bool = null;

/// Returns the value of the -Dfetch build option (defaulting to false),
/// registering and caching it on first use.
pub fn defaultFetchOption(b: *std.build.Builder) bool {
    return cached_default_fetch_option orelse blk: {
        const fetch = b.option(bool, "fetch", "automatically fetch network resources") orelse false;
        cached_default_fetch_option = fetch;
        break :blk fetch;
    };
}
/// Allocates and initializes a GitRepoStep on the builder's allocator.
/// `path` defaults to <build_root>/libs/<basename of url>; `fetch_enabled`
/// defaults to the -Dfetch build option. Panics on allocation failure
/// (conventional in build scripts).
pub fn create(b: *std.build.Builder, opt: struct {
    url: []const u8,
    branch: ?[]const u8 = null,
    sha: []const u8,
    path: ?[]const u8 = null,
    sha_check: ShaCheck = .warn,
    fetch_enabled: ?bool = null,
}) *GitRepoStep {
    var result = b.allocator.create(GitRepoStep) catch @panic("memory");
    const name = std.fs.path.basename(opt.url);
    result.* = GitRepoStep{
        .step = std.build.Step.init(.custom, "clone a git repository", b.allocator, make),
        .builder = b,
        .url = opt.url,
        .name = name,
        .branch = opt.branch,
        .sha = opt.sha,
        // Caller-supplied paths are duped so the step owns its memory;
        // otherwise resolve the default location under the build root.
        .path = if (opt.path) |p| (b.allocator.dupe(u8, p) catch @panic("memory")) else (std.fs.path.resolve(b.allocator, &[_][]const u8{
            b.build_root,
            "libs",
            name,
        })) catch @panic("memory"),
        .sha_check = opt.sha_check,
        .fetch_enabled = if (opt.fetch_enabled) |fe| fe else defaultFetchOption(b),
    };
    return result;
}
// TODO: this should be included in std.build, it helps find bugs in build files
/// Returns true if `dep_candidate` is reachable (directly or transitively)
/// from `step`'s dependency list.
fn hasDependency(step: *const std.build.Step, dep_candidate: *const std.build.Step) bool {
    for (step.dependencies.items) |dep| {
        // TODO: should probably use step.loop_flag to prevent infinite recursion
        // when a circular reference is encountered, or maybe keep track of
        // the steps encountered with a hash set
        if (dep == dep_candidate or hasDependency(dep, dep_candidate))
            return true;
    }
    return false;
}
/// Step callback: ensures the repository exists at `self.path`, pinned to
/// `self.sha`.
///
/// If the path is inaccessible, either abort with manual instructions
/// (when fetching is disabled) or clone the repository, check out the
/// pinned SHA onto a local branch named "fordep", and only then update
/// submodules so they match the pinned revision. Always finishes by
/// verifying HEAD's SHA per `sha_check`.
fn make(step: *std.build.Step) !void {
    const self = @fieldParentPtr(GitRepoStep, "step", step);
    std.fs.accessAbsolute(self.path.?, .{ .mode = .read_only }) catch {
        const branch_args = if (self.branch) |b| &[2][]const u8{ " -b ", b } else &[2][]const u8{ "", "" };
        if (!self.fetch_enabled) {
            std.debug.print("Error: git repository '{s}' does not exist\n", .{self.path.?});
            std.debug.print("  Use -Dfetch to download it automatically, or run the following to clone it:\n", .{});
            // Fixed: the suggested command previously created a branch
            // named "for_ziget" (left over from the upstream project this
            // file was copied from) while the automated clone below uses
            // "fordep"; the suggestion now matches the automated behavior.
            std.debug.print("  git clone {s}{s}{s} {s} && git -C {3s} checkout {s} -b fordep\n", .{ self.url, branch_args[0], branch_args[1], self.path.?, self.sha });
            std.os.exit(1);
        }
        {
            var args = std.ArrayList([]const u8).init(self.builder.allocator);
            defer args.deinit();
            try args.append("git");
            try args.append("clone");
            // This is a bad idea, because we really want to get to the correct
            // revision before we go updating submodules
            // try args.append("--recurse-submodules");
            try args.append(self.url);
            // TODO: clone it to a temporary location in case of failure
            //       also, remove that temporary location before running
            try args.append(self.path.?);
            if (self.branch) |branch| {
                try args.append("-b");
                try args.append(branch);
            }
            try run(self.builder, args.items);
        }
        try run(self.builder, &[_][]const u8{
            "git",
            "-C",
            self.path.?,
            "checkout",
            self.sha,
            "-b",
            "fordep",
        });
        // Now that we're on the correct revision, we can update submodules
        try run(self.builder, &[_][]const u8{
            "git",
            "-C",
            self.path.?,
            "submodule",
            "update",
            "--init",
            "--recursive",
        });
    };
    try self.checkSha();
}
/// Verifies that the checked-out repository's HEAD matches the pinned
/// `sha`, reporting any mismatch or git failure per `sha_check`.
/// A no-op when the policy is .none.
fn checkSha(self: GitRepoStep) !void {
    if (self.sha_check == .none)
        return;
    // Run `git rev-parse HEAD` in the repo and capture either its output
    // or the reason it failed.
    const result: union(enum) { failed: anyerror, output: []const u8 } = blk: {
        const result = std.ChildProcess.exec(.{
            .allocator = self.builder.allocator,
            .argv = &[_][]const u8{
                "git",
                "-C",
                self.path.?,
                "rev-parse",
                "HEAD",
            },
            .cwd = self.builder.build_root,
            .env_map = self.builder.env_map,
        }) catch |e| break :blk .{ .failed = e };
        // Forward whatever git printed on stderr so failures are visible.
        try std.io.getStdErr().writer().writeAll(result.stderr);
        switch (result.term) {
            .Exited => |code| {
                if (code == 0) break :blk .{ .output = result.stdout };
                break :blk .{ .failed = error.GitProcessNonZeroExit };
            },
            .Signal => break :blk .{ .failed = error.GitProcessFailedWithSignal },
            .Stopped => break :blk .{ .failed = error.GitProcessWasStopped },
            .Unknown => break :blk .{ .failed = error.GitProcessFailed },
        }
    };
    switch (result) {
        .failed => |err| {
            // Fixed typo in the user-facing message: "retreive" -> "retrieve".
            return self.sha_check.reportFail("failed to retrieve sha for repository '{s}': {s}", .{ self.name, @errorName(err) });
        },
        .output => |output| {
            // Trim git's trailing newline before comparing against the pin.
            if (!std.mem.eql(u8, std.mem.trimRight(u8, output, "\n\r"), self.sha)) {
                return self.sha_check.reportFail("repository '{s}' sha does not match\nexpected: {s}\nactual  : {s}\n", .{ self.name, self.sha, output });
            }
        },
    }
}
/// Runs `argv` as a child process with stdout/stderr inherited, logging
/// the quoted command line first. Terminates the build process with exit
/// code 0xff if the command fails.
fn run(builder: *std.build.Builder, argv: []const []const u8) !void {
    {
        // Render the command line, quoting each argument, for the debug log.
        var msg = std.ArrayList(u8).init(builder.allocator);
        defer msg.deinit();
        const writer = msg.writer();
        var prefix: []const u8 = "";
        for (argv) |arg| {
            try writer.print("{s}\"{s}\"", .{ prefix, arg });
            prefix = " ";
        }
        std.log.debug("[RUN] {s}", .{msg.items});
    }
    var child = std.ChildProcess.init(argv, builder.allocator);
    child.stdin_behavior = .Ignore;
    child.stdout_behavior = .Inherit;
    child.stderr_behavior = .Inherit;
    child.cwd = builder.build_root;
    child.env_map = builder.env_map;
    try child.spawn();
    const result = try child.wait();
    // Fixed: this helper runs clone, checkout AND submodule commands, so
    // the failure messages now say "git command" instead of the original,
    // misleading "git clone".
    switch (result) {
        .Exited => |code| if (code != 0) {
            std.log.err("git command failed with exit code {}", .{code});
            std.os.exit(0xff);
        },
        else => {
            std.log.err("git command failed with: {}", .{result});
            std.os.exit(0xff);
        },
    }
}
// Gets the repository path and also verifies that the step requesting the path
// is dependent on this step (catching build-graph mistakes early).
pub fn getPath(self: *const GitRepoStep, who_wants_to_know: *const std.build.Step) []const u8 {
    if (!hasDependency(who_wants_to_know, &self.step))
        @panic("a step called GitRepoStep.getPath but has not added it as a dependency");
    // Fixed: `path` is `?[]const u8`, so returning it directly does not
    // compile against the `[]const u8` return type. create() always sets
    // it (caller-supplied or derived default), so unwrapping is safe.
    return self.path.?;
}

View File

@ -1,229 +0,0 @@
//! Publish Date: 2022-01-12
//! This file is hosted at ??? and is meant to be copied
//! to projects that use it. Sample usage:
//!
//! const version = VersionStep.create(b, null);
//! exe.step.dependOn(&version.step);
const std = @import("std");
const Step = @This();
step: std.build.Step,
builder: *std.build.Builder,
// Absolute path of the Zig source file to generate under src/.
version_path: []const u8,

// Creates a step that will add the git version info in a file in src/
// so it can be consumed by additional code. If version_path is not specified,
// it will default to "git_version.zig". This should be part of .gitignore
/// Panics on allocation failure (conventional in build scripts).
pub fn create(b: *std.build.Builder, version_path: ?[]const u8) *Step {
    var result = b.allocator.create(Step) catch @panic("memory");
    result.* = Step{
        .step = std.build.Step.init(.custom, "create version file", b.allocator, make),
        .builder = b,
        .version_path = std.fs.path.resolve(b.allocator, &[_][]const u8{
            b.build_root,
            "src",
            version_path orelse "git_version.zig",
        }) catch @panic("memory"),
    };
    return result;
}
/// Step callback: queries git for version info and (over)writes
/// `version_path` as a Zig source file exporting hash, abbreviated_hash,
/// commit_date, branch, dirty and pretty_version constants.
fn make(step: *std.build.Step) !void {
    const self = @fieldParentPtr(Step, "step", step);
    const file = try std.fs.createFileAbsolute(self.version_path, .{});
    defer file.close();
    const version = try getGitVersion(
        self.builder.allocator,
        self.builder.build_root,
        self.builder.env_map,
    );
    defer version.deinit();
    try file.writer().print(
        \\pub const hash = "{s}";
        \\pub const abbreviated_hash = "{s}";
        \\pub const commit_date = "{s}";
        \\pub const branch = "{s}";
        \\pub const dirty = {};
        \\pub const pretty_version = "{s}";
    , .{
        version.hash,
        version.abbreviated_hash,
        version.commit_date,
        version.branch,
        version.dirty,
        version.pretty_version,
    });
}
/// Parsed git version information. String fields are owned by `allocator`
/// and released by deinit().
const GitVersion = struct {
    hash: []const u8,
    abbreviated_hash: []const u8,
    commit_date: []const u8,
    branch: []const u8,
    dirty: bool,
    pretty_version: []const u8,
    allocator: std.mem.Allocator,

    const Self = @This();

    /// Frees every owned string (note `dirty` is a plain bool and needs
    /// no cleanup). NOTE(review): assumes all string fields were
    /// allocated with `allocator`; verify that every construction site
    /// (including the CI fallback path) satisfies this before deinit.
    pub fn deinit(self: Self) void {
        self.allocator.free(self.hash);
        self.allocator.free(self.abbreviated_hash);
        self.allocator.free(self.commit_date);
        self.allocator.free(self.branch);
        self.allocator.free(self.pretty_version);
    }
};
/// Queries git for the current commit (hash, abbreviated hash, commit
/// date, branch) and working-tree dirty status. If git cannot be run but
/// DRONE_COMMIT_SHA is set, falls back to Drone CI environment variables.
/// Caller releases the result with GitVersion.deinit().
fn getGitVersion(allocator: std.mem.Allocator, git_working_root: ?[]const u8, env: anytype) !GitVersion {
    // git log -1 --pretty="%H%n%h%n%ci%n%D"
    // 3bf6adc13e4aa653a7b75b1b5e9c9db5215df8e1
    // 3bf6adc
    // 2022-01-12 12:21:28 -0800
    // HEAD -> zig-native
    const log_output = run(
        allocator,
        &[_][]const u8{
            "git",
            "log",
            "-1",
            "--pretty=%H%n%h%n%ci%n%D",
        },
        git_working_root,
        env,
    ) catch |e| {
        // git unavailable (or not a repo): use CI-provided values when
        // running under Drone, otherwise propagate the failure.
        if (std.os.getenv("DRONE_COMMIT_SHA") != null)
            return getGitVersionFromDrone(allocator);
        return e;
    };
    defer allocator.free(log_output);
    // The four pretty-format lines, duped so they outlive log_output.
    const line_data = try getLines(allocator, 4, log_output);
    const hash = line_data[0];
    const abbrev_hash = line_data[1];
    const date = line_data[2];
    const branch = line_data[3];
    // git status --porcelain
    const status_output = try run(
        allocator,
        &[_][]const u8{
            "git",
            "status",
            "--porcelain",
        },
        git_working_root,
        env,
    );
    // Any porcelain output at all means the working tree is dirty.
    // NOTE(review): a zero-length status_output is never freed — confirm
    // whether run() allocates for empty stdout (likely a tiny, one-shot
    // leak in a short-lived build process if so).
    const dirty = blk: {
        if (status_output.len > 0) {
            allocator.free(status_output);
            break :blk true;
        }
        break :blk false;
    };
    const dirty_str = blk: {
        if (dirty) {
            break :blk " (dirty)";
        }
        break :blk "";
    };
    return GitVersion{
        .hash = hash,
        .abbreviated_hash = abbrev_hash,
        .commit_date = date,
        .branch = branch,
        .allocator = allocator,
        .dirty = dirty,
        .pretty_version = try prettyVersion(allocator, abbrev_hash, date, dirty_str),
    };
}
/// Formats a human-readable version string such as
/// "version a662f6f, committed at 2022-06-05 18:34:39 -0700 (dirty)".
/// Caller owns (and frees) the returned slice.
fn prettyVersion(allocator: std.mem.Allocator, abbrev_hash: []const u8, date: []const u8, dirty_str: []const u8) ![]const u8 {
    return std.fmt.allocPrint(
        allocator,
        "version {s}, committed at {s}{s}",
        .{ abbrev_hash, date, dirty_str },
    );
}
/// Builds a GitVersion from Drone CI environment variables. Asserts (via
/// `.?`) that DRONE_COMMIT_SHA, DRONE_BUILD_STARTED and
/// DRONE_COMMIT_BRANCH are set — callers check DRONE_COMMIT_SHA first.
///
/// Fixed: the environment-variable slices are now duped onto `allocator`.
/// The original stored getenv-backed pointers directly, so a later
/// GitVersion.deinit() (which frees every string field) would free
/// memory that was never allocated.
fn getGitVersionFromDrone(allocator: std.mem.Allocator) !GitVersion {
    const full_hash = std.os.getenv("DRONE_COMMIT_SHA").?;
    const abbrev_hash = try allocator.dupe(u8, full_hash[0..7]); // This isn't quite how git works, but ok
    const date = try allocator.dupe(u8, std.os.getenv("DRONE_BUILD_STARTED").?); // this is a timestamp :(
    return GitVersion{
        .hash = try allocator.dupe(u8, full_hash),
        .abbreviated_hash = abbrev_hash,
        .commit_date = date,
        .branch = try allocator.dupe(u8, std.os.getenv("DRONE_COMMIT_BRANCH").?),
        .allocator = allocator,
        .dirty = false,
        .pretty_version = try prettyVersion(allocator, abbrev_hash, date, ""),
    };
}
/// Splits `data` into its first `line_count` newline-terminated lines,
/// returning allocator-duped copies; caller owns (and frees) each entry.
/// Lines beyond `line_count` are ignored. If `data` holds fewer than
/// `line_count` newline-terminated lines, the remaining entries are left
/// undefined (callers pass git output with a known line count).
fn getLines(allocator: std.mem.Allocator, comptime line_count: u32, data: []const u8) ![line_count][]u8 {
    var line: u32 = 0;
    var start: u32 = 0;
    var current: u32 = 0;
    var line_data: [line_count][]u8 = undefined;
    // On error, free exactly the entries that were assigned (indices
    // 0..line-1). Fixed off-by-one: the original freed line_data[line]
    // BEFORE decrementing, which touched an unassigned slot and never
    // freed line_data[0].
    errdefer {
        while (line > 0) {
            line -= 1;
            allocator.free(line_data[line]);
        }
    }
    for (data) |c| {
        if (c == '\n') {
            line_data[line] = try allocator.dupe(u8, data[start..current]);
            start = current + 1;
            line += 1;
            // Fixed: stop once the requested number of lines is collected
            // instead of indexing past the end of the result array when
            // the input has extra lines.
            if (line == line_count) break;
        }
        current += 1;
    }
    return line_data;
}
// env is a std.process.BufMap, but that's private, which is a little weird tbh
/// Runs `argv` in `cwd`, returning captured stdout (caller frees). Any
/// stderr output is forwarded to this process's stderr. Terminates the
/// process with exit code 0xff if the command fails.
fn run(allocator: std.mem.Allocator, argv: []const []const u8, cwd: ?[]const u8, env: anytype) ![]const u8 {
    {
        // NOTE(review): this block renders a quoted command line, but the
        // log call below is commented out, so the work is currently unused.
        var msg = std.ArrayList(u8).init(allocator);
        defer msg.deinit();
        const writer = msg.writer();
        var prefix: []const u8 = "";
        for (argv) |arg| {
            try writer.print("{s}\"{s}\"", .{ prefix, arg });
            prefix = " ";
        }
        // std.log.debug("[RUN] {s}", .{msg.items});
    }
    const result = try std.ChildProcess.exec(.{
        .allocator = allocator,
        .argv = argv,
        .cwd = cwd,
        .env_map = env,
    });
    // Forward stderr, then release it. NOTE(review): zero-length stderr
    // is never freed — confirm exec allocates in that case.
    defer if (result.stderr.len > 0) allocator.free(result.stderr);
    try std.io.getStdErr().writer().writeAll(result.stderr);
    switch (result.term) {
        .Exited => |code| if (code != 0) {
            std.log.err("process failed with exit code: {}", .{code});
            std.os.exit(0xff);
        },
        else => {
            std.log.err("process failed due to exception: {}", .{result});
            std.os.exit(0xff);
        },
    }
    return result.stdout;
}

View File

@ -1,7 +1,6 @@
const std = @import("std");
const builtin = @import("builtin");
const Builder = @import("std").build.Builder;
const tst = @import("build_test.zig");
pub fn build(b: *Builder) !void {
// Standard target options allows the person running `zig build` to choose
@ -14,6 +13,16 @@ pub fn build(b: *Builder) !void {
// between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall.
const optimize = b.standardOptimizeOption(.{});
// TODO: Embed the current git version in the code. We can do this
// by looking for .git/HEAD (if it exists, follow the ref to /ref/heads/whatevs,
// grab that commit, and use b.addOptions/exe.addOptions to generate the
// Options file. See https://github.com/ziglang/zig/issues/14979 for usage
// example.
//
// From there, I'm not sure what the generated file looks like or quite how
// to use, but that should be easy. It may also give some ideas on the
// code gen piece itself, though it might be nice to leave as a seperate
// executable
const exe = b.addExecutable(.{
.name = "demo",
.root_source_file = .{ .path = "src/main.zig" },
@ -44,8 +53,7 @@ pub fn build(b: *Builder) !void {
const run_step = b.step("run", "Run the app");
run_step.dependOn(&run_cmd.step);
// TODO: Demo for testing is kind of terrible. Proper testing
// var test_step = try tst.addTestStep(b, optimize, exe.packages.items);
// TODO: Proper testing
var codegen: ?*std.build.Step = null;
if (target.getOs().tag == .linux and false) {
@ -53,13 +61,25 @@ pub fn build(b: *Builder) !void {
// std.build.RunStep.create(null,null).cwd(std.fs.path.resolve(b.build_root, "codegen")).addArgs(...)
codegen = b.step("gen", "Generate zig service code from smithy models");
const cg = codegen.?;
// TODO: this should use zig_exe from std.Build
// codegen should store a hash in a comment
// this would be hash of the exe that created the file
// concatenated with hash of input json. this would
// allow skipping generated files. May not include hash
// of contents of output file as maybe we want to tweak
// manually??
//
// All the hashes can be in service_manifest.zig, which
// could be fun to just parse and go nuts. Top of
// file, generator exe hash. Each import has comment
// with both input and output hash and we can decide
// later about warning on manual changes...
//
// this scheme would permit cross plat codegen and maybe
// we can have codegen added in a seperate repo,
// though not sure how necessary that is
cg.dependOn(&b.addSystemCommand(&.{ "/bin/sh", "-c", "cd codegen && zig build" }).step);
// This can probably be triggered instead by GitRepoStep cloning the repo
// with models
// Since codegen binary is built every time, if it's newer than our
// service manifest we know it needs to be regenerated. So this step
// will remove the service manifest if codegen has been touched, thereby
// triggering the re-gen
cg.dependOn(&b.addSystemCommand(&.{
"/bin/sh", "-c",

View File

@ -1,32 +0,0 @@
//! Publish Date: 2022-01-12
//! This file is hosted at ??? and is meant to be copied
//! to projects that use it. Sample usage:
//!
//! const @"test" = @import("build_test.zig");
//! var test_step = try @"test".addTestStep(b, mode, exe.packages.items);
const std = @import("std");
/// Creates a "test" build step that runs `zig test` on every .zig file
/// found directly inside src/ (non-recursive), attaching the given
/// packages to each test. Uses the pre-0.11 std.build API (path-based
/// addTest, setBuildMode, openIterableDir).
pub fn addTestStep(b: *std.build.Builder, mode: std.builtin.Mode, packages: []std.build.Pkg) !*std.build.Step {
    const test_step = b.step("test", "Run all tests");
    const src_path = try std.fs.path.resolve(b.allocator, &[_][]const u8{
        b.build_root,
        "src",
    });
    defer b.allocator.free(src_path);
    var src_dir = try std.fs.openDirAbsolute(src_path, .{});
    defer src_dir.close();
    var iterable = try src_dir.openIterableDir(".", .{});
    defer iterable.close();
    var iterator = iterable.iterate();
    while (try iterator.next()) |entry| {
        if (std.mem.endsWith(u8, entry.name, ".zig")) {
            // Build a build-root-relative path for addTest.
            // NOTE(review): `name` is freed at end of this iteration while
            // the test step presumably keeps the path — confirm addTest
            // dupes its argument in the targeted std.build version.
            const name = try std.fmt.allocPrint(b.allocator, "src/{s}", .{entry.name});
            defer b.allocator.free(name);
            const t = b.addTest(name);
            for (packages) |package| t.addPackage(package);
            t.setBuildMode(mode);
            test_step.dependOn(&t.step);
        }
    }
    return test_step;
}

View File

@ -1,20 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDQTCCAimgAwIBAgITBmyfz5m/jAo54vB4ikPmljZbyjANBgkqhkiG9w0BAQsF
ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6
b24gUm9vdCBDQSAxMB4XDTE1MDUyNjAwMDAwMFoXDTM4MDExNzAwMDAwMFowOTEL
MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv
b3QgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALJ4gHHKeNXj
ca9HgFB0fW7Y14h29Jlo91ghYPl0hAEvrAIthtOgQ3pOsqTQNroBvo3bSMgHFzZM
9O6II8c+6zf1tRn4SWiw3te5djgdYZ6k/oI2peVKVuRF4fn9tBb6dNqcmzU5L/qw
IFAGbHrQgLKm+a/sRxmPUDgH3KKHOVj4utWp+UhnMJbulHheb4mjUcAwhmahRWa6
VOujw5H5SNz/0egwLX0tdHA114gk957EWW67c4cX8jJGKLhD+rcdqsq08p8kDi1L
93FcXmn/6pUCyziKrlA4b9v7LWIbxcceVOF34GfID5yHI9Y/QCB/IIDEgEw+OyQm
jgSubJrIqg0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC
AYYwHQYDVR0OBBYEFIQYzIU07LwMlJQuCFmcx7IQTgoIMA0GCSqGSIb3DQEBCwUA
A4IBAQCY8jdaQZChGsV2USggNiMOruYou6r4lK5IpDB/G/wkjUu0yKGX9rbxenDI
U5PMCCjjmCXPI6T53iHTfIUJrU6adTrCC2qJeHZERxhlbI1Bjjt/msv0tadQ1wUs
N+gDS63pYaACbvXy8MWy7Vu33PqUXHeeE6V/Uq2V8viTO96LXFvKWlJbYK8U90vv
o/ufQJVtMVT8QtPHRh8jrdkPSHCa2XV4cdFyQzR1bldZwgJcJmApzyMZFo6IQ6XU
5MsI+yMRQ+hDKXJioaldXgjUkK642M4UwtBV8ob2xJNDd2ZhwLnoQdeXeGADbkpy
rqXRfboQnoZsG4q5WTP468SQvvG5
-----END CERTIFICATE-----

View File

@ -76,7 +76,7 @@ pub fn Request(comptime request_action: anytype) type {
Self.service_meta.version,
action.action_name,
});
log.debug("proto: {s}", .{Self.service_meta.aws_protocol});
log.debug("proto: {}", .{Self.service_meta.aws_protocol});
// It seems as though there are 3 major branches of the 6 protocols.
// 1. query/ec2_query, which are identical until you get to complex
@ -600,7 +600,7 @@ pub fn Request(comptime request_action: anytype) type {
\\This could be the result of a bug or a stale set of code generated
\\service models.
\\
\\Model Type: {s}
\\Model Type: {}
\\
\\Response from server:
\\
@ -620,7 +620,7 @@ pub fn Request(comptime request_action: anytype) type {
\\This could be the result of a bug or a stale set of code generated
\\service models.
\\
\\Model Type: {s}
\\Model Type: {}
\\
\\Response from server:
\\
@ -674,7 +674,7 @@ fn generalAllocPrint(allocator: std.mem.Allocator, val: anytype) !?[]const u8 {
}
}
fn headersFor(allocator: std.mem.Allocator, request: anytype) ![]awshttp.Header {
log.debug("Checking for headers to include for type {s}", .{@TypeOf(request)});
log.debug("Checking for headers to include for type {}", .{@TypeOf(request)});
if (!@hasDecl(@TypeOf(request), "http_header")) return &[_]awshttp.Header{};
const http_header = @TypeOf(request).http_header;
const fields = std.meta.fields(@TypeOf(http_header));
@ -983,13 +983,13 @@ fn buildQuery(allocator: std.mem.Allocator, request: anytype) ![]const u8 {
const writer = buffer.writer();
defer buffer.deinit();
var prefix = "?";
// TODO: This was a pain before, and it's a pain now. Clearly our codegen
// needs to emit a declaration 100% of the time
const query_arguments = @TypeOf(request).http_query;
inline for (@typeInfo(@TypeOf(query_arguments)).Struct.fields) |arg| {
const val = @field(request, arg.name);
if (try addQueryArg(arg.type, prefix, @field(query_arguments, arg.name), val, writer))
prefix = "&";
if (@hasDecl(@TypeOf(request), "http_query")) {
const query_arguments = @field(@TypeOf(request), "http_query");
inline for (@typeInfo(@TypeOf(query_arguments)).Struct.fields) |arg| {
const val = @field(request, arg.name);
if (try addQueryArg(arg.type, prefix, @field(query_arguments, arg.name), val, writer))
prefix = "&";
}
}
return buffer.toOwnedSlice();
}

View File

@ -196,7 +196,7 @@ fn getImdsv2Credentials(allocator: std.mem.Allocator) !?auth.Credentials {
defer resp_payload.deinit();
try resp_payload.resize(req.response.content_length.?);
token = try resp_payload.toOwnedSlice();
errdefer allocator.free(token);
errdefer if (token) |t| allocator.free(t);
_ = try req.readAll(token.?);
}
std.debug.assert(token != null);
@ -207,7 +207,7 @@ fn getImdsv2Credentials(allocator: std.mem.Allocator) !?auth.Credentials {
return null;
}
defer allocator.free(role_name.?);
log.debug("Got role name '{s}'", .{role_name});
log.debug("Got role name '{s}'", .{role_name.?});
return getImdsCredentials(allocator, &cl, role_name.?, token.?);
}

View File

@ -190,7 +190,7 @@ pub const AwsHttp = struct {
// TODO: Timeout - is this now above us?
log.debug(
"Request Complete. Response code {d}: {s}",
"Request Complete. Response code {d}: {any}",
.{ @intFromEnum(req.response.status), req.response.status.phrase() },
);
log.debug("Response headers:", .{});

View File

@ -358,7 +358,7 @@ fn createCanonicalRequest(allocator: std.mem.Allocator, request: base.Request, p
;
// TODO: This is all better as a writer - less allocations/copying
const canonical_method = canonicalRequestMethod(request.method);
const canonical_method = try canonicalRequestMethod(request.method);
// Let's not mess around here...s3 is the oddball
const double_encode = !std.mem.eql(u8, config.service, "s3");
const canonical_url = try canonicalUri(allocator, request.path, double_encode);

View File

@ -1,6 +0,0 @@
//! Git version constants for consumption via @import("git_version.zig").
//! NOTE(review): these decls match exactly what the VersionStep's make()
//! writes, so this appears to be a generated snapshot that was checked
//! in — confirm whether it should be .gitignore'd and regenerated instead.
pub const hash = "a662f6f674e66aa4cff435553e2593b7fd0a9aef";
pub const abbreviated_hash = "a662f6f";
pub const commit_date = "2022-06-05 18:34:39 -0700";
pub const branch = "HEAD -> master, origin/master";
pub const dirty = true;
pub const pretty_version = "version a662f6f, committed at 2022-06-05 18:34:39 -0700 (dirty)";

View File

@ -1,7 +1,6 @@
const std = @import("std");
const aws = @import("aws.zig");
const json = @import("json.zig");
const version = @import("git_version.zig");
var verbose: u8 = 0;
@ -63,11 +62,7 @@ pub fn main() anyerror!void {
var tests = std.ArrayList(Tests).init(allocator);
defer tests.deinit();
var args = std.process.args();
var first = true;
while (args.next()) |arg| {
if (first)
std.log.info("{s} {s}", .{ arg, version.pretty_version });
first = false;
if (std.mem.eql(u8, "-v", arg)) {
verbose += 1;
continue;
@ -101,64 +96,64 @@ pub fn main() anyerror!void {
const call = try aws.Request(services.sts.get_caller_identity).call(.{}, options);
// const call = try client.call(services.sts.get_caller_identity.Request{}, options);
defer call.deinit();
std.log.info("arn: {s}", .{call.response.arn});
std.log.info("id: {s}", .{call.response.user_id});
std.log.info("account: {s}", .{call.response.account});
std.log.info("requestId: {s}", .{call.response_metadata.request_id});
std.log.info("arn: {any}", .{call.response.arn});
std.log.info("id: {any}", .{call.response.user_id});
std.log.info("account: {any}", .{call.response.account});
std.log.info("requestId: {any}", .{call.response_metadata.request_id});
},
.query_with_input => {
const call = try client.call(services.sqs.list_queues.Request{
.queue_name_prefix = "s",
}, options);
defer call.deinit();
std.log.info("request id: {s}", .{call.response_metadata.request_id});
std.log.info("account has queues with prefix 's': {b}", .{call.response.queue_urls != null});
std.log.info("request id: {any}", .{call.response_metadata.request_id});
std.log.info("account has queues with prefix 's': {}", .{call.response.queue_urls != null});
},
.json_1_0_query_with_input => {
const call = try client.call(services.dynamo_db.list_tables.Request{
.limit = 1,
}, options);
defer call.deinit();
std.log.info("request id: {s}", .{call.response_metadata.request_id});
std.log.info("account has tables: {b}", .{call.response.table_names.?.len > 0});
std.log.info("request id: {any}", .{call.response_metadata.request_id});
std.log.info("account has tables: {}", .{call.response.table_names.?.len > 0});
},
.json_1_0_query_no_input => {
const call = try client.call(services.dynamo_db.describe_limits.Request{}, options);
defer call.deinit();
std.log.info("account read capacity limit: {d}", .{call.response.account_max_read_capacity_units});
std.log.info("account read capacity limit: {?d}", .{call.response.account_max_read_capacity_units});
},
.json_1_1_query_with_input => {
const call = try client.call(services.ecs.list_clusters.Request{
.max_results = 1,
}, options);
defer call.deinit();
std.log.info("request id: {s}", .{call.response_metadata.request_id});
std.log.info("account has clusters: {b}", .{call.response.cluster_arns.?.len > 0});
std.log.info("request id: {any}", .{call.response_metadata.request_id});
std.log.info("account has clusters: {}", .{call.response.cluster_arns.?.len > 0});
},
.json_1_1_query_no_input => {
const call = try client.call(services.ecs.list_clusters.Request{}, options);
defer call.deinit();
std.log.info("request id: {s}", .{call.response_metadata.request_id});
std.log.info("account has clusters: {b}", .{call.response.cluster_arns.?.len > 0});
std.log.info("request id: {any}", .{call.response_metadata.request_id});
std.log.info("account has clusters: {}", .{call.response.cluster_arns.?.len > 0});
},
.rest_json_1_query_with_input => {
const call = try client.call(services.lambda.list_functions.Request{
.max_items = 1,
}, options);
defer call.deinit();
std.log.info("request id: {s}", .{call.response_metadata.request_id});
std.log.info("account has functions: {b}", .{call.response.functions.?.len > 0});
std.log.info("request id: {any}", .{call.response_metadata.request_id});
std.log.info("account has functions: {}", .{call.response.functions.?.len > 0});
},
.rest_json_1_query_no_input => {
const call = try client.call(services.lambda.list_functions.Request{}, options);
defer call.deinit();
std.log.info("request id: {s}", .{call.response_metadata.request_id});
std.log.info("account has functions: {b}", .{call.response.functions.?.len > 0});
std.log.info("request id: {any}", .{call.response_metadata.request_id});
std.log.info("account has functions: {}", .{call.response.functions.?.len > 0});
},
.rest_json_1_work_with_lambda => {
const call = try client.call(services.lambda.list_functions.Request{}, options);
defer call.deinit();
std.log.info("list request id: {s}", .{call.response_metadata.request_id});
std.log.info("list request id: {any}", .{call.response_metadata.request_id});
if (call.response.functions) |fns| {
if (fns.len > 0) {
const func = fns[0];
@ -171,11 +166,11 @@ pub fn main() anyerror!void {
const addtag = try aws.Request(services.lambda.tag_resource).call(req, options);
defer addtag.deinit();
// const addtag = try client.call(services.lambda.tag_resource.Request{ .resource = arn, .tags = &.{.{ .key = "Foo", .value = "Bar" }} }, options);
std.log.info("add tag request id: {s}", .{addtag.response_metadata.request_id});
std.log.info("add tag request id: {any}", .{addtag.response_metadata.request_id});
var keys = [_][]const u8{"Foo"}; // Would love to have a way to express this without burning a var here
const deletetag = try aws.Request(services.lambda.untag_resource).call(.{ .tag_keys = keys[0..], .resource = arn }, options);
defer deletetag.deinit();
std.log.info("delete tag request id: {s}", .{deletetag.response_metadata.request_id});
std.log.info("delete tag request id: {any}", .{deletetag.response_metadata.request_id});
} else {
std.log.err("no functions to work with", .{});
}
@ -187,7 +182,7 @@ pub fn main() anyerror!void {
// Describe regions is a simpler request and easier to debug
const result = try client.call(services.ec2.describe_regions.Request{}, options);
defer result.deinit();
std.log.info("request id: {s}", .{result.response_metadata.request_id});
std.log.info("request id: {any}", .{result.response_metadata.request_id});
std.log.info("region count: {d}", .{result.response.regions.?.len});
},
.ec2_query_with_input => {
@ -221,15 +216,15 @@ pub fn main() anyerror!void {
.rest_xml_no_input => {
const result = try client.call(services.s3.list_buckets.Request{}, options);
defer result.deinit();
std.log.info("request id: {s}", .{result.response_metadata.request_id});
std.log.info("request id: {any}", .{result.response_metadata.request_id});
std.log.info("bucket count: {d}", .{result.response.buckets.?.len});
},
.rest_xml_anything_but_s3 => {
const result = try client.call(services.cloudfront.list_key_groups.Request{}, options);
defer result.deinit();
std.log.info("request id: {s}", .{result.response_metadata.request_id});
std.log.info("request id: {any}", .{result.response_metadata.request_id});
const list = result.response.key_group_list.?;
std.log.info("key group list max: {d}", .{list.max_items});
std.log.info("key group list max: {?d}", .{list.max_items});
std.log.info("key group quantity: {d}", .{list.quantity});
},
.rest_xml_work_with_s3 => {
@ -240,8 +235,8 @@ pub fn main() anyerror!void {
const result = try client.call(services.s3.list_buckets.Request{}, options);
defer result.deinit();
const bucket = result.response.buckets.?[result.response.buckets.?.len - 1];
std.log.info("ListBuckets request id: {s}", .{result.response_metadata.request_id});
std.log.info("bucket name: {s}", .{bucket.name.?});
std.log.info("ListBuckets request id: {any}", .{result.response_metadata.request_id});
std.log.info("bucket name: {any}", .{bucket.name.?});
break :blk try allocator.dupe(u8, bucket.name.?);
};
defer allocator.free(bucket);
@ -251,8 +246,8 @@ pub fn main() anyerror!void {
}, options);
defer result.deinit();
const location = result.response.location_constraint.?;
std.log.info("GetBucketLocation request id: {s}", .{result.response_metadata.request_id});
std.log.info("location: {s}", .{location});
std.log.info("GetBucketLocation request id: {any}", .{result.response_metadata.request_id});
std.log.info("location: {any}", .{location});
break :blk try allocator.dupe(u8, location);
};
defer allocator.free(location);
@ -268,8 +263,8 @@ pub fn main() anyerror!void {
.body = "bar",
.storage_class = "STANDARD",
}, s3opts);
std.log.info("PutObject Request id: {s}", .{result.response_metadata.request_id});
std.log.info("PutObject etag: {s}", .{result.response.e_tag.?});
std.log.info("PutObject Request id: {any}", .{result.response_metadata.request_id});
std.log.info("PutObject etag: {any}", .{result.response.e_tag.?});
defer result.deinit();
}
{
@ -279,9 +274,9 @@ pub fn main() anyerror!void {
.bucket = bucket,
.key = key,
}, s3opts);
std.log.info("GetObject Request id: {s}", .{result.response_metadata.request_id});
std.log.info("GetObject Body: {s}", .{result.response.body});
std.log.info("GetObject etag: {s}", .{result.response.e_tag.?});
std.log.info("GetObject Request id: {any}", .{result.response_metadata.request_id});
std.log.info("GetObject Body: {any}", .{result.response.body});
std.log.info("GetObject etag: {any}", .{result.response.e_tag.?});
std.log.info("GetObject last modified (seconds since epoch): {d}", .{result.response.last_modified.?});
defer result.deinit();
}
@ -290,14 +285,14 @@ pub fn main() anyerror!void {
.bucket = bucket,
.key = key,
}, s3opts);
std.log.info("DeleteObject Request id: {s}", .{result.response_metadata.request_id});
std.log.info("DeleteObject Request id: {any}", .{result.response_metadata.request_id});
defer result.deinit();
}
{
const result = try aws.Request(services.s3.list_objects).call(.{
.bucket = bucket,
}, s3opts);
std.log.info("ListObject Request id: {s}", .{result.response_metadata.request_id});
std.log.info("ListObject Request id: {any}", .{result.response_metadata.request_id});
std.log.info("Object count: {d}", .{result.response.contents.?.len});
defer result.deinit();
}
@ -349,5 +344,5 @@ pub fn jsonFun() !void {
.allow_unknown_fields = true, // new option
}) catch unreachable;
std.log.info("{}", .{res3});
std.log.info("{s}", .{res3.getCallerIdentityResponse.getCallerIdentityResult.user_id});
std.log.info("{any}", .{res3.getCallerIdentityResponse.getCallerIdentityResult.user_id});
}

View File

@ -54,7 +54,7 @@ pub fn encodeInternal(
rc = try encodeInternal(allocator, parent, field_name, first, obj.*, writer, options);
} else {
if (!first) _ = try writer.write("&");
try writer.print("{s}{s}={s}", .{ parent, field_name, obj });
try writer.print("{s}{s}={any}", .{ parent, field_name, obj });
rc = false;
},
.Struct => if (std.mem.eql(u8, "", field_name)) {

View File

@ -102,7 +102,7 @@ fn parseInternal(comptime T: type, element: *xml.Element, options: ParseOptions)
return std.fmt.parseFloat(T, element.children.items[0].CharData) catch |e| {
if (log_parse_traces) {
std.log.err(
"Could not parse '{s}' as float in element '{s}': {s}",
"Could not parse '{s}' as float in element '{s}': {any}",
.{
element.children.items[0].CharData,
element.tag,
@ -127,7 +127,7 @@ fn parseInternal(comptime T: type, element: *xml.Element, options: ParseOptions)
}
if (log_parse_traces) {
std.log.err(
"Could not parse '{s}' as integer in element '{s}': {s}",
"Could not parse '{s}' as integer in element '{s}': {any}",
.{
element.children.items[0].CharData,
element.tag,
@ -392,7 +392,7 @@ test "can parse a simple type" {
foo_bar: []const u8,
};
// std.debug.print("{s}", .{data});
const parsed_data = try parse(Example, data, .{ .allocator = allocator, .match_predicate = fuzzyEqual });
const parsed_data = try parse(Example, data, .{ .allocator = allocator, .match_predicate_ptr = fuzzyEqual });
defer parsed_data.deinit();
try testing.expectEqualStrings("bar", parsed_data.parsed_value.foo_bar);
}
@ -410,7 +410,7 @@ test "can parse a boolean type" {
foo_bar: bool,
};
// std.debug.print("{s}", .{data});
const parsed_data = try parse(Example, data, .{ .allocator = allocator, .match_predicate = fuzzyEqual });
const parsed_data = try parse(Example, data, .{ .allocator = allocator, .match_predicate_ptr = fuzzyEqual });
defer parsed_data.deinit();
try testing.expectEqual(true, parsed_data.parsed_value.foo_bar);
}
@ -427,7 +427,7 @@ test "can parse an integer type" {
foo_bar: u8,
};
// std.debug.print("{s}", .{data});
const parsed_data = try parse(Example, data, .{ .allocator = allocator, .match_predicate = fuzzyEqual });
const parsed_data = try parse(Example, data, .{ .allocator = allocator, .match_predicate_ptr = fuzzyEqual });
defer parsed_data.deinit();
try testing.expectEqual(@as(u8, 42), parsed_data.parsed_value.foo_bar);
}
@ -442,7 +442,7 @@ test "can parse an optional boolean type" {
const ExampleDoesNotMatter = struct {
foo_bar: ?bool = null,
};
const parsed_data = try parse(ExampleDoesNotMatter, data, .{ .allocator = allocator, .match_predicate = fuzzyEqual });
const parsed_data = try parse(ExampleDoesNotMatter, data, .{ .allocator = allocator, .match_predicate_ptr = fuzzyEqual });
defer parsed_data.deinit();
try testing.expectEqual(@as(?bool, true), parsed_data.parsed_value.foo_bar);
}
@ -458,7 +458,7 @@ test "can coerce 8601 date to integer" {
const ExampleDoesNotMatter = struct {
foo_bar: ?i64 = null,
};
const parsed_data = try parse(ExampleDoesNotMatter, data, .{ .allocator = allocator, .match_predicate = fuzzyEqual });
const parsed_data = try parse(ExampleDoesNotMatter, data, .{ .allocator = allocator, .match_predicate_ptr = fuzzyEqual });
defer parsed_data.deinit();
try testing.expectEqual(@as(i64, 1633451985), parsed_data.parsed_value.foo_bar.?);
}
@ -477,7 +477,7 @@ test "can parse a boolean type (two fields)" {
foo_bar: bool,
foo_baz: bool,
};
const parsed_data = try parse(ExampleDoesNotMatter, data, .{ .allocator = allocator, .match_predicate = fuzzyEqual });
const parsed_data = try parse(ExampleDoesNotMatter, data, .{ .allocator = allocator, .match_predicate_ptr = fuzzyEqual });
defer parsed_data.deinit();
try testing.expectEqual(@as(bool, true), parsed_data.parsed_value.foo_bar);
}
@ -499,7 +499,7 @@ test "can error without leaking memory" {
defer log_parse_traces = true;
try std.testing.expectError(
error.InvalidCharacter,
parse(ExampleDoesNotMatter, data, .{ .allocator = allocator, .match_predicate = fuzzyEqual }),
parse(ExampleDoesNotMatter, data, .{ .allocator = allocator, .match_predicate_ptr = fuzzyEqual }),
);
}
@ -518,7 +518,7 @@ test "can parse a nested type" {
bar: []const u8,
},
};
const parsed_data = try parse(Example, data, .{ .allocator = allocator, .match_predicate = fuzzyEqual });
const parsed_data = try parse(Example, data, .{ .allocator = allocator, .match_predicate_ptr = fuzzyEqual });
defer parsed_data.deinit();
try testing.expectEqualStrings("baz", parsed_data.parsed_value.foo.bar);
}
@ -539,7 +539,7 @@ test "can parse a nested type - two fields" {
qux: []const u8,
},
};
const parsed_data = try parse(Example, data, .{ .allocator = allocator, .match_predicate = fuzzyEqual });
const parsed_data = try parse(Example, data, .{ .allocator = allocator, .match_predicate_ptr = fuzzyEqual });
defer parsed_data.deinit();
try testing.expectEqualStrings("baz", parsed_data.parsed_value.foo.bar);
try testing.expectEqualStrings("baz", parsed_data.parsed_value.foo.qux);

View File

@ -1 +0,0 @@
const use_submodules = 1;