out with AWS CLI / in with the SDK

Emil Lerch 2024-08-27 16:29:43 -07:00
parent ef5b793882
commit f86bafc533
Signed by untrusted user: lobo
GPG Key ID: A7B62D657EF764F8
9 changed files with 591 additions and 152 deletions

.gitignore (vendored): 1 change

@@ -1,2 +1,3 @@
 zig-cache/
 zig-out/
+.zig-cache

README.md

@@ -1,37 +1,37 @@
 lambda-zig: A Custom Runtime for AWS Lambda
 ===========================================
 
-This is a sample custom runtime built in zig (0.12). Simple projects will execute
+This is a sample custom runtime built in zig (0.13). Simple projects will execute
 in <1ms, with a cold start init time of approximately 11ms.
 
 Some custom build steps have been added to build.zig, which will only currently appear if compiling from a linux operating system:
 
-* `zig build iam`: Deploy and record a default IAM role for the lambda function
-* `zig build package`: Package the lambda function for upload
-* `zig build deploy`: Deploy the lambda function
-* `zig build remoterun`: Run the lambda function
+* `zig build awslambda_iam`: Deploy and record a default IAM role for the lambda function
+* `zig build awslambda_package`: Package the lambda function for upload
+* `zig build awslambda_deploy`: Deploy the lambda function
+* `zig build awslambda_run`: Run the lambda function
 
 Custom options:
 
 * **function-name**: set the name of the AWS Lambda function
-* **payload**: Use this to set the payload of the function when run using `zig build remoterun`
+* **payload**: Use this to set the payload of the function when run using `zig build awslambda_run`
+* **region**: Use this to set the region for the function deployment/run
+* **function-role**: Name of the role to use for the function. The system will
+  look up the arn from this name, and create if it does not exist
+* **function-arn**: Role arn to use with the function. This must exist
 
-Additionally, a custom IAM role can be used for the function by appending ``-- --role myawesomerole``
-to the `zig build deploy` command. This has not really been tested. The role name
-is cached in zig-out/bin/iam_role_name, so you can also just set that to the full
-arn of your iam role if you'd like.
-The AWS Lambda function is compiled as a linux ARM64 executable. Since the build.zig
-calls out to the shell for AWS operations, you will need the AWS CLI. v2.2.43 has been tested.
+The AWS Lambda function can be compiled as a linux x86_64 or linux aarch64
+executable. The build script will set the architecture appropriately
 
 Caveats:
 
-* Unhandled invocation errors seem to be causing timeouts
-* zig build options only appear if compiling using linux, although it should be trivial
-  to make it work on other Unix-like operating systems (e.g. macos, freebsd). In fact,
-  it will likely work with just a change to the operating system check
-* There are a **ton** of TODO's in this code. Current state is more of a proof of
-  concept. PRs are welcome!
+* Building on Windows will not yet work, as the package step still uses
  system commands due to the need to create a zip file, and the current lack
+  of zip file creation capabilities in the standard library (you can read, but
+  not write, zip files with the standard library). A TODO exists with more
+  information should you wish to file a PR.
+* Caching is not yet implemented in the package or deployment steps, so the
+  function will be deployed on every build
 
 A sample project using this runtime can be found at https://git.lerch.org/lobo/lambda-zig-sample
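
For orientation, a typical sequence with the renamed steps and the new options described in the README above might look like the following sketch. The function name, role, and region values are placeholders, and options are assumed to be passed with Zig's standard `-D` build-option syntax:

```sh
# create or look up the IAM role, then package and deploy the function
zig build awslambda_iam -Dfunction-role=lambda_basic_execution
zig build awslambda_package
zig build awslambda_deploy -Dfunction-name=my-function -Dregion=us-west-2
```

In the new build.zig, the deploy step depends on both the IAM and package steps, so the first two commands are optional when deploying.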

build.zig.zon

@@ -7,6 +7,12 @@
     // This field is optional.
     // This is currently advisory only; Zig does not yet do anything
     // with this value.
+    .dependencies = .{
+        .aws = .{
+            .url = "https://git.lerch.org/api/packages/lobo/generic/aws-sdk-with-models/908c9d2d429b1f38c835363db566aa17bf1742fd/908c9d2d429b1f38c835363db566aa17bf1742fd-with-models.tar.gz",
+            .hash = "122022770a177afb2ee46632f88ad5468a5dea8df22170d1dea5163890b0a881399d",
+        },
+    },
     .minimum_zig_version = "0.12.0",
 
     // Specifies the set of files and directories that are included in this package.
@@ -20,6 +26,7 @@
         "build.zig.zon",
         "lambdabuild.zig",
         "src",
+        "lambdabuild",
         "LICENSE",
         "README.md",
     },

build.zig

@@ -1,5 +1,9 @@
 const std = @import("std");
 const builtin = @import("builtin");
+const Package = @import("lambdabuild/Package.zig");
+const Iam = @import("lambdabuild/Iam.zig");
+const Deploy = @import("lambdabuild/Deploy.zig");
+const Invoke = @import("lambdabuild/Invoke.zig");
 
 fn fileExists(file_name: []const u8) bool {
     const file = std.fs.openFileAbsolute(file_name, .{}) catch return false;
@@ -40,159 +44,122 @@ pub fn configureBuild(b: *std.Build, exe: *std.Build.Step.Compile, function_name
     // TODO: support other host OSs
     if (builtin.os.tag != .linux) return;
 
-    // Package step
-    const package_step = b.step("awslambda_package", "Package the function");
-    const function_zip = b.getInstallPath(.bin, "function.zip");
-    // TODO: Avoid use of system-installed zip, maybe using something like
-    // https://github.com/hdorio/hwzip.zig/blob/master/src/hwzip.zig
-    const zip = if (std.mem.eql(u8, "bootstrap", exe.out_filename))
-        try std.fmt.allocPrint(b.allocator,
-            \\zip -qj9 {s} {s}
-        , .{
-            function_zip,
-            b.getInstallPath(.bin, "bootstrap"),
-        })
-    else
-        // We need to copy stuff around
-        try std.fmt.allocPrint(b.allocator,
-            \\cp {s} {s} && \
-            \\zip -qj9 {s} {s} && \
-            \\rm {s}
-        , .{
-            b.getInstallPath(.bin, exe.out_filename),
-            b.getInstallPath(.bin, "bootstrap"),
-            function_zip,
-            b.getInstallPath(.bin, "bootstrap"),
-            b.getInstallPath(.bin, "bootstrap"),
-        });
-    // std.debug.print("\nzip cmdline: {s}", .{zip});
-    defer b.allocator.free(zip);
-    var zip_cmd = b.addSystemCommand(&.{ "/bin/sh", "-c", zip });
-    zip_cmd.step.dependOn(b.getInstallStep());
-    package_step.dependOn(&zip_cmd.step);
-
-    // Deployment
-    const deploy_step = b.step("awslambda_deploy", "Deploy the function");
-
+    @import("aws").aws.globalLogControl(.info, .warn, .info, false);
+    const package_step = Package.create(b, .{ .exe = exe });
+    const step = b.step("awslambda_package", "Package the function");
+    step.dependOn(&package_step.step);
+    package_step.step.dependOn(b.getInstallStep());
+
+    // Doing this will require that the aws dependency be added to the downstream
+    // build.zig.zon
+    // const lambdabuild = b.addExecutable(.{
+    //     .name = "lambdabuild",
+    //     .root_source_file = .{
+    //         // we use cwd_relative here because we need to compile this relative
+    //         // to whatever directory this file happens to be. That is likely
+    //         // in a cache directory, not the base of the build.
+    //         .cwd_relative = try std.fs.path.join(b.allocator, &[_][]const u8{
+    //             std.fs.path.dirname(@src().file).?,
+    //             "lambdabuild/src/main.zig",
+    //         }),
+    //     },
+    //     .target = b.host,
+    // });
+    // const aws_dep = b.dependency("aws", .{
+    //     .target = b.host,
+    //     .optimize = lambdabuild.root_module.optimize orelse .Debug,
+    // });
+    // const aws_module = aws_dep.module("aws");
+    // lambdabuild.root_module.addImport("aws", aws_module);
+    //
     const iam_role_name = b.option(
         []const u8,
         "function-role",
         "IAM role name for function (will create if it does not exist) [lambda_basic_execution]",
-    ) orelse "lambda_basic_execution";
+    ) orelse "lambda_basic_execution_blah2";
     const iam_role_arn = b.option(
         []const u8,
         "function-arn",
         "Preexisting IAM role arn for function",
     );
-    const iam_step = b.step("awslambda_iam", "Create/Get IAM role for function");
-    deploy_step.dependOn(iam_step); // iam_step will either be a noop or all the stuff below
-    const iam_role_param: []u8 = blk: {
-        if (iam_role_arn != null)
-            break :blk try std.fmt.allocPrint(b.allocator, "--role {s}", .{iam_role_arn.?});
-        if (iam_role_name.len == 0)
-            @panic("Either function-role or function-arn must be specified. function-arn will allow deployment without creating a role");
-        // Now we have an iam role name to use, but no iam role arn. Let's go hunting
-        // Once this is done once, we'll have a file with the arn in "cache"
-        // The iam arn will reside in an 'iam_role' file in the bin directory
-        // Build system command to create the role if necessary and get the role arn
-        const iam_role_file = b.getInstallPath(.bin, "iam_role");
-        if (!fileExists(iam_role_file)) {
-            // std.debug.print("file does not exist", .{});
-            // Our cache file does not exist on disk, so we'll create/get the role
-            // arn using the AWS CLI and dump to disk here
-            const ifstatement_fmt =
-                \\ if aws iam get-role --role-name {s} 2>&1 |grep -q NoSuchEntity; then aws iam create-role --output text --query Role.Arn --role-name {s} --assume-role-policy-document '{{
-                \\ "Version": "2012-10-17",
-                \\ "Statement": [
-                \\ {{
-                \\ "Sid": "",
-                \\ "Effect": "Allow",
-                \\ "Principal": {{
-                \\ "Service": "lambda.amazonaws.com"
-                \\ }},
-                \\ "Action": "sts:AssumeRole"
-                \\ }}
-                \\ ]}}' > /dev/null; fi && \
-                \\ aws iam attach-role-policy --policy-arn arn:aws:iam::aws:policy/AWSLambdaExecute --role-name lambda_basic_execution && \
-                \\ aws iam get-role --role-name lambda_basic_execution --query Role.Arn --output text > {s}
-            ;
-            const ifstatement = try std.fmt.allocPrint(
-                b.allocator,
-                ifstatement_fmt,
-                .{ iam_role_name, iam_role_name, iam_role_file },
-            );
-            iam_step.dependOn(&b.addSystemCommand(&.{ "/bin/sh", "-c", ifstatement }).step);
-        }
-        break :blk try std.fmt.allocPrint(b.allocator, "--role \"$(cat {s})\"", .{iam_role_file});
-    };
-    const function_name_file = b.getInstallPath(.bin, function_name);
-    const ifstatement = "if [ ! -f {s} ] || [ {s} -nt {s} ]; then if aws lambda get-function --function-name {s} 2>&1 |grep -q ResourceNotFoundException; then echo not found > /dev/null; {s}; else echo found > /dev/null; {s}; fi; fi";
-    // The architectures option was introduced in 2.2.43 released 2021-10-01
-    // We want to use arm64 here because it is both faster and cheaper for most
-    // Amazon Linux 2 is the only arm64 supported option
-    // TODO: This should determine compilation target and use x86_64 if needed
-    const not_found = "aws lambda create-function --architectures arm64 --runtime provided.al2 --function-name {s} --zip-file fileb://{s} --handler not_applicable {s} && touch {s}";
-    const not_found_fmt = try std.fmt.allocPrint(b.allocator, not_found, .{ function_name, function_zip, iam_role_param, function_name_file });
-    defer b.allocator.free(not_found_fmt);
-    const found = "aws lambda update-function-code --function-name {s} --zip-file fileb://{s} && touch {s}";
-    const found_fmt = try std.fmt.allocPrint(b.allocator, found, .{ function_name, function_zip, function_name_file });
-    defer b.allocator.free(found_fmt);
-    var found_final: []const u8 = undefined;
-    var not_found_final: []const u8 = undefined;
-    if (b.args) |args| {
-        found_final = try addArgs(b.allocator, found_fmt, args);
-        not_found_final = try addArgs(b.allocator, not_found_fmt, args);
-    } else {
-        found_final = found_fmt;
-        not_found_final = not_found_fmt;
-    }
-    const cmd = try std.fmt.allocPrint(b.allocator, ifstatement, .{
-        function_name_file,
-        b.getInstallPath(.bin, exe.out_filename),
-        function_name_file,
-        function_name,
-        not_found_fmt,
-        found_fmt,
-    });
-    defer b.allocator.free(cmd);
-    // std.debug.print("{s}\n", .{cmd});
-    deploy_step.dependOn(package_step);
-    deploy_step.dependOn(&b.addSystemCommand(&.{ "/bin/sh", "-c", cmd }).step);
+    const iam = Iam.create(b, .{
+        .role_name = iam_role_name,
+        .role_arn = iam_role_arn,
+    });
+    const iam_step = b.step("awslambda_iam", "Create/Get IAM role for function");
+    iam_step.dependOn(&iam.step);
+    const region = b.option([]const u8, "region", "Region to use [default is autodetect from environment/config]") orelse try findRegionFromSystem(b.allocator);
+    // Deployment
+    const deploy = Deploy.create(b, .{
+        .name = function_name,
+        .package = package_step.packagedFileLazyPath(),
+        .arch = exe.root_module.resolved_target.?.result.cpu.arch,
+        .iam_step = iam,
+        .region = region,
+    });
+    deploy.step.dependOn(&package_step.step);
+    const deploy_step = b.step("awslambda_deploy", "Deploy the function");
+    deploy_step.dependOn(&deploy.step);
     const payload = b.option([]const u8, "payload", "Lambda payload [{\"foo\":\"bar\", \"baz\": \"qux\"}]") orelse
         \\ {"foo": "bar", "baz": "qux"}"
     ;
-    const run_script =
-        \\ f=$(mktemp) && \
-        \\ logs=$(aws lambda invoke \
-        \\ --cli-binary-format raw-in-base64-out \
-        \\ --invocation-type RequestResponse \
-        \\ --function-name {s} \
-        \\ --payload '{s}' \
-        \\ --log-type Tail \
-        \\ --query LogResult \
-        \\ --output text "$f" |base64 -d) && \
-        \\ cat "$f" && rm "$f" && \
-        \\ echo && echo && echo "$logs"
-    ;
-    const run_script_fmt = try std.fmt.allocPrint(b.allocator, run_script, .{ function_name, payload });
-    defer b.allocator.free(run_script_fmt);
-    const run_cmd = b.addSystemCommand(&.{ "/bin/sh", "-c", run_script_fmt });
-    run_cmd.step.dependOn(deploy_step);
-    if (b.args) |args| {
-        run_cmd.addArgs(args);
-    }
-    const run_step = b.step("awslambda_run", "Run the app in AWS lambda");
-    run_step.dependOn(&run_cmd.step);
+    const invoke = Invoke.create(b, .{
+        .name = function_name,
+        .payload = payload,
+        .region = region,
+    });
+    invoke.step.dependOn(&deploy.step);
+    const run_step = b.step("awslambda_run", "Run the app in AWS lambda");
+    run_step.dependOn(&invoke.step);
 }
+
+// AWS_CONFIG_FILE (default is ~/.aws/config
+// AWS_DEFAULT_REGION
+fn findRegionFromSystem(allocator: std.mem.Allocator) ![]const u8 {
+    const env_map = try std.process.getEnvMap(allocator);
+    if (env_map.get("AWS_DEFAULT_REGION")) |r| return r;
+    const config_file_path = env_map.get("AWS_CONFIG_FILE") orelse
+        try std.fs.path.join(allocator, &[_][]const u8{
+        env_map.get("HOME") orelse env_map.get("USERPROFILE").?,
+        ".aws",
+        "config",
+    });
+    const config_file = try std.fs.openFileAbsolute(config_file_path, .{});
+    defer config_file.close();
+    const config_bytes = try config_file.readToEndAlloc(allocator, 1024 * 1024);
+    const profile = env_map.get("AWS_PROFILE") orelse "default";
+    var line_iterator = std.mem.split(u8, config_bytes, "\n");
+    var in_profile = false;
+    while (line_iterator.next()) |line| {
+        const trimmed = std.mem.trim(u8, line, " \t\r");
+        if (trimmed.len == 0 or trimmed[0] == '#') continue;
+        if (!in_profile) {
+            if (trimmed[0] == '[' and trimmed[trimmed.len - 1] == ']') {
+                // this is a profile directive!
+                // std.debug.print("profile: {s}, in file: {s}\n", .{ profile, trimmed[1 .. trimmed.len - 1] });
+                if (std.mem.eql(u8, profile, trimmed[1 .. trimmed.len - 1])) {
+                    in_profile = true;
+                }
+            }
+            continue; // we're only looking for a profile at this point
+        }
+        // look for our region directive
+        if (trimmed[0] == '[' and trimmed[trimmed.len - 1] == ']')
+            return error.RegionNotFound; // we've hit another profile without getting our region
+        if (!std.mem.startsWith(u8, trimmed, "region")) continue;
+        var equalityiterator = std.mem.split(u8, trimmed, "=");
+        _ = equalityiterator.next() orelse return error.RegionNotFound;
+        const raw_val = equalityiterator.next() orelse return error.RegionNotFound;
+        return try allocator.dupe(u8, std.mem.trimLeft(u8, raw_val, " \t"));
+    }
    return error.RegionNotFound;
+}
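
For context on the region autodetection added above: `findRegionFromSystem` first honors `AWS_DEFAULT_REGION`, then reads the shared config file (`AWS_CONFIG_FILE`, falling back to `~/.aws/config`) and scans for a `region` key inside the section whose bracketed name exactly matches `AWS_PROFILE` (default `default`). A minimal config file it would parse successfully looks like this (the region value is a placeholder):

```ini
# ~/.aws/config
[default]
region = us-west-2
```

Note that the match is against the literal section name, so with this parser a non-default profile would need a `[myprofile]` header rather than the `[profile myprofile]` form the AWS CLI normally writes.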

lambdabuild/Deploy.zig (new file): 161 additions

@@ -0,0 +1,161 @@
const std = @import("std");
const aws = @import("aws").aws;
const Deploy = @This();
step: std.Build.Step,
options: Options,
const base_id: std.Build.Step.Id = .custom;
pub const Options = struct {
/// Function name to be used for the function
name: []const u8,
/// LazyPath for the function package (zip file)
package: std.Build.LazyPath,
/// Architecture for Lambda function
arch: std.Target.Cpu.Arch,
/// Iam step. This will be a dependency of the deployment
iam_step: *@import("Iam.zig"),
/// Region for deployment
region: []const u8,
};
pub fn create(owner: *std.Build, options: Options) *Deploy {
const name = owner.dupe(options.name);
const step_name = owner.fmt("{s} {s}{s}", .{
"aws lambda",
"deploy",
name,
});
const self = owner.allocator.create(Deploy) catch @panic("OOM");
self.* = .{
.step = std.Build.Step.init(.{
.id = base_id,
.name = step_name,
.owner = owner,
.makeFn = make,
}),
.options = options,
};
self.step.dependOn(&options.iam_step.step);
return self;
}
/// gets the last time we deployed this function from the name in cache.
/// If not in cache, null is returned. Note that cache is not account specific,
/// so if you're banging around multiple accounts, you'll want to use different
/// local zig caches for each
fn getlastDeployedTime(step: *std.Build.Step, name: []const u8) !?[]const u8 {
try step.owner.cache_root.handle.makePath("iam");
// we should be able to use the role name, as only the following characters
// are allowed: _+=,.@-.
const cache_file = try std.fmt.allocPrint(
step.owner.allocator,
"deploy{s}{s}",
.{ std.fs.path.sep_str, name },
);
const buff = try step.owner.allocator.alloc(u8, 64);
const time = step.owner.cache_root.handle.readFile(cache_file, buff) catch return null;
return time;
}
fn make(step: *std.Build.Step, node: std.Progress.Node) anyerror!void {
_ = node;
const self: *Deploy = @fieldParentPtr("step", step);
if (self.options.arch != .aarch64 and self.options.arch != .x86_64)
return step.fail("AWS Lambda can only deploy aarch64 and x86_64 functions ({} not allowed)", .{self.options.arch});
// TODO: Work out cache. HOWEVER...this cannot be done until the caching
// for the Deploy command works properly. Right now, it regenerates
// the zip file every time
// if (try getIamArnFromName(step, self.options.role_name)) |_| {
// step.result_cached = true;
// return; // exists in cache - nothing to do
// }
var client = aws.Client.init(self.step.owner.allocator, .{});
defer client.deinit();
const services = aws.Services(.{.lambda}){};
const function = blk: {
var diagnostics = aws.Diagnostics{
.http_code = undefined,
.response_body = undefined,
.allocator = self.step.owner.allocator,
};
const options = aws.Options{
.client = client,
.diagnostics = &diagnostics,
.region = self.options.region,
};
aws.globalLogControl(.info, .warn, .info, true);
defer aws.globalLogControl(.info, .warn, .info, false);
const call = aws.Request(services.lambda.get_function).call(.{
.function_name = self.options.name,
}, options) catch |e| {
// There seems to be an issue here, but realistically, we have an arena
// so there's no leak leaving this out
defer diagnostics.deinit();
if (diagnostics.http_code == 404) break :blk null;
return step.fail(
"Unknown error {} from Lambda GetFunction. HTTP code {}, message: {s}",
.{ e, diagnostics.http_code, diagnostics.response_body },
);
};
defer call.deinit();
// TODO: Write call.response.configuration.last_modified to cache
// std.debug.print("Function found. Last modified: {s}, revision id: {s}\n", .{ call.response.configuration.?.last_modified.?, call.response.configuration.?.revision_id.? });
break :blk .{
.last_modified = try step.owner.allocator.dupe(u8, call.response.configuration.?.last_modified.?),
.revision_id = try step.owner.allocator.dupe(u8, call.response.configuration.?.revision_id.?),
};
};
const encoder = std.base64.standard.Encoder;
const file = try std.fs.openFileAbsolute(self.options.package.getPath2(step.owner, step), .{});
defer file.close();
const bytes = try file.readToEndAlloc(step.owner.allocator, 100 * 1024 * 1024);
const base64_buf = try step.owner.allocator.alloc(u8, encoder.calcSize(bytes.len));
const base64_bytes = encoder.encode(base64_buf, bytes);
const options = aws.Options{
.client = client,
.region = self.options.region,
};
const arm64_arch = [_][]const u8{"arm64"};
const x86_64_arch = [_][]const u8{"x86_64"};
const architectures = (if (self.options.arch == .aarch64) arm64_arch else x86_64_arch);
const arches: [][]const u8 = @constCast(architectures[0..]);
if (function) |f| {
// TODO: make sure our zipfile newer than the lambda function
const update_call = try aws.Request(services.lambda.update_function_code).call(.{
.function_name = self.options.name,
.architectures = arches,
.revision_id = f.revision_id,
.zip_file = base64_bytes,
}, options);
defer update_call.deinit();
// TODO: Write call.response.last_modified to cache
// TODO: Write call.response.revision_id to cache?
} else {
// New function - we need to create from scratch
const create_call = try aws.Request(services.lambda.create_function).call(.{
.function_name = self.options.name,
.architectures = arches,
.code = .{ .zip_file = base64_bytes },
.handler = "not_applicable",
.package_type = "Zip",
.runtime = "provided.al2",
.role = self.options.iam_step.resolved_arn,
}, options);
defer create_call.deinit();
}
}

lambdabuild/Iam.zig (new file): 146 additions

@@ -0,0 +1,146 @@
const std = @import("std");
const aws = @import("aws").aws;
const Iam = @This();
step: std.Build.Step,
options: Options,
/// resolved_arn will be set only after make is run
resolved_arn: []const u8 = undefined,
arn_buf: [2048]u8 = undefined, // https://docs.aws.amazon.com/IAM/latest/APIReference/API_Role.html has 2k limit
const base_id: std.Build.Step.Id = .custom;
pub const Options = struct {
name: []const u8 = "",
role_name: []const u8,
role_arn: ?[]const u8,
};
pub fn create(owner: *std.Build, options: Options) *Iam {
const name = owner.dupe(options.name);
const step_name = owner.fmt("{s} {s}{s}", .{
"aws lambda",
"iam",
name,
});
const self = owner.allocator.create(Iam) catch @panic("OOM");
self.* = .{
.step = std.Build.Step.init(.{
.id = base_id,
.name = step_name,
.owner = owner,
.makeFn = make,
}),
.options = options,
};
return self;
}
/// gets an IamArn from the name in cache. If not in cache, null is returned
/// Note that cache is not account specific, so if you're banging around multiple
/// accounts, you'll want to use different local zig caches for each
pub fn getIamArnFromName(step: *std.Build.Step, name: []const u8) !?[]const u8 {
try step.owner.cache_root.handle.makePath("iam");
// we should be able to use the role name, as only the following characters
// are allowed: _+=,.@-.
const iam_file = try std.fmt.allocPrint(
step.owner.allocator,
"iam{s}{s}",
.{ std.fs.path.sep_str, name },
);
const buff = try step.owner.allocator.alloc(u8, 64);
const arn = step.owner.cache_root.handle.readFile(iam_file, buff) catch return null;
return arn;
}
fn make(step: *std.Build.Step, node: std.Progress.Node) anyerror!void {
_ = node;
const self: *Iam = @fieldParentPtr("step", step);
if (try getIamArnFromName(step, self.options.role_name)) |a| {
step.result_cached = true;
@memcpy(self.arn_buf[0..a.len], a);
self.resolved_arn = self.arn_buf[0..a.len];
return; // exists in cache - nothing to do
}
var client = aws.Client.init(self.step.owner.allocator, .{});
defer client.deinit();
const services = aws.Services(.{.iam}){};
var arn = blk: {
var diagnostics = aws.Diagnostics{
.http_code = undefined,
.response_body = undefined,
.allocator = self.step.owner.allocator,
};
const options = aws.Options{
.client = client,
.diagnostics = &diagnostics,
};
const call = aws.Request(services.iam.get_role).call(.{
.role_name = self.options.role_name, // TODO: if we have a role_arn, we should use it and skip
}, options) catch |e| {
defer diagnostics.deinit();
if (diagnostics.http_code == 404) break :blk null;
return step.fail(
"Unknown error {} from IAM GetRole. HTTP code {}, message: {s}",
.{ e, diagnostics.http_code, diagnostics.response_body },
);
};
defer call.deinit();
break :blk try step.owner.allocator.dupe(u8, call.response.role.arn);
};
// Now ARN will either be null (does not exist), or a value
if (arn == null) {
// we need to create the role before proceeding
const options = aws.Options{
.client = client,
};
const create_call = try aws.Request(services.iam.create_role).call(.{
.role_name = self.options.role_name,
.assume_role_policy_document =
\\{
\\ "Version": "2012-10-17",
\\ "Statement": [
\\ {
\\ "Sid": "",
\\ "Effect": "Allow",
\\ "Principal": {
\\ "Service": "lambda.amazonaws.com"
\\ },
\\ "Action": "sts:AssumeRole"
\\ }
\\ ]
\\}
,
}, options);
defer create_call.deinit();
arn = try step.owner.allocator.dupe(u8, create_call.response.role.arn);
const attach_call = try aws.Request(services.iam.attach_role_policy).call(.{
.policy_arn = "arn:aws:iam::aws:policy/AWSLambdaExecute",
.role_name = self.options.role_name,
}, options);
defer attach_call.deinit();
}
@memcpy(self.arn_buf[0..arn.?.len], arn.?);
self.resolved_arn = self.arn_buf[0..arn.?.len];
// NOTE: This must match getIamArnFromName
const iam_file = try std.fmt.allocPrint(
step.owner.allocator,
"iam{s}{s}",
.{ std.fs.path.sep_str, self.options.role_name },
);
try step.owner.cache_root.handle.writeFile(.{
.sub_path = iam_file,
.data = arn.?,
});
}

lambdabuild/Invoke.zig (new file): 63 additions

@@ -0,0 +1,63 @@
const std = @import("std");
const aws = @import("aws").aws;
const Invoke = @This();
step: std.Build.Step,
options: Options,
const base_id: std.Build.Step.Id = .custom;
pub const Options = struct {
/// Function name to invoke
name: []const u8,
/// Payload to send to the function
payload: []const u8,
/// Region for deployment
region: []const u8,
};
pub fn create(owner: *std.Build, options: Options) *Invoke {
const name = owner.dupe(options.name);
const step_name = owner.fmt("{s} {s}{s}", .{
"aws lambda",
"invoke",
name,
});
const self = owner.allocator.create(Invoke) catch @panic("OOM");
self.* = .{
.step = std.Build.Step.init(.{
.id = base_id,
.name = step_name,
.owner = owner,
.makeFn = make,
}),
.options = options,
};
return self;
}
fn make(step: *std.Build.Step, node: std.Progress.Node) anyerror!void {
_ = node;
const self: *Invoke = @fieldParentPtr("step", step);
var client = aws.Client.init(self.step.owner.allocator, .{});
defer client.deinit();
const services = aws.Services(.{.lambda}){};
const options = aws.Options{
.client = client,
.region = self.options.region,
};
const call = try aws.Request(services.lambda.invoke).call(.{
.function_name = self.options.name,
.payload = self.options.payload,
.log_type = "Tail",
.invocation_type = "RequestResponse",
}, options);
defer call.deinit();
std.debug.print("{?s}\n", .{call.response.payload});
}
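
The invoke step above is what backs `zig build awslambda_run`; driven from the command line it might look like the following sketch. The `payload` and `region` option names come from build.zig, the JSON is the documented default payload, and the region value is a placeholder:

```sh
zig build awslambda_run -Dpayload='{"foo": "bar", "baz": "qux"}' -Dregion=us-east-1
```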

lambdabuild/Package.zig (new file): 94 additions

@@ -0,0 +1,94 @@
const std = @import("std");
const Package = @This();
step: std.Build.Step,
lambda_zipfile: []const u8,
const base_id: std.Build.Step.Id = .install_file;
pub const Options = struct {
name: []const u8 = "",
exe: *std.Build.Step.Compile,
zipfile_name: []const u8 = "function.zip",
};
pub fn create(owner: *std.Build, options: Options) *Package {
const name = owner.dupe(options.name);
const step_name = owner.fmt("{s} {s}{s}", .{
"aws lambda",
"package",
name,
});
const package = owner.allocator.create(Package) catch @panic("OOM");
package.* = .{
.step = std.Build.Step.init(.{
.id = base_id,
.name = step_name,
.owner = owner,
.makeFn = make,
}),
.lambda_zipfile = options.zipfile_name,
};
// TODO: For Windows, tar.exe can actually do zip files. tar -a -cf function.zip file1 [file2...]
// https://superuser.com/questions/201371/create-zip-folder-from-the-command-line-windows#comment2725283_898508
//
// We'll want two system commands here. One for the exe itself, and one for
// other files (TODO: what does this latter one look like? maybe it's an option?)
var zip_cmd = owner.addSystemCommand(&.{ "zip", "-qj9X" });
zip_cmd.has_side_effects = true; // TODO: move these to makeFn as we have little cache control here...
zip_cmd.setCwd(.{ .src_path = .{
.owner = owner,
.sub_path = owner.getInstallPath(.prefix, "."),
} });
const zipfile = zip_cmd.addOutputFileArg(options.zipfile_name);
zip_cmd.addArg(owner.getInstallPath(.bin, "bootstrap"));
// std.debug.print("\nzip cmdline: {s}", .{zip});
if (!std.mem.eql(u8, "bootstrap", options.exe.out_filename)) {
// We need to copy stuff around
// TODO: should this be installing bootstrap binary in .bin directory?
const cp_cmd = owner.addSystemCommand(&.{ "cp", owner.getInstallPath(.bin, options.exe.out_filename) });
cp_cmd.has_side_effects = true;
const copy_output = cp_cmd.addOutputFileArg("bootstrap");
const install_copy = owner.addInstallFileWithDir(copy_output, .bin, "bootstrap");
cp_cmd.step.dependOn(owner.getInstallStep());
zip_cmd.step.dependOn(&install_copy.step);
// might as well leave this bootstrap around for caching purposes
// const rm_cmd = owner.addSystemCommand(&.{ "rm", owner.getInstallPath(.bin, "bootstrap"), });
}
const install_zipfile = owner.addInstallFileWithDir(zipfile, .prefix, options.zipfile_name);
install_zipfile.step.dependOn(&zip_cmd.step);
package.step.dependOn(&install_zipfile.step);
return package;
}
pub fn packagedFilePath(self: Package) []const u8 {
return self.step.owner.getInstallPath(.prefix, self.options.zipfile_name);
}
pub fn packagedFileLazyPath(self: Package) std.Build.LazyPath {
return .{ .src_path = .{
.owner = self.step.owner,
.sub_path = self.step.owner.getInstallPath(.prefix, self.lambda_zipfile),
} };
}
fn make(step: *std.Build.Step, node: std.Progress.Node) anyerror!void {
// Make here doesn't actually do anything. But we want to set up this
// step this way, so that when (if) zig stdlib gains the ability to write
// zip files in addition to reading them, we can skip all the system commands
// and just do all the things here instead
//
//
// TODO: The caching plan will be:
//
// get a hash of the bootstrap and whatever other files we put into the zip
// file (because a zip is not really reproducible). If the cache directory
// has the hash as its latest hash, we have nothing to do, so we can exit
// at that point
//
// Otherwise, store that hash in our cache, and copy our bootstrap, zip
// things up and install the file into zig-out
_ = node;
_ = step;
}

lambdabuild/function.zip (new binary file)

Binary file not shown.