Compare commits


No commits in common. "183d2d912c41ca721c8d18e5c258e4472d38db70" and "a28c96994ce470130dabfaafbdf19b6b5cd82e15" have entirely different histories.

22 changed files with 957 additions and 1435 deletions


@@ -1,37 +0,0 @@
name: Lambda-Zig Build
run-name: ${{ github.actor }} building lambda-zig
on:
  push:
    branches:
      - '*'
  workflow_dispatch:
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Check out repository code
        uses: actions/checkout@v4
      - name: Setup Zig
        uses: https://codeberg.org/mlugg/setup-zig@v2.2.1
      - name: Build
        run: zig build --summary all
      - name: Run tests
        run: zig build test --summary all
      - name: Build for other platforms
        run: |
          zig build -Dtarget=aarch64-linux
          zig build -Dtarget=x86_64-linux
      - name: Notify
        uses: https://git.lerch.org/lobo/action-notify-ntfy@v2
        if: always()
        with:
          host: ${{ secrets.NTFY_HOST }}
          topic: ${{ secrets.NTFY_TOPIC }}
          user: ${{ secrets.NTFY_USER }}
          password: ${{ secrets.NTFY_PASSWORD }}

.github/workflows/zig-build.yaml vendored Normal file

@@ -0,0 +1,29 @@
name: Generic zig build
on:
  workflow_dispatch:
  push:
    branches:
      - '*'
      - '!zig-develop*'
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: elerch/setup-zig@v3
        with:
          version: 0.13.0
      - uses: elerch/zig-action-cache@v1.1.6
      - name: Build project
        run: zig build --summary all
      - name: Run tests
        run: zig build test --summary all
      - name: Notify
        uses: elerch/action-notify-ntfy@v2.github
        if: always() && env.GITEA_ACTIONS == 'true'
        with:
          host: ${{ secrets.NTFY_HOST }}
          topic: ${{ secrets.NTFY_TOPIC }}
          status: ${{ job.status }}
          user: ${{ secrets.NTFY_USER }}
          password: ${{ secrets.NTFY_PASSWORD }}


@@ -1,5 +0,0 @@
[tools]
pre-commit = "4.2.0"
zig = "0.15.2"
zls = "0.15.1"
"ubi:DonIsaac/zlint" = "0.7.6"


@@ -1,36 +0,0 @@
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v3.2.0
    hooks:
      - id: trailing-whitespace
      - id: end-of-file-fixer
      - id: check-yaml
      - id: check-added-large-files
  - repo: https://github.com/batmac/pre-commit-zig
    rev: v0.3.0
    hooks:
      - id: zig-fmt
  - repo: local
    hooks:
      - id: zlint
        name: Run zlint
        entry: zlint
        args: ["--deny-warnings", "--fix"]
        language: system
        types: [zig]
  - repo: https://github.com/batmac/pre-commit-zig
    rev: v0.3.0
    hooks:
      - id: zig-build
  - repo: local
    hooks:
      - id: test
        name: Run zig build test
        entry: zig
        # args: ["build", "coverage", "-Dcoverage-threshold=80"]
        args: ["build", "test"]
        language: system
        types: [file]
        pass_filenames: false

README.md

@@ -1,77 +1,70 @@
lambda-zig: A Custom Runtime for AWS Lambda
===========================================

-This is a custom runtime built in Zig (0.15). Simple projects will
-execute in <1ms, with a cold start init time of approximately 11ms.
+This is a sample custom runtime built in zig (0.13). Simple projects will execute
+in <1ms, with a cold start init time of approximately 11ms.

-Custom build steps are available for packaging and deploying Lambda functions:
+Some custom build steps have been added to build.zig, which will only currently appear if compiling from a linux operating system:

-* `zig build awslambda_package`: Package the Lambda function into a zip file
-* `zig build awslambda_iam`: Create or verify IAM role for the Lambda function
-* `zig build awslambda_deploy`: Deploy the Lambda function to AWS
-* `zig build awslambda_run`: Invoke the deployed Lambda function
+* `zig build awslambda_iam`: Deploy and record a default IAM role for the lambda function
+* `zig build awslambda_package`: Package the lambda function for upload
+* `zig build awslambda_deploy`: Deploy the lambda function
+* `zig build awslambda_run`: Run the lambda function

-Build options:
+Custom options:

-* **function-name**: Name of the AWS Lambda function
-* **payload**: JSON payload for function invocation (used with awslambda_run)
-* **region**: AWS region for deployment and invocation
-* **profile**: AWS profile to use for credentials
-* **role-name**: IAM role name for the function (default: lambda_basic_execution)
+* **function-name**: set the name of the AWS Lambda function
+* **payload**: Use this to set the payload of the function when run using `zig build awslambda_run`
+* **region**: Use this to set the region for the function deployment/run
+* **function-role**: Name of the role to use for the function. The system will
+  look up the arn from this name, and create if it does not exist
+* **function-arn**: Role arn to use with the function. This must exist

-The Lambda function can be compiled for x86_64 or aarch64. The build system
-automatically configures the Lambda architecture based on the target.
+The AWS Lambda function can be compiled as a linux x86_64 or linux aarch64
+executable. The build script will set the architecture appropriately

-A sample project using this runtime can be found at
-https://git.lerch.org/lobo/lambda-zig-sample
+Caveats:

-Using the Zig Package Manager
+* Building on Windows will not yet work, as the package step still uses
+  system commands due to the need to create a zip file, and the current lack
+  of zip file creation capabilities in the standard library (you can read, but
+  not write, zip files with the standard library). A TODO exists with more
+  information should you wish to file a PR.
+
+A sample project using this runtime can be found at https://git.lerch.org/lobo/lambda-zig-sample
+
+Using the zig package manager
-----------------------------

-To add Lambda package/deployment steps to another project:
+The zig package manager [works just fine](https://github.com/ziglang/zig/issues/14279)
+in build.zig, which works well for use of this runtime.

-1. Fetch the dependency:
-```sh
-zig fetch --save git+https://git.lerch.org/lobo/lambda-zig
-```
+To add lambda package/deployment steps to another project:

-2. Update your `build.zig`:
-```zig
-const std = @import("std");
-const lambda_zig = @import("lambda_zig");
-
-pub fn build(b: *std.Build) !void {
-    const target = b.standardTargetOptions(.{});
-    const optimize = b.standardOptimizeOption(.{});
-
-    // Get lambda-zig dependency
-    const lambda_zig_dep = b.dependency("lambda_zig", .{
-        .target = target,
-        .optimize = optimize,
-    });
-
-    const exe_module = b.createModule(.{
-        .root_source_file = b.path("src/main.zig"),
-        .target = target,
-        .optimize = optimize,
-    });
-
-    // Add lambda runtime to your module
-    exe_module.addImport("aws_lambda_runtime", lambda_zig_dep.module("lambda_runtime"));
-
-    const exe = b.addExecutable(.{
-        .name = "bootstrap",
-        .root_module = exe_module,
-    });
-    b.installArtifact(exe);
-
-    // Add Lambda build steps
-    try lambda_zig.configureBuild(b, lambda_zig_dep, exe);
-}
-```
-
-Note: The build function return type must be `!void` or catch/deal with errors
-to support the Lambda build integration.
+1. `zig build init-exe`
+2. Add a `build.zig.zon` similar to the below
+3. Add a line to build.zig to add necessary build options, etc. Not the build function
+   return type should be changed from `void` to `!void`
+
+`build.zig`:
+
+```zig
+try @import("lambda-zig").lambdaBuildOptions(b, exe);
+```
+
+`build.zig.zon`:
+
+```zig
+.{
+    .name = "lambda-zig",
+    .version = "0.1.0",
+    .dependencies = .{
+        .@"lambda-zig" = .{
+            .url = "https://git.lerch.org/lobo/lambda-zig/archive/fa13a08c4d91034a9b19d85f8c4c0af4cedaa67e.tar.gz",
+            .hash = "122037c357f834ffddf7b3a514f55edd5a4d7a3cde138a4021b6ac51be8fd2926000",
+        },
+    },
+}
+```
+
+That's it! Now you should have the 4 custom build steps
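For orientation (not part of the diff), a minimal consumer `build.zig` following the three steps above might look like the sketch below. The executable setup mirrors the stock `zig init-exe` template and is an assumption; only the final `lambdaBuildOptions` line comes from this repository's README.

```zig
const std = @import("std");

// Hypothetical consumer build.zig; everything except the last line is the
// standard init-exe template and is assumed, not taken from this diff.
pub fn build(b: *std.Build) !void { // note `!void`, per step 3 above
    const target = b.standardTargetOptions(.{});
    const optimize = b.standardOptimizeOption(.{});

    const exe = b.addExecutable(.{
        .name = "bootstrap", // AWS custom runtimes expect an executable named bootstrap
        .root_source_file = b.path("src/main.zig"),
        .target = target,
        .optimize = optimize,
    });
    b.installArtifact(exe);

    // Adds the awslambda_package / awslambda_iam / awslambda_deploy / awslambda_run steps
    try @import("lambda-zig").lambdaBuildOptions(b, exe);
}
```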

build.zig

@@ -1,5 +1,4 @@
const std = @import("std");
-const builtin = @import("builtin");

// Although this function looks imperative, note that its job is to
// declaratively construct a build graph that will be executed by an external
@@ -16,20 +15,15 @@ pub fn build(b: *std.Build) !void {
    // set a preferred release mode, allowing the user to decide how to optimize.
    const optimize = b.standardOptimizeOption(.{});

-    // Create a module for lambda.zig
-    const lambda_module = b.createModule(.{
+    const lib = b.addStaticLibrary(.{
+        .name = "lambda-zig",
+        // In this case the main source file is merely a path, however, in more
+        // complicated build scripts, this could be a generated file.
        .root_source_file = b.path("src/lambda.zig"),
        .target = target,
        .optimize = optimize,
    });
-    const lib = b.addLibrary(.{
-        .name = "lambda-zig",
-        .linkage = .static,
-        .root_module = lambda_module,
-    });

-    // Export the module for other packages to use
    _ = b.addModule("lambda_runtime", .{
        .root_source_file = b.path("src/lambda.zig"),
        .target = target,
@@ -43,17 +37,12 @@ pub fn build(b: *std.Build) !void {
    // Creates a step for unit testing. This only builds the test executable
    // but does not run it.
-    const test_module = b.createModule(.{
+    const main_tests = b.addTest(.{
        .root_source_file = b.path("src/lambda.zig"),
        .target = target,
        .optimize = optimize,
    });
-    const main_tests = b.addTest(.{
-        .name = "test",
-        .root_module = test_module,
-    });

    const run_main_tests = b.addRunArtifact(main_tests);

    // This creates a build step. It will be visible in the `zig build --help` menu,
@@ -62,95 +51,38 @@ pub fn build(b: *std.Build) !void {
    const test_step = b.step("test", "Run library tests");
    test_step.dependOn(&run_main_tests.step);

-    // Create executable module
-    const exe_module = b.createModule(.{
+    const exe = b.addExecutable(.{
+        .name = "custom",
        .root_source_file = b.path("src/sample-main.zig"),
        .target = target,
        .optimize = optimize,
    });
-    const exe = b.addExecutable(.{
-        .name = "custom",
-        .root_module = exe_module,
-    });
    b.installArtifact(exe);

-    try configureBuildInternal(b, exe);
+    try lambdaBuildOptions(b, exe);
}

-/// Internal version of configureBuild for lambda-zig's own build.
-///
-/// Both this and configureBuild do the same thing, but resolve the lambda_build
-/// dependency differently:
-///
-/// - Here: we call `b.dependency("lambda_build", ...)` directly since `b` is
-///   lambda-zig's own Build context, which has lambda_build in its build.zig.zon
-///
-/// - configureBuild: consumers pass in their lambda_zig dependency, and we use
-///   `lambda_zig_dep.builder.dependency("lambda_build", ...)` to resolve it from
-///   lambda-zig's build.zig.zon rather than the consumer's
-///
-/// This avoids requiring consumers to declare lambda_build as a transitive
-/// dependency in their own build.zig.zon.
-fn configureBuildInternal(b: *std.Build, exe: *std.Build.Step.Compile) !void {
-    // When called from lambda-zig's own build, use local dependency
-    const lambda_build_dep = b.dependency("lambda_build", .{
-        .target = b.graph.host,
-        .optimize = .ReleaseSafe,
-    });
-    try @import("lambdabuild.zig").configureBuild(b, lambda_build_dep, exe);
-}
-
-/// Configure Lambda build steps for a Zig project.
-///
-/// This function adds build steps and options for packaging and deploying
-/// Lambda functions to AWS. The `lambda_zig_dep` parameter must be the
-/// dependency object obtained from `b.dependency("lambda_zig", ...)`.
-///
-/// ## Build Steps
-///
-/// The following build steps are added:
-///
-/// - `awslambda_package`: Package the executable into a Lambda deployment zip
-/// - `awslambda_iam`: Create or verify the IAM role for the Lambda function
-/// - `awslambda_deploy`: Deploy the function to AWS Lambda (depends on package)
-/// - `awslambda_run`: Invoke the deployed function (depends on deploy)
-///
-/// ## Build Options
-///
-/// The following options are added to the build:
-///
-/// - `-Dfunction-name=[string]`: Name of the Lambda function (default: "zig-fn")
-/// - `-Dregion=[string]`: AWS region for deployment and invocation
-/// - `-Dprofile=[string]`: AWS profile to use for credentials
-/// - `-Drole-name=[string]`: IAM role name (default: "lambda_basic_execution")
-/// - `-Dpayload=[string]`: JSON payload for invocation (default: "{}")
-///
-/// ## Example
-///
-/// ```zig
-/// const lambda_zig = @import("lambda_zig");
-///
-/// pub fn build(b: *std.Build) !void {
-///     const target = b.standardTargetOptions(.{});
-///     const optimize = b.standardOptimizeOption(.{});
-///
-///     const lambda_zig_dep = b.dependency("lambda_zig", .{
-///         .target = target,
-///         .optimize = optimize,
-///     });
-///
-///     const exe = b.addExecutable(.{ ... });
-///     b.installArtifact(exe);
-///
-///     try lambda_zig.configureBuild(b, lambda_zig_dep, exe);
-/// }
-/// ```
-pub fn configureBuild(b: *std.Build, lambda_zig_dep: *std.Build.Dependency, exe: *std.Build.Step.Compile) !void {
-    // Get lambda_build from the lambda_zig dependency's Build context
-    const lambda_build_dep = lambda_zig_dep.builder.dependency("lambda_build", .{
-        .target = b.graph.host,
-        .optimize = .ReleaseSafe,
-    });
-    try @import("lambdabuild.zig").configureBuild(b, lambda_build_dep, exe);
+/// lambdaBuildOptions will add three build options to the build (if compiling
+/// the code on a Linux host):
+///
+/// * package: Packages the function for deployment to Lambda
+///   (dependencies are the zip executable and a shell)
+/// * iam: Gets an IAM role for the Lambda function, and creates it if it does not exist
+///   (dependencies are the AWS CLI, grep and a shell)
+/// * deploy: Deploys the lambda function to a live AWS environment
+///   (dependencies are the AWS CLI, and a shell)
+/// * remoterun: Runs the lambda function in a live AWS environment
+///   (dependencies are the AWS CLI, and a shell)
+///
+/// remoterun depends on deploy
+/// deploy depends on iam and package
+///
+/// iam and package do not have any dependencies
+pub fn lambdaBuildOptions(b: *std.Build, exe: *std.Build.Step.Compile) !void {
+    const function_name = b.option([]const u8, "function-name", "Function name for Lambda [zig-fn]") orelse "zig-fn";
+    try @import("lambdabuild.zig").configureBuild(b, exe, function_name);
+}
+
+pub fn configureBuild(b: *std.Build, exe: *std.Build.Step.Compile, function_name: []const u8) !void {
+    try @import("lambdabuild.zig").configureBuild(b, exe, function_name);
}


@@ -1,15 +1,20 @@
.{
-    .name = .lambda_zig,
+    .name = "lambda-zig",
    // This is a [Semantic Version](https://semver.org/).
    // In a future version of Zig it will be used for package deduplication.
-    .version = "0.1.0",
-    .fingerprint = 0xae58341fff376efc,
-    .minimum_zig_version = "0.15.2",
+    .version = "0.0.0",
+    // This field is optional.
+    // This is currently advisory only; Zig does not yet do anything
+    // with this value.
    .dependencies = .{
-        .lambda_build = .{
-            .path = "tools/build",
+        .aws = .{
+            .url = "https://git.lerch.org/api/packages/lobo/generic/aws-sdk-with-models/908c9d2d429b1f38c835363db566aa17bf1742fd/908c9d2d429b1f38c835363db566aa17bf1742fd-with-models.tar.gz",
+            .hash = "122022770a177afb2ee46632f88ad5468a5dea8df22170d1dea5163890b0a881399d",
        },
    },
+    .minimum_zig_version = "0.12.0",
    // Specifies the set of files and directories that are included in this package.
    // Only files and directories listed here are included in the `hash` that
    // is computed for this package.
@@ -21,7 +26,7 @@
        "build.zig.zon",
        "lambdabuild.zig",
        "src",
-        "tools",
+        "lambdabuild",
        "LICENSE",
        "README.md",
    },


@@ -1,112 +1,168 @@
-//! Lambda Build Integration for Zig Build System
-//!
-//! This module provides build steps for packaging and deploying Lambda functions.
-//! It builds the lambda-build CLI tool and invokes it for each operation.
const std = @import("std");

-/// Configure Lambda build steps for a Zig project.
-///
-/// Adds the following build steps:
-/// - awslambda_package: Package the function into a zip file
-/// - awslambda_iam: Create/verify IAM role
-/// - awslambda_deploy: Deploy the function to AWS
-/// - awslambda_run: Invoke the deployed function
-pub fn configureBuild(
-    b: *std.Build,
-    lambda_build_dep: *std.Build.Dependency,
-    exe: *std.Build.Step.Compile,
-) !void {
-    // Get the lambda-build CLI artifact from the dependency
-    const cli = lambda_build_dep.artifact("lambda-build");
-
-    // Get configuration options
-    const function_name = b.option([]const u8, "function-name", "Function name for Lambda") orelse "zig-fn";
-    const region = b.option([]const u8, "region", "AWS region") orelse null;
-    const profile = b.option([]const u8, "profile", "AWS profile") orelse null;
-    const role_name = b.option(
-        []const u8,
-        "role-name",
-        "IAM role name (default: lambda_basic_execution)",
-    ) orelse "lambda_basic_execution";
-    const payload = b.option(
-        []const u8,
-        "payload",
-        "Lambda invocation payload",
-    ) orelse "{}";
-
-    // Determine architecture for Lambda
-    const target_arch = exe.root_module.resolved_target.?.result.cpu.arch;
-    const arch_str = blk: {
-        switch (target_arch) {
-            .aarch64 => break :blk "aarch64",
-            .x86_64 => break :blk "x86_64",
-            else => {
-                std.log.warn("Unsupported architecture for Lambda: {}, defaulting to x86_64", .{target_arch});
-                break :blk "x86_64";
-            },
-        }
-    };
-
-    // Package step - output goes to cache based on input hash
-    const package_cmd = b.addRunArtifact(cli);
-    package_cmd.step.name = try std.fmt.allocPrint(b.allocator, "{s} package", .{cli.name});
-    package_cmd.addArgs(&.{ "package", "--exe" });
-    package_cmd.addFileArg(exe.getEmittedBin());
-    package_cmd.addArgs(&.{"--output"});
-    const zip_output = package_cmd.addOutputFileArg("function.zip");
-    package_cmd.step.dependOn(&exe.step);
-    const package_step = b.step("awslambda_package", "Package the Lambda function");
-    package_step.dependOn(&package_cmd.step);
-
-    // IAM step
-    const iam_cmd = b.addRunArtifact(cli);
-    iam_cmd.step.name = try std.fmt.allocPrint(b.allocator, "{s} iam", .{cli.name});
-    if (profile) |p| iam_cmd.addArgs(&.{ "--profile", p });
-    if (region) |r| iam_cmd.addArgs(&.{ "--region", r });
-    iam_cmd.addArgs(&.{ "iam", "--role-name", role_name });
-    const iam_step = b.step("awslambda_iam", "Create/verify IAM role for Lambda");
-    iam_step.dependOn(&iam_cmd.step);
-
-    // Deploy step (depends on package)
-    const deploy_cmd = b.addRunArtifact(cli);
-    deploy_cmd.step.name = try std.fmt.allocPrint(b.allocator, "{s} deploy", .{cli.name});
-    if (profile) |p| deploy_cmd.addArgs(&.{ "--profile", p });
-    if (region) |r| deploy_cmd.addArgs(&.{ "--region", r });
-    deploy_cmd.addArgs(&.{
-        "deploy",
-        "--function-name",
-        function_name,
-        "--zip-file",
-    });
-    deploy_cmd.addFileArg(zip_output);
-    deploy_cmd.addArgs(&.{
-        "--role-name",
-        role_name,
-        "--arch",
-        arch_str,
-    });
-    deploy_cmd.step.dependOn(&package_cmd.step);
-
-    const deploy_step = b.step("awslambda_deploy", "Deploy the Lambda function");
-    deploy_step.dependOn(&deploy_cmd.step);
-
-    // Invoke/run step (depends on deploy)
-    const invoke_cmd = b.addRunArtifact(cli);
-    invoke_cmd.step.name = try std.fmt.allocPrint(b.allocator, "{s} invoke", .{cli.name});
-    if (profile) |p| invoke_cmd.addArgs(&.{ "--profile", p });
-    if (region) |r| invoke_cmd.addArgs(&.{ "--region", r });
-    invoke_cmd.addArgs(&.{
-        "invoke",
-        "--function-name",
-        function_name,
-        "--payload",
-        payload,
-    });
-    invoke_cmd.step.dependOn(&deploy_cmd.step);
-
-    const run_step = b.step("awslambda_run", "Invoke the deployed Lambda function");
-    run_step.dependOn(&invoke_cmd.step);
-}
+const builtin = @import("builtin");
+const Package = @import("lambdabuild/Package.zig");
+const Iam = @import("lambdabuild/Iam.zig");
+const Deploy = @import("lambdabuild/Deploy.zig");
+const Invoke = @import("lambdabuild/Invoke.zig");
+
+fn fileExists(file_name: []const u8) bool {
+    const file = std.fs.openFileAbsolute(file_name, .{}) catch return false;
+    defer file.close();
+    return true;
+}
+fn addArgs(allocator: std.mem.Allocator, original: []const u8, args: [][]const u8) ![]const u8 {
+    var rc = original;
+    for (args) |arg| {
+        rc = try std.mem.concat(allocator, u8, &.{ rc, " ", arg });
+    }
+    return rc;
+}
+
+/// lambdaBuildSteps will add four build steps to the build (if compiling
+/// the code on a Linux host):
+///
+/// * awslambda_package: Packages the function for deployment to Lambda
+///   (dependencies are the zip executable and a shell)
+/// * awslambda_iam: Gets an IAM role for the Lambda function, and creates it if it does not exist
+///   (dependencies are the AWS CLI, grep and a shell)
+/// * awslambda_deploy: Deploys the lambda function to a live AWS environment
+///   (dependencies are the AWS CLI, and a shell)
+/// * awslambda_run: Runs the lambda function in a live AWS environment
+///   (dependencies are the AWS CLI, and a shell)
+///
+/// awslambda_run depends on deploy
+/// awslambda_deploy depends on iam and package
+///
+/// iam and package do not have any dependencies
+pub fn configureBuild(b: *std.Build, exe: *std.Build.Step.Compile, function_name: []const u8) !void {
+    // The rest of this function is currently reliant on the use of Linux
+    // system being used to build the lambda function
+    //
+    // It is likely that much of this will work on other Unix-like OSs, but
+    // we will work this out later
+    //
+    // TODO: support other host OSs
+    if (builtin.os.tag != .linux) return;
+    @import("aws").aws.globalLogControl(.info, .warn, .info, false);
+    const package_step = Package.create(b, .{ .exe = exe });
+    const step = b.step("awslambda_package", "Package the function");
+    step.dependOn(&package_step.step);
+    package_step.step.dependOn(b.getInstallStep());
+
+    // Doing this will require that the aws dependency be added to the downstream
+    // build.zig.zon
+    // const lambdabuild = b.addExecutable(.{
+    //     .name = "lambdabuild",
+    //     .root_source_file = .{
+    //         // we use cwd_relative here because we need to compile this relative
+    //         // to whatever directory this file happens to be. That is likely
+    //         // in a cache directory, not the base of the build.
+    //         .cwd_relative = try std.fs.path.join(b.allocator, &[_][]const u8{
+    //             std.fs.path.dirname(@src().file).?,
+    //             "lambdabuild/src/main.zig",
+    //         }),
+    //     },
+    //     .target = b.host,
+    // });
+    // const aws_dep = b.dependency("aws", .{
+    //     .target = b.host,
+    //     .optimize = lambdabuild.root_module.optimize orelse .Debug,
+    // });
+    // const aws_module = aws_dep.module("aws");
+    // lambdabuild.root_module.addImport("aws", aws_module);
+    //
+    const iam_role_name = b.option(
+        []const u8,
+        "function-role",
+        "IAM role name for function (will create if it does not exist) [lambda_basic_execution]",
+    ) orelse "lambda_basic_execution_blah2";
+    const iam_role_arn = b.option(
+        []const u8,
+        "function-arn",
+        "Preexisting IAM role arn for function",
+    );
+
+    const iam = Iam.create(b, .{
+        .role_name = iam_role_name,
+        .role_arn = iam_role_arn,
+    });
+    const iam_step = b.step("awslambda_iam", "Create/Get IAM role for function");
+    iam_step.dependOn(&iam.step);
+    const region = try b.allocator.create(@import("lambdabuild/Region.zig"));
+    region.* = .{
+        .allocator = b.allocator,
+        .specified_region = b.option([]const u8, "region", "Region to use [default is autodetect from environment/config]"),
+    };
+    // Deployment
+    const deploy = Deploy.create(b, .{
+        .name = function_name,
+        .arch = exe.root_module.resolved_target.?.result.cpu.arch,
+        .iam_step = iam,
+        .package_step = package_step,
+        .region = region,
+    });
+
+    const deploy_step = b.step("awslambda_deploy", "Deploy the function");
+    deploy_step.dependOn(&deploy.step);
+
+    const payload = b.option([]const u8, "payload", "Lambda payload [{\"foo\":\"bar\", \"baz\": \"qux\"}]") orelse
+        \\ {"foo": "bar", "baz": "qux"}"
+    ;
+
+    const invoke = Invoke.create(b, .{
+        .name = function_name,
+        .payload = payload,
+        .region = region,
+    });
+    invoke.step.dependOn(&deploy.step);
+    const run_step = b.step("awslambda_run", "Run the app in AWS lambda");
+    run_step.dependOn(&invoke.step);
+}
+
+// AWS_CONFIG_FILE (default is ~/.aws/config
+// AWS_DEFAULT_REGION
+fn findRegionFromSystem(allocator: std.mem.Allocator) ![]const u8 {
+    const env_map = try std.process.getEnvMap(allocator);
+    if (env_map.get("AWS_DEFAULT_REGION")) |r| return r;
+    const config_file_path = env_map.get("AWS_CONFIG_FILE") orelse
+        try std.fs.path.join(allocator, &[_][]const u8{
+        env_map.get("HOME") orelse env_map.get("USERPROFILE").?,
+        ".aws",
+        "config",
+    });
+    const config_file = try std.fs.openFileAbsolute(config_file_path, .{});
+    defer config_file.close();
+    const config_bytes = try config_file.readToEndAlloc(allocator, 1024 * 1024);
+    const profile = env_map.get("AWS_PROFILE") orelse "default";
+    var line_iterator = std.mem.split(u8, config_bytes, "\n");
+    var in_profile = false;
+    while (line_iterator.next()) |line| {
+        const trimmed = std.mem.trim(u8, line, " \t\r");
+        if (trimmed.len == 0 or trimmed[0] == '#') continue;
+        if (!in_profile) {
+            if (trimmed[0] == '[' and trimmed[trimmed.len - 1] == ']') {
+                // this is a profile directive!
+                // std.debug.print("profile: {s}, in file: {s}\n", .{ profile, trimmed[1 .. trimmed.len - 1] });
+                if (std.mem.eql(u8, profile, trimmed[1 .. trimmed.len - 1])) {
+                    in_profile = true;
+                }
+            }
+            continue; // we're only looking for a profile at this point
+        }
+        // look for our region directive
+        if (trimmed[0] == '[' and trimmed[trimmed.len - 1] == ']')
+            return error.RegionNotFound; // we've hit another profile without getting our region
+        if (!std.mem.startsWith(u8, trimmed, "region")) continue;
+        var equalityiterator = std.mem.split(u8, trimmed, "=");
+        _ = equalityiterator.next() orelse return error.RegionNotFound;
+        const raw_val = equalityiterator.next() orelse return error.RegionNotFound;
+        return try allocator.dupe(u8, std.mem.trimLeft(u8, raw_val, " \t"));
+    }
+    return error.RegionNotFound;
+}
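As an aside (not part of the diff), the incoming `addArgs` helper simply space-joins arguments onto a base string. A hypothetical test, assumed to sit in the same file next to `addArgs` (which already imports `std`), could look like this:

```zig
test "addArgs space-joins arguments onto the original string" {
    // Arena allocator because addArgs re-concatenates and drops intermediate buffers.
    var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
    defer arena.deinit();
    var args = [_][]const u8{ "--function-name", "zig-fn" };
    const result = try addArgs(arena.allocator(), "aws lambda invoke", args[0..]);
    try std.testing.expectEqualStrings("aws lambda invoke --function-name zig-fn", result);
}
```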

lambdabuild/Deploy.zig Normal file

@@ -0,0 +1,165 @@
const std = @import("std");
const Region = @import("Region.zig");
const aws = @import("aws").aws;
const Deploy = @This();
step: std.Build.Step,
options: Options,
const base_id: std.Build.Step.Id = .custom;
pub const Options = struct {
/// Function name to be used for the function
name: []const u8,
/// Architecture for Lambda function
arch: std.Target.Cpu.Arch,
/// Iam step. This will be a dependency of the deployment
iam_step: *@import("Iam.zig"),
/// Packaging step. This will be a dependency of the deployment
package_step: *@import("Package.zig"),
/// Region for deployment
region: *Region,
};
pub fn create(owner: *std.Build, options: Options) *Deploy {
const name = owner.dupe(options.name);
const step_name = owner.fmt("{s} {s}{s}", .{
"aws lambda",
"deploy ",
name,
});
const self = owner.allocator.create(Deploy) catch @panic("OOM");
self.* = .{
.step = std.Build.Step.init(.{
.id = base_id,
.name = step_name,
.owner = owner,
.makeFn = make,
}),
.options = options,
};
self.step.dependOn(&options.iam_step.step);
self.step.dependOn(&options.package_step.step);
return self;
}
/// gets the last time we deployed this function from the name in cache.
/// If not in cache, null is returned. Note that cache is not account specific,
/// so if you're banging around multiple accounts, you'll want to use different
/// local zig caches for each
fn getlastDeployedTime(step: *std.Build.Step, name: []const u8) !?[]const u8 {
try step.owner.cache_root.handle.makePath("iam");
// we should be able to use the role name, as only the following characters
// are allowed: _+=,.@-.
const cache_file = try std.fmt.allocPrint(
step.owner.allocator,
"deploy{s}{s}",
.{ std.fs.path.sep_str, name },
);
const buff = try step.owner.allocator.alloc(u8, 64);
const time = step.owner.cache_root.handle.readFile(cache_file, buff) catch return null;
return time;
}
fn make(step: *std.Build.Step, node: std.Progress.Node) anyerror!void {
_ = node;
const self: *Deploy = @fieldParentPtr("step", step);
if (self.options.arch != .aarch64 and self.options.arch != .x86_64)
return step.fail("AWS Lambda can only deploy aarch64 and x86_64 functions ({} not allowed)", .{self.options.arch});
const last_packaged_sha256 = blk: {
// file should always be there, but we shouldn't break if the cache doesn't exist
const last_deployed_id_file = std.fs.openFileAbsolute(try self.options.package_step.shasumFilePath(), .{}) catch break :blk null;
defer last_deployed_id_file.close();
break :blk try last_deployed_id_file.readToEndAlloc(step.owner.allocator, 2048);
};
var client = aws.Client.init(self.step.owner.allocator, .{});
defer client.deinit();
const services = aws.Services(.{.lambda}){};
const function = blk: {
var diagnostics = aws.Diagnostics{
.http_code = undefined,
.response_body = undefined,
.allocator = self.step.owner.allocator,
};
const options = aws.Options{
.client = client,
.diagnostics = &diagnostics,
.region = try self.options.region.region(),
};
aws.globalLogControl(.info, .warn, .info, true);
defer aws.globalLogControl(.info, .warn, .info, false);
const call = aws.Request(services.lambda.get_function).call(.{
.function_name = self.options.name,
}, options) catch |e| {
// There seems an issue here, but realistically, we have an arena
// so there's no leak leaving this out
defer diagnostics.deinit();
if (diagnostics.http_code == 404) break :blk null;
return step.fail(
"Unknown error {} from Lambda GetFunction. HTTP code {}, message: {s}",
.{ e, diagnostics.http_code, diagnostics.response_body },
);
};
defer call.deinit();
break :blk .{
.last_modified = try step.owner.allocator.dupe(u8, call.response.configuration.?.last_modified.?),
.revision_id = try step.owner.allocator.dupe(u8, call.response.configuration.?.revision_id.?),
.sha256 = try step.owner.allocator.dupe(u8, call.response.configuration.?.code_sha256.?),
};
};
if (last_packaged_sha256) |s|
if (function) |f|
if (std.mem.eql(u8, s, f.sha256)) {
step.result_cached = true;
return;
};
const encoder = std.base64.standard.Encoder;
const file = try std.fs.openFileAbsolute(self.options.package_step.packagedFileLazyPath().getPath2(step.owner, step), .{});
defer file.close();
const bytes = try file.readToEndAlloc(step.owner.allocator, 100 * 1024 * 1024);
const base64_buf = try step.owner.allocator.alloc(u8, encoder.calcSize(bytes.len));
const base64_bytes = encoder.encode(base64_buf, bytes);
const options = aws.Options{
.client = client,
.region = try self.options.region.region(),
};
const arm64_arch = [_][]const u8{"arm64"};
const x86_64_arch = [_][]const u8{"x86_64"};
const architectures = (if (self.options.arch == .aarch64) arm64_arch else x86_64_arch);
const arches: [][]const u8 = @constCast(architectures[0..]);
if (function) |f| {
// TODO: make sure our zipfile newer than the lambda function
const update_call = try aws.Request(services.lambda.update_function_code).call(.{
.function_name = self.options.name,
.architectures = arches,
.revision_id = f.revision_id,
.zip_file = base64_bytes,
}, options);
defer update_call.deinit();
} else {
// New function - we need to create from scratch
const create_call = try aws.Request(services.lambda.create_function).call(.{
.function_name = self.options.name,
.architectures = arches,
.code = .{ .zip_file = base64_bytes },
.handler = "not_applicable",
.package_type = "Zip",
.runtime = "provided.al2",
.role = self.options.iam_step.resolved_arn,
}, options);
defer create_call.deinit();
}
}

lambdabuild/Iam.zig Normal file

@@ -0,0 +1,146 @@
const std = @import("std");
const aws = @import("aws").aws;
const Iam = @This();
step: std.Build.Step,
options: Options,
/// resolved_arn will be set only after make is run
resolved_arn: []const u8 = undefined,
arn_buf: [2048]u8 = undefined, // https://docs.aws.amazon.com/IAM/latest/APIReference/API_Role.html has 2k limit
const base_id: std.Build.Step.Id = .custom;
pub const Options = struct {
name: []const u8 = "",
role_name: []const u8,
role_arn: ?[]const u8,
};
pub fn create(owner: *std.Build, options: Options) *Iam {
const name = owner.dupe(options.name);
const step_name = owner.fmt("{s} {s}{s}", .{
"aws lambda",
"iam",
name,
});
const self = owner.allocator.create(Iam) catch @panic("OOM");
self.* = .{
.step = std.Build.Step.init(.{
.id = base_id,
.name = step_name,
.owner = owner,
.makeFn = make,
}),
.options = options,
};
return self;
}
/// gets an IamArn from the name in cache. If not in cache, null is returned
/// Note that cache is not account specific, so if you're banging around multiple
/// accounts, you'll want to use different local zig caches for each
pub fn getIamArnFromName(step: *std.Build.Step, name: []const u8) !?[]const u8 {
try step.owner.cache_root.handle.makePath("iam");
// we should be able to use the role name, as only the following characters
// are allowed: _+=,.@-.
const iam_file = try std.fmt.allocPrint(
step.owner.allocator,
"iam{s}{s}",
.{ std.fs.path.sep_str, name },
);
const buff = try step.owner.allocator.alloc(u8, 64);
const arn = step.owner.cache_root.handle.readFile(iam_file, buff) catch return null;
return arn;
}
fn make(step: *std.Build.Step, node: std.Progress.Node) anyerror!void {
_ = node;
const self: *Iam = @fieldParentPtr("step", step);
if (try getIamArnFromName(step, self.options.role_name)) |a| {
step.result_cached = true;
@memcpy(self.arn_buf[0..a.len], a);
self.resolved_arn = self.arn_buf[0..a.len];
return; // exists in cache - nothing to do
}
var client = aws.Client.init(self.step.owner.allocator, .{});
defer client.deinit();
const services = aws.Services(.{.iam}){};
var arn = blk: {
var diagnostics = aws.Diagnostics{
.http_code = undefined,
.response_body = undefined,
.allocator = self.step.owner.allocator,
};
const options = aws.Options{
.client = client,
.diagnostics = &diagnostics,
};
const call = aws.Request(services.iam.get_role).call(.{
.role_name = self.options.role_name, // TODO: if we have a role_arn, we should use it and skip
}, options) catch |e| {
defer diagnostics.deinit();
if (diagnostics.http_code == 404) break :blk null;
return step.fail(
"Unknown error {} from IAM GetRole. HTTP code {}, message: {s}",
.{ e, diagnostics.http_code, diagnostics.response_body },
);
};
defer call.deinit();
break :blk try step.owner.allocator.dupe(u8, call.response.role.arn);
};
// Now ARN will either be null (does not exist), or a value
if (arn == null) {
// we need to create the role before proceeding
const options = aws.Options{
.client = client,
};
const create_call = try aws.Request(services.iam.create_role).call(.{
.role_name = self.options.role_name,
.assume_role_policy_document =
\\{
\\ "Version": "2012-10-17",
\\ "Statement": [
\\ {
\\ "Sid": "",
\\ "Effect": "Allow",
\\ "Principal": {
\\ "Service": "lambda.amazonaws.com"
\\ },
\\ "Action": "sts:AssumeRole"
\\ }
\\ ]
\\}
,
}, options);
defer create_call.deinit();
arn = try step.owner.allocator.dupe(u8, create_call.response.role.arn);
const attach_call = try aws.Request(services.iam.attach_role_policy).call(.{
.policy_arn = "arn:aws:iam::aws:policy/AWSLambdaExecute",
.role_name = self.options.role_name,
}, options);
defer attach_call.deinit();
}
@memcpy(self.arn_buf[0..arn.?.len], arn.?);
self.resolved_arn = self.arn_buf[0..arn.?.len];
// NOTE: This must match getIamArnFromName
const iam_file = try std.fmt.allocPrint(
step.owner.allocator,
"iam{s}{s}",
.{ std.fs.path.sep_str, self.options.role_name },
);
try step.owner.cache_root.handle.writeFile(.{
.sub_path = iam_file,
.data = arn.?,
});
}

lambdabuild/Invoke.zig Normal file

@@ -0,0 +1,90 @@
const std = @import("std");
const aws = @import("aws").aws;
const Region = @import("Region.zig");
const Invoke = @This();
step: std.Build.Step,
options: Options,
const base_id: std.Build.Step.Id = .custom;
pub const Options = struct {
/// Function name to invoke
name: []const u8,
/// Payload to send to the function
payload: []const u8,
/// Region for deployment
region: *Region,
};
pub fn create(owner: *std.Build, options: Options) *Invoke {
const name = owner.dupe(options.name);
const step_name = owner.fmt("{s} {s}{s}", .{
"aws lambda",
"invoke",
name,
});
const self = owner.allocator.create(Invoke) catch @panic("OOM");
self.* = .{
.step = std.Build.Step.init(.{
.id = base_id,
.name = step_name,
.owner = owner,
.makeFn = make,
}),
.options = options,
};
return self;
}
fn make(step: *std.Build.Step, node: std.Progress.Node) anyerror!void {
_ = node;
const self: *Invoke = @fieldParentPtr("step", step);
var client = aws.Client.init(self.step.owner.allocator, .{});
defer client.deinit();
const services = aws.Services(.{.lambda}){};
const options = aws.Options{
.client = client,
.region = try self.options.region.region(),
};
var inx: usize = 10; // 200ms * 10
while (inx > 0) : (inx -= 1) {
var diagnostics = aws.Diagnostics{
.http_code = undefined,
.response_body = undefined,
.allocator = self.step.owner.allocator,
};
const call = aws.Request(services.lambda.get_function).call(.{
.function_name = self.options.name,
}, options) catch |e| {
// There seems an issue here, but realistically, we have an arena
// so there's no leak leaving this out
defer diagnostics.deinit();
if (diagnostics.http_code == 404) continue; // function was just created...it's ok
return step.fail(
"Unknown error {} from Lambda GetFunction. HTTP code {}, message: {s}",
.{ e, diagnostics.http_code, diagnostics.response_body },
);
};
defer call.deinit();
if (!std.mem.eql(u8, "InProgress", call.response.configuration.?.last_update_status.?))
break; // We're ready to invoke!
const ms: usize = if (inx == 5) 500 else 50;
std.time.sleep(ms * std.time.ns_per_ms);
}
if (inx == 0)
return step.fail("Timed out waiting for lambda to update function", .{});
const call = try aws.Request(services.lambda.invoke).call(.{
.function_name = self.options.name,
.payload = self.options.payload,
.log_type = "Tail",
.invocation_type = "RequestResponse",
}, options);
defer call.deinit();
std.debug.print("{?s}\n", .{call.response.payload});
}

lambdabuild/Package.zig Normal file

@@ -0,0 +1,158 @@
const std = @import("std");
const Package = @This();
step: std.Build.Step,
options: Options,
/// This is set as part of the make phase, and is the location in the cache
/// for the lambda package. The package will also be copied to the output
/// directory, but this location makes for a good cache key for deployments
zipfile_cache_dest: ?[]const u8 = null,
zipfile_dest: ?[]const u8 = null,
const base_id: std.Build.Step.Id = .install_file;
pub const Options = struct {
name: []const u8 = "",
exe: *std.Build.Step.Compile,
zipfile_name: []const u8 = "function.zip",
};
pub fn create(owner: *std.Build, options: Options) *Package {
const name = owner.dupe(options.name);
const step_name = owner.fmt("{s} {s}{s}", .{
"aws lambda",
"package",
name,
});
const package = owner.allocator.create(Package) catch @panic("OOM");
package.* = .{
.step = std.Build.Step.init(.{
.id = base_id,
.name = step_name,
.owner = owner,
.makeFn = make,
}),
.options = options,
};
return package;
}
pub fn shasumFilePath(self: Package) ![]const u8 {
return try std.fmt.allocPrint(
self.step.owner.allocator,
"{s}{s}{s}",
.{ std.fs.path.dirname(self.zipfile_cache_dest.?).?, std.fs.path.sep_str, "sha256sum.txt" },
);
}
pub fn packagedFilePath(self: Package) []const u8 {
return self.step.owner.getInstallPath(.prefix, self.options.zipfile_name);
}
pub fn packagedFileLazyPath(self: Package) std.Build.LazyPath {
return .{ .src_path = .{
.owner = self.step.owner,
.sub_path = self.step.owner.getInstallPath(.prefix, self.options.zipfile_name),
} };
}
fn make(step: *std.Build.Step, node: std.Progress.Node) anyerror!void {
_ = node;
const self: *Package = @fieldParentPtr("step", step);
// get a hash of the bootstrap and whatever other files we put into the zip
// file (because a zip is not really reproducible). That hash becomes the
// cache directory, similar to the way rest of zig works
//
// Otherwise, create the package in our cache indexed by hash, and copy
// our bootstrap, zip things up and install the file into zig-out
const bootstrap = bootstrapLocation(self.*) catch |e| {
if (@errorReturnTrace()) |trace| {
std.debug.dumpStackTrace(trace.*);
}
return step.fail("Could not copy output to bootstrap: {}", .{e});
};
const bootstrap_dirname = std.fs.path.dirname(bootstrap).?;
const zipfile_src = try std.fs.path.join(step.owner.allocator, &[_][]const u8{ bootstrap_dirname, self.options.zipfile_name });
self.zipfile_cache_dest = zipfile_src;
self.zipfile_dest = self.step.owner.getInstallPath(.prefix, self.options.zipfile_name);
if (std.fs.copyFileAbsolute(zipfile_src, self.zipfile_dest.?, .{})) |_| {
// we're good here. The zip file exists in cache and has been copied
step.result_cached = true;
} else |_| {
// error, but this is actually the normal case. We will zip the file
// using system zip and store that in cache with the output file for later
// use
// TODO: For Windows, tar.exe can actually do zip files.
// tar -a -cf function.zip file1 [file2...]
//
// See: https://superuser.com/questions/201371/create-zip-folder-from-the-command-line-windows#comment2725283_898508
var child = std.process.Child.init(&[_][]const u8{
"zip",
"-qj9X",
zipfile_src,
bootstrap,
}, self.step.owner.allocator);
child.stdout_behavior = .Ignore;
child.stdin_behavior = .Ignore; // we'll allow stderr through
switch (try child.spawnAndWait()) {
.Exited => |rc| if (rc != 0) return step.fail("Non-zero exit code {} from zip", .{rc}),
.Signal, .Stopped, .Unknown => return step.fail("Abnormal termination from zip step", .{}),
}
try std.fs.copyFileAbsolute(zipfile_src, self.zipfile_dest.?, .{}); // It better be there now
// One last thing. We want to get a Sha256 sum of the zip file, and
// store it in cache. This will help the deployment process compare
// to what's out in AWS, since revision id is apparently trash for these
// purposes
const zipfile = try std.fs.openFileAbsolute(zipfile_src, .{});
defer zipfile.close();
const zip_bytes = try zipfile.readToEndAlloc(step.owner.allocator, 100 * 1024 * 1024);
var hash: [std.crypto.hash.sha2.Sha256.digest_length]u8 = undefined;
std.crypto.hash.sha2.Sha256.hash(zip_bytes, &hash, .{});
const base64 = std.base64.standard.Encoder;
var encoded: [base64.calcSize(std.crypto.hash.sha2.Sha256.digest_length)]u8 = undefined;
const shaoutput = try std.fs.createFileAbsolute(try self.shasumFilePath(), .{});
defer shaoutput.close();
try shaoutput.writeAll(base64.encode(encoded[0..], hash[0..]));
}
}
fn bootstrapLocation(package: Package) ![]const u8 {
const output = package.step.owner.getInstallPath(.bin, package.options.exe.out_filename);
// We will always copy the output file, mainly because we also need the hash...
// if (std.mem.eql(u8, "bootstrap", package.options.exe.out_filename))
// return output; // easy path
// Not so easy...read the file, get a hash of contents, see if it's in cache
const output_file = try std.fs.openFileAbsolute(output, .{});
defer output_file.close();
const output_bytes = try output_file.readToEndAlloc(package.step.owner.allocator, 100 * 1024 * 1024); // 100MB file
// std.Build.Cache.Hasher
// std.Buidl.Cache.hasher_init
var hasher = std.Build.Cache.HashHelper{}; // We'll reuse the same file hasher from cache
hasher.addBytes(output_bytes);
const hash = std.fmt.bytesToHex(hasher.hasher.finalResult(), .lower);
const dest_path = try package.step.owner.cache_root.join(
package.step.owner.allocator,
&[_][]const u8{ "p", hash[0..], "bootstrap" },
);
const dest_file = std.fs.openFileAbsolute(dest_path, .{}) catch null;
if (dest_file) |d| {
d.close();
return dest_path;
}
const pkg_path = try package.step.owner.cache_root.join(
package.step.owner.allocator,
&[_][]const u8{"p"},
);
// Destination file does not exist. Write the bootstrap (after creating the directory)
std.fs.makeDirAbsolute(pkg_path) catch {};
std.fs.makeDirAbsolute(std.fs.path.dirname(dest_path).?) catch {};
const write_file = try std.fs.createFileAbsolute(dest_path, .{});
defer write_file.close();
try write_file.writeAll(output_bytes);
return dest_path;
}

lambdabuild/Region.zig Normal file

@@ -0,0 +1,55 @@
const std = @import("std");
specified_region: ?[]const u8,
allocator: std.mem.Allocator,
/// internal state, please do not use
_calculated_region: ?[]const u8 = null,
const Region = @This();
pub fn region(self: *Region) ![]const u8 {
if (self.specified_region) |r| return r; // user specified
if (self._calculated_region) |r| return r; // cached
self._calculated_region = try findRegionFromSystem(self.allocator);
return self._calculated_region.?;
}
// AWS_CONFIG_FILE (default is ~/.aws/config
// AWS_DEFAULT_REGION
fn findRegionFromSystem(allocator: std.mem.Allocator) ![]const u8 {
const env_map = try std.process.getEnvMap(allocator);
if (env_map.get("AWS_DEFAULT_REGION")) |r| return r;
const config_file_path = env_map.get("AWS_CONFIG_FILE") orelse
try std.fs.path.join(allocator, &[_][]const u8{
env_map.get("HOME") orelse env_map.get("USERPROFILE").?,
".aws",
"config",
});
const config_file = try std.fs.openFileAbsolute(config_file_path, .{});
defer config_file.close();
const config_bytes = try config_file.readToEndAlloc(allocator, 1024 * 1024);
const profile = env_map.get("AWS_PROFILE") orelse "default";
var line_iterator = std.mem.split(u8, config_bytes, "\n");
var in_profile = false;
while (line_iterator.next()) |line| {
const trimmed = std.mem.trim(u8, line, " \t\r");
if (trimmed.len == 0 or trimmed[0] == '#') continue;
if (!in_profile) {
if (trimmed[0] == '[' and trimmed[trimmed.len - 1] == ']') {
// this is a profile directive!
// std.debug.print("profile: {s}, in file: {s}\n", .{ profile, trimmed[1 .. trimmed.len - 1] });
if (std.mem.eql(u8, profile, trimmed[1 .. trimmed.len - 1])) {
in_profile = true;
}
}
continue; // we're only looking for a profile at this point
}
// look for our region directive
if (trimmed[0] == '[' and trimmed[trimmed.len - 1] == ']')
return error.RegionNotFound; // we've hit another profile without getting our region
if (!std.mem.startsWith(u8, trimmed, "region")) continue;
var equalityiterator = std.mem.split(u8, trimmed, "=");
_ = equalityiterator.next() orelse return error.RegionNotFound;
const raw_val = equalityiterator.next() orelse return error.RegionNotFound;
return try allocator.dupe(u8, std.mem.trimLeft(u8, raw_val, " \t"));
}
return error.RegionNotFound;
}
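For orientation (not part of the diff): region resolution above prefers an explicitly specified region, then `AWS_DEFAULT_REGION`, then the selected profile's `region` entry in `AWS_CONFIG_FILE`/`~/.aws/config`. A minimal, hypothetical usage sketch from a build step follows; the helper name `exampleRegion` is illustrative only.

```zig
const std = @import("std");
const Region = @import("lambdabuild/Region.zig");

// Hypothetical helper showing how a build step might resolve the region.
fn exampleRegion(b: *std.Build) ![]const u8 {
    const region = try b.allocator.create(Region);
    region.* = .{
        .allocator = b.allocator,
        .specified_region = b.option([]const u8, "region", "Region to use"),
    };
    // Falls back to AWS_DEFAULT_REGION, then ~/.aws/config, when no option is given
    return region.region();
}
```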

lambdabuild/function.zip Normal file

Binary file not shown.


@@ -130,51 +130,28 @@ const Event = struct {
        // non-ssl), this shouldn't be a big issue
        var cl = std.http.Client{ .allocator = self.allocator };
        defer cl.deinit();
-        var req = cl.request(.POST, err_uri, .{
+        const res = cl.fetch(.{
+            .method = .POST,
+            .payload = content_fmt,
+            .location = .{ .uri = err_uri },
            .extra_headers = &.{
                .{
                    .name = "Lambda-Runtime-Function-Error-Type",
                    .value = "HandlerReturned",
                },
            },
-        }) catch |req_err| {
-            log.err("Error creating request for request id {s}: {}", .{ self.request_id, req_err });
+        }) catch |post_err| { // Well, at this point all we can do is shout at the void
+            log.err("Error posting response (start) for request id {s}: {}", .{ self.request_id, post_err });
            std.posix.exit(1);
        };
-        defer req.deinit();
-
-        req.transfer_encoding = .{ .content_length = content_fmt.len };
-        var body_writer = req.sendBodyUnflushed(&.{}) catch |send_err| {
-            log.err("Error sending body for request id {s}: {}", .{ self.request_id, send_err });
-            std.posix.exit(1);
-        };
-        body_writer.writer.writeAll(content_fmt) catch |write_err| {
-            log.err("Error writing body for request id {s}: {}", .{ self.request_id, write_err });
-            std.posix.exit(1);
-        };
-        body_writer.end() catch |end_err| {
-            log.err("Error ending body for request id {s}: {}", .{ self.request_id, end_err });
-            std.posix.exit(1);
-        };
-        req.connection.?.flush() catch |flush_err| {
-            log.err("Error flushing for request id {s}: {}", .{ self.request_id, flush_err });
-            std.posix.exit(1);
-        };
-        var redirect_buffer: [1024]u8 = undefined;
-        const response = req.receiveHead(&redirect_buffer) catch |recv_err| {
-            log.err("Error receiving response for request id {s}: {}", .{ self.request_id, recv_err });
-            std.posix.exit(1);
-        };
-        if (response.head.status != .ok) {
+        // TODO: Determine why this post is not returning
+        if (res.status != .ok) {
            // Documentation says something about "exit immediately". The
            // Lambda infrastrucutre restarts, so it's unclear if that's necessary.
            // It seems as though a continue should be fine, and slightly faster
            log.err("Post fail: {} {s}", .{
-                @intFromEnum(response.head.status),
-                response.head.reason,
+                @intFromEnum(res.status),
+                res.status.phrase() orelse "",
            });
            std.posix.exit(1);
        }
@@ -188,31 +165,20 @@ const Event = struct {
            .{ prefix, lambda_runtime_uri, postfix, self.request_id },
        );
        defer self.allocator.free(response_url);
-        const response_uri = try std.Uri.parse(response_url);
        var cl = std.http.Client{ .allocator = self.allocator };
        defer cl.deinit();
        // Lambda does different things, depending on the runtime. Go 1.x takes
        // any return value but escapes double quotes. Custom runtimes can
        // do whatever they want. node I believe wraps as a json object. We're
        // going to leave the return value up to the handler, and they can
        // use a seperate API for normalization so we're explicit. As a result,
        // we can just post event_response completely raw here
-        var req = try cl.request(.POST, response_uri, .{});
-        defer req.deinit();
-
-        req.transfer_encoding = .{ .content_length = event_response.len };
-        var body_writer = try req.sendBodyUnflushed(&.{});
-        try body_writer.writer.writeAll(event_response);
-        try body_writer.end();
-        try req.connection.?.flush();
-        var redirect_buffer: [1024]u8 = undefined;
-        const response = try req.receiveHead(&redirect_buffer);
-        if (response.head.status != .ok) return error.UnexpectedStatusFromPostResponse;
+        const res = try cl.fetch(.{
+            .method = .POST,
+            .payload = event_response,
+            .location = .{ .url = response_url },
+        });
+        if (res.status != .ok) return error.UnexpectedStatusFromPostResponse;
    }
};
@@ -223,32 +189,30 @@ fn getEvent(allocator: std.mem.Allocator, event_data_uri: std.Uri) !?Event {
    // non-ssl), this shouldn't be a big issue
    var cl = std.http.Client{ .allocator = allocator };
    defer cl.deinit();
+    var response_bytes = std.ArrayList(u8).init(allocator);
+    defer response_bytes.deinit();
+    var server_header_buffer: [16 * 1024]u8 = undefined;
    // Lambda freezes the process at this line of code. During warm start,
    // the process will unfreeze and data will be sent in response to client.get
-    var req = try cl.request(.GET, event_data_uri, .{});
-    defer req.deinit();
-
-    try req.sendBodiless();
-
-    var redirect_buffer: [0]u8 = undefined;
-    var response = try req.receiveHead(&redirect_buffer);
-    if (response.head.status != .ok) {
+    var res = try cl.fetch(.{
+        .server_header_buffer = &server_header_buffer,
+        .location = .{ .uri = event_data_uri },
+        .response_storage = .{ .dynamic = &response_bytes },
+    });
+    if (res.status != .ok) {
        // Documentation says something about "exit immediately". The
        // Lambda infrastrucutre restarts, so it's unclear if that's necessary.
        // It seems as though a continue should be fine, and slightly faster
        // std.os.exit(1);
        log.err("Lambda server event response returned bad error code: {} {s}", .{
-            @intFromEnum(response.head.status),
-            response.head.reason,
+            @intFromEnum(res.status),
+            res.status.phrase() orelse "",
        });
        return error.EventResponseNotOkResponse;
    }
-    // Extract request ID from response headers
    var request_id: ?[]const u8 = null;
-    var header_it = response.head.iterateHeaders();
+    var header_it = std.http.HeaderIterator.init(server_header_buffer[0..]);
    while (header_it.next()) |h| {
        if (std.ascii.eqlIgnoreCase(h.name, "Lambda-Runtime-Aws-Request-Id"))
            request_id = h.value;
@@ -270,30 +234,9 @@ fn getEvent(allocator: std.mem.Allocator, event_data_uri: std.Uri) !?Event {
    const req_id = request_id.?;
    log.debug("got lambda request with id {s}", .{req_id});

-    // Read response body using a transfer buffer
-    var transfer_buffer: [64 * 1024]u8 = undefined;
-    const body_reader = response.reader(&transfer_buffer);
-
-    // Read all data into an allocated buffer
-    // We use content_length if available, otherwise read chunks
-    const content_len = response.head.content_length orelse (10 * 1024 * 1024); // 10MB max if not specified
-    var event_data = try allocator.alloc(u8, content_len);
-    errdefer allocator.free(event_data);
-    var total_read: usize = 0;
-    while (total_read < content_len) {
-        const remaining = event_data[total_read..];
-        const bytes_read = body_reader.readSliceShort(remaining) catch |err| switch (err) {
-            error.ReadFailed => return error.ReadFailed,
-        };
-        if (bytes_read == 0) break;
-        total_read += bytes_read;
-    }
-    event_data = try allocator.realloc(event_data, total_read);
-
    return Event.init(
        allocator,
-        event_data,
+        try response_bytes.toOwnedSlice(),
        try allocator.dupe(u8, req_id),
    );
}
@@ -338,6 +281,15 @@ fn threadMain(allocator: std.mem.Allocator) !void {
    // when it's time to shut down
    while (server_remaining_requests > 0) {
        server_remaining_requests -= 1;
+        // defer {
+        //     if (!arena.reset(.{ .retain_capacity = {} })) {
+        //         // reallocation failed, arena is degraded
+        //         log.warn("Arena reset failed and is degraded. Resetting arena", .{});
+        //         arena.deinit();
+        //         arena = std.heap.ArenaAllocator.init(allocator);
+        //         aa = arena.allocator();
+        //     }
+        // }
        processRequest(aa, &http_server) catch |e| {
            log.err("Unexpected error processing request: {any}", .{e});
@@ -360,54 +312,42 @@ fn processRequest(allocator: std.mem.Allocator, server: *std.net.Server) !void {
    server_ready = false;

    var read_buffer: [1024 * 16]u8 = undefined;
-    var write_buffer: [1024 * 16]u8 = undefined;
-    var stream_reader = std.net.Stream.Reader.init(connection.stream, &read_buffer);
-    var stream_writer = std.net.Stream.Writer.init(connection.stream, &write_buffer);
-
-    var http_server = std.http.Server.init(stream_reader.interface(), &stream_writer.interface);
-
-    const request = http_server.receiveHead() catch |err| switch (err) {
+    var http_server = std.http.Server.init(connection, &read_buffer);
+
+    if (http_server.state == .ready) {
+        var request = http_server.receiveHead() catch |err| switch (err) {
            error.HttpConnectionClosing => return,
            else => {
                std.log.err("closing http connection: {s}", .{@errorName(err)});
+                std.log.debug("Error occurred from this request: \n{s}", .{read_buffer[0..http_server.read_buffer_len]});
                return;
            },
        };
+        server_request_aka_lambda_response = try (try request.reader()).readAllAlloc(allocator, std.math.maxInt(usize));
-    // Read request body if present
-    if (request.head.content_length) |content_len| {
-        if (content_len > 0) {
-            var body_transfer_buffer: [64 * 1024]u8 = undefined;
-            const body_reader = http_server.reader.bodyReader(&body_transfer_buffer, request.head.transfer_encoding, request.head.content_length);
-            var body_data = try allocator.alloc(u8, content_len);
-            errdefer allocator.free(body_data);
-            var total_read: usize = 0;
-            while (total_read < content_len) {
-                const remaining = body_data[total_read..];
-                const bytes_read = body_reader.readSliceShort(remaining) catch break;
-                if (bytes_read == 0) break;
-                total_read += bytes_read;
-            }
-            server_request_aka_lambda_response = try allocator.realloc(body_data, total_read);
-        }
-    }
-
-    // Build and send response
-    const response_bytes = serve();
-    var respond_request = request;
-    try respond_request.respond(response_bytes, .{
-        .extra_headers = &.{
-            .{ .name = "Lambda-Runtime-Aws-Request-Id", .value = "69" },
-        },
-    });
+        var respond_options = std.http.Server.Request.RespondOptions{};
+        const response_bytes = serve(allocator, request, &respond_options) catch |e| brk: {
+            respond_options.status = .internal_server_error;
+            // TODO: more about this particular request
+            log.err("Unexpected error from executor processing request: {any}", .{e});
+            if (@errorReturnTrace()) |trace| {
+                std.debug.dumpStackTrace(trace.*);
+            }
+            break :brk "Unexpected error generating request to lambda";
+        };
+        try request.respond(response_bytes, respond_options);
        log.debug(
            "tid {d} (server): sent response: {s}",
            .{ std.Thread.getCurrentId(), response_bytes },
        );
+    }
}

-fn serve() []const u8 {
+fn serve(allocator: std.mem.Allocator, request: std.http.Server.Request, respond_options: *std.http.Server.Request.RespondOptions) ![]const u8 {
+    _ = allocator;
+    _ = request;
+    respond_options.extra_headers = &.{
+        .{ .name = "Lambda-Runtime-Aws-Request-Id", .value = "69" },
+    };
    return server_response;
}
@@ -451,7 +391,7 @@ pub fn test_lambda_request(allocator: std.mem.Allocator, request: []const u8, re
    // when subsequent tests fail
    const server_thread = try startServer(aa); // start the server, get it ready
    while (!server_ready)
-        std.Thread.sleep(100);
+        std.time.sleep(100);
    log.debug("tid {d} (main): server reports ready", .{std.Thread.getCurrentId()});

    // we aren't testing the server,

View file

@@ -1,49 +0,0 @@
const std = @import("std");
pub fn build(b: *std.Build) void {
const target = b.standardTargetOptions(.{});
const optimize = b.standardOptimizeOption(.{});
// Create the main module for the CLI
const main_module = b.createModule(.{
.root_source_file = b.path("src/main.zig"),
.target = target,
.optimize = optimize,
});
// Add aws dependency to the module
const aws_dep = b.dependency("aws", .{ .target = target, .optimize = optimize });
main_module.addImport("aws", aws_dep.module("aws"));
const exe = b.addExecutable(.{
.name = "lambda-build",
.root_module = main_module,
});
b.installArtifact(exe);
// Run step for testing: zig build run -- package --exe /path/to/exe --output /path/to/out.zip
const run_cmd = b.addRunArtifact(exe);
run_cmd.step.dependOn(b.getInstallStep());
if (b.args) |args|
run_cmd.addArgs(args);
const run_step = b.step("run", "Run the CLI");
run_step.dependOn(&run_cmd.step);
// Test step
const test_module = b.createModule(.{
.root_source_file = b.path("src/main.zig"),
.target = target,
.optimize = optimize,
});
test_module.addImport("aws", aws_dep.module("aws"));
const unit_tests = b.addTest(.{
.root_module = test_module,
});
const run_unit_tests = b.addRunArtifact(unit_tests);
const test_step = b.step("test", "Run unit tests");
test_step.dependOn(&run_unit_tests.step);
}

View file

@@ -1,16 +0,0 @@
.{
.name = .lambda_build,
.version = "0.1.0",
.fingerprint = 0x6e61de08e7e51114,
.dependencies = .{
.aws = .{
.url = "git+https://git.lerch.org/lobo/aws-sdk-for-zig.git#6e34e83933aaa1120b7d0049f458608fdd6fa27b",
.hash = "aws-0.0.1-SbsFcCs2CgChIYNjD8lAnGZTOk0_96-db5kDawRnzImA",
},
},
.paths = .{
"build.zig",
"build.zig.zon",
"src",
},
}

View file

@@ -1,251 +0,0 @@
//! Deploy command - deploys a Lambda function to AWS.
//!
//! Creates a new function or updates an existing one.
const std = @import("std");
const aws = @import("aws");
const iam_cmd = @import("iam.zig");
const RunOptions = @import("main.zig").RunOptions;
pub fn run(args: []const []const u8, options: RunOptions) !void {
var function_name: ?[]const u8 = null;
var zip_file: ?[]const u8 = null;
var role_arn: ?[]const u8 = null;
var role_name: []const u8 = "lambda_basic_execution";
var arch: ?[]const u8 = null;
var i: usize = 0;
while (i < args.len) : (i += 1) {
const arg = args[i];
if (std.mem.eql(u8, arg, "--function-name")) {
i += 1;
if (i >= args.len) return error.MissingFunctionName;
function_name = args[i];
} else if (std.mem.eql(u8, arg, "--zip-file")) {
i += 1;
if (i >= args.len) return error.MissingZipFile;
zip_file = args[i];
} else if (std.mem.eql(u8, arg, "--role-arn")) {
i += 1;
if (i >= args.len) return error.MissingRoleArn;
role_arn = args[i];
} else if (std.mem.eql(u8, arg, "--role-name")) {
i += 1;
if (i >= args.len) return error.MissingRoleName;
role_name = args[i];
} else if (std.mem.eql(u8, arg, "--arch")) {
i += 1;
if (i >= args.len) return error.MissingArch;
arch = args[i];
} else if (std.mem.eql(u8, arg, "--help") or std.mem.eql(u8, arg, "-h")) {
printHelp(options.stdout);
try options.stdout.flush();
return;
} else {
try options.stderr.print("Unknown option: {s}\n", .{arg});
try options.stderr.flush();
return error.UnknownOption;
}
}
if (function_name == null) {
try options.stderr.print("Error: --function-name is required\n", .{});
printHelp(options.stderr);
try options.stderr.flush();
return error.MissingFunctionName;
}
if (zip_file == null) {
try options.stderr.print("Error: --zip-file is required\n", .{});
printHelp(options.stderr);
try options.stderr.flush();
return error.MissingZipFile;
}
try deployFunction(.{
.function_name = function_name.?,
.zip_file = zip_file.?,
.role_arn = role_arn,
.role_name = role_name,
.arch = arch,
}, options);
}
fn printHelp(writer: *std.Io.Writer) void {
writer.print(
\\Usage: lambda-build deploy [options]
\\
\\Deploy a Lambda function to AWS.
\\
\\Options:
\\ --function-name <name> Name of the Lambda function (required)
\\ --zip-file <path> Path to the deployment zip (required)
\\ --role-arn <arn> IAM role ARN (optional - creates role if omitted)
\\ --role-name <name> IAM role name if creating (default: lambda_basic_execution)
\\ --arch <arch> Architecture: x86_64 or aarch64 (default: x86_64)
\\ --help, -h Show this help message
\\
\\If the function exists, its code is updated. Otherwise, a new function
\\is created with the provided configuration.
\\
, .{}) catch {};
}
const DeployOptions = struct {
function_name: []const u8,
zip_file: []const u8,
role_arn: ?[]const u8,
role_name: []const u8,
arch: ?[]const u8,
};
fn deployFunction(deploy_opts: DeployOptions, options: RunOptions) !void {
// Validate architecture
const arch_str = deploy_opts.arch orelse "x86_64";
if (!std.mem.eql(u8, arch_str, "x86_64") and !std.mem.eql(u8, arch_str, "aarch64") and !std.mem.eql(u8, arch_str, "arm64")) {
return error.InvalidArchitecture;
}
// Note: Profile is expected to be set via AWS_PROFILE env var before invoking this tool
// (e.g., via aws-vault exec)
// Get or create IAM role if not provided
const role_arn = if (deploy_opts.role_arn) |r|
try options.allocator.dupe(u8, r)
else
try iam_cmd.getOrCreateRole(deploy_opts.role_name, options);
defer options.allocator.free(role_arn);
// Read the zip file and encode as base64
const zip_file = try std.fs.cwd().openFile(deploy_opts.zip_file, .{});
defer zip_file.close();
var read_buffer: [4096]u8 = undefined;
var file_reader = zip_file.reader(&read_buffer);
const zip_data = try file_reader.interface.allocRemaining(options.allocator, std.Io.Limit.limited(50 * 1024 * 1024));
defer options.allocator.free(zip_data);
const base64_data = try std.fmt.allocPrint(options.allocator, "{b64}", .{zip_data});
defer options.allocator.free(base64_data);
var client = aws.Client.init(options.allocator, .{});
defer client.deinit();
const services = aws.Services(.{.lambda}){};
const region = options.region orelse "us-east-1";
const aws_options = aws.Options{
.client = client,
.region = region,
};
// Convert arch string to Lambda format
const lambda_arch = if (std.mem.eql(u8, arch_str, "aarch64") or std.mem.eql(u8, arch_str, "arm64"))
"arm64"
else
"x86_64";
const architectures: []const []const u8 = &.{lambda_arch};
// Try to create the function first - if it already exists, we'll update it
std.log.info("Attempting to create function: {s}", .{deploy_opts.function_name});
var create_diagnostics = aws.Diagnostics{
.http_code = undefined,
.response_body = undefined,
.allocator = options.allocator,
};
const create_options = aws.Options{
.client = client,
.region = region,
.diagnostics = &create_diagnostics,
};
const create_result = aws.Request(services.lambda.create_function).call(.{
.function_name = deploy_opts.function_name,
.architectures = @constCast(architectures),
.code = .{ .zip_file = base64_data },
.handler = "bootstrap",
.package_type = "Zip",
.runtime = "provided.al2023",
.role = role_arn,
}, create_options) catch |err| {
defer create_diagnostics.deinit();
std.log.info("CreateFunction returned: error={}, HTTP code={}", .{ err, create_diagnostics.http_code });
// Function already exists (409 Conflict) - update it instead
if (create_diagnostics.http_code == 409) {
std.log.info("Function already exists, updating: {s}", .{deploy_opts.function_name});
const update_result = try aws.Request(services.lambda.update_function_code).call(.{
.function_name = deploy_opts.function_name,
.architectures = @constCast(architectures),
.zip_file = base64_data,
}, aws_options);
defer update_result.deinit();
try options.stdout.print("Updated function: {s}\n", .{deploy_opts.function_name});
if (update_result.response.function_arn) |arn| {
try options.stdout.print("ARN: {s}\n", .{arn});
}
try options.stdout.flush();
// Wait for function to be ready before returning
try waitForFunctionReady(deploy_opts.function_name, aws_options);
return;
}
std.log.err("Lambda CreateFunction failed: {} (HTTP {})", .{ err, create_diagnostics.http_code });
return error.LambdaCreateFunctionFailed;
};
defer create_result.deinit();
try options.stdout.print("Created function: {s}\n", .{deploy_opts.function_name});
if (create_result.response.function_arn) |arn| {
try options.stdout.print("ARN: {s}\n", .{arn});
}
try options.stdout.flush();
// Wait for function to be ready before returning
try waitForFunctionReady(deploy_opts.function_name, aws_options);
}
fn waitForFunctionReady(function_name: []const u8, aws_options: aws.Options) !void {
const services = aws.Services(.{.lambda}){};
var retries: usize = 30; // Up to ~6 seconds total
while (retries > 0) : (retries -= 1) {
const result = aws.Request(services.lambda.get_function).call(.{
.function_name = function_name,
}, aws_options) catch |err| {
// Function should exist at this point, but retry on transient errors
std.log.warn("GetFunction failed during wait: {}", .{err});
std.Thread.sleep(200 * std.time.ns_per_ms);
continue;
};
defer result.deinit();
// Check if function is ready
if (result.response.configuration) |config| {
if (config.last_update_status) |status| {
if (std.mem.eql(u8, status, "Successful")) {
std.log.info("Function is ready", .{});
return;
} else if (std.mem.eql(u8, status, "Failed")) {
return error.FunctionUpdateFailed;
}
// "InProgress" - keep waiting
} else {
return; // No status means it's ready
}
} else {
return; // No configuration means we can't check, assume ready
}
std.Thread.sleep(200 * std.time.ns_per_ms);
}
return error.FunctionNotReady;
}
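Based on the argument parsing above, a typical invocation of this command (the function name and zip path here are hypothetical) would look something like:
    lambda-build deploy --function-name my-function --zip-file zig-out/bin/function.zip --arch aarch64
When --role-arn is omitted, the role given by --role-name (default lambda_basic_execution) is looked up or created first, and the command waits for the function to report a successful update before returning.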

View file

@@ -1,148 +0,0 @@
//! IAM command - creates or retrieves an IAM role for Lambda execution.
const std = @import("std");
const aws = @import("aws");
const RunOptions = @import("main.zig").RunOptions;
pub fn run(args: []const []const u8, options: RunOptions) !void {
var role_name: ?[]const u8 = null;
var i: usize = 0;
while (i < args.len) : (i += 1) {
const arg = args[i];
if (std.mem.eql(u8, arg, "--role-name")) {
i += 1;
if (i >= args.len) return error.MissingRoleName;
role_name = args[i];
} else if (std.mem.eql(u8, arg, "--help") or std.mem.eql(u8, arg, "-h")) {
printHelp(options.stdout);
try options.stdout.flush();
return;
} else {
try options.stderr.print("Unknown option: {s}\n", .{arg});
try options.stderr.flush();
return error.UnknownOption;
}
}
if (role_name == null) {
try options.stderr.print("Error: --role-name is required\n", .{});
printHelp(options.stderr);
try options.stderr.flush();
return error.MissingRoleName;
}
const arn = try getOrCreateRole(role_name.?, options);
defer options.allocator.free(arn);
try options.stdout.print("{s}\n", .{arn});
try options.stdout.flush();
}
fn printHelp(writer: *std.Io.Writer) void {
writer.print(
\\Usage: lambda-build iam [options]
\\
\\Create or retrieve an IAM role for Lambda execution.
\\
\\Options:
\\ --role-name <name> Name of the IAM role (required)
\\ --help, -h Show this help message
\\
\\If the role exists, its ARN is returned. If not, a new role is created
\\with the AWSLambdaExecute policy attached.
\\
, .{}) catch {};
}
/// Get or create an IAM role for Lambda execution
/// Returns the role ARN
pub fn getOrCreateRole(role_name: []const u8, options: RunOptions) ![]const u8 {
// Note: Profile is expected to be set via AWS_PROFILE env var before invoking this tool
// (e.g., via aws-vault exec)
var client = aws.Client.init(options.allocator, .{});
defer client.deinit();
// Try to get existing role
const services = aws.Services(.{.iam}){};
var diagnostics = aws.Diagnostics{
.http_code = undefined,
.response_body = undefined,
.allocator = options.allocator,
};
const region = options.region orelse "us-east-1"; // IAM is global, but needs a region for signing
_ = region;
const aws_options = aws.Options{
.client = client,
.diagnostics = &diagnostics,
};
const get_result = aws.Request(services.iam.get_role).call(.{
.role_name = role_name,
}, aws_options) catch |err| {
defer diagnostics.deinit();
if (diagnostics.http_code == 404) {
// Role doesn't exist, create it
return try createRole(options.allocator, role_name, client);
}
std.log.err("IAM GetRole failed: {} (HTTP {})", .{ err, diagnostics.http_code });
return error.IamGetRoleFailed;
};
defer get_result.deinit();
// Role exists, return ARN
return try options.allocator.dupe(u8, get_result.response.role.arn);
}
fn createRole(allocator: std.mem.Allocator, role_name: []const u8, client: aws.Client) ![]const u8 {
const services = aws.Services(.{.iam}){};
const aws_options = aws.Options{
.client = client,
};
const assume_role_policy =
\\{
\\ "Version": "2012-10-17",
\\ "Statement": [
\\ {
\\ "Sid": "",
\\ "Effect": "Allow",
\\ "Principal": {
\\ "Service": "lambda.amazonaws.com"
\\ },
\\ "Action": "sts:AssumeRole"
\\ }
\\ ]
\\}
;
std.log.info("Creating IAM role: {s}", .{role_name});
const create_result = try aws.Request(services.iam.create_role).call(.{
.role_name = role_name,
.assume_role_policy_document = assume_role_policy,
}, aws_options);
defer create_result.deinit();
const arn = try allocator.dupe(u8, create_result.response.role.arn);
// Attach the Lambda execution policy
std.log.info("Attaching AWSLambdaExecute policy", .{});
const attach_result = try aws.Request(services.iam.attach_role_policy).call(.{
.policy_arn = "arn:aws:iam::aws:policy/AWSLambdaExecute",
.role_name = role_name,
}, aws_options);
defer attach_result.deinit();
// IAM role creation can take a moment to propagate
std.log.info("Role created: {s}", .{arn});
std.log.info("Note: New roles may take a few seconds to propagate", .{});
return arn;
}
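As a sketch of how this command is used (the role name shown is the default from deploy.zig):
    lambda-build iam --role-name lambda_basic_execution
This prints the role ARN, creating the role with the AWSLambdaExecute policy attached if it does not already exist.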

View file

@@ -1,105 +0,0 @@
//! Invoke command - invokes a Lambda function.
const std = @import("std");
const aws = @import("aws");
const RunOptions = @import("main.zig").RunOptions;
pub fn run(args: []const []const u8, options: RunOptions) !void {
var function_name: ?[]const u8 = null;
var payload: []const u8 = "{}";
var i: usize = 0;
while (i < args.len) : (i += 1) {
const arg = args[i];
if (std.mem.eql(u8, arg, "--function-name")) {
i += 1;
if (i >= args.len) return error.MissingFunctionName;
function_name = args[i];
} else if (std.mem.eql(u8, arg, "--payload")) {
i += 1;
if (i >= args.len) return error.MissingPayload;
payload = args[i];
} else if (std.mem.eql(u8, arg, "--help") or std.mem.eql(u8, arg, "-h")) {
printHelp(options.stdout);
try options.stdout.flush();
return;
} else {
try options.stderr.print("Unknown option: {s}\n", .{arg});
try options.stderr.flush();
return error.UnknownOption;
}
}
if (function_name == null) {
try options.stderr.print("Error: --function-name is required\n", .{});
printHelp(options.stderr);
try options.stderr.flush();
return error.MissingFunctionName;
}
try invokeFunction(function_name.?, payload, options);
}
fn printHelp(writer: *std.Io.Writer) void {
writer.print(
\\Usage: lambda-build invoke [options]
\\
\\Invoke a Lambda function.
\\
\\Options:
\\ --function-name <name> Name of the Lambda function (required)
\\ --payload <json> JSON payload to send (default: empty object)
\\ --help, -h Show this help message
\\
\\The function response is printed to stdout.
\\
, .{}) catch {};
}
fn invokeFunction(function_name: []const u8, payload: []const u8, options: RunOptions) !void {
// Note: Profile is expected to be set via AWS_PROFILE env var before invoking this tool
// (e.g., via aws-vault exec)
var client = aws.Client.init(options.allocator, .{});
defer client.deinit();
const services = aws.Services(.{.lambda}){};
const region = options.region orelse "us-east-1";
const aws_options = aws.Options{
.client = client,
.region = region,
};
std.log.info("Invoking function: {s}", .{function_name});
const result = try aws.Request(services.lambda.invoke).call(.{
.function_name = function_name,
.payload = payload,
.log_type = "Tail",
.invocation_type = "RequestResponse",
}, aws_options);
defer result.deinit();
// Print response payload
if (result.response.payload) |response_payload| {
try options.stdout.print("{s}\n", .{response_payload});
}
// Print function error if any
if (result.response.function_error) |func_error| {
try options.stdout.print("Function error: {s}\n", .{func_error});
}
// Print logs if available (base64 decoded)
if (result.response.log_result) |log_result| {
const decoder = std.base64.standard.Decoder;
const decoded_len = try decoder.calcSizeForSlice(log_result);
const decoded = try options.allocator.alloc(u8, decoded_len);
defer options.allocator.free(decoded);
try decoder.decode(decoded, log_result);
try options.stdout.print("\n--- Logs ---\n{s}\n", .{decoded});
}
try options.stdout.flush();
}
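A hypothetical invocation (function name and payload are placeholders) might be:
    lambda-build invoke --function-name my-function --payload '{"hello": "world"}'
The response payload is written to stdout, and because log_type is set to "Tail", the last portion of the execution log is base64-decoded and printed as well.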

View file

@@ -1,135 +0,0 @@
//! Lambda Build CLI
//!
//! A command-line tool for packaging, deploying, and invoking AWS Lambda functions.
//!
//! Usage: lambda-build <command> [options]
//!
//! Commands:
//! package Create deployment zip from executable
//! iam Create/verify IAM role for Lambda
//! deploy Deploy function to AWS Lambda
//! invoke Invoke the deployed function
const std = @import("std");
const package = @import("package.zig");
const iam_cmd = @import("iam.zig");
const deploy_cmd = @import("deploy.zig");
const invoke_cmd = @import("invoke.zig");
/// Options passed to all commands
pub const RunOptions = struct {
allocator: std.mem.Allocator,
stdout: *std.Io.Writer,
stderr: *std.Io.Writer,
region: ?[]const u8 = null,
profile: ?[]const u8 = null,
};
pub fn main() !u8 {
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
defer _ = gpa.deinit();
const allocator = gpa.allocator();
var stdout_buffer: [4096]u8 = undefined;
var stderr_buffer: [4096]u8 = undefined;
var stdout_writer = std.fs.File.stdout().writer(&stdout_buffer);
var stderr_writer = std.fs.File.stderr().writer(&stderr_buffer);
var options = RunOptions{
.allocator = allocator,
.stdout = &stdout_writer.interface,
.stderr = &stderr_writer.interface,
};
run(&options) catch |err| {
options.stderr.print("Error: {}\n", .{err}) catch {};
options.stderr.flush() catch {};
return 1;
};
try options.stderr.flush();
try options.stdout.flush();
return 0;
}
fn run(options: *RunOptions) !void {
const args = try std.process.argsAlloc(options.allocator);
defer std.process.argsFree(options.allocator, args);
if (args.len < 2) {
printUsage(options.stderr);
try options.stderr.flush();
return error.MissingCommand;
}
// Parse global options and find command
var cmd_start: usize = 1;
while (cmd_start < args.len) {
const arg = args[cmd_start];
if (std.mem.eql(u8, arg, "--region")) {
cmd_start += 1;
if (cmd_start >= args.len) return error.MissingRegionValue;
options.region = args[cmd_start];
cmd_start += 1;
} else if (std.mem.eql(u8, arg, "--profile")) {
cmd_start += 1;
if (cmd_start >= args.len) return error.MissingProfileValue;
options.profile = args[cmd_start];
cmd_start += 1;
} else if (std.mem.startsWith(u8, arg, "--")) {
// Unknown global option - might be command-specific, let command handle it
break;
} else {
// Found command
break;
}
}
if (cmd_start >= args.len) {
printUsage(options.stderr);
try options.stderr.flush();
return error.MissingCommand;
}
const command = args[cmd_start];
const cmd_args = args[cmd_start + 1 ..];
if (std.mem.eql(u8, command, "package")) {
try package.run(cmd_args, options.*);
} else if (std.mem.eql(u8, command, "iam")) {
try iam_cmd.run(cmd_args, options.*);
} else if (std.mem.eql(u8, command, "deploy")) {
try deploy_cmd.run(cmd_args, options.*);
} else if (std.mem.eql(u8, command, "invoke")) {
try invoke_cmd.run(cmd_args, options.*);
} else if (std.mem.eql(u8, command, "--help") or std.mem.eql(u8, command, "-h")) {
printUsage(options.stdout);
try options.stdout.flush();
} else {
options.stderr.print("Unknown command: {s}\n\n", .{command}) catch {};
printUsage(options.stderr);
try options.stderr.flush();
return error.UnknownCommand;
}
}
fn printUsage(writer: *std.Io.Writer) void {
writer.print(
\\Usage: lambda-build [global-options] <command> [options]
\\
\\Lambda deployment CLI tool
\\
\\Global Options:
\\ --region <region> AWS region (default: from AWS config)
\\ --profile <profile> AWS profile to use
\\
\\Commands:
\\ package Create deployment zip from executable
\\ iam Create/verify IAM role for Lambda
\\ deploy Deploy function to AWS Lambda
\\ invoke Invoke the deployed function
\\
\\Run 'lambda-build <command> --help' for command-specific options.
\\
, .{}) catch {};
}
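Putting the global options and a subcommand together, a hypothetical end-to-end call could look like:
    lambda-build --region us-west-2 deploy --function-name my-function --zip-file function.zip
Global options must precede the command name; everything after the command is handed to that command's own argument parser.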

View file

@@ -1,265 +0,0 @@
//! Package command - creates a Lambda deployment zip from an executable.
//!
//! The zip file contains a single file named "bootstrap" (Lambda's expected name
//! for custom runtime executables).
//!
//! Note: Uses "store" (uncompressed) format because Zig 0.15's std.compress.flate.Compress
//! has an incomplete implementation (its drain function panics with TODO). When the
//! compression implementation is complete, this should switch to deflate at level 6.
const std = @import("std");
const zip = std.zip;
const RunOptions = @import("main.zig").RunOptions;
pub fn run(args: []const []const u8, options: RunOptions) !void {
var exe_path: ?[]const u8 = null;
var output_path: ?[]const u8 = null;
var i: usize = 0;
while (i < args.len) : (i += 1) {
const arg = args[i];
if (std.mem.eql(u8, arg, "--exe")) {
i += 1;
if (i >= args.len) return error.MissingExePath;
exe_path = args[i];
} else if (std.mem.eql(u8, arg, "--output") or std.mem.eql(u8, arg, "-o")) {
i += 1;
if (i >= args.len) return error.MissingOutputPath;
output_path = args[i];
} else if (std.mem.eql(u8, arg, "--help") or std.mem.eql(u8, arg, "-h")) {
printHelp(options.stdout);
try options.stdout.flush();
return;
} else {
try options.stderr.print("Unknown option: {s}\n", .{arg});
try options.stderr.flush();
return error.UnknownOption;
}
}
if (exe_path == null) {
try options.stderr.print("Error: --exe is required\n", .{});
printHelp(options.stderr);
try options.stderr.flush();
return error.MissingExePath;
}
if (output_path == null) {
try options.stderr.print("Error: --output is required\n", .{});
printHelp(options.stderr);
try options.stderr.flush();
return error.MissingOutputPath;
}
try createLambdaZip(options.allocator, exe_path.?, output_path.?);
try options.stdout.print("Created {s}\n", .{output_path.?});
}
fn printHelp(writer: *std.Io.Writer) void {
writer.print(
\\Usage: lambda-build package [options]
\\
\\Create a Lambda deployment zip from an executable.
\\
\\Options:
\\ --exe <path> Path to the executable (required)
\\ --output, -o <path> Output zip file path (required)
\\ --help, -h Show this help message
\\
\\The executable will be packaged as 'bootstrap' in the zip file,
\\which is the expected name for Lambda custom runtimes.
\\
, .{}) catch {};
}
/// Helper to write a little-endian u16
fn writeU16LE(file: std.fs.File, value: u16) !void {
const bytes = std.mem.toBytes(std.mem.nativeToLittle(u16, value));
try file.writeAll(&bytes);
}
/// Helper to write a little-endian u32
fn writeU32LE(file: std.fs.File, value: u32) !void {
const bytes = std.mem.toBytes(std.mem.nativeToLittle(u32, value));
try file.writeAll(&bytes);
}
/// Create a Lambda deployment zip file containing a single "bootstrap" executable.
/// Currently uses "store" (uncompressed) format because Zig 0.15's std.compress.flate.Compress
/// has an incomplete implementation.
/// TODO: Add deflate compression (level 6) when the Compress implementation is complete.
fn createLambdaZip(allocator: std.mem.Allocator, exe_path: []const u8, output_path: []const u8) !void {
// Read the executable
const exe_file = try std.fs.cwd().openFile(exe_path, .{});
defer exe_file.close();
const exe_stat = try exe_file.stat();
const exe_size: u32 = @intCast(exe_stat.size);
// Allocate buffer and read file contents
const exe_data = try allocator.alloc(u8, exe_size);
defer allocator.free(exe_data);
const bytes_read = try exe_file.readAll(exe_data);
if (bytes_read != exe_size) return error.IncompleteRead;
// Calculate CRC32 of uncompressed data
const crc = std.hash.crc.Crc32IsoHdlc.hash(exe_data);
// Create the output file
const out_file = try std.fs.cwd().createFile(output_path, .{});
defer out_file.close();
const filename = "bootstrap";
const filename_len: u16 = @intCast(filename.len);
// Reproducible zip files: use fixed timestamp
// September 26, 1995 at midnight (00:00:00)
// DOS time format: bits 0-4: seconds/2, bits 5-10: minute, bits 11-15: hour
// DOS date format: bits 0-4: day, bits 5-8: month, bits 9-15: year-1980
//
// Note: We use a fixed timestamp for reproducible builds.
//
// If current time is needed in the future:
// const now = std.time.timestamp();
// const epoch_secs: std.time.epoch.EpochSeconds = .{ .secs = @intCast(now) };
// const day_secs = epoch_secs.getDaySeconds();
// const year_day = epoch_secs.getEpochDay().calculateYearDay();
// const mod_time: u16 = @as(u16, day_secs.getHoursIntoDay()) << 11 |
// @as(u16, day_secs.getMinutesIntoHour()) << 5 |
// @as(u16, day_secs.getSecondsIntoMinute() / 2);
// const month_day = year_day.calculateMonthDay();
// const mod_date: u16 = @as(u16, year_day.year -% 1980) << 9 |
// @as(u16, @intFromEnum(month_day.month)) << 5 |
// @as(u16, month_day.day_index + 1);
// 1995-09-26 midnight for reproducible builds
const mod_time: u16 = 0x0000; // 00:00:00
const mod_date: u16 = (15 << 9) | (9 << 5) | 26; // 1995-09-26 (year 15 = 1995-1980)
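    // Check: (15 << 9) | (9 << 5) | 26 = 7680 + 288 + 26 = 7994 (0x1F3A)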
// Local file header
try out_file.writeAll(&zip.local_file_header_sig);
try writeU16LE(out_file, 10); // version needed (1.0 for store)
try writeU16LE(out_file, 0); // general purpose flags
try writeU16LE(out_file, @intFromEnum(zip.CompressionMethod.store)); // store (no compression)
try writeU16LE(out_file, mod_time);
try writeU16LE(out_file, mod_date);
try writeU32LE(out_file, crc);
try writeU32LE(out_file, exe_size); // compressed size = uncompressed for store
try writeU32LE(out_file, exe_size); // uncompressed size
try writeU16LE(out_file, filename_len);
try writeU16LE(out_file, 0); // extra field length
try out_file.writeAll(filename);
// File data (uncompressed)
    const local_header_end = 30 + filename_len; // 30 = fixed portion of a local file header
try out_file.writeAll(exe_data);
// Central directory file header
const cd_offset = local_header_end + exe_size;
try out_file.writeAll(&zip.central_file_header_sig);
try writeU16LE(out_file, 0x031e); // version made by (Unix, 3.0)
try writeU16LE(out_file, 10); // version needed (1.0 for store)
try writeU16LE(out_file, 0); // general purpose flags
try writeU16LE(out_file, @intFromEnum(zip.CompressionMethod.store)); // store
try writeU16LE(out_file, mod_time);
try writeU16LE(out_file, mod_date);
try writeU32LE(out_file, crc);
try writeU32LE(out_file, exe_size); // compressed size
try writeU32LE(out_file, exe_size); // uncompressed size
try writeU16LE(out_file, filename_len);
try writeU16LE(out_file, 0); // extra field length
try writeU16LE(out_file, 0); // file comment length
try writeU16LE(out_file, 0); // disk number start
try writeU16LE(out_file, 0); // internal file attributes
try writeU32LE(out_file, 0o100755 << 16); // external file attributes (Unix executable)
try writeU32LE(out_file, 0); // relative offset of local header
try out_file.writeAll(filename);
// End of central directory record
    const cd_size: u32 = 46 + filename_len; // 46 = fixed portion of a central directory file header
try out_file.writeAll(&zip.end_record_sig);
try writeU16LE(out_file, 0); // disk number
try writeU16LE(out_file, 0); // disk number with CD
try writeU16LE(out_file, 1); // number of entries on disk
try writeU16LE(out_file, 1); // total number of entries
try writeU32LE(out_file, cd_size); // size of central directory
try writeU32LE(out_file, cd_offset); // offset of central directory
try writeU16LE(out_file, 0); // comment length
}
test "create zip with test data" {
const allocator = std.testing.allocator;
// Create a temporary test file
var tmp_dir = std.testing.tmpDir(.{});
defer tmp_dir.cleanup();
const test_content = "#!/bin/sh\necho hello";
const test_exe = try tmp_dir.dir.createFile("test_exe", .{});
try test_exe.writeAll(test_content);
test_exe.close();
const exe_path = try tmp_dir.dir.realpathAlloc(allocator, "test_exe");
defer allocator.free(exe_path);
const output_path = try tmp_dir.dir.realpathAlloc(allocator, ".");
defer allocator.free(output_path);
const full_output = try std.fs.path.join(allocator, &.{ output_path, "test.zip" });
defer allocator.free(full_output);
try createLambdaZip(allocator, exe_path, full_output);
// Verify the zip file can be read by std.zip
const zip_file = try std.fs.cwd().openFile(full_output, .{});
defer zip_file.close();
var read_buffer: [4096]u8 = undefined;
var file_reader = zip_file.reader(&read_buffer);
var iter = try zip.Iterator.init(&file_reader);
// Should have exactly one entry
const entry = try iter.next();
try std.testing.expect(entry != null);
const e = entry.?;
// Verify filename length is 9 ("bootstrap")
try std.testing.expectEqual(@as(u32, 9), e.filename_len);
// Verify compression method is store
try std.testing.expectEqual(zip.CompressionMethod.store, e.compression_method);
// Verify sizes match test content
try std.testing.expectEqual(@as(u64, test_content.len), e.uncompressed_size);
try std.testing.expectEqual(@as(u64, test_content.len), e.compressed_size);
// Verify CRC32 matches
const expected_crc = std.hash.crc.Crc32IsoHdlc.hash(test_content);
try std.testing.expectEqual(expected_crc, e.crc32);
// Verify no more entries
const next_entry = try iter.next();
try std.testing.expect(next_entry == null);
// Extract and verify contents
var extract_dir = std.testing.tmpDir(.{});
defer extract_dir.cleanup();
// Reset file reader position
try file_reader.seekTo(0);
var filename_buf: [std.fs.max_path_bytes]u8 = undefined;
try e.extract(&file_reader, .{}, &filename_buf, extract_dir.dir);
// Read extracted file and verify contents
const extracted = try extract_dir.dir.openFile("bootstrap", .{});
defer extracted.close();
var extracted_content: [1024]u8 = undefined;
const bytes_read = try extracted.readAll(&extracted_content);
try std.testing.expectEqualStrings(test_content, extracted_content[0..bytes_read]);
}
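For completeness, a typical packaging call (paths are hypothetical) would be:
    lambda-build package --exe zig-out/bin/bootstrap --output function.zip
The resulting archive holds a single uncompressed entry named bootstrap, exactly as the test above verifies.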