Compare commits

..

No commits in common. "master" and "master" have entirely different histories.

20 changed files with 274 additions and 3094 deletions

View file

@@ -1,37 +0,0 @@
name: Lambda-Zig Build
run-name: ${{ github.actor }} building lambda-zig
on:
  push:
    branches:
      - '*'
  workflow_dispatch:
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Check out repository code
        uses: actions/checkout@v4
      - name: Setup Zig
        uses: https://codeberg.org/mlugg/setup-zig@v2.2.1
      - name: Build
        run: zig build --summary all
      - name: Run tests
        run: zig build test --summary all
      - name: Build for other platforms
        run: |
          zig build -Dtarget=aarch64-linux
          zig build -Dtarget=x86_64-linux
      - name: Notify
        uses: https://git.lerch.org/lobo/action-notify-ntfy@v2
        if: always()
        with:
          host: ${{ secrets.NTFY_HOST }}
          topic: ${{ secrets.NTFY_TOPIC }}
          user: ${{ secrets.NTFY_USER }}
          password: ${{ secrets.NTFY_PASSWORD }}

3
.gitignore vendored
View file

@@ -1,3 +1,4 @@
.gyro/
zig-cache/
zig-out/
.zig-cache
deps.zig

View file

@@ -1,5 +0,0 @@
[tools]
zig = "0.15.2"
zls = "0.15.1"
"ubi:DonIsaac/zlint" = "0.7.6"
prek = "0.3.1"

View file

@@ -1,36 +0,0 @@
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v6.0.0
    hooks:
      - id: trailing-whitespace
      - id: end-of-file-fixer
      - id: check-yaml
      - id: check-added-large-files
  - repo: https://github.com/batmac/pre-commit-zig
    rev: v0.3.0
    hooks:
      - id: zig-fmt
  - repo: local
    hooks:
      - id: zlint
        name: Run zlint
        entry: zlint
        args: ["--deny-warnings", "--fix"]
        language: system
        types: [zig]
  - repo: https://github.com/batmac/pre-commit-zig
    rev: v0.3.0
    hooks:
      - id: zig-build
  - repo: local
    hooks:
      - id: test
        name: Run zig build test
        entry: zig
        # args: ["build", "coverage", "-Dcoverage-threshold=80"]
        args: ["build", "test"]
        language: system
        types: [file]
        pass_filenames: false

312
README.md
View file

@@ -1,300 +1,38 @@
lambda-zig: A Custom Runtime for AWS Lambda
===========================================
This is a custom runtime built in Zig (0.15). Simple projects will
execute in <1ms, with a cold start init time of approximately 11ms.
This is a sample custom runtime built in zig. Simple projects will execute
in <1ms, with a cold start init time of approximately 11ms.
Custom build steps are available for packaging and deploying Lambda functions:
Some custom build steps have been added to build.zig:
* `zig build awslambda_package`: Package the Lambda function into a zip file
* `zig build awslambda_iam`: Create or verify IAM role for the Lambda function
* `zig build awslambda_deploy`: Deploy the Lambda function to AWS
* `zig build awslambda_run`: Invoke the deployed Lambda function
* `zig build iam`: Deploy and record a default IAM role for the lambda function
* `zig build package`: Package the lambda function for upload
* `zig build deploy`: Deploy the lambda function
* `zig build run`: Run the lambda function
Build options:
Custom options:
* **function-name**: Name of the AWS Lambda function
* **payload**: JSON payload for function invocation (used with awslambda_run)
* **region**: AWS region for deployment and invocation
* **profile**: AWS profile to use for credentials
* **env-file**: Path to environment variables file for the Lambda function
* **config-file**: Path to lambda.json configuration file (overrides build.zig settings)
* **debug**: boolean flag to avoid the debug symbols to be stripped. Useful to see
error return traces in the AWS Lambda logs
* **function-name**: set the name of the AWS Lambda function
* **payload**: Use this to set the payload of the function when run using `zig build run`
The Lambda function can be compiled for x86_64 or aarch64. The build system
automatically configures the Lambda architecture based on the target.
Additionally, a custom IAM role can be used for the function by appending ``-- --role myawesomerole``
to the `zig build deploy` command. This has not really been tested. The role name
is cached in zig-out/bin/iam_role_name, so you can also just set that to the full
arn of your iam role if you'd like.
A sample project using this runtime can be found at
https://git.lerch.org/lobo/lambda-zig-sample
The AWS Lambda function is compiled as a linux ARM64 executable. Since the build.zig
calls out to the shell for AWS operations, you will need AWS CLI v2.2.43 or greater.
Lambda Configuration
--------------------
This project vendors dependencies with [gyro](https://github.com/mattnite/gyro), so
first time build should be done with `gyro build`. This should be working
on zig master - certain build.zig constructs are not available in zig 0.8.1.
Lambda functions can be configured via a `lambda.json` file or inline in `build.zig`.
The configuration controls IAM roles, function settings, and deployment options.
### Configuration File (lambda.json)
Caveats:
By default, the build system looks for an optional `lambda.json` file in your project root.
If found, it will use these settings for deployment.
```json
{
"role_name": "my_lambda_role",
"timeout": 30,
"memory_size": 512,
"description": "My Lambda function",
"allow_principal": "alexa-appkit.amazon.com",
"tags": [
{ "key": "Environment", "value": "production" },
{ "key": "Project", "value": "my-project" }
]
}
```
### Available Configuration Options
Many of these configuration options are from the Lambda [CreateFunction](https://docs.aws.amazon.com/lambda/latest/api/API_CreateFunction.html#API_CreateFunction_RequestBody)
API call and more details are available there.
| Option | Type | Default | Description |
|----------------------|----------|----------------------------|---------------------------------------------|
| `role_name` | string | `"lambda_basic_execution"` | IAM role name for the function |
| `timeout` | integer | AWS default (3) | Execution timeout in seconds (1-900) |
| `memory_size` | integer | AWS default (128) | Memory allocation in MB (128-10240) |
| `description` | string | null | Human-readable function description |
| `allow_principal` | string | null | AWS service principal for invoke permission |
| `kmskey_arn` | string | null | KMS key ARN for environment encryption |
| `layers` | string[] | null | Lambda layer ARNs to attach |
| `tags` | Tag[] | null | Resource tags (array of `{key, value}`) |
| `vpc_config` | object | null | VPC configuration (see below) |
| `dead_letter_config` | object | null | Dead letter queue configuration |
| `tracing_config` | object | null | X-Ray tracing configuration |
| `ephemeral_storage` | object | AWS default (512) | Ephemeral storage configuration |
| `logging_config` | object | null | CloudWatch logging configuration |
### VPC Configuration
```json
{
"vpc_config": {
"subnet_ids": ["subnet-12345", "subnet-67890"],
"security_group_ids": ["sg-12345"],
"ipv6_allowed_for_dual_stack": false
}
}
```
### Tracing Configuration
```json
{
"tracing_config": {
"mode": "Active"
}
}
```
Mode must be `"Active"` or `"PassThrough"`.
### Logging Configuration
```json
{
"logging_config": {
"log_format": "JSON",
"application_log_level": "INFO",
"system_log_level": "WARN",
"log_group": "/aws/lambda/my-function"
}
}
```
Log format must be `"JSON"` or `"Text"`.
### Ephemeral Storage
```json
{
"ephemeral_storage": {
"size": 512
}
}
```
Size must be between 512-10240 MB.
### Dead Letter Configuration
```json
{
"dead_letter_config": {
"target_arn": "arn:aws:sqs:us-east-1:123456789:my-dlq"
}
}
```
### Build Integration Options
You can also configure Lambda settings directly in `build.zig`:
```zig
// Use a specific config file (required - fails if missing)
_ = try lambda.configureBuild(b, dep, exe, .{
.lambda_config = .{ .file = .{
.path = b.path("deploy/lambda.json"),
.required = true,
}},
});
// Use inline configuration
_ = try lambda.configureBuild(b, dep, exe, .{
.lambda_config = .{ .config = .{
.role_name = "my_role",
.timeout = 30,
.memory_size = 512,
.description = "My function",
}},
});
// Disable config file lookup entirely
_ = try lambda.configureBuild(b, dep, exe, .{
.lambda_config = .none,
});
```
### Overriding Config at Build Time
The `-Dconfig-file` build option overrides the `build.zig` configuration:
```sh
# Use a different config file for staging
zig build awslambda_deploy -Dconfig-file=lambda-staging.json
# Use production config
zig build awslambda_deploy -Dconfig-file=deploy/lambda-prod.json
```
Environment Variables
---------------------
Lambda functions can be configured with environment variables during deployment.
This is useful for passing configuration, secrets, or credentials to your function.
### Using the build system
Pass the `-Denv-file` option to specify a file containing environment variables:
```sh
zig build awslambda_deploy -Dfunction-name=my-function -Denv-file=.env
```
### Using the CLI directly
The `lambda-build` CLI supports both `--env` flags and `--env-file`:
```sh
# Set individual variables
./lambda-build deploy --function-name my-fn --zip-file function.zip \
--env DB_HOST=localhost --env DB_PORT=5432
# Load from file
./lambda-build deploy --function-name my-fn --zip-file function.zip \
--env-file .env
# Combine both (--env values override --env-file)
./lambda-build deploy --function-name my-fn --zip-file function.zip \
--env-file .env --env DEBUG=true
```
### Environment file format
The environment file uses a simple `KEY=VALUE` format, one variable per line:
```sh
# Database configuration
DB_HOST=localhost
DB_PORT=5432
# API keys
API_KEY=secret123
```
Lines starting with `#` are treated as comments. Empty lines are ignored.
Service Permissions
-------------------
Lambda functions can be configured to allow invocation by AWS service principals.
This is required for services like Alexa Skills Kit, API Gateway, or S3 to trigger
your Lambda function.
### Using lambda.json (Recommended)
Add `allow_principal` to your configuration file:
```json
{
"allow_principal": "alexa-appkit.amazon.com"
}
```
Common service principals:
- `alexa-appkit.amazon.com` - Alexa Skills Kit
- `apigateway.amazonaws.com` - API Gateway
- `s3.amazonaws.com` - S3 event notifications
- `events.amazonaws.com` - EventBridge/CloudWatch Events
The permission is idempotent - if it already exists, the deployment will continue
successfully.
Using the Zig Package Manager
-----------------------------
To add Lambda package/deployment steps to another project:
1. Fetch the dependency:
```sh
zig fetch --save git+https://git.lerch.org/lobo/lambda-zig
```
2. Update your `build.zig`:
```zig
const std = @import("std");
const lambda_zig = @import("lambda_zig");
pub fn build(b: *std.Build) !void {
const target = b.standardTargetOptions(.{});
const optimize = b.standardOptimizeOption(.{});
// Get lambda-zig dependency
const lambda_zig_dep = b.dependency("lambda_zig", .{
.target = target,
.optimize = optimize,
});
const exe_module = b.createModule(.{
.root_source_file = b.path("src/main.zig"),
.target = target,
.optimize = optimize,
});
// Add lambda runtime to your module
exe_module.addImport("aws_lambda_runtime", lambda_zig_dep.module("lambda_runtime"));
const exe = b.addExecutable(.{
.name = "bootstrap",
.root_module = exe_module,
});
b.installArtifact(exe);
// Add Lambda build steps
try lambda_zig.configureBuild(b, lambda_zig_dep, exe);
}
```
Note: the build function's return type must be `!void` (or the function must
catch and handle errors itself) to support the Lambda build integration.
* Small inbound lambda payloads seem to be confusing [requestz](https://github.com/ducdetronquito/requestz),
which just never returns, causing timeouts
* Unhandled invocation errors seem to be causing the same problem

430
build.zig
View file

@@ -1,296 +1,174 @@
const builtin = @import("builtin");
const std = @import("std");
const pkgs = @import("deps.zig").pkgs;
// Although this function looks imperative, note that its job is to
// declaratively construct a build graph that will be executed by an external
// runner.
pub fn build(b: *std.Build) !void {
pub fn build(b: *std.build.Builder) !void {
// Standard target options allows the person running `zig build` to choose
// what target to build for. Here we do not override the defaults, which
// means any target is allowed, and the default is native. Other options
// for restricting supported target set are available.
const target = b.standardTargetOptions(.{});
// We want the target to be aarch64-linux for deploys
const target = std.zig.CrossTarget{
.cpu_arch = .aarch64,
.os_tag = .linux,
};
// Standard optimization options allow the person running `zig build` to select
// between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall. Here we do not
// set a preferred release mode, allowing the user to decide how to optimize.
const optimize = b.standardOptimizeOption(.{});
// Standard release options allow the person running `zig build` to select
// between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall.
// const mode = b.standardReleaseOptions();
// Create a module for lambda.zig
const lambda_module = b.createModule(.{
.root_source_file = b.path("src/lambda.zig"),
.target = target,
.optimize = optimize,
});
const exe = b.addExecutable("bootstrap", "src/main.zig");
const lib = b.addLibrary(.{
.name = "lambda-zig",
.linkage = .static,
.root_module = lambda_module,
});
pkgs.addAllTo(exe);
exe.setTarget(target);
exe.setBuildMode(.ReleaseSafe);
const debug = b.option(bool, "debug", "Debug mode (do not strip executable)") orelse false;
exe.strip = !debug;
exe.install();
// Export the module for other packages to use
_ = b.addModule("lambda_runtime", .{
.root_source_file = b.path("src/lambda.zig"),
.target = target,
.optimize = optimize,
});
// TODO: We can cross-compile of course, but stripping and zip commands
// may vary
if (std.builtin.os.tag == .linux) {
// Package step
const package_step = b.step("package", "Package the function");
package_step.dependOn(b.getInstallStep());
// strip may not be installed or work for the target arch
// TODO: make this much less fragile
const strip = if (debug)
try std.fmt.allocPrint(b.allocator, "true", .{})
else
try std.fmt.allocPrint(b.allocator, "[ -x /usr/aarch64-linux-gnu/bin/strip ] && /usr/aarch64-linux-gnu/bin/strip {s}", .{b.getInstallPath(exe.install_step.?.dest_dir, exe.install_step.?.artifact.out_filename)});
defer b.allocator.free(strip);
package_step.dependOn(&b.addSystemCommand(&.{ "/bin/sh", "-c", strip }).step);
const function_zip = b.getInstallPath(exe.install_step.?.dest_dir, "function.zip");
const zip = try std.fmt.allocPrint(b.allocator, "zip -qj9 {s} {s}", .{ function_zip, b.getInstallPath(exe.install_step.?.dest_dir, exe.install_step.?.artifact.out_filename) });
defer b.allocator.free(zip);
package_step.dependOn(&b.addSystemCommand(&.{ "/bin/sh", "-c", zip }).step);
// This declares intent for the library to be installed into the standard
// location when the user invokes the "install" step (the default step when
// running `zig build`).
b.installArtifact(lib);
// Deployment
const deploy_step = b.step("deploy", "Deploy the function");
var deal_with_iam = true;
if (b.args) |args| {
for (args) |arg| {
if (std.mem.eql(u8, "--role", arg)) {
deal_with_iam = false;
break;
}
}
}
var iam_role: []u8 = &.{};
const iam_step = b.step("iam", "Create/Get IAM role for function");
deploy_step.dependOn(iam_step); // iam_step will either be a noop or all the stuff below
if (deal_with_iam) {
// if someone adds '-- --role arn...' to the command line, we don't
// need to do anything with the iam role. Otherwise, we'll create/
// get the IAM role and stick the name in a file in our destination
// directory to be used later
const iam_role_name_file = b.getInstallPath(exe.install_step.?.dest_dir, "iam_role_name");
iam_role = try std.fmt.allocPrint(b.allocator, "--role $(cat {s})", .{iam_role_name_file});
// defer b.allocator.free(iam_role);
if (!fileExists(iam_role_name_file)) {
// Role get/creation command
const ifstatement_fmt =
\\ if aws iam get-role --role-name lambda_basic_execution 2>&1 |grep -q NoSuchEntity; then aws iam create-role --output text --query Role.Arn --role-name lambda_basic_execution --assume-role-policy-document '{
\\ "Version": "2012-10-17",
\\ "Statement": [
\\ {
\\ "Sid": "",
\\ "Effect": "Allow",
\\ "Principal": {
\\ "Service": "lambda.amazonaws.com"
\\ },
\\ "Action": "sts:AssumeRole"
\\ }
\\ ]}' > /dev/null; fi && \
\\ aws iam attach-role-policy --policy-arn arn:aws:iam::aws:policy/AWSLambdaExecute --role-name lambda_basic_execution && \
\\ aws iam get-role --role-name lambda_basic_execution --query Role.Arn --output text >
;
// Creates a step for unit testing. This only builds the test executable
// but does not run it.
const test_module = b.createModule(.{
.root_source_file = b.path("src/lambda.zig"),
.target = target,
.optimize = optimize,
});
const ifstatement = try std.mem.concat(b.allocator, u8, &[_][]const u8{ ifstatement_fmt, iam_role_name_file });
defer b.allocator.free(ifstatement);
iam_step.dependOn(&b.addSystemCommand(&.{ "/bin/sh", "-c", ifstatement }).step);
}
}
const function_name = b.option([]const u8, "function-name", "Function name for Lambda [zig-fn]") orelse "zig-fn";
const function_name_file = b.getInstallPath(exe.install_step.?.dest_dir, function_name);
const ifstatement = "if [ ! -f {s} ] || [ {s} -nt {s} ]; then if aws lambda get-function --function-name {s} 2>&1 |grep -q ResourceNotFoundException; then echo not found > /dev/null; {s}; else echo found > /dev/null; {s}; fi; fi";
// The architectures option was introduced in 2.2.43 released 2021-10-01
// We want to use arm64 here because it is both faster and cheaper for most
// Amazon Linux 2 is the only arm64 supported option
const not_found = "aws lambda create-function --architectures arm64 --runtime provided.al2 --function-name {s} --zip-file fileb://{s} --handler not_applicable {s} && touch {s}";
const not_found_fmt = try std.fmt.allocPrint(b.allocator, not_found, .{ function_name, function_zip, iam_role, function_name_file });
defer b.allocator.free(not_found_fmt);
const found = "aws lambda update-function-code --function-name {s} --zip-file fileb://{s} && touch {s}";
const found_fmt = try std.fmt.allocPrint(b.allocator, found, .{ function_name, function_zip, function_name_file });
defer b.allocator.free(found_fmt);
var found_final: []const u8 = undefined;
var not_found_final: []const u8 = undefined;
if (b.args) |args| {
found_final = try addArgs(b.allocator, found_fmt, args);
not_found_final = try addArgs(b.allocator, not_found_fmt, args);
} else {
found_final = found_fmt;
not_found_final = not_found_fmt;
}
const cmd = try std.fmt.allocPrint(b.allocator, ifstatement, .{
function_name_file,
std.fs.path.dirname(exe.root_src.?.path),
function_name_file,
function_name,
not_found_fmt,
found_fmt,
});
const main_tests = b.addTest(.{
.name = "test",
.root_module = test_module,
});
defer b.allocator.free(cmd);
const run_main_tests = b.addRunArtifact(main_tests);
// std.debug.print("{s}\n", .{cmd});
deploy_step.dependOn(package_step);
deploy_step.dependOn(&b.addSystemCommand(&.{ "/bin/sh", "-c", cmd }).step);
// Build the lambda-build CLI to ensure it compiles
// This catches dependency version mismatches between tools/build and the main project
const lambda_build_dep = b.dependency("lambda_build", .{
.target = b.graph.host,
.optimize = optimize,
});
const lambda_build_exe = lambda_build_dep.artifact("lambda-build");
// TODO: Looks like IquanaTLS isn't playing nicely with payloads this small
// const payload = b.option([]const u8, "payload", "Lambda payload [{\"foo\":\"bar\"}]") orelse
// \\ {"foo": "bar"}"
// ;
const payload = b.option([]const u8, "payload", "Lambda payload [{\"foo\":\"bar\", \"baz\": \"qux\"}]") orelse
\\ {"foo": "bar", "baz": "qux"}"
;
// This creates a build step. It will be visible in the `zig build --help` menu,
// and can be selected like this: `zig build test`
// This will evaluate the `test` step rather than the default, which is "install".
const test_step = b.step("test", "Run library tests");
test_step.dependOn(&run_main_tests.step);
test_step.dependOn(&lambda_build_exe.step);
const run_script =
\\ f=$(mktemp) && \
\\ logs=$(aws lambda invoke \
\\ --cli-binary-format raw-in-base64-out \
\\ --invocation-type RequestResponse \
\\ --function-name {s} \
\\ --payload '{s}' \
\\ --log-type Tail \
\\ --query LogResult \
\\ --output text "$f" |base64 -d) && \
\\ cat "$f" && rm "$f" && \
\\ echo && echo && echo "$logs"
;
const run_script_fmt = try std.fmt.allocPrint(b.allocator, run_script, .{ function_name, payload });
defer b.allocator.free(run_script_fmt);
const run_cmd = b.addSystemCommand(&.{ "/bin/sh", "-c", run_script_fmt });
run_cmd.step.dependOn(deploy_step);
if (b.args) |args| {
run_cmd.addArgs(args);
}
// Create executable module
const exe_module = b.createModule(.{
.root_source_file = b.path("src/sample-main.zig"),
.target = target,
.optimize = optimize,
});
const exe = b.addExecutable(.{
.name = "custom",
.root_module = exe_module,
});
b.installArtifact(exe);
try configureBuildInternal(b, exe);
const run_step = b.step("run", "Run the app");
run_step.dependOn(&run_cmd.step);
}
}
/// Internal version of configureBuild for lambda-zig's own build.
///
/// Both this and configureBuild do the same thing, but resolve the lambda_build
/// dependency differently:
///
/// - Here: we call `b.dependency("lambda_build", ...)` directly since `b` is
/// lambda-zig's own Build context, which has lambda_build in its build.zig.zon
///
/// - configureBuild: consumers pass in their lambda_zig dependency, and we use
/// `lambda_zig_dep.builder.dependency("lambda_build", ...)` to resolve it from
/// lambda-zig's build.zig.zon rather than the consumer's
///
/// This avoids requiring consumers to declare lambda_build as a transitive
/// dependency in their own build.zig.zon.
fn configureBuildInternal(b: *std.Build, exe: *std.Build.Step.Compile) !void {
// When called from lambda-zig's own build, use local dependency
const lambda_build_dep = b.dependency("lambda_build", .{
.target = b.graph.host,
.optimize = .ReleaseSafe,
});
// Ignore return value for internal builds
_ = try @import("lambdabuild.zig").configureBuild(b, lambda_build_dep, exe, .{});
/// Report whether `file_name` (an absolute path) currently exists and can be
/// opened as a file. Any open error (missing path, permissions, etc.) is
/// treated as "does not exist".
fn fileExists(file_name: []const u8) bool {
    const handle = std.fs.openFileAbsolute(file_name, .{}) catch return false;
    handle.close();
    return true;
}
// Re-export types for consumers
const lambdabuild = @import("lambdabuild.zig");
/// Options for Lambda build integration.
pub const Options = lambdabuild.Options;
/// Source for Lambda build configuration (none, file, or inline config).
pub const LambdaConfigSource = lambdabuild.LambdaConfigSource;
/// A config file path with explicit required/optional semantics.
pub const ConfigFile = lambdabuild.ConfigFile;
/// Lambda build configuration struct (role_name, timeout, memory_size, VPC, etc.).
pub const LambdaBuildConfig = lambdabuild.LambdaBuildConfig;
/// Information about the configured Lambda build steps.
pub const BuildInfo = lambdabuild.BuildInfo;
/// Configure Lambda build steps for a Zig project.
///
/// This function adds build steps and options for packaging and deploying
/// Lambda functions to AWS. The `lambda_zig_dep` parameter must be the
/// dependency object obtained from `b.dependency("lambda_zig", ...)`.
///
/// Returns a `LambdaBuildInfo` struct containing:
/// - References to all build steps (package, iam, deploy, invoke)
/// - A `deploy_output` LazyPath to a JSON file with deployment info
/// - The function name used
///
/// ## Build Steps
///
/// The following build steps are added:
///
/// - `awslambda_package`: Package the executable into a Lambda deployment zip
/// - `awslambda_iam`: Create or verify the IAM role for the Lambda function
/// - `awslambda_deploy`: Deploy the function to AWS Lambda (depends on package)
/// - `awslambda_run`: Invoke the deployed function (depends on deploy)
///
/// ## Build Options
///
/// The following command-line options are available:
///
/// - `-Dfunction-name=[string]`: Name of the Lambda function
/// (default: exe.name, or as provided by config parameter)
/// - `-Dregion=[string]`: AWS region for deployment and invocation
/// - `-Dprofile=[string]`: AWS profile to use for credentials
/// - `-Dpayload=[string]`: JSON payload for invocation (default: "{}")
/// - `-Denv-file=[string]`: Path to environment variables file (KEY=VALUE format)
/// - `-Dconfig-file=[string]`: Path to Lambda build config JSON file (overrides function_config)
///
/// ## Configuration File
///
/// Function settings (timeout, memory, VPC, etc.) and deployment settings
/// (role_name, allow_principal) are configured via a JSON file or inline config.
///
/// By default, looks for `lambda.json` in the project root. If not found,
/// uses sensible defaults (role_name = "lambda_basic_execution").
///
/// ### Example lambda.json
///
/// ```json
/// {
/// "role_name": "my_lambda_role",
/// "timeout": 30,
/// "memory_size": 512,
/// "description": "My function description",
/// "allow_principal": "alexa-appkit.amazon.com",
/// "tags": [
/// { "key": "Environment", "value": "production" }
/// ],
/// "logging_config": {
/// "log_format": "JSON",
/// "application_log_level": "INFO"
/// }
/// }
/// ```
///
/// ## Deploy Output
///
/// The `deploy_output` field in the returned struct is a LazyPath to a JSON file
/// containing deployment information (available after deploy completes):
///
/// ```json
/// {
/// "arn": "arn:aws:lambda:us-east-1:123456789012:function:my-function",
/// "function_name": "my-function",
/// "partition": "aws",
/// "region": "us-east-1",
/// "account_id": "123456789012",
/// "role_arn": "arn:aws:iam::123456789012:role/lambda_basic_execution",
/// "architecture": "arm64",
/// "environment_keys": ["MY_VAR"]
/// }
/// ```
///
/// ## Example
///
/// ### Basic Usage (uses lambda.json if present)
///
/// ```zig
/// const lambda_zig = @import("lambda_zig");
///
/// pub fn build(b: *std.Build) !void {
/// const target = b.standardTargetOptions(.{});
/// const optimize = b.standardOptimizeOption(.{});
///
/// const lambda_zig_dep = b.dependency("lambda_zig", .{
/// .target = target,
/// .optimize = optimize,
/// });
///
/// const exe = b.addExecutable(.{ ... });
/// b.installArtifact(exe);
///
/// _ = try lambda_zig.configureBuild(b, lambda_zig_dep, exe, .{});
/// }
/// ```
///
/// ### Inline Configuration
///
/// ```zig
/// _ = try lambda_zig.configureBuild(b, lambda_zig_dep, exe, .{
/// .lambda_config = .{ .config = .{
/// .role_name = "my_custom_role",
/// .timeout = 30,
/// .memory_size = 512,
/// .allow_principal = "alexa-appkit.amazon.com",
/// }},
/// });
/// ```
///
/// ### Custom Config File Path (required by default)
///
/// ```zig
/// _ = try lambda_zig.configureBuild(b, lambda_zig_dep, exe, .{
/// .lambda_config = .{ .file = .{ .path = b.path("deploy/production.json") } },
/// });
/// ```
///
/// ### Optional Config File (silent defaults if missing)
///
/// ```zig
/// _ = try lambda_zig.configureBuild(b, lambda_zig_dep, exe, .{
/// .lambda_config = .{ .file = .{
/// .path = b.path("lambda.json"),
/// .required = false,
/// } },
/// });
/// ```
///
/// ### Dynamically Generated Config
///
/// ```zig
/// const wf = b.addWriteFiles();
/// const config_json = wf.add("lambda-config.json", generated_content);
///
/// _ = try lambda_zig.configureBuild(b, lambda_zig_dep, exe, .{
/// .lambda_config = .{ .file = .{ .path = config_json } },
/// });
/// ```
///
/// ### Using Deploy Output
///
/// ```zig
/// const lambda = try lambda_zig.configureBuild(b, lambda_zig_dep, exe, .{});
///
/// // Use lambda.deploy_output in other steps that need the ARN
/// const my_step = b.addRunArtifact(my_tool);
/// my_step.addFileArg(lambda.deploy_output);
/// my_step.step.dependOn(lambda.deploy_step); // Ensure deploy runs first
/// ```
pub fn configureBuild(
b: *std.Build,
lambda_zig_dep: *std.Build.Dependency,
exe: *std.Build.Step.Compile,
options: Options,
) !BuildInfo {
// Get lambda_build from the lambda_zig dependency's Build context
const lambda_build_dep = lambda_zig_dep.builder.dependency("lambda_build", .{
.target = b.graph.host,
.optimize = .ReleaseSafe,
});
return lambdabuild.configureBuild(b, lambda_build_dep, exe, options);
/// Return `original` with every entry of `args` appended, each preceded by a
/// single space. When `args` is empty, `original` is returned unchanged
/// (no allocation). Intermediate buffers from the repeated concat are not
/// freed here — callers presumably pass an arena-style allocator such as the
/// build allocator (TODO confirm).
fn addArgs(allocator: *std.mem.Allocator, original: []const u8, args: [][]const u8) ![]const u8 {
    if (args.len == 0) return original;
    var result: []const u8 = original;
    for (args) |extra| {
        result = try std.mem.concat(allocator, u8, &.{ result, " ", extra });
    }
    return result;
}

View file

@@ -1,28 +0,0 @@
.{
.name = .lambda_zig,
// This is a [Semantic Version](https://semver.org/).
// In a future version of Zig it will be used for package deduplication.
.version = "0.1.0",
// Package fingerprint used by the Zig package manager to identify this package.
.fingerprint = 0xae58341fff376efc,
.minimum_zig_version = "0.15.2",
.dependencies = .{
// Local path dependency: the lambda-build CLI tool invoked by the
// packaging/deploy build steps (see lambdabuild.zig).
.lambda_build = .{
.path = "tools/build",
},
},
// Specifies the set of files and directories that are included in this package.
// Only files and directories listed here are included in the `hash` that
// is computed for this package.
// Paths are relative to the build root. Use the empty string (`""`) to refer to
// the build root itself.
// A directory listed here means that all files within, recursively, are included.
.paths = .{
"build.zig",
"build.zig.zon",
"lambdabuild.zig",
"src",
"tools",
"LICENSE",
"README.md",
},
}

5
gyro.lock Normal file
View file

@@ -0,0 +1,5 @@
pkg default ducdetronquito http 0.1.3
pkg default ducdetronquito h11 0.1.1
github nektro iguanaTLS 953ad821fae6c920fb82399493663668cd91bde7 src/main.zig 953ad821fae6c920fb82399493663668cd91bde7
github MasterQ32 zig-network 15b88658809cac9022ec7d59449b0cd3ebfd0361 network.zig 15b88658809cac9022ec7d59449b0cd3ebfd0361
github elerch requestz 1fa8157641300805b9503f98cd201d0959d19631 src/main.zig 1fa8157641300805b9503f98cd201d0959d19631

7
gyro.zzz Normal file
View file

@@ -0,0 +1,7 @@
deps:
  requestz:
    src:
      github:
        user: elerch
        repo: requestz
        ref: 1fa8157641300805b9503f98cd201d0959d19631

View file

@@ -1,279 +0,0 @@
//! Lambda Build Integration for Zig Build System
//!
//! This module provides build steps for packaging and deploying Lambda functions.
//! It builds the lambda-build CLI tool and invokes it for each operation.
const std = @import("std");
pub const LambdaBuildConfig = @import("tools/build/src/LambdaBuildConfig.zig");
/// A config file path with explicit required/optional semantics.
/// A config file path with explicit required/optional semantics.
pub const ConfigFile = struct {
/// Location of the JSON config file; may be a source-tree path or a
/// build-generated file (e.g. from `b.addWriteFiles()`).
path: std.Build.LazyPath,
/// If true (default), error when file is missing. If false, silently use defaults.
required: bool = true,
};
/// Source for Lambda build configuration.
///
/// Determines how Lambda function settings (timeout, memory, VPC, etc.)
/// and deployment settings (role_name, allow_principal) are provided.
/// Source for Lambda build configuration.
///
/// Determines how Lambda function settings (timeout, memory, VPC, etc.)
/// and deployment settings (role_name, allow_principal) are provided.
pub const LambdaConfigSource = union(enum) {
/// No configuration file. Uses hardcoded defaults.
none,
/// Path to a JSON config file with explicit required/optional semantics.
/// See `ConfigFile` for the required vs. optional behavior.
file: ConfigFile,
/// Inline configuration. Will be serialized to JSON and
/// written to a generated file.
config: LambdaBuildConfig,
};
/// Options for Lambda build integration.
///
/// These provide project-level defaults that can still be overridden
/// via command-line options (e.g., `-Dfunction-name=...`).
/// Options for Lambda build integration.
///
/// These provide project-level defaults that can still be overridden
/// via command-line options (e.g., `-Dfunction-name=...`).
pub const Options = struct {
/// Default function name if not specified via -Dfunction-name.
/// If null, falls back to the executable name (exe.name).
default_function_name: ?[]const u8 = null,
/// Default environment file if not specified via -Denv-file.
/// If the file doesn't exist, it's silently skipped.
/// Format: one KEY=VALUE per line; lines starting with `#` are comments.
default_env_file: ?[]const u8 = ".env",
/// Lambda build configuration source.
/// Defaults to looking for "lambda.json" (optional - uses defaults if missing).
/// The -Dconfig-file command-line option, when provided, overrides this source.
///
/// Examples:
/// - `.none`: No config file, use defaults
/// - `.{ .file = .{ .path = b.path("lambda.json") } }`: Required config file
/// - `.{ .file = .{ .path = b.path("lambda.json"), .required = false } }`: Optional config file
/// - `.{ .config = .{ ... } }`: Inline configuration
lambda_config: LambdaConfigSource = .{ .file = .{ .path = .{ .cwd_relative = "lambda.json" }, .required = false } },
};
/// Information about the configured Lambda build steps.
///
/// Returned by `configureBuild` to allow consumers to depend on steps
/// and access deployment outputs.
pub const BuildInfo = struct {
/// Package step - creates the deployment zip
package_step: *std.Build.Step,
/// IAM step - creates/verifies the IAM role
iam_step: *std.Build.Step,
/// Deploy step - deploys the function to AWS Lambda
/// (always re-runs; AWS-side state can change externally)
deploy_step: *std.Build.Step,
/// Invoke step - invokes the deployed function
invoke_step: *std.Build.Step,
/// LazyPath to JSON file with deployment info.
/// Contains: arn, function_name, region, account_id, role_arn, architecture, environment_keys
/// Available after deploy_step completes.
deploy_output: std.Build.LazyPath,
/// The function name used for deployment
/// (resolved from -Dfunction-name, Options, or exe.name, in that order)
function_name: []const u8,
};
/// Configure Lambda build steps for a Zig project.
///
/// Adds the following build steps:
/// - awslambda_package: Package the function into a zip file
/// - awslambda_iam: Create/verify IAM role
/// - awslambda_deploy: Deploy the function to AWS
/// - awslambda_run: Invoke the deployed function
///
/// ## Configuration
///
/// Function settings (timeout, memory, VPC, etc.) and deployment settings
/// (role_name, allow_principal) are configured via a JSON file or inline config.
///
/// By default, looks for `lambda.json` in the project root. If not found,
/// uses sensible defaults (role_name = "lambda_basic_execution").
///
/// ### Example lambda.json
///
/// ```json
/// {
/// "role_name": "my_lambda_role",
/// "timeout": 30,
/// "memory_size": 512,
/// "allow_principal": "alexa-appkit.amazon.com",
/// "tags": [
/// { "key": "Environment", "value": "production" }
/// ]
/// }
/// ```
///
/// ### Inline Configuration
///
/// ```zig
/// lambda.configureBuild(b, dep, exe, .{
/// .lambda_config = .{ .config = .{
/// .role_name = "my_role",
/// .timeout = 30,
/// .memory_size = 512,
/// }},
/// });
/// ```
///
/// Returns a `BuildInfo` struct containing references to all steps and
/// a `deploy_output` LazyPath to the deployment info JSON file.
pub fn configureBuild(
b: *std.Build,
lambda_build_dep: *std.Build.Dependency,
exe: *std.Build.Step.Compile,
options: Options,
) !BuildInfo {
// Get the lambda-build CLI artifact from the dependency
const cli = lambda_build_dep.artifact("lambda-build");
// Get configuration options (command-line overrides config defaults)
const function_name = b.option([]const u8, "function-name", "Function name for Lambda") orelse options.default_function_name orelse exe.name;
const region = b.option([]const u8, "region", "AWS region") orelse null;
const profile = b.option([]const u8, "profile", "AWS profile") orelse null;
const payload = b.option(
[]const u8,
"payload",
"Lambda invocation payload",
) orelse "{}";
const env_file = b.option(
[]const u8,
"env-file",
"Path to environment variables file (KEY=VALUE format)",
) orelse options.default_env_file;
const config_file_override = b.option(
[]const u8,
"awslambda-config-file",
"Path to Lambda build config JSON file (overrides function_config)",
);
// Determine architecture for Lambda
// (this is the arch of the compiled artifact, not the build host)
const target_arch = exe.root_module.resolved_target.?.result.cpu.arch;
// Lambda supports only x86_64 and aarch64; anything else falls back
// to x86_64 with a warning rather than failing the build graph.
const arch_str = blk: {
switch (target_arch) {
.aarch64 => break :blk "aarch64",
.x86_64 => break :blk "x86_64",
else => {
std.log.warn("Unsupported architecture for Lambda: {}, defaulting to x86_64", .{target_arch});
break :blk "x86_64";
},
}
};
// Determine config file source - resolves to a path and required flag
// Internal struct since we need nullable path for the .none case
const ResolvedConfig = struct {
path: ?std.Build.LazyPath,
required: bool,
};
const config_file: ResolvedConfig = if (config_file_override) |override|
.{ .path = .{ .cwd_relative = override }, .required = true }
else switch (options.lambda_config) {
.none => .{ .path = null, .required = false },
.file => |cf| .{ .path = cf.path, .required = cf.required },
.config => |func_config| blk: {
// Serialize inline config to JSON and write to a generated file
// so the CLI has a uniform --config-file interface
const json_content = std.fmt.allocPrint(b.allocator, "{f}", .{
std.json.fmt(func_config, .{}),
}) catch @panic("OOM");
const wf = b.addWriteFiles();
break :blk .{ .path = wf.add("lambda-config.json", json_content), .required = true };
},
};
// Helper to add config file arg to a command
// (--config-file errors when the file is missing; --config-file-optional
// lets the CLI silently fall back to defaults)
const addConfigArg = struct {
fn add(cmd: *std.Build.Step.Run, file: ResolvedConfig) void {
if (file.path) |f| {
const flag = if (file.required) "--config-file" else "--config-file-optional";
cmd.addArg(flag);
cmd.addFileArg(f);
}
}
}.add;
// Package step - output goes to cache based on input hash
const package_cmd = b.addRunArtifact(cli);
package_cmd.step.name = try std.fmt.allocPrint(b.allocator, "{s} package", .{cli.name});
package_cmd.addArgs(&.{ "package", "--exe" });
package_cmd.addFileArg(exe.getEmittedBin());
package_cmd.addArgs(&.{"--output"});
const zip_output = package_cmd.addOutputFileArg("function.zip");
package_cmd.step.dependOn(&exe.step);
const package_step = b.step("awslambda_package", "Package the Lambda function");
package_step.dependOn(&package_cmd.step);
// IAM step
const iam_cmd = b.addRunArtifact(cli);
iam_cmd.step.name = try std.fmt.allocPrint(b.allocator, "{s} iam", .{cli.name});
if (profile) |p| iam_cmd.addArgs(&.{ "--profile", p });
if (region) |r| iam_cmd.addArgs(&.{ "--region", r });
iam_cmd.addArg("iam");
addConfigArg(iam_cmd, config_file);
const iam_step = b.step("awslambda_iam", "Create/verify IAM role for Lambda");
iam_step.dependOn(&iam_cmd.step);
// Deploy step (depends on package)
// NOTE: has_side_effects = true ensures this always runs, since AWS state
// can change externally (e.g., function deleted via console)
const deploy_cmd = b.addRunArtifact(cli);
deploy_cmd.has_side_effects = true;
deploy_cmd.step.name = try std.fmt.allocPrint(b.allocator, "{s} deploy", .{cli.name});
if (profile) |p| deploy_cmd.addArgs(&.{ "--profile", p });
if (region) |r| deploy_cmd.addArgs(&.{ "--region", r });
deploy_cmd.addArgs(&.{
"deploy",
"--function-name",
function_name,
"--zip-file",
});
deploy_cmd.addFileArg(zip_output);
deploy_cmd.addArgs(&.{
"--arch",
arch_str,
});
if (env_file) |ef| deploy_cmd.addArgs(&.{ "--env-file", ef });
addConfigArg(deploy_cmd, config_file);
// Add deploy output file for deployment info JSON
deploy_cmd.addArg("--deploy-output");
const deploy_output = deploy_cmd.addOutputFileArg("deploy-output.json");
deploy_cmd.step.dependOn(&package_cmd.step);
const deploy_step = b.step("awslambda_deploy", "Deploy the Lambda function");
deploy_step.dependOn(&deploy_cmd.step);
// Invoke/run step (depends on deploy), so `zig build awslambda_run`
// always packages and deploys before invoking
const invoke_cmd = b.addRunArtifact(cli);
invoke_cmd.step.name = try std.fmt.allocPrint(b.allocator, "{s} invoke", .{cli.name});
if (profile) |p| invoke_cmd.addArgs(&.{ "--profile", p });
if (region) |r| invoke_cmd.addArgs(&.{ "--region", r });
invoke_cmd.addArgs(&.{
"invoke",
"--function-name",
function_name,
"--payload",
payload,
});
invoke_cmd.step.dependOn(&deploy_cmd.step);
const run_step = b.step("awslambda_run", "Invoke the deployed Lambda function");
run_step.dependOn(&invoke_cmd.step);
// Hand back step handles so consumers can add dependencies or read
// the deploy-output JSON.
return .{
.package_step = package_step,
.iam_step = iam_step,
.deploy_step = deploy_step,
.invoke_step = run_step,
.deploy_output = deploy_output,
.function_name = function_name,
};
}

View file

@ -1,500 +1,96 @@
const std = @import("std");
const requestz = @import("requestz");
pub const HandlerFn = *const fn (std.mem.Allocator, []const u8) anyerror![]const u8;
const log = std.log.scoped(.lambda);
var client: ?std.http.Client = null;
const prefix = "http://";
const postfix = "/2018-06-01/runtime/invocation";
/// Tear down the module-level HTTP client, if `run` created one.
/// Safe to call when no client exists; afterwards `run` may be started again.
pub fn deinit() void {
    defer client = null;
    if (client) |*c| c.deinit();
}
/// Starts the lambda framework. Handler will be called when an event is processing
/// If an allocator is not provided, an approrpriate allocator will be selected and used
/// This function is intended to loop infinitely. If not used in this manner,
/// make sure to call the deinit() function
pub fn run(allocator: ?std.mem.Allocator, event_handler: HandlerFn) !void { // TODO: remove inferred error set?
const lambda_runtime_uri = std.posix.getenv("AWS_LAMBDA_RUNTIME_API") orelse test_lambda_runtime_uri.?;
// TODO: If this is null, go into single use command line mode
pub fn run(event_handler: fn (*std.mem.Allocator, []const u8) anyerror![]const u8) !void { // TODO: remove inferred error set?
const prefix = "http://";
const postfix = "/2018-06-01/runtime/invocation";
const lambda_runtime_uri = std.os.getenv("AWS_LAMBDA_RUNTIME_API");
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
defer _ = gpa.deinit();
const alloc = allocator orelse gpa.allocator();
const allocator = &gpa.allocator;
const url = try std.fmt.allocPrint(alloc, "{s}{s}{s}/next", .{ prefix, lambda_runtime_uri, postfix });
defer alloc.free(url);
const uri = try std.Uri.parse(url);
const url = try std.fmt.allocPrint(allocator, "{s}{s}{s}/next", .{ prefix, lambda_runtime_uri, postfix });
defer allocator.free(url);
// TODO: Simply adding this line without even using the client is enough
// to cause seg faults!?
// client = client orelse .{ .allocator = alloc };
// so we'll do this instead
if (client != null) return error.MustDeInitBeforeCallingRunAgain;
client = .{ .allocator = alloc };
log.info("tid {d} (lambda): Bootstrap initializing with event url: {s}", .{ std.Thread.getCurrentId(), url });
std.log.notice("Bootstrap initializing with event url: {s}", .{url});
while (lambda_remaining_requests == null or lambda_remaining_requests.? > 0) {
if (lambda_remaining_requests) |*r| {
// we're under test
log.debug("lambda remaining requests: {d}", .{r.*});
r.* -= 1;
}
var req_alloc = std.heap.ArenaAllocator.init(alloc);
while (true) {
var req_alloc = std.heap.ArenaAllocator.init(allocator);
defer req_alloc.deinit();
const req_allocator = req_alloc.allocator();
// Fundamentally we're doing 3 things:
// 1. Get the next event from Lambda (event data and request id)
// 2. Call our handler to get the response
// 3. Post the response back to Lambda
var ev = getEvent(req_allocator, uri) catch |err| {
// Well, at this point all we can do is shout at the void
log.err("Error fetching event details: {}", .{err});
std.posix.exit(1);
// continue;
};
if (ev == null) continue; // this gets logged in getEvent, and without
// a request id, we still can't do anything
// reasonable to report back
const event = ev.?;
defer ev.?.deinit();
const event_response = event_handler(req_allocator, event.event_data) catch |err| {
event.reportError(@errorReturnTrace(), err, lambda_runtime_uri) catch @panic("Error reporting error");
continue;
};
event.postResponse(lambda_runtime_uri, event_response) catch |err| {
event.reportError(@errorReturnTrace(), err, lambda_runtime_uri) catch @panic("Error reporting error");
continue;
};
}
}
const Event = struct {
/// Allocator that owns `event_data` and `request_id`.
allocator: std.mem.Allocator,
/// Raw invocation payload as received from the Runtime API.
event_data: []const u8,
/// Value of the Lambda-Runtime-Aws-Request-Id header for this invocation.
request_id: []const u8,
const Self = @This();
/// Construct an Event. Takes ownership of `event_data` and `request_id`;
/// both must have been allocated with `allocator` (freed in `deinit`).
pub fn init(allocator: std.mem.Allocator, event_data: []const u8, request_id: []const u8) Self {
return .{
.allocator = allocator,
.event_data = event_data,
.request_id = request_id,
};
}
/// Free the owned payload and request id.
pub fn deinit(self: *Self) void {
self.allocator.free(self.event_data);
self.allocator.free(self.request_id);
}
fn reportError(
self: Self,
return_trace: ?*std.builtin.StackTrace,
err: anytype,
lambda_runtime_uri: []const u8,
) !void {
// If we fail in this function, we're pretty hosed up
if (return_trace) |rt|
log.err("Caught error: {}. Return Trace: {any}", .{ err, rt })
else
log.err("Caught error: {}. No return trace available", .{err});
const err_url = try std.fmt.allocPrint(
self.allocator,
"{s}{s}{s}/{s}/error",
.{ prefix, lambda_runtime_uri, postfix, self.request_id },
);
defer self.allocator.free(err_url);
const err_uri = try std.Uri.parse(err_url);
const content =
\\{{
\\ "errorMessage": "{s}",
\\ "errorType": "HandlerReturnedError",
\\ "stackTrace": [ "{any}" ]
\\}}
;
const content_fmt = if (return_trace) |rt|
try std.fmt.allocPrint(self.allocator, content, .{ @errorName(err), rt })
else
try std.fmt.allocPrint(self.allocator, content, .{ @errorName(err), "no return trace available" });
defer self.allocator.free(content_fmt);
log.err("Posting to {s}: Data {s}", .{ err_url, content_fmt });
// TODO: There is something up with using a shared client in this way
// so we're taking a perf hit in favor of stability. In a practical
// sense, without making HTTPS connections (lambda environment is
// non-ssl), this shouldn't be a big issue
var cl = std.http.Client{ .allocator = self.allocator };
defer cl.deinit();
var req = cl.request(.POST, err_uri, .{
.extra_headers = &.{
.{
.name = "Lambda-Runtime-Function-Error-Type",
.value = "HandlerReturned",
},
},
}) catch |req_err| {
log.err("Error creating request for request id {s}: {}", .{ self.request_id, req_err });
std.posix.exit(1);
};
defer req.deinit();
req.transfer_encoding = .{ .content_length = content_fmt.len };
var body_writer = req.sendBodyUnflushed(&.{}) catch |send_err| {
log.err("Error sending body for request id {s}: {}", .{ self.request_id, send_err });
std.posix.exit(1);
};
body_writer.writer.writeAll(content_fmt) catch |write_err| {
log.err("Error writing body for request id {s}: {}", .{ self.request_id, write_err });
std.posix.exit(1);
};
body_writer.end() catch |end_err| {
log.err("Error ending body for request id {s}: {}", .{ self.request_id, end_err });
std.posix.exit(1);
};
req.connection.?.flush() catch |flush_err| {
log.err("Error flushing for request id {s}: {}", .{ self.request_id, flush_err });
std.posix.exit(1);
};
var redirect_buffer: [1024]u8 = undefined;
const response = req.receiveHead(&redirect_buffer) catch |recv_err| {
log.err("Error receiving response for request id {s}: {}", .{ self.request_id, recv_err });
std.posix.exit(1);
};
if (response.head.status != .ok) {
const req_allocator = &req_alloc.allocator;
var client = try requestz.Client.init(req_allocator);
// defer client.deinit();
// Lambda freezes the process at this line of code. During warm start,
// the process will unfreeze and data will be sent in response to client.get
var response = client.get(url, .{}) catch |err| {
std.log.err("Get fail: {}", .{err});
// Documentation says something about "exit immediately". The
// Lambda infrastrucutre restarts, so it's unclear if that's necessary.
// It seems as though a continue should be fine, and slightly faster
log.err("Post fail: {} {s}", .{
@intFromEnum(response.head.status),
response.head.reason,
});
std.posix.exit(1);
}
log.err("Error reporting post complete", .{});
}
/// POST the handler's result back to the Runtime API response endpoint
/// for this event's request id. The body is sent verbatim.
fn postResponse(self: Self, lambda_runtime_uri: []const u8, event_response: []const u8) !void {
// Per-request endpoint: {prefix}{runtime_uri}{postfix}/{request_id}/response
const response_url = try std.fmt.allocPrint(
self.allocator,
"{s}{s}{s}/{s}/response",
.{ prefix, lambda_runtime_uri, postfix, self.request_id },
);
defer self.allocator.free(response_url);
const response_uri = try std.Uri.parse(response_url);
// A fresh client per call: sharing one client has shown instability,
// and with plain-HTTP connections (the Lambda environment is non-SSL)
// the per-request cost is small.
var http_client = std.http.Client{ .allocator = self.allocator };
defer http_client.deinit();
// Different AWS runtimes normalize responses differently (Go escapes
// double quotes, node wraps as JSON). This custom runtime posts the
// handler's bytes through untouched; any normalization is left to a
// separate, explicit API.
var post_req = try http_client.request(.POST, response_uri, .{});
defer post_req.deinit();
post_req.transfer_encoding = .{ .content_length = event_response.len };
var body = try post_req.sendBodyUnflushed(&.{});
try body.writer.writeAll(event_response);
try body.end();
try post_req.connection.?.flush();
var redirect_buffer: [1024]u8 = undefined;
const reply = try post_req.receiveHead(&redirect_buffer);
// The Runtime API answers 202 Accepted on success; tolerate 200 as well.
const accepted = reply.head.status == .ok or reply.head.status == .accepted;
if (!accepted) {
return error.UnexpectedStatusFromPostResponse;
}
}
};
fn getEvent(allocator: std.mem.Allocator, event_data_uri: std.Uri) !?Event {
// TODO: There is something up with using a shared client in this way
// so we're taking a perf hit in favor of stability. In a practical
// sense, without making HTTPS connections (lambda environment is
// non-ssl), this shouldn't be a big issue
var cl = std.http.Client{ .allocator = allocator };
defer cl.deinit();
// Lambda freezes the process at this line of code. During warm start,
// the process will unfreeze and data will be sent in response to client.get
var req = try cl.request(.GET, event_data_uri, .{});
defer req.deinit();
try req.sendBodiless();
var redirect_buffer: [0]u8 = undefined;
var response = try req.receiveHead(&redirect_buffer);
if (response.head.status != .ok) {
// Documentation says something about "exit immediately". The
// Lambda infrastrucutre restarts, so it's unclear if that's necessary.
// It seems as though a continue should be fine, and slightly faster
// std.os.exit(1);
log.err("Lambda server event response returned bad error code: {} {s}", .{
@intFromEnum(response.head.status),
response.head.reason,
});
return error.EventResponseNotOkResponse;
}
// Extract request ID from response headers
var request_id: ?[]const u8 = null;
var header_it = response.head.iterateHeaders();
while (header_it.next()) |h| {
if (std.ascii.eqlIgnoreCase(h.name, "Lambda-Runtime-Aws-Request-Id"))
request_id = h.value;
// TODO: XRay uses an environment variable to do its magic. It's our
// responsibility to set this, but no zig-native setenv(3)/putenv(3)
// exists. I would kind of rather not link in libc for this,
// so we'll hold for now and think on this
// if (std.mem.indexOf(u8, h.name.value, "Lambda-Runtime-Trace-Id")) |_|
// std.process.
// std.os.setenv("AWS_LAMBDA_RUNTIME_API");
}
if (request_id == null) {
// We can't report back an issue because the runtime error reporting endpoint
// uses request id in its path. So the best we can do is log the error and move
// on here.
log.err("Could not find request id: skipping request", .{});
return null;
}
const req_id = request_id.?;
log.debug("got lambda request with id {s}", .{req_id});
// Read response body using a transfer buffer
var transfer_buffer: [64 * 1024]u8 = undefined;
const body_reader = response.reader(&transfer_buffer);
// Read all data into an allocated buffer
// We use content_length if available, otherwise read chunks
const content_len = response.head.content_length orelse (10 * 1024 * 1024); // 10MB max if not specified
var event_data = try allocator.alloc(u8, content_len);
errdefer allocator.free(event_data);
var total_read: usize = 0;
while (total_read < content_len) {
const remaining = event_data[total_read..];
const bytes_read = body_reader.readSliceShort(remaining) catch |err| switch (err) {
error.ReadFailed => return error.ReadFailed,
// std.os.exit(1);
continue;
};
if (bytes_read == 0) break;
total_read += bytes_read;
}
event_data = try allocator.realloc(event_data, total_read);
defer response.deinit();
return Event.init(
allocator,
event_data,
try allocator.dupe(u8, req_id),
);
}
////////////////////////////////////////////////////////////////////////
// All code below this line is for testing
////////////////////////////////////////////////////////////////////////
var server_port: ?u16 = null;
var server_remaining_requests: usize = 0;
var lambda_remaining_requests: ?usize = null;
var server_response: []const u8 = "unset";
var server_request_aka_lambda_response: []u8 = "";
var test_lambda_runtime_uri: ?[]u8 = null;
var server_ready = false;
/// This starts a test server. We're not testing the server itself,
/// so the main tests will start this thing up and create an arena around the
/// whole thing so we can just deallocate everything at once at the end,
/// leaks be damned
/// Launch the mock Lambda runtime server on a background thread.
/// Callers join the returned thread to guarantee shutdown before teardown.
fn startServer(allocator: std.mem.Allocator) !std.Thread {
    const server_thread = try std.Thread.spawn(.{}, threadMain, .{allocator});
    return server_thread;
}
/// Mock runtime server loop: binds an ephemeral localhost port, publishes
/// the address through the test globals, then serves requests until the
/// pre-loaded request budget is exhausted.
fn threadMain(allocator: std.mem.Allocator) !void {
const address = try std.net.Address.parseIp("127.0.0.1", 0);
var listener = try address.listen(.{ .reuse_address = true });
server_port = listener.listen_address.in.getPort();
test_lambda_runtime_uri = try std.fmt.allocPrint(allocator, "127.0.0.1:{d}", .{server_port.?});
log.debug("server listening at {s}", .{test_lambda_runtime_uri.?});
defer test_lambda_runtime_uri = null;
defer server_port = null;
log.info("starting server thread, tid {d}", .{std.Thread.getCurrentId()});
var arena = std.heap.ArenaAllocator.init(allocator);
defer arena.deinit();
const request_alloc = arena.allocator();
// The harness sets server_remaining_requests before starting us;
// reaching zero is the shutdown signal.
while (server_remaining_requests > 0) {
server_remaining_requests -= 1;
processRequest(request_alloc, &listener) catch |err| {
log.err("Unexpected error processing request: {any}", .{err});
if (@errorReturnTrace()) |trace| {
std.debug.dumpStackTrace(trace.*);
}
};
}
}
fn processRequest(allocator: std.mem.Allocator, server: *std.net.Server) !void {
server_ready = true;
errdefer server_ready = false;
log.debug(
"tid {d} (server): server waiting to accept. requests remaining: {d}",
.{ std.Thread.getCurrentId(), server_remaining_requests + 1 },
);
var connection = try server.accept();
defer connection.stream.close();
server_ready = false;
var read_buffer: [1024 * 16]u8 = undefined;
var write_buffer: [1024 * 16]u8 = undefined;
var stream_reader = std.net.Stream.Reader.init(connection.stream, &read_buffer);
var stream_writer = std.net.Stream.Writer.init(connection.stream, &write_buffer);
var http_server = std.http.Server.init(stream_reader.interface(), &stream_writer.interface);
const request = http_server.receiveHead() catch |err| switch (err) {
error.HttpConnectionClosing => return,
else => {
std.log.err("closing http connection: {s}", .{@errorName(err)});
return;
},
};
// Read request body if present
if (request.head.content_length) |content_len| {
if (content_len > 0) {
var body_transfer_buffer: [64 * 1024]u8 = undefined;
const body_reader = http_server.reader.bodyReader(&body_transfer_buffer, request.head.transfer_encoding, request.head.content_length);
var body_data = try allocator.alloc(u8, content_len);
errdefer allocator.free(body_data);
var total_read: usize = 0;
while (total_read < content_len) {
const remaining = body_data[total_read..];
const bytes_read = body_reader.readSliceShort(remaining) catch break;
if (bytes_read == 0) break;
total_read += bytes_read;
}
server_request_aka_lambda_response = try allocator.realloc(body_data, total_read);
var request_id: ?[]const u8 = null;
for (response.headers.items()) |h| {
if (std.mem.indexOf(u8, h.name.value, "Lambda-Runtime-Aws-Request-Id")) |_|
request_id = h.value;
// TODO: XRay uses an environment variable to do its magic. It's our
// responsibility to set this, but no zig-native setenv(3)/putenv(3)
// exists. I would kind of rather not link in libc for this,
// so we'll hold for now and think on this
// if (std.mem.indexOf(u8, h.name.value, "Lambda-Runtime-Trace-Id")) |_|
// std.process.
// std.os.setenv("AWS_LAMBDA_RUNTIME_API");
}
if (request_id == null) {
// We can't report back an issue because the runtime error reporting endpoint
// uses request id in its path. So the best we can do is log the error and move
// on here.
std.log.err("Could not find request id: skipping request", .{});
continue;
}
const req_id = request_id.?;
const event_response = event_handler(req_allocator, response.body) catch |err| {
// Stack trace will return null if stripped
const return_trace = @errorReturnTrace();
std.log.err("Caught error: {}. Return Trace: {}", .{ err, return_trace });
const err_url = try std.fmt.allocPrint(req_allocator, "{s}{s}/runtime/invocation/{s}/error", .{ prefix, lambda_runtime_uri, req_id });
defer req_allocator.free(err_url);
const content =
\\ {s}
\\ "errorMessage": "{s}",
\\ "errorType": "HandlerReturnedError",
\\ "stackTrace": [ "{}" ]
\\ {s}
;
const content_fmt = try std.fmt.allocPrint(req_allocator, content, .{ "{", @errorName(err), return_trace, "}" });
defer req_allocator.free(content_fmt);
std.log.err("Posting to {s}: Data {s}", .{ err_url, content_fmt });
var headers = .{.{ "Lambda-Runtime-Function-Error-Type", "HandlerReturned" }};
// TODO: Determine why this post is not returning
var err_resp = client.post(err_url, .{
.content = content_fmt,
.headers = headers,
}) catch |post_err| { // Well, at this point all we can do is shout at the void
std.log.err("Error posting response for request id {s}: {}", .{ req_id, post_err });
std.os.exit(0);
continue;
};
std.log.err("Post complete", .{});
defer err_resp.deinit();
continue;
};
const response_url = try std.fmt.allocPrint(req_allocator, "{s}{s}{s}/{s}/response", .{ prefix, lambda_runtime_uri, postfix, req_id });
// defer req_allocator.free(response_url);
var resp_resp = client.post(response_url, .{ .content = event_response }) catch |err| {
// TODO: report error
std.log.err("Error posting response for request id {s}: {}", .{ req_id, err });
continue;
};
defer resp_resp.deinit();
}
// Build and send response
const response_bytes = serve();
var respond_request = request;
try respond_request.respond(response_bytes, .{
.extra_headers = &.{
.{ .name = "Lambda-Runtime-Aws-Request-Id", .value = "69" },
},
});
log.debug(
"tid {d} (server): sent response: {s}",
.{ std.Thread.getCurrentId(), response_bytes },
);
}
/// Returns the canned payload the mock server hands to the lambda loop
/// (set by the test harness via the `server_response` global).
fn serve() []const u8 {
return server_response;
}
/// Echo handler used by the tests: returns the event payload unchanged.
fn handler(allocator: std.mem.Allocator, event_data: []const u8) ![]const u8 {
_ = allocator; // no allocation needed for a pure echo
return event_data;
}
/// Drive `request_count` full request/response cycles through the lambda
/// runtime loop against the in-process mock server, using `handler_fn` as
/// the function under test. Returns the last body the lambda posted back;
/// the caller owns the returned slice (allocated with `allocator`).
pub fn test_lambda_request(allocator: std.mem.Allocator, request: []const u8, request_count: usize, handler_fn: HandlerFn) ![]u8 {
var arena = std.heap.ArenaAllocator.init(allocator);
defer arena.deinit();
const aa = arena.allocator();
// Setup our server to run, and set the response for the server to the
// request. There is a cognitive disconnect here between mental model and
// physical model.
//
// Mental model:
//
// Lambda request -> λ -> Lambda response
//
// Physcial Model:
//
// 1. λ requests instructions from server
// 2. server provides "Lambda request"
// 3. λ posts response back to server
//
// So here we are setting up our server, then our lambda request loop,
// but it all needs to be in seperate threads so we can control startup
// and shut down. Both server and Lambda are set up to watch global variable
// booleans to know when to shut down. This function is designed for a
// single request/response pair only
lambda_remaining_requests = request_count;
server_remaining_requests = lambda_remaining_requests.? * 2; // Lambda functions
// fetch from the server,
// then post back. Always
// 2, no more, no less
server_response = request; // set our instructions to lambda, which in our
// physical model above, is the server response
defer server_response = "unset"; // set it back so we don't get confused later
// when subsequent tests fail
const server_thread = try startServer(aa); // start the server, get it ready
// busy-wait until the server thread signals it is accepting connections
while (!server_ready)
std.Thread.sleep(100);
log.debug("tid {d} (main): server reports ready", .{std.Thread.getCurrentId()});
// we aren't testing the server,
// so we'll use the arena allocator
defer server_thread.join(); // we'll be shutting everything down before we exit
// Now we need to start the lambda framework
try run(allocator, handler_fn); // We want our function under test to report leaks
return try allocator.dupe(u8, server_request_aka_lambda_response);
}
test "basic request" {
// std.testing.log_level = .debug;
const allocator = std.testing.allocator;
const request =
\\{"foo": "bar", "baz": "qux"}
;
// The echo handler returns the payload verbatim, so the response
// must equal the request byte-for-byte.
const expected_response =
\\{"foo": "bar", "baz": "qux"}
;
const lambda_response = try test_lambda_request(allocator, request, 1, handler);
defer deinit();
defer allocator.free(lambda_response);
try std.testing.expectEqualStrings(expected_response, lambda_response);
}
test "several requests do not fail" {
// std.testing.log_level = .debug;
const allocator = std.testing.allocator;
const request =
\\{"foo": "bar", "baz": "qux"}
;
// The echo handler returns the payload verbatim; running 5 cycles
// exercises the warm-start path without changing the expected body.
const expected_response =
\\{"foo": "bar", "baz": "qux"}
;
const lambda_response = try test_lambda_request(allocator, request, 5, handler);
defer deinit();
defer allocator.free(lambda_response);
try std.testing.expectEqualStrings(expected_response, lambda_response);
}

View file

@ -2,10 +2,10 @@ const std = @import("std");
const lambda = @import("lambda.zig");
pub fn main() anyerror!void {
try lambda.run(null, handler);
try lambda.run(handler);
}
fn handler(allocator: std.mem.Allocator, event_data: []const u8) ![]const u8 {
fn handler(allocator: *std.mem.Allocator, event_data: []const u8) ![]const u8 {
_ = allocator;
return event_data;
}

View file

@ -1,49 +0,0 @@
const std = @import("std");
/// Build script for the lambda-build CLI: compiles the tool, wires up a
/// `run` step for ad-hoc invocation, and a `test` step for unit tests.
pub fn build(b: *std.Build) void {
const target = b.standardTargetOptions(.{});
const optimize = b.standardOptimizeOption(.{});
// Create the main module for the CLI
const main_module = b.createModule(.{
.root_source_file = b.path("src/main.zig"),
.target = target,
.optimize = optimize,
});
// Add aws dependency to the module
const aws_dep = b.dependency("aws", .{ .target = target, .optimize = optimize });
main_module.addImport("aws", aws_dep.module("aws"));
const exe = b.addExecutable(.{
.name = "lambda-build",
.root_module = main_module,
});
b.installArtifact(exe);
// Run step for testing: zig build run -- package --exe /path/to/exe --output /path/to/out.zip
const run_cmd = b.addRunArtifact(exe);
run_cmd.step.dependOn(b.getInstallStep());
if (b.args) |args|
run_cmd.addArgs(args);
const run_step = b.step("run", "Run the CLI");
run_step.dependOn(&run_cmd.step);
// Test step
// (separate module so tests can be built with their own root file config)
const test_module = b.createModule(.{
.root_source_file = b.path("src/main.zig"),
.target = target,
.optimize = optimize,
});
test_module.addImport("aws", aws_dep.module("aws"));
const unit_tests = b.addTest(.{
.root_module = test_module,
});
const run_unit_tests = b.addRunArtifact(unit_tests);
const test_step = b.step("test", "Run unit tests");
test_step.dependOn(&run_unit_tests.step);
}

View file

@ -1,16 +0,0 @@
.{
.name = .lambda_build,
.version = "0.1.0",
// Package fingerprint assigned by the Zig package manager; do not edit by hand.
.fingerprint = 0x6e61de08e7e51114,
.dependencies = .{
// AWS SDK for Zig, pinned to a specific commit; hash verified by `zig fetch`.
.aws = .{
.url = "git+https://git.lerch.org/lobo/aws-sdk-for-zig#1a03250fbeb2840ab8b6010f1ad4e899cdfc185a",
.hash = "aws-0.0.1-SbsFcCg7CgC0yYv2Y7aOjonSAU3mltOSfY0x2w9jZlMV",
},
},
// Files included when this package is fetched as a dependency.
.paths = .{
"build.zig",
"build.zig.zon",
"src",
},
}

View file

@ -1,195 +0,0 @@
//! Lambda build configuration types.
//!
//! These types define the JSON schema for lambda.json configuration files,
//! encompassing IAM, Lambda function, and deployment settings.
//!
//! Used by both the build system (lambdabuild.zig) and the CLI commands
//! (deploy.zig, iam.zig).
const std = @import("std");
const LambdaBuildConfig = @This();
/// Wrapper for parsed config that owns both the JSON parse result
/// and the source file data (since parsed strings point into it).
pub const Parsed = struct {
/// The deserialized configuration; string fields reference `source_data`.
parsed: std.json.Parsed(LambdaBuildConfig),
/// Raw JSON bytes backing the parsed strings; freed in `deinit`.
source_data: []const u8,
/// Allocator that owns `source_data`.
allocator: std.mem.Allocator,
/// Release the parse result and the backing JSON buffer.
pub fn deinit(self: *Parsed) void {
self.parsed.deinit();
self.allocator.free(self.source_data);
}
};
// === IAM Configuration ===
/// IAM role name for the Lambda function.
/// Defaults to a pre-existing basic-execution role if not specified.
role_name: []const u8 = "lambda_basic_execution",
// Future: policy_statements, trust_policy, etc.
// === Deployment Settings ===
/// AWS service principal to grant invoke permission.
/// Example: "alexa-appkit.amazon.com" for Alexa Skills.
allow_principal: ?[]const u8 = null,
// === Lambda Function Configuration ===
// All fields are optional; null means "leave the AWS default / current value".
/// Human-readable description of the function.
description: ?[]const u8 = null,
/// Maximum execution time in seconds (1-900).
timeout: ?i64 = null,
/// Memory allocation in MB (128-10240).
memory_size: ?i64 = null,
/// KMS key ARN for environment variable encryption.
kmskey_arn: ?[]const u8 = null,
// Nested configs
vpc_config: ?VpcConfig = null,
dead_letter_config: ?DeadLetterConfig = null,
tracing_config: ?TracingConfig = null,
ephemeral_storage: ?EphemeralStorage = null,
logging_config: ?LoggingConfig = null,
// Collections
tags: ?[]const Tag = null,
layers: ?[]const []const u8 = null,
pub const VpcConfig = struct {
subnet_ids: ?[]const []const u8 = null,
security_group_ids: ?[]const []const u8 = null,
ipv6_allowed_for_dual_stack: ?bool = null,
};
pub const DeadLetterConfig = struct {
target_arn: ?[]const u8 = null,
};
pub const TracingConfig = struct {
/// "Active" or "PassThrough"
mode: ?[]const u8 = null,
};
pub const EphemeralStorage = struct {
/// Size in MB (512-10240)
size: i64,
};
pub const LoggingConfig = struct {
/// "JSON" or "Text"
log_format: ?[]const u8 = null,
/// "TRACE", "DEBUG", "INFO", "WARN", "ERROR", "FATAL"
application_log_level: ?[]const u8 = null,
system_log_level: ?[]const u8 = null,
log_group: ?[]const u8 = null,
};
pub const Tag = struct {
key: []const u8,
value: []const u8,
};
/// Validate configuration values are within AWS limits.
///
/// Logs a descriptive message and returns a field-specific error for the
/// first violation found; returns normally when everything is in range.
pub fn validate(self: LambdaBuildConfig) !void {
    // Local helpers keep each field check to a single readable predicate.
    const checks = struct {
        fn inRange(v: i64, lo: i64, hi: i64) bool {
            return v >= lo and v <= hi;
        }
        fn isEither(s: []const u8, a: []const u8, b: []const u8) bool {
            return std.mem.eql(u8, s, a) or std.mem.eql(u8, s, b);
        }
    };

    // Timeout: 1-900 seconds
    if (self.timeout) |t| {
        if (!checks.inRange(t, 1, 900)) {
            std.log.err("Invalid timeout: {} (must be 1-900 seconds)", .{t});
            return error.InvalidTimeout;
        }
    }

    // Memory: 128-10240 MB
    if (self.memory_size) |m| {
        if (!checks.inRange(m, 128, 10240)) {
            std.log.err("Invalid memory_size: {} (must be 128-10240 MB)", .{m});
            return error.InvalidMemorySize;
        }
    }

    // Ephemeral storage: 512-10240 MB
    if (self.ephemeral_storage) |es| {
        if (!checks.inRange(es.size, 512, 10240)) {
            std.log.err("Invalid ephemeral_storage.size: {} (must be 512-10240 MB)", .{es.size});
            return error.InvalidEphemeralStorage;
        }
    }

    // Tracing mode must be "Active" or "PassThrough" when present.
    if (self.tracing_config) |tc| {
        if (tc.mode) |mode| {
            if (!checks.isEither(mode, "Active", "PassThrough")) {
                std.log.err("Invalid tracing_config.mode: '{s}' (must be 'Active' or 'PassThrough')", .{mode});
                return error.InvalidTracingMode;
            }
        }
    }

    // Log format must be "JSON" or "Text" when present.
    if (self.logging_config) |lc| {
        if (lc.log_format) |format| {
            if (!checks.isEither(format, "JSON", "Text")) {
                std.log.err("Invalid logging_config.log_format: '{s}' (must be 'JSON' or 'Text')", .{format});
                return error.InvalidLogFormat;
            }
        }
    }
}
/// Load configuration from a JSON file.
///
/// If is_default is true and the file doesn't exist, returns null.
/// If is_default is false (explicitly specified) and file doesn't exist, returns error.
///
/// On success the returned `Parsed` owns both the JSON parse result and the
/// raw file bytes; caller must call `deinit()` on it.
pub fn loadFromFile(
    allocator: std.mem.Allocator,
    path: []const u8,
    is_default: bool,
) !?Parsed {
    const file = std.fs.cwd().openFile(path, .{}) catch |err| {
        if (err == error.FileNotFound) {
            if (is_default) {
                std.log.debug("Config file '{s}' not found, using defaults", .{path});
                return null;
            }
            std.log.err("Config file not found: {s}", .{path});
            return error.ConfigFileNotFound;
        }
        std.log.err("Failed to open config file '{s}': {}", .{ path, err });
        return error.ConfigFileOpenError;
    };
    defer file.close();
    // Read entire file (capped at 64 KiB; config files are small)
    var read_buffer: [4096]u8 = undefined;
    var file_reader = file.reader(&read_buffer);
    const content = file_reader.interface.allocRemaining(allocator, std.Io.Limit.limited(64 * 1024)) catch |err| {
        std.log.err("Error reading config file: {}", .{err});
        return error.ConfigFileReadError;
    };
    // Freed on any later error; on success ownership passes to the Parsed wrapper.
    errdefer allocator.free(content);
    // Parse JSON - strings will point into content, which we keep alive
    const parsed = std.json.parseFromSlice(
        LambdaBuildConfig,
        allocator,
        content,
        .{},
    ) catch |err| {
        std.log.err("Error parsing config JSON: {}", .{err});
        return error.ConfigFileParseError;
    };
    errdefer parsed.deinit();
    // Reject out-of-range values before handing the config to callers.
    try parsed.value.validate();
    return .{
        .parsed = parsed,
        .source_data = content,
        .allocator = allocator,
    };
}

View file

@ -1,726 +0,0 @@
//! Deploy command - deploys a Lambda function to AWS.
//!
//! Creates a new function or updates an existing one.
//! Supports setting environment variables via --env or --env-file.
//! Function configuration (timeout, memory, VPC, etc.) comes from --config-file.
const std = @import("std");
const aws = @import("aws");
const iam_cmd = @import("iam.zig");
const RunOptions = @import("main.zig").RunOptions;
const LambdaBuildConfig = @import("LambdaBuildConfig.zig");
// Get Lambda EnvironmentVariableKeyValue type from AWS SDK
const EnvVar = aws.services.lambda.EnvironmentVariableKeyValue;
/// Entry point for the `deploy` subcommand.
///
/// Parses command-line flags, collects environment variables from --env and
/// --env-file, loads the optional JSON config file, then delegates to
/// deployFunction. --function-name and --zip-file are required.
pub fn run(args: []const []const u8, options: RunOptions) !void {
    var function_name: ?[]const u8 = null;
    var zip_file: ?[]const u8 = null;
    var role_arn: ?[]const u8 = null;
    var arch: ?[]const u8 = null;
    var deploy_output: ?[]const u8 = null;
    var config_file: ?[]const u8 = null;
    // Distinguishes --config-file (missing file is an error) from
    // --config-file-optional (missing file falls back to defaults).
    var is_config_required = false;

    // Environment variables storage; keys and values are owned dupes,
    // freed here when the command finishes.
    var env_vars = std.StringHashMap([]const u8).init(options.allocator);
    defer {
        var it = env_vars.iterator();
        while (it.next()) |entry| {
            options.allocator.free(entry.key_ptr.*);
            options.allocator.free(entry.value_ptr.*);
        }
        env_vars.deinit();
    }

    // Flag parsing: each value-taking flag consumes the following argument.
    var i: usize = 0;
    while (i < args.len) : (i += 1) {
        const arg = args[i];
        if (std.mem.eql(u8, arg, "--function-name")) {
            i += 1;
            if (i >= args.len) return error.MissingFunctionName;
            function_name = args[i];
        } else if (std.mem.eql(u8, arg, "--zip-file")) {
            i += 1;
            if (i >= args.len) return error.MissingZipFile;
            zip_file = args[i];
        } else if (std.mem.eql(u8, arg, "--role-arn")) {
            i += 1;
            if (i >= args.len) return error.MissingRoleArn;
            role_arn = args[i];
        } else if (std.mem.eql(u8, arg, "--arch")) {
            i += 1;
            if (i >= args.len) return error.MissingArch;
            arch = args[i];
        } else if (std.mem.eql(u8, arg, "--env")) {
            i += 1;
            if (i >= args.len) return error.MissingEnvValue;
            try parseEnvVar(args[i], &env_vars, options.allocator);
        } else if (std.mem.eql(u8, arg, "--env-file")) {
            i += 1;
            if (i >= args.len) return error.MissingEnvFile;
            try loadEnvFile(args[i], &env_vars, options.allocator);
        } else if (std.mem.eql(u8, arg, "--config-file")) {
            i += 1;
            if (i >= args.len) return error.MissingConfigFile;
            config_file = args[i];
            is_config_required = true;
        } else if (std.mem.eql(u8, arg, "--config-file-optional")) {
            i += 1;
            if (i >= args.len) return error.MissingConfigFile;
            config_file = args[i];
            is_config_required = false;
        } else if (std.mem.eql(u8, arg, "--deploy-output")) {
            i += 1;
            if (i >= args.len) return error.MissingDeployOutput;
            deploy_output = args[i];
        } else if (std.mem.eql(u8, arg, "--help") or std.mem.eql(u8, arg, "-h")) {
            printHelp(options.stdout);
            try options.stdout.flush();
            return;
        } else {
            try options.stderr.print("Unknown option: {s}\n", .{arg});
            try options.stderr.flush();
            return error.UnknownOption;
        }
    }

    if (function_name == null) {
        try options.stderr.print("Error: --function-name is required\n", .{});
        printHelp(options.stderr);
        try options.stderr.flush();
        return error.MissingFunctionName;
    }
    if (zip_file == null) {
        try options.stderr.print("Error: --zip-file is required\n", .{});
        printHelp(options.stderr);
        try options.stderr.flush();
        return error.MissingZipFile;
    }

    // Load config file if provided
    var parsed_config = if (config_file) |path|
        try LambdaBuildConfig.loadFromFile(options.allocator, path, !is_config_required)
    else
        null;
    defer if (parsed_config) |*pc| pc.deinit();

    try deployFunction(.{
        .function_name = function_name.?,
        .zip_file = zip_file.?,
        .role_arn = role_arn,
        .arch = arch,
        .env_vars = if (env_vars.count() > 0) &env_vars else null,
        .deploy_output = deploy_output,
        .config = if (parsed_config) |pc| &pc.parsed.value else null,
    }, options);
}
/// Split "KEY=VALUE" and store owned copies of both parts in the map,
/// replacing (and freeing) any previous entry for the same key.
/// Returns error.InvalidEnvFormat when no '=' is present.
fn parseEnvVar(
    env_str: []const u8,
    env_vars: *std.StringHashMap([]const u8),
    allocator: std.mem.Allocator,
) !void {
    const sep = std.mem.indexOfScalar(u8, env_str, '=') orelse
        return error.InvalidEnvFormat;

    const owned_key = try allocator.dupe(u8, env_str[0..sep]);
    errdefer allocator.free(owned_key);
    const owned_value = try allocator.dupe(u8, env_str[sep + 1 ..]);
    errdefer allocator.free(owned_value);

    // Drop any existing entry first so its key/value strings never leak.
    if (env_vars.fetchRemove(owned_key)) |previous| {
        allocator.free(previous.key);
        allocator.free(previous.value);
    }
    try env_vars.put(owned_key, owned_value);
}
/// Load environment variables from a file (KEY=VALUE format, one per line).
/// Blank lines and lines starting with '#' are skipped. A missing file is
/// not an error (logged and ignored); other open/read failures are.
fn loadEnvFile(
    path: []const u8,
    env_vars: *std.StringHashMap([]const u8),
    allocator: std.mem.Allocator,
) !void {
    const file = std.fs.cwd().openFile(path, .{}) catch |err| {
        if (err == error.FileNotFound) {
            std.log.info("Env file '{s}' not found, skipping", .{path});
            return;
        }
        std.log.err("Failed to open env file '{s}': {}", .{ path, err });
        return error.EnvFileOpenError;
    };
    defer file.close();

    // Read the whole file up front (env files are small; capped at 64 KiB).
    // SAFETY: set on read
    var read_buffer: [4096]u8 = undefined;
    var file_reader = file.reader(&read_buffer);
    const content = file_reader.interface.allocRemaining(allocator, std.Io.Limit.limited(64 * 1024)) catch |err| {
        std.log.err("Error reading env file: {}", .{err});
        return error.EnvFileReadError;
    };
    defer allocator.free(content);

    // splitScalar also yields a final line that lacks a trailing newline,
    // so no separate end-of-buffer handling is needed.
    var lines = std.mem.splitScalar(u8, content, '\n');
    while (lines.next()) |raw_line| {
        const trimmed = std.mem.trim(u8, raw_line, " \t\r");
        if (trimmed.len == 0 or trimmed[0] == '#') continue;
        try parseEnvVar(trimmed, env_vars, allocator);
    }
}
/// Print usage information for the deploy subcommand.
/// Write errors are deliberately ignored (help output is best-effort).
/// NOTE(review): takes `anytype` here but invoke.zig's printHelp takes a
/// concrete *std.Io.Writer — consider unifying; confirm caller types first.
fn printHelp(writer: anytype) void {
    writer.print(
        \\Usage: lambda-build deploy [options]
        \\
        \\Deploy a Lambda function to AWS.
        \\
        \\Options:
        \\  --function-name <name>   Name of the Lambda function (required)
        \\  --zip-file <path>        Path to the deployment zip (required)
        \\  --role-arn <arn>         IAM role ARN (optional - creates role if omitted)
        \\  --arch <arch>            Architecture: x86_64 or aarch64 (default: x86_64)
        \\  --env <KEY=VALUE>        Set environment variable (can be repeated)
        \\  --env-file <path>        Load environment variables from file
        \\  --config-file <path>     Path to JSON config file (required, error if missing)
        \\  --config-file-optional <path>  Path to JSON config file (optional, use defaults if missing)
        \\  --deploy-output <path>   Write deployment info to JSON file
        \\  --help, -h               Show this help message
        \\
        \\Config File:
        \\  The config file specifies function settings:
        \\    {{
        \\      "role_name": "my_lambda_role",
        \\      "timeout": 30,
        \\      "memory_size": 512,
        \\      "allow_principal": "alexa-appkit.amazon.com",
        \\      "description": "My function",
        \\      "tags": [{{ "key": "Env", "value": "prod" }}]
        \\    }}
        \\
        \\Environment File Format:
        \\  The --env-file option reads a file with KEY=VALUE pairs, one per line.
        \\  Lines starting with # are treated as comments. Empty lines are ignored.
        \\
        \\  Example .env file:
        \\    # Database configuration
        \\    DB_HOST=localhost
        \\    DB_PORT=5432
        \\
        \\If the function exists, its code is updated. Otherwise, a new function
        \\is created with the provided configuration.
        \\
    , .{}) catch {};
}
/// Parameters for deployFunction, gathered by run() from CLI flags and
/// the optional config file.
const DeployOptions = struct {
    function_name: []const u8,
    // Path to the deployment zip on disk.
    zip_file: []const u8,
    // Explicit IAM role ARN; when null a role is looked up/created by name.
    role_arn: ?[]const u8,
    // "x86_64", "aarch64", or "arm64"; null defaults to x86_64.
    arch: ?[]const u8,
    // Non-null only when at least one env var was supplied.
    env_vars: ?*const std.StringHashMap([]const u8),
    // When set, deployment info is written to this path as JSON.
    deploy_output: ?[]const u8,
    // Parsed lambda.json settings; null means defaults throughout.
    config: ?*const LambdaBuildConfig,
};
/// Create the Lambda function, or — when CreateFunction returns HTTP 409
/// Conflict — update the existing function's code and configuration instead.
/// Either path waits for the function to become ready, optionally grants an
/// invoke permission, and optionally writes a deploy-info JSON file.
fn deployFunction(deploy_opts: DeployOptions, options: RunOptions) !void {
    // Validate architecture
    const arch_str = deploy_opts.arch orelse "x86_64";
    if (!std.mem.eql(u8, arch_str, "x86_64") and !std.mem.eql(u8, arch_str, "aarch64") and !std.mem.eql(u8, arch_str, "arm64")) {
        return error.InvalidArchitecture;
    }
    // Get role_name from config or use default
    const role_name = if (deploy_opts.config) |c| c.role_name else "lambda_basic_execution";
    // Get or create IAM role if not provided
    const role_arn = if (deploy_opts.role_arn) |r|
        try options.allocator.dupe(u8, r)
    else
        try iam_cmd.getOrCreateRole(role_name, options);
    defer options.allocator.free(role_arn);
    // Read the zip file (capped at 50 MiB) and encode as base64
    const zip_file = try std.fs.cwd().openFile(deploy_opts.zip_file, .{});
    defer zip_file.close();
    // SAFETY: set on read
    var read_buffer: [4096]u8 = undefined;
    var file_reader = zip_file.reader(&read_buffer);
    const zip_data = try file_reader.interface.allocRemaining(options.allocator, std.Io.Limit.limited(50 * 1024 * 1024));
    defer options.allocator.free(zip_data);
    const base64_data = try std.fmt.allocPrint(options.allocator, "{b64}", .{zip_data});
    defer options.allocator.free(base64_data);
    const services = aws.Services(.{.lambda}){};
    // Convert arch string to Lambda format ("aarch64" is normalized to "arm64")
    const lambda_arch: []const u8 = if (std.mem.eql(u8, arch_str, "aarch64") or std.mem.eql(u8, arch_str, "arm64"))
        "arm64"
    else
        "x86_64";
    // Use a mutable array so the slice type is [][]const u8, not []const []const u8
    var architectures_arr = [_][]const u8{lambda_arch};
    const architectures: [][]const u8 = &architectures_arr;
    // Build environment variables for AWS API
    const env_variables = try buildEnvVariables(deploy_opts.env_vars, options.allocator);
    defer if (env_variables) |vars| {
        for (vars) |v| {
            options.allocator.free(v.key);
            if (v.value) |val| options.allocator.free(val);
        }
        options.allocator.free(vars);
    };
    // Build config-based parameters
    const config = deploy_opts.config;
    // Build tags array if present in config
    const tags = if (config) |c| if (c.tags) |t| blk: {
        var tag_arr = try options.allocator.alloc(aws.services.lambda.TagKeyValue, t.len);
        for (t, 0..) |tag, idx| {
            tag_arr[idx] = .{ .key = tag.key, .value = tag.value };
        }
        break :blk tag_arr;
    } else null else null;
    defer if (tags) |t| options.allocator.free(t);
    // Build VPC config if present
    const vpc_config: ?aws.services.lambda.VpcConfig = if (config) |c| if (c.vpc_config) |vc|
        .{
            .subnet_ids = if (vc.subnet_ids) |ids| @constCast(ids) else null,
            .security_group_ids = if (vc.security_group_ids) |ids| @constCast(ids) else null,
            .ipv6_allowed_for_dual_stack = vc.ipv6_allowed_for_dual_stack,
        }
    else
        null else null;
    // Build dead letter config if present
    const dead_letter_config: ?aws.services.lambda.DeadLetterConfig = if (config) |c| if (c.dead_letter_config) |dlc|
        .{ .target_arn = dlc.target_arn }
    else
        null else null;
    // Build tracing config if present
    const tracing_config: ?aws.services.lambda.TracingConfig = if (config) |c| if (c.tracing_config) |tc|
        .{ .mode = tc.mode }
    else
        null else null;
    // Build ephemeral storage if present
    const ephemeral_storage: ?aws.services.lambda.EphemeralStorage = if (config) |c| if (c.ephemeral_storage) |es|
        .{ .size = es.size }
    else
        null else null;
    // Build logging config if present
    const logging_config: ?aws.services.lambda.LoggingConfig = if (config) |c| if (c.logging_config) |lc|
        .{
            .log_format = lc.log_format,
            .application_log_level = lc.application_log_level,
            .system_log_level = lc.system_log_level,
            .log_group = lc.log_group,
        }
    else
        null else null;
    // Try to create the function first - if it already exists, we'll update it
    std.log.info("Attempting to create function: {s}", .{deploy_opts.function_name});
    var create_diagnostics = aws.Diagnostics{
        // SAFETY: set by sdk on error
        .response_status = undefined,
        // SAFETY: set by sdk on error
        .response_body = undefined,
        .allocator = options.allocator,
    };
    // Use the shared aws_options but add diagnostics for create call
    var create_options = options.aws_options;
    create_options.diagnostics = &create_diagnostics;
    // Track the function ARN from whichever path succeeds
    var function_arn: ?[]const u8 = null;
    defer if (function_arn) |arn| options.allocator.free(arn);
    const create_result = aws.Request(services.lambda.create_function).call(.{
        .function_name = deploy_opts.function_name,
        .architectures = architectures,
        .code = .{ .zip_file = base64_data },
        .handler = "bootstrap",
        .package_type = "Zip",
        .runtime = "provided.al2023",
        .role = role_arn,
        .environment = if (env_variables) |vars| .{ .variables = vars } else null,
        // Config-based parameters
        .description = if (config) |c| c.description else null,
        .timeout = if (config) |c| c.timeout else null,
        .memory_size = if (config) |c| c.memory_size else null,
        .kmskey_arn = if (config) |c| c.kmskey_arn else null,
        .vpc_config = vpc_config,
        .dead_letter_config = dead_letter_config,
        .tracing_config = tracing_config,
        .ephemeral_storage = ephemeral_storage,
        .logging_config = logging_config,
        .tags = tags,
        .layers = if (config) |c| if (c.layers) |l| @constCast(l) else null else null,
    }, create_options) catch |err| {
        defer create_diagnostics.deinit();
        // Function already exists (409 Conflict) - update it instead
        if (create_diagnostics.response_status == .conflict) {
            std.log.info("Function already exists, updating: {s}", .{deploy_opts.function_name});
            const update_result = try aws.Request(services.lambda.update_function_code).call(.{
                .function_name = deploy_opts.function_name,
                .architectures = architectures,
                .zip_file = base64_data,
            }, options.aws_options);
            defer update_result.deinit();
            try options.stdout.print("Updated function: {s}\n", .{deploy_opts.function_name});
            if (update_result.response.function_arn) |arn| {
                try options.stdout.print("ARN: {s}\n", .{arn});
                function_arn = try options.allocator.dupe(u8, arn);
            }
            try options.stdout.flush();
            // Wait for function to be ready before updating configuration
            try waitForFunctionReady(deploy_opts.function_name, options);
            // Update function configuration if we have config or env variables
            if (config != null or env_variables != null)
                try updateFunctionConfiguration(
                    deploy_opts.function_name,
                    env_variables,
                    config,
                    options,
                );
            // Add invoke permission if requested
            if (config) |c|
                if (c.allow_principal) |principal|
                    try addPermission(deploy_opts.function_name, principal, options);
            // Write deploy output if requested
            // NOTE(review): `function_arn.?` panics if the API response had no
            // function_arn — confirm the SDK always returns it on success.
            if (deploy_opts.deploy_output) |output_path|
                try writeDeployOutput(output_path, function_arn.?, role_arn, lambda_arch, deploy_opts.env_vars);
            return;
        }
        std.log.err(
            "Lambda CreateFunction failed: {} (HTTP Response code {})",
            .{ err, create_diagnostics.response_status },
        );
        return error.LambdaCreateFunctionFailed;
    };
    defer create_result.deinit();
    try options.stdout.print("Created function: {s}\n", .{deploy_opts.function_name});
    if (create_result.response.function_arn) |arn| {
        try options.stdout.print("ARN: {s}\n", .{arn});
        function_arn = try options.allocator.dupe(u8, arn);
    }
    try options.stdout.flush();
    // Wait for function to be ready before returning
    try waitForFunctionReady(deploy_opts.function_name, options);
    // Add invoke permission if requested
    if (config) |c|
        if (c.allow_principal) |principal|
            try addPermission(deploy_opts.function_name, principal, options);
    // Write deploy output if requested
    // NOTE(review): same `function_arn.?` unwrap concern as the update path.
    if (deploy_opts.deploy_output) |output_path|
        try writeDeployOutput(output_path, function_arn.?, role_arn, lambda_arch, deploy_opts.env_vars);
}
/// Build environment variables in the format expected by AWS Lambda API.
///
/// Returns null when there are no variables to send. On success the caller
/// owns the returned slice and every duped key/value string in it.
///
/// Fix: the original `errdefer` freed only the slice, leaking any key/value
/// strings already duped when a mid-loop allocation failed. We now track how
/// many entries are filled and free them too on error.
fn buildEnvVariables(
    env_vars: ?*const std.StringHashMap([]const u8),
    allocator: std.mem.Allocator,
) !?[]EnvVar {
    const vars = env_vars orelse return null;
    if (vars.count() == 0) return null;

    var result = try allocator.alloc(EnvVar, vars.count());
    var filled: usize = 0;
    errdefer {
        // Free the strings duped so far, then the slice itself.
        for (result[0..filled]) |entry| {
            allocator.free(entry.key);
            if (entry.value) |val| allocator.free(val);
        }
        allocator.free(result);
    }

    var it = vars.iterator();
    while (it.next()) |entry| {
        const key = try allocator.dupe(u8, entry.key_ptr.*);
        // If the value dupe below fails, free this key before the outer
        // errdefer runs (it only covers fully-filled entries).
        errdefer allocator.free(key);
        const value = try allocator.dupe(u8, entry.value_ptr.*);
        result[filled] = .{ .key = key, .value = value };
        filled += 1;
    }
    return result;
}
/// Update function configuration (environment variables and config settings).
/// Called on the update path after the code upload succeeds; blocks until the
/// configuration change is reported ready.
fn updateFunctionConfiguration(
    function_name: []const u8,
    env_variables: ?[]EnvVar,
    config: ?*const LambdaBuildConfig,
    options: RunOptions,
) !void {
    const services = aws.Services(.{.lambda}){};
    std.log.info("Updating function configuration for: {s}", .{function_name});
    // The nested-config mapping below mirrors the one in deployFunction's
    // create path; keep the two in sync when adding fields.
    // Build VPC config if present
    const vpc_config: ?aws.services.lambda.VpcConfig = if (config) |c| if (c.vpc_config) |vc|
        .{
            .subnet_ids = if (vc.subnet_ids) |ids| @constCast(ids) else null,
            .security_group_ids = if (vc.security_group_ids) |ids| @constCast(ids) else null,
            .ipv6_allowed_for_dual_stack = vc.ipv6_allowed_for_dual_stack,
        }
    else
        null else null;
    // Build dead letter config if present
    const dead_letter_config: ?aws.services.lambda.DeadLetterConfig = if (config) |c| if (c.dead_letter_config) |dlc|
        .{ .target_arn = dlc.target_arn }
    else
        null else null;
    // Build tracing config if present
    const tracing_config: ?aws.services.lambda.TracingConfig = if (config) |c| if (c.tracing_config) |tc|
        .{ .mode = tc.mode }
    else
        null else null;
    // Build ephemeral storage if present
    const ephemeral_storage: ?aws.services.lambda.EphemeralStorage = if (config) |c| if (c.ephemeral_storage) |es|
        .{ .size = es.size }
    else
        null else null;
    // Build logging config if present
    const logging_config: ?aws.services.lambda.LoggingConfig = if (config) |c| if (c.logging_config) |lc|
        .{
            .log_format = lc.log_format,
            .application_log_level = lc.application_log_level,
            .system_log_level = lc.system_log_level,
            .log_group = lc.log_group,
        }
    else
        null else null;
    const update_config_result = try aws.Request(services.lambda.update_function_configuration).call(.{
        .function_name = function_name,
        .environment = if (env_variables) |vars| .{ .variables = vars } else null,
        // Config-based parameters
        .description = if (config) |c| c.description else null,
        .timeout = if (config) |c| c.timeout else null,
        .memory_size = if (config) |c| c.memory_size else null,
        .kmskey_arn = if (config) |c| c.kmskey_arn else null,
        .vpc_config = vpc_config,
        .dead_letter_config = dead_letter_config,
        .tracing_config = tracing_config,
        .ephemeral_storage = ephemeral_storage,
        .logging_config = logging_config,
        .layers = if (config) |c| if (c.layers) |l| @constCast(l) else null else null,
    }, options.aws_options);
    defer update_config_result.deinit();
    try options.stdout.print("Updated function configuration\n", .{});
    try options.stdout.flush();
    // Wait for configuration update to complete
    try waitForFunctionReady(function_name, options);
}
/// Poll GetFunction until last_update_status leaves "InProgress".
/// Returns error.FunctionUpdateFailed on a "Failed" status and
/// error.FunctionNotReady if the retry budget is exhausted.
fn waitForFunctionReady(function_name: []const u8, options: RunOptions) !void {
    const services = aws.Services(.{.lambda}){};
    var retries: usize = 30; // Up to ~6 seconds total (30 polls x 200ms sleep)
    while (retries > 0) : (retries -= 1) {
        const result = aws.Request(services.lambda.get_function).call(.{
            .function_name = function_name,
        }, options.aws_options) catch |err| {
            // Function should exist at this point, but retry on transient errors
            std.log.warn("GetFunction failed during wait: {}", .{err});
            std.Thread.sleep(200 * std.time.ns_per_ms);
            continue;
        };
        defer result.deinit();
        // Check if function is ready
        if (result.response.configuration) |cfg| {
            if (cfg.last_update_status) |status| {
                if (std.mem.eql(u8, status, "Successful")) {
                    std.log.debug("Function is ready", .{});
                    return;
                } else if (std.mem.eql(u8, status, "Failed")) {
                    return error.FunctionUpdateFailed;
                }
                // "InProgress" - keep waiting
            } else return; // No status means it's ready
        } else return; // No configuration means we can't check, assume ready
        std.Thread.sleep(200 * std.time.ns_per_ms);
    }
    return error.FunctionNotReady;
}
/// Add invoke permission for a service principal.
/// An existing permission (HTTP 409 Conflict) is treated as success.
fn addPermission(
    function_name: []const u8,
    principal: []const u8,
    options: RunOptions,
) !void {
    const services = aws.Services(.{.lambda}){};
    // Generate statement ID from principal: "alexa-appkit.amazon.com" -> "allow-alexa-appkit-amazon-com"
    // NOTE(review): only '.' is replaced; principals containing other
    // characters invalid in a statement id would pass through — confirm
    // AWS principals are always dot/dash/alnum.
    // SAFETY: set on write
    var statement_id_buf: [128]u8 = undefined;
    var statement_id_len: usize = 0;
    // Add "allow-" prefix
    const prefix = "allow-";
    @memcpy(statement_id_buf[0..prefix.len], prefix);
    statement_id_len = prefix.len;
    // Sanitize principal: replace dots with dashes (truncates silently if
    // the combined id would exceed the 128-byte buffer)
    for (principal) |c| {
        if (statement_id_len >= statement_id_buf.len - 1) break;
        statement_id_buf[statement_id_len] = if (c == '.') '-' else c;
        statement_id_len += 1;
    }
    const statement_id = statement_id_buf[0..statement_id_len];
    std.log.info("Adding invoke permission for principal: {s}", .{principal});
    var diagnostics = aws.Diagnostics{
        // SAFETY: set by sdk on error
        .response_status = undefined,
        // SAFETY: set by sdk on error
        .response_body = undefined,
        .allocator = options.allocator,
    };
    var add_perm_options = options.aws_options;
    add_perm_options.diagnostics = &diagnostics;
    const result = aws.Request(services.lambda.add_permission).call(.{
        .function_name = function_name,
        .statement_id = statement_id,
        .action = "lambda:InvokeFunction",
        .principal = principal,
    }, add_perm_options) catch |err| {
        defer diagnostics.deinit();
        // 409 Conflict means permission already exists - that's fine
        if (diagnostics.response_status == .conflict) {
            std.log.info("Permission already exists for: {s}", .{principal});
            try options.stdout.print("Permission already exists for: {s}\n", .{principal});
            try options.stdout.flush();
            return;
        }
        std.log.err(
            "AddPermission failed: {} (HTTP Response code {})",
            .{ err, diagnostics.response_status },
        );
        return error.AddPermissionFailed;
    };
    defer result.deinit();
    try options.stdout.print("Added invoke permission for: {s}\n", .{principal});
    try options.stdout.flush();
}
/// Write deployment information to a JSON file.
/// Components (region, account id, name) are parsed out of the function ARN.
/// NOTE(review): values are printed without JSON string escaping — assumes
/// ARNs, role names, and env-var keys never contain quotes/backslashes;
/// confirm or switch to std.json stringification.
fn writeDeployOutput(
    output_path: []const u8,
    function_arn: []const u8,
    role_arn: []const u8,
    architecture: []const u8,
    env_vars: ?*const std.StringHashMap([]const u8),
) !void {
    // Parse ARN to extract components
    // ARN format: arn:{partition}:lambda:{region}:{account_id}:function:{name}
    var arn_parts = std.mem.splitScalar(u8, function_arn, ':');
    _ = arn_parts.next(); // arn
    const partition = arn_parts.next() orelse return error.InvalidArn;
    _ = arn_parts.next(); // lambda
    const region = arn_parts.next() orelse return error.InvalidArn;
    const account_id = arn_parts.next() orelse return error.InvalidArn;
    _ = arn_parts.next(); // function
    const fn_name = arn_parts.next() orelse return error.InvalidArn;
    const file = try std.fs.cwd().createFile(output_path, .{});
    defer file.close();
    // SAFETY: set on write
    var write_buffer: [4096]u8 = undefined;
    var buffered = file.writer(&write_buffer);
    const writer = &buffered.interface;
    try writer.print(
        \\{{
        \\  "arn": "{s}",
        \\  "function_name": "{s}",
        \\  "partition": "{s}",
        \\  "region": "{s}",
        \\  "account_id": "{s}",
        \\  "role_arn": "{s}",
        \\  "architecture": "{s}",
        \\  "environment_keys": [
    , .{ function_arn, fn_name, partition, region, account_id, role_arn, architecture });
    // Write environment variable keys (only keys are recorded, never values)
    if (env_vars) |vars| {
        var it = vars.keyIterator();
        var first = true;
        while (it.next()) |key| {
            if (!first) {
                try writer.writeAll(",");
            }
            try writer.print("\n    \"{s}\"", .{key.*});
            first = false;
        }
    }
    try writer.writeAll(
        \\
        \\  ]
        \\}
        \\
    );
    try writer.flush();
    std.log.info("Wrote deployment info to: {s}", .{output_path});
}

View file

@ -1,161 +0,0 @@
//! IAM command - creates or retrieves an IAM role for Lambda execution.
const std = @import("std");
const aws = @import("aws");
const RunOptions = @import("main.zig").RunOptions;
const LambdaBuildConfig = @import("LambdaBuildConfig.zig");
/// Entry point for the `iam` subcommand: resolves the role name from the
/// optional config file (defaulting to "lambda_basic_execution"), ensures
/// the role exists, and prints its ARN to stdout.
pub fn run(args: []const []const u8, options: RunOptions) !void {
    var config_file: ?[]const u8 = null;
    // Distinguishes --config-file (missing file is an error) from
    // --config-file-optional (missing file falls back to defaults).
    var is_config_required = false;
    var i: usize = 0;
    while (i < args.len) : (i += 1) {
        const arg = args[i];
        if (std.mem.eql(u8, arg, "--config-file")) {
            i += 1;
            if (i >= args.len) return error.MissingConfigFile;
            config_file = args[i];
            is_config_required = true;
        } else if (std.mem.eql(u8, arg, "--config-file-optional")) {
            i += 1;
            if (i >= args.len) return error.MissingConfigFile;
            config_file = args[i];
            is_config_required = false;
        } else if (std.mem.eql(u8, arg, "--help") or std.mem.eql(u8, arg, "-h")) {
            printHelp(options.stdout);
            try options.stdout.flush();
            return;
        } else {
            try options.stderr.print("Unknown option: {s}\n", .{arg});
            try options.stderr.flush();
            return error.UnknownOption;
        }
    }
    // Load config file if provided
    var parsed_config = if (config_file) |path|
        try LambdaBuildConfig.loadFromFile(options.allocator, path, !is_config_required)
    else
        null;
    defer if (parsed_config) |*pc| pc.deinit();
    // Get role_name from config or use default
    const role_name = if (parsed_config) |pc|
        pc.parsed.value.role_name
    else
        "lambda_basic_execution";
    const arn = try getOrCreateRole(role_name, options);
    defer options.allocator.free(arn);
    try options.stdout.print("{s}\n", .{arn});
    try options.stdout.flush();
}
/// Print usage information for the iam subcommand.
/// Write errors are deliberately ignored (help output is best-effort).
fn printHelp(writer: anytype) void {
    writer.print(
        \\Usage: lambda-build iam [options]
        \\
        \\Create or retrieve an IAM role for Lambda execution.
        \\
        \\Options:
        \\  --config-file <path>     Path to JSON config file (required, error if missing)
        \\  --config-file-optional <path>  Path to JSON config file (optional, use defaults if missing)
        \\  --help, -h               Show this help message
        \\
        \\Config File:
        \\  The config file can specify the IAM role name:
        \\    {{
        \\      "role_name": "my_lambda_role"
        \\    }}
        \\
        \\If no config file is provided, uses "lambda_basic_execution" as the role name.
        \\If the role exists, its ARN is returned. If not, a new role is created
        \\with the AWSLambdaExecute policy attached.
        \\
    , .{}) catch {};
}
/// Get or create an IAM role for Lambda execution
/// Returns the role ARN (caller owns and must free the returned slice).
pub fn getOrCreateRole(role_name: []const u8, options: RunOptions) ![]const u8 {
    const services = aws.Services(.{.iam}){};
    var diagnostics = aws.Diagnostics{
        // SAFETY: set by sdk on error
        .response_status = undefined,
        // SAFETY: set by sdk on error
        .response_body = undefined,
        .allocator = options.allocator,
    };
    // Use the shared aws_options but add diagnostics for this call
    var aws_options = options.aws_options;
    aws_options.diagnostics = &diagnostics;
    defer aws_options.diagnostics = null;
    const get_result = aws.Request(services.iam.get_role).call(.{
        .role_name = role_name,
    }, aws_options) catch |err| {
        defer diagnostics.deinit();
        // Check for "not found" via HTTP status or error response body
        // (some IAM errors surface as NoSuchEntity in the body rather than 404)
        if (diagnostics.response_status == .not_found or
            std.mem.indexOf(u8, diagnostics.response_body, "NoSuchEntity") != null)
            // Role doesn't exist, create it
            return try createRole(role_name, options);
        std.log.err("IAM GetRole failed: {} (HTTP {})", .{ err, diagnostics.response_status });
        return error.IamGetRoleFailed;
    };
    defer get_result.deinit();
    // Role exists, return ARN
    return try options.allocator.dupe(u8, get_result.response.role.arn);
}
/// Create an IAM role assumable by lambda.amazonaws.com and attach the
/// managed AWSLambdaExecute policy. Returns the new role's ARN
/// (caller owns and must free the returned slice).
fn createRole(role_name: []const u8, options: RunOptions) ![]const u8 {
    const services = aws.Services(.{.iam}){};
    // Trust policy allowing the Lambda service to assume this role.
    const assume_role_policy =
        \\{
        \\  "Version": "2012-10-17",
        \\  "Statement": [
        \\    {
        \\      "Sid": "",
        \\      "Effect": "Allow",
        \\      "Principal": {
        \\        "Service": "lambda.amazonaws.com"
        \\      },
        \\      "Action": "sts:AssumeRole"
        \\    }
        \\  ]
        \\}
    ;
    std.log.info("Creating IAM role: {s}", .{role_name});
    const create_result = try aws.Request(services.iam.create_role).call(.{
        .role_name = role_name,
        .assume_role_policy_document = assume_role_policy,
    }, options.aws_options);
    defer create_result.deinit();
    const arn = try options.allocator.dupe(u8, create_result.response.role.arn);
    // Attach the Lambda execution policy
    std.log.info("Attaching AWSLambdaExecute policy", .{});
    const attach_result = try aws.Request(services.iam.attach_role_policy).call(.{
        .policy_arn = "arn:aws:iam::aws:policy/AWSLambdaExecute",
        .role_name = role_name,
    }, options.aws_options);
    defer attach_result.deinit();
    // IAM role creation can take a moment to propagate
    std.log.info("Role created: {s}", .{arn});
    std.log.info("Note: New roles may take a few seconds to propagate", .{});
    return arn;
}

View file

@ -1,93 +0,0 @@
//! Invoke command - invokes a Lambda function.
const std = @import("std");
const aws = @import("aws");
const RunOptions = @import("main.zig").RunOptions;
/// Entry point for the `invoke` subcommand: parses flags and invokes the
/// named Lambda function with the given JSON payload ("{}" by default).
pub fn run(args: []const []const u8, options: RunOptions) !void {
    var maybe_name: ?[]const u8 = null;
    var request_payload: []const u8 = "{}";

    var idx: usize = 0;
    while (idx < args.len) : (idx += 1) {
        const current = args[idx];
        if (std.mem.eql(u8, current, "--help") or std.mem.eql(u8, current, "-h")) {
            printHelp(options.stdout);
            try options.stdout.flush();
            return;
        }
        if (std.mem.eql(u8, current, "--function-name")) {
            idx += 1;
            if (idx >= args.len) return error.MissingFunctionName;
            maybe_name = args[idx];
            continue;
        }
        if (std.mem.eql(u8, current, "--payload")) {
            idx += 1;
            if (idx >= args.len) return error.MissingPayload;
            request_payload = args[idx];
            continue;
        }
        // Anything unrecognized aborts the command.
        try options.stderr.print("Unknown option: {s}\n", .{current});
        try options.stderr.flush();
        return error.UnknownOption;
    }

    const function_name = maybe_name orelse {
        try options.stderr.print("Error: --function-name is required\n", .{});
        printHelp(options.stderr);
        try options.stderr.flush();
        return error.MissingFunctionName;
    };

    try invokeFunction(function_name, request_payload, options);
}
/// Write usage text for the `invoke` subcommand to `writer`.
/// Write errors are deliberately ignored so help can always be attempted,
/// even on a failing stream.
fn printHelp(writer: *std.Io.Writer) void {
    const usage =
        \\Usage: lambda-build invoke [options]
        \\
        \\Invoke a Lambda function.
        \\
        \\Options:
        \\ --function-name <name> Name of the Lambda function (required)
        \\ --payload <json> JSON payload to send (default: empty object)
        \\ --help, -h Show this help message
        \\
        \\The function response is printed to stdout.
        \\
    ;
    writer.writeAll(usage) catch {};
}
/// Invoke `function_name` synchronously (RequestResponse) with `payload`,
/// then print the response body, any function error, and the base64-decoded
/// tail of the execution log to stdout.
fn invokeFunction(function_name: []const u8, payload: []const u8, options: RunOptions) !void {
    const services = aws.Services(.{.lambda}){};

    std.log.info("Invoking function: {s}", .{function_name});

    // Tail log type asks Lambda to return the last portion of the log,
    // base64-encoded, alongside the response.
    const invoke_result = try aws.Request(services.lambda.invoke).call(.{
        .function_name = function_name,
        .payload = payload,
        .log_type = "Tail",
        .invocation_type = "RequestResponse",
    }, options.aws_options);
    defer invoke_result.deinit();

    const response = invoke_result.response;

    // Response payload, if the function returned one.
    if (response.payload) |body| {
        try options.stdout.print("{s}\n", .{body});
    }

    // Error marker set by Lambda when the function itself failed.
    if (response.function_error) |err_name| {
        try options.stdout.print("Function error: {s}\n", .{err_name});
    }

    // Decode and print the tail log, when present.
    if (response.log_result) |encoded_logs| {
        const b64 = std.base64.standard.Decoder;
        const plain_len = try b64.calcSizeForSlice(encoded_logs);
        const plain = try options.allocator.alloc(u8, plain_len);
        defer options.allocator.free(plain);
        try b64.decode(plain, encoded_logs);
        try options.stdout.print("\n--- Logs ---\n{s}\n", .{plain});
    }

    try options.stdout.flush();
}

View file

@ -1,155 +0,0 @@
//! Lambda Build CLI
//!
//! A command-line tool for packaging, deploying, and invoking AWS Lambda functions.
//!
//! Usage: lambda-build <command> [options]
//!
//! Commands:
//! package Create deployment zip from executable
//! iam Create/verify IAM role for Lambda
//! deploy Deploy function to AWS Lambda
//! invoke Invoke the deployed function
const std = @import("std");
const aws = @import("aws");
const package = @import("package.zig");
const iam_cmd = @import("iam.zig");
const deploy_cmd = @import("deploy.zig");
const invoke_cmd = @import("invoke.zig");
/// Options passed to all commands.
///
/// Built once in `run` after global-option parsing and handed to every
/// subcommand, so they all share the same allocator, buffered writers,
/// and AWS client configuration.
pub const RunOptions = struct {
    /// Allocator used by commands for transient allocations.
    allocator: std.mem.Allocator,
    /// Buffered writer for normal output; commands flush it themselves
    /// before returning (main also flushes at exit).
    stdout: *std.Io.Writer,
    /// Buffered writer for error and help output; flushed the same way.
    stderr: *std.Io.Writer,
    /// AWS region the commands operate in (defaults to "us-east-1").
    region: []const u8,
    /// Shared client/credential/region options passed to every AWS request.
    aws_options: aws.Options,
};
/// Process entry point.
///
/// Sets up a general-purpose allocator and buffered stdout/stderr writers,
/// then delegates to `run`. Returns 1 on any error (after reporting it on
/// stderr), 0 on success.
pub fn main() !u8 {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    const allocator = gpa.allocator();

    var stdout_buffer: [4096]u8 = undefined;
    var stderr_buffer: [4096]u8 = undefined;
    var stdout_writer = std.fs.File.stdout().writer(&stdout_buffer);
    var stderr_writer = std.fs.File.stderr().writer(&stderr_buffer);

    run(allocator, &stdout_writer.interface, &stderr_writer.interface) catch |err| {
        stderr_writer.interface.print("Error: {}\n", .{err}) catch {};
        // Best-effort flush of BOTH streams on failure: any output a command
        // already buffered on stdout should still reach the user, and a flush
        // failure must not replace the intended exit code 1 with a propagated
        // error (the previous `try` here could do exactly that).
        stderr_writer.interface.flush() catch {};
        stdout_writer.interface.flush() catch {};
        return 1;
    };

    try stderr_writer.interface.flush();
    try stdout_writer.interface.flush();
    return 0;
}
/// Parse global options, build the shared AWS client/options, and dispatch
/// to the requested subcommand.
///
/// Global options (`--region`, `--profile`) may appear before the command
/// name; everything after the command token is passed through to the
/// subcommand untouched.
fn run(allocator: std.mem.Allocator, stdout: *std.Io.Writer, stderr: *std.Io.Writer) !void {
    const args = try std.process.argsAlloc(allocator);
    defer std.process.argsFree(allocator, args);
    if (args.len < 2) {
        printUsage(stderr);
        try stderr.flush();
        return error.MissingCommand;
    }
    // Parse global options and find command
    var cmd_start: usize = 1;
    var region: []const u8 = "us-east-1";
    var profile: ?[]const u8 = null;
    while (cmd_start < args.len) {
        const arg = args[cmd_start];
        if (std.mem.eql(u8, arg, "--region")) {
            cmd_start += 1;
            if (cmd_start >= args.len) return error.MissingRegionValue;
            region = args[cmd_start];
            cmd_start += 1;
        } else if (std.mem.eql(u8, arg, "--profile")) {
            cmd_start += 1;
            if (cmd_start >= args.len) return error.MissingProfileValue;
            profile = args[cmd_start];
            cmd_start += 1;
        } else if (std.mem.startsWith(u8, arg, "--")) {
            // Unknown global option - might be command-specific, let command handle it
            // NOTE(review): after this break the "--option" token itself becomes
            // `command` below; unless it is --help/-h it will fail dispatch with
            // UnknownCommand rather than being forwarded to a subcommand —
            // confirm that is the intended behavior.
            break;
        } else {
            // Found command
            break;
        }
    }
    if (cmd_start >= args.len) {
        printUsage(stderr);
        try stderr.flush();
        return error.MissingCommand;
    }
    // Create AWS client and options once, used by all commands
    var client = aws.Client.init(allocator, .{});
    defer client.deinit();
    const aws_options = aws.Options{
        .client = client,
        .region = region,
        .credential_options = .{
            .profile = .{
                .profile_name = profile,
                // Prefer file-based credentials only when the user explicitly
                // selected a profile on the command line.
                .prefer_profile_from_file = profile != null,
            },
        },
    };
    const options = RunOptions{
        .allocator = allocator,
        .stdout = stdout,
        .stderr = stderr,
        .region = region,
        .aws_options = aws_options,
    };
    // Dispatch: the first non-option token is the command; the rest of argv
    // is handed to the subcommand unchanged.
    const command = args[cmd_start];
    const cmd_args = args[cmd_start + 1 ..];
    if (std.mem.eql(u8, command, "package")) {
        try package.run(cmd_args, options);
    } else if (std.mem.eql(u8, command, "iam")) {
        try iam_cmd.run(cmd_args, options);
    } else if (std.mem.eql(u8, command, "deploy")) {
        try deploy_cmd.run(cmd_args, options);
    } else if (std.mem.eql(u8, command, "invoke")) {
        try invoke_cmd.run(cmd_args, options);
    } else if (std.mem.eql(u8, command, "--help") or std.mem.eql(u8, command, "-h")) {
        printUsage(stdout);
        try stdout.flush();
    } else {
        stderr.print("Unknown command: {s}\n\n", .{command}) catch {};
        printUsage(stderr);
        try stderr.flush();
        return error.UnknownCommand;
    }
}
/// Write top-level CLI usage text to `writer`.
/// Write errors are deliberately ignored so usage can always be attempted.
fn printUsage(writer: *std.Io.Writer) void {
    const usage =
        \\Usage: lambda-build [global-options] <command> [options]
        \\
        \\Lambda deployment CLI tool
        \\
        \\Global Options:
        \\ --region <region> AWS region (default: us-east-1)
        \\ --profile <profile> AWS profile to use
        \\
        \\Commands:
        \\ package Create deployment zip from executable
        \\ iam Create/verify IAM role for Lambda
        \\ deploy Deploy function to AWS Lambda
        \\ invoke Invoke the deployed function
        \\
        \\Run 'lambda-build <command> --help' for command-specific options.
        \\
    ;
    writer.writeAll(usage) catch {};
}

View file

@ -1,265 +0,0 @@
//! Package command - creates a Lambda deployment zip from an executable.
//!
//! The zip file contains a single file named "bootstrap" (Lambda's expected name
//! for custom runtime executables).
//!
//! Note: Uses "store" (uncompressed) format because Zig 0.15's std.compress.flate.Compress
//! has incomplete implementation (drain function panics with TODO). When the compression
//! implementation is completed, this should use deflate level 6.
const std = @import("std");
const zip = std.zip;
const RunOptions = @import("main.zig").RunOptions;
/// Entry point for the `package` subcommand.
///
/// Parses `--exe` and `--output`/`-o` (both required) and writes a Lambda
/// deployment zip. `--help`/`-h` prints usage and returns without error;
/// any other option is rejected.
pub fn run(args: []const []const u8, options: RunOptions) !void {
    var exe_arg: ?[]const u8 = null;
    var out_arg: ?[]const u8 = null;

    var idx: usize = 0;
    while (idx < args.len) : (idx += 1) {
        const current = args[idx];
        if (std.mem.eql(u8, current, "--help") or std.mem.eql(u8, current, "-h")) {
            printHelp(options.stdout);
            try options.stdout.flush();
            return;
        }
        if (std.mem.eql(u8, current, "--exe")) {
            idx += 1;
            if (idx >= args.len) return error.MissingExePath;
            exe_arg = args[idx];
            continue;
        }
        if (std.mem.eql(u8, current, "--output") or std.mem.eql(u8, current, "-o")) {
            idx += 1;
            if (idx >= args.len) return error.MissingOutputPath;
            out_arg = args[idx];
            continue;
        }
        try options.stderr.print("Unknown option: {s}\n", .{current});
        try options.stderr.flush();
        return error.UnknownOption;
    }

    // Both paths are mandatory; report on stderr with usage when missing.
    const exe = exe_arg orelse {
        try options.stderr.print("Error: --exe is required\n", .{});
        printHelp(options.stderr);
        try options.stderr.flush();
        return error.MissingExePath;
    };
    const out = out_arg orelse {
        try options.stderr.print("Error: --output is required\n", .{});
        printHelp(options.stderr);
        try options.stderr.flush();
        return error.MissingOutputPath;
    };

    try createLambdaZip(options.allocator, exe, out);
    try options.stdout.print("Created {s}\n", .{out});
}
/// Write usage text for the `package` subcommand to `writer`.
/// Write errors are deliberately ignored so help can always be attempted.
fn printHelp(writer: *std.Io.Writer) void {
    const usage =
        \\Usage: lambda-build package [options]
        \\
        \\Create a Lambda deployment zip from an executable.
        \\
        \\Options:
        \\ --exe <path> Path to the executable (required)
        \\ --output, -o <path> Output zip file path (required)
        \\ --help, -h Show this help message
        \\
        \\The executable will be packaged as 'bootstrap' in the zip file,
        \\which is the expected name for Lambda custom runtimes.
        \\
    ;
    writer.writeAll(usage) catch {};
}
/// Write `value` to `file` as two little-endian bytes.
fn writeU16LE(file: std.fs.File, value: u16) !void {
    var buf: [2]u8 = undefined;
    std.mem.writeInt(u16, &buf, value, .little);
    try file.writeAll(&buf);
}
/// Write `value` to `file` as four little-endian bytes.
fn writeU32LE(file: std.fs.File, value: u32) !void {
    var buf: [4]u8 = undefined;
    std.mem.writeInt(u32, &buf, value, .little);
    try file.writeAll(&buf);
}
/// Create a Lambda deployment zip file containing a single "bootstrap" executable.
///
/// Writes the zip32 layout by hand: one local file header + data, one central
/// directory entry, and an end-of-central-directory record. Uses "store"
/// (uncompressed) because Zig 0.15's std.compress.flate.Compress is incomplete
/// (drain panics with TODO); switch to deflate level 6 once it is completed.
///
/// The archive is reproducible: a fixed timestamp (1995-09-26 00:00:00) is
/// used instead of the current time.
///
/// Errors: ExecutableTooLarge when the input exceeds what zip32's u32
/// size/offset fields can represent; IncompleteRead on a short read; plus
/// filesystem/allocation errors.
fn createLambdaZip(allocator: std.mem.Allocator, exe_path: []const u8, output_path: []const u8) !void {
    // Read the executable into memory so we can CRC it and write it out.
    const exe_file = try std.fs.cwd().openFile(exe_path, .{});
    defer exe_file.close();
    const exe_stat = try exe_file.stat();
    // Zip32 headers store sizes and offsets as u32. Guard BEFORE the @intCast
    // so an oversized file fails cleanly instead of tripping the checked cast
    // (a runtime panic in safe builds). Keep ~1 KiB headroom for the headers
    // that surround the file data, so cd_offset below cannot overflow either.
    if (exe_stat.size > std.math.maxInt(u32) - 1024) return error.ExecutableTooLarge;
    const exe_size: u32 = @intCast(exe_stat.size);
    const exe_data = try allocator.alloc(u8, exe_size);
    defer allocator.free(exe_data);
    const bytes_read = try exe_file.readAll(exe_data);
    if (bytes_read != exe_size) return error.IncompleteRead;
    // CRC32 (ISO-HDLC polynomial — the standard zip CRC) of the uncompressed data.
    const crc = std.hash.crc.Crc32IsoHdlc.hash(exe_data);
    // Create the output archive.
    const out_file = try std.fs.cwd().createFile(output_path, .{});
    defer out_file.close();
    // Lambda custom runtimes require the entry executable to be named "bootstrap".
    const filename = "bootstrap";
    const filename_len: u16 = @intCast(filename.len);
    // Fixed DOS date/time for reproducible builds: 1995-09-26 00:00:00.
    // DOS time: bits 0-4 seconds/2, bits 5-10 minute, bits 11-15 hour.
    // DOS date: bits 0-4 day, bits 5-8 month, bits 9-15 year-1980.
    const mod_time: u16 = 0x0000; // 00:00:00
    const mod_date: u16 = (15 << 9) | (9 << 5) | 26; // year 15 = 1995 - 1980
    // --- Local file header ---
    try out_file.writeAll(&zip.local_file_header_sig);
    try writeU16LE(out_file, 10); // version needed (1.0 suffices for store)
    try writeU16LE(out_file, 0); // general purpose flags
    try writeU16LE(out_file, @intFromEnum(zip.CompressionMethod.store)); // no compression
    try writeU16LE(out_file, mod_time);
    try writeU16LE(out_file, mod_date);
    try writeU32LE(out_file, crc);
    try writeU32LE(out_file, exe_size); // compressed size == uncompressed for store
    try writeU32LE(out_file, exe_size); // uncompressed size
    try writeU16LE(out_file, filename_len);
    try writeU16LE(out_file, 0); // extra field length
    try out_file.writeAll(filename);
    // --- File data (stored, uncompressed) ---
    const local_header_end = 30 + filename_len; // 30 = fixed local-header size
    try out_file.writeAll(exe_data);
    // --- Central directory entry ---
    const cd_offset = local_header_end + exe_size; // where the central dir begins
    try out_file.writeAll(&zip.central_file_header_sig);
    try writeU16LE(out_file, 0x031e); // version made by (Unix, 3.0)
    try writeU16LE(out_file, 10); // version needed (1.0 for store)
    try writeU16LE(out_file, 0); // general purpose flags
    try writeU16LE(out_file, @intFromEnum(zip.CompressionMethod.store)); // store
    try writeU16LE(out_file, mod_time);
    try writeU16LE(out_file, mod_date);
    try writeU32LE(out_file, crc);
    try writeU32LE(out_file, exe_size); // compressed size
    try writeU32LE(out_file, exe_size); // uncompressed size
    try writeU16LE(out_file, filename_len);
    try writeU16LE(out_file, 0); // extra field length
    try writeU16LE(out_file, 0); // file comment length
    try writeU16LE(out_file, 0); // disk number start
    try writeU16LE(out_file, 0); // internal file attributes
    try writeU32LE(out_file, 0o100755 << 16); // external attrs: Unix mode rwxr-xr-x
    try writeU32LE(out_file, 0); // relative offset of local header
    try out_file.writeAll(filename);
    // --- End of central directory record ---
    const cd_size: u32 = 46 + filename_len; // 46 = fixed central-header size
    try out_file.writeAll(&zip.end_record_sig);
    try writeU16LE(out_file, 0); // disk number
    try writeU16LE(out_file, 0); // disk number with CD
    try writeU16LE(out_file, 1); // number of entries on disk
    try writeU16LE(out_file, 1); // total number of entries
    try writeU32LE(out_file, cd_size); // size of central directory
    try writeU32LE(out_file, cd_offset); // offset of central directory
    try writeU16LE(out_file, 0); // comment length
}
// End-to-end check of createLambdaZip: package a small stand-in file, then
// verify the archive twice — via std.zip's iterator (headers, sizes, CRC)
// and by extracting it and comparing the bytes.
test "create zip with test data" {
    const allocator = std.testing.allocator;
    // Create a temporary test file standing in for the compiled executable.
    var tmp_dir = std.testing.tmpDir(.{});
    defer tmp_dir.cleanup();
    const test_content = "#!/bin/sh\necho hello";
    const test_exe = try tmp_dir.dir.createFile("test_exe", .{});
    try test_exe.writeAll(test_content);
    test_exe.close();
    const exe_path = try tmp_dir.dir.realpathAlloc(allocator, "test_exe");
    defer allocator.free(exe_path);
    const output_path = try tmp_dir.dir.realpathAlloc(allocator, ".");
    defer allocator.free(output_path);
    const full_output = try std.fs.path.join(allocator, &.{ output_path, "test.zip" });
    defer allocator.free(full_output);
    try createLambdaZip(allocator, exe_path, full_output);
    // Verify the zip file can be read by std.zip
    const zip_file = try std.fs.cwd().openFile(full_output, .{});
    defer zip_file.close();
    var read_buffer: [4096]u8 = undefined;
    var file_reader = zip_file.reader(&read_buffer);
    var iter = try zip.Iterator.init(&file_reader);
    // Should have exactly one entry
    const entry = try iter.next();
    try std.testing.expect(entry != null);
    const e = entry.?;
    // Verify filename length is 9 ("bootstrap")
    try std.testing.expectEqual(@as(u32, 9), e.filename_len);
    // Verify compression method is store
    try std.testing.expectEqual(zip.CompressionMethod.store, e.compression_method);
    // Verify sizes match test content (store: compressed == uncompressed)
    try std.testing.expectEqual(@as(u64, test_content.len), e.uncompressed_size);
    try std.testing.expectEqual(@as(u64, test_content.len), e.compressed_size);
    // Verify CRC32 matches an independently computed value
    const expected_crc = std.hash.crc.Crc32IsoHdlc.hash(test_content);
    try std.testing.expectEqual(expected_crc, e.crc32);
    // Verify no more entries
    const next_entry = try iter.next();
    try std.testing.expect(next_entry == null);
    // Extract and verify the contents round-trip byte-for-byte.
    var extract_dir = std.testing.tmpDir(.{});
    defer extract_dir.cleanup();
    // Reset file reader position (the iterator consumed the stream).
    try file_reader.seekTo(0);
    var filename_buf: [std.fs.max_path_bytes]u8 = undefined;
    try e.extract(&file_reader, .{}, &filename_buf, extract_dir.dir);
    // Read extracted file and verify contents
    const extracted = try extract_dir.dir.openFile("bootstrap", .{});
    defer extracted.close();
    var extracted_content: [1024]u8 = undefined;
    const bytes_read = try extracted.readAll(&extracted_content);
    try std.testing.expectEqualStrings(test_content, extracted_content[0..bytes_read]);
}