Compare commits


No commits in common. "master" and "master" have entirely different histories.

16 changed files with 275 additions and 1394 deletions

@@ -1,29 +0,0 @@
name: Generic zig build
on:
  workflow_dispatch:
  push:
    branches:
      - '*'
      - '!zig-develop*'
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: elerch/setup-zig@v3
        with:
          version: 0.13.0
      - uses: elerch/zig-action-cache@v1.1.6
      - name: Build project
        run: zig build --summary all
      - name: Run tests
        run: zig build test --summary all
      - name: Notify
        uses: elerch/action-notify-ntfy@v2.github
        if: always() && env.GITEA_ACTIONS == 'true'
        with:
          host: ${{ secrets.NTFY_HOST }}
          topic: ${{ secrets.NTFY_TOPIC }}
          status: ${{ job.status }}
          user: ${{ secrets.NTFY_USER }}
          password: ${{ secrets.NTFY_PASSWORD }}

.gitignore vendored (3 changes)

@@ -1,3 +1,4 @@
.gyro/
zig-cache/
zig-out/
.zig-cache
deps.zig

README.md

@@ -1,70 +1,38 @@
lambda-zig: A Custom Runtime for AWS Lambda
===========================================
This is a sample custom runtime built in zig (0.13). Simple projects will execute
This is a sample custom runtime built in zig. Simple projects will execute
in <1ms, with a cold start init time of approximately 11ms.
Some custom build steps have been added to build.zig, which will only currently appear if compiling from a linux operating system:
Some custom build steps have been added to build.zig:
* `zig build awslambda_iam`: Deploy and record a default IAM role for the lambda function
* `zig build awslambda_package`: Package the lambda function for upload
* `zig build awslambda_deploy`: Deploy the lambda function
* `zig build awslambda_run`: Run the lambda function
* `zig build iam`: Deploy and record a default IAM role for the lambda function
* `zig build package`: Package the lambda function for upload
* `zig build deploy`: Deploy the lambda function
* `zig build run`: Run the lambda function
Custom options:
* **debug**: boolean flag to prevent debug symbols from being stripped. Useful for seeing
error return traces in the AWS Lambda logs
* **function-name**: set the name of the AWS Lambda function
* **payload**: Use this to set the payload of the function when run using `zig build awslambda_run`
* **region**: Use this to set the region for the function deployment/run
* **function-role**: Name of the role to use for the function. The system will
look up the arn from this name, and create it if it does not exist
* **function-arn**: Role arn to use with the function. This must exist
* **payload**: Use this to set the payload of the function when run using `zig build run`
Additionally, a custom IAM role can be used for the function by appending ``-- --role myawesomerole``
to the `zig build deploy` command. This has not really been tested. The role name
is cached in zig-out/bin/iam_role_name, so you can also just set that to the full
arn of your iam role if you'd like.
The AWS Lambda function is compiled as a linux ARM64 executable. Since the build.zig
calls out to the shell for AWS operations, you will need AWS CLI v2.2.43 or greater.
This project vendors dependencies with [gyro](https://github.com/mattnite/gyro), so
the first-time build should be done with `gyro build`. This should be working
on zig master - certain build.zig constructs are not available in zig 0.8.1.
The AWS Lambda function can be compiled as a linux x86_64 or linux aarch64
executable. The build script will set the architecture appropriately.
Caveats:
* Building on Windows will not yet work, as the package step still uses
system commands due to the need to create a zip file, and the current lack
of zip file creation capabilities in the standard library (you can read, but
not write, zip files with the standard library). A TODO exists with more
information should you wish to file a PR.
A sample project using this runtime can be found at https://git.lerch.org/lobo/lambda-zig-sample
Using the zig package manager
-----------------------------
The zig package manager [works just fine](https://github.com/ziglang/zig/issues/14279)
in build.zig, so it can be used to pull this runtime into another project.
To add lambda package/deployment steps to another project:
1. `zig init`
2. Add a `build.zig.zon` similar to the below
3. Add a line to build.zig to add necessary build options, etc. Note that the build function
return type should be changed from `void` to `!void`
`build.zig`:
```zig
try @import("lambda-zig").lambdaBuildOptions(b, exe);
```
`build.zig.zon`:
```zig
.{
.name = "lambda-zig",
.version = "0.1.0",
.dependencies = .{
.@"lambda-zig" = .{
.url = "https://git.lerch.org/lobo/lambda-zig/archive/fa13a08c4d91034a9b19d85f8c4c0af4cedaa67e.tar.gz",
.hash = "122037c357f834ffddf7b3a514f55edd5a4d7a3cde138a4021b6ac51be8fd2926000",
},
},
}
```
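For reference, a complete downstream `build.zig` wiring this in might look like the
following minimal sketch (the executable name `bootstrap` is what Lambda custom
runtimes expect; the `src/main.zig` layout is an assumption, not part of this repo):

```zig
const std = @import("std");

pub fn build(b: *std.Build) !void {
    const target = b.standardTargetOptions(.{});
    const optimize = b.standardOptimizeOption(.{});

    // Lambda custom runtimes execute an artifact named "bootstrap"
    const exe = b.addExecutable(.{
        .name = "bootstrap",
        .root_source_file = b.path("src/main.zig"), // assumed project layout
        .target = target,
        .optimize = optimize,
    });
    b.installArtifact(exe);

    // Adds the custom package/iam/deploy/run steps from this runtime
    try @import("lambda-zig").lambdaBuildOptions(b, exe);
}
```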
That's it! Now you should have the 4 custom build steps
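The handler itself follows `src/sample-main.zig` from this repository; a minimal
sketch (the `lambda_runtime` import name matches the module added in this repo's
build.zig, but how it is wired into your executable's root module is an assumption):

```zig
const std = @import("std");
// Import name is an assumption; it depends on how the runtime module
// (added as "lambda_runtime" in this repo's build.zig) is exposed to
// your executable's root module.
const lambda = @import("lambda_runtime");

pub fn main() !void {
    // null lets the runtime pick an allocator; run() loops forever,
    // fetching events and posting handler responses back to Lambda
    try lambda.run(null, handler);
}

fn handler(allocator: std.mem.Allocator, event_data: []const u8) ![]const u8 {
    _ = allocator;
    return event_data; // echo the event back
}
```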
* Small inbound lambda payloads seem to be confusing [requestz](https://github.com/ducdetronquito/requestz),
which just never returns, causing timeouts
* Unhandled invocation errors seem to be causing the same problem

build.zig (226 changes)

@@ -1,88 +1,174 @@
const builtin = @import("builtin");
const std = @import("std");
const pkgs = @import("deps.zig").pkgs;
// Although this function looks imperative, note that its job is to
// declaratively construct a build graph that will be executed by an external
// runner.
pub fn build(b: *std.Build) !void {
pub fn build(b: *std.build.Builder) !void {
// Standard target options allows the person running `zig build` to choose
// what target to build for. Here we do not override the defaults, which
// means any target is allowed, and the default is native. Other options
// for restricting supported target set are available.
const target = b.standardTargetOptions(.{});
// We want the target to be aarch64-linux for deploys
const target = std.zig.CrossTarget{
.cpu_arch = .aarch64,
.os_tag = .linux,
};
// Standard optimization options allow the person running `zig build` to select
// between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall. Here we do not
// set a preferred release mode, allowing the user to decide how to optimize.
const optimize = b.standardOptimizeOption(.{});
// Standard release options allow the person running `zig build` to select
// between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall.
// const mode = b.standardReleaseOptions();
const lib = b.addStaticLibrary(.{
.name = "lambda-zig",
// In this case the main source file is merely a path, however, in more
// complicated build scripts, this could be a generated file.
.root_source_file = b.path("src/lambda.zig"),
.target = target,
.optimize = optimize,
});
const exe = b.addExecutable("bootstrap", "src/main.zig");
_ = b.addModule("lambda_runtime", .{
.root_source_file = b.path("src/lambda.zig"),
.target = target,
.optimize = optimize,
});
pkgs.addAllTo(exe);
exe.setTarget(target);
exe.setBuildMode(.ReleaseSafe);
const debug = b.option(bool, "debug", "Debug mode (do not strip executable)") orelse false;
exe.strip = !debug;
exe.install();
// This declares intent for the library to be installed into the standard
// location when the user invokes the "install" step (the default step when
// running `zig build`).
b.installArtifact(lib);
// TODO: We can cross-compile of course, but stripping and zip commands
// may vary
if (std.builtin.os.tag == .linux) {
// Package step
const package_step = b.step("package", "Package the function");
package_step.dependOn(b.getInstallStep());
// strip may not be installed or work for the target arch
// TODO: make this much less fragile
const strip = if (debug)
try std.fmt.allocPrint(b.allocator, "true", .{})
else
try std.fmt.allocPrint(b.allocator, "[ -x /usr/aarch64-linux-gnu/bin/strip ] && /usr/aarch64-linux-gnu/bin/strip {s}", .{b.getInstallPath(exe.install_step.?.dest_dir, exe.install_step.?.artifact.out_filename)});
defer b.allocator.free(strip);
package_step.dependOn(&b.addSystemCommand(&.{ "/bin/sh", "-c", strip }).step);
const function_zip = b.getInstallPath(exe.install_step.?.dest_dir, "function.zip");
const zip = try std.fmt.allocPrint(b.allocator, "zip -qj9 {s} {s}", .{ function_zip, b.getInstallPath(exe.install_step.?.dest_dir, exe.install_step.?.artifact.out_filename) });
defer b.allocator.free(zip);
package_step.dependOn(&b.addSystemCommand(&.{ "/bin/sh", "-c", zip }).step);
// Creates a step for unit testing. This only builds the test executable
// but does not run it.
const main_tests = b.addTest(.{
.root_source_file = b.path("src/lambda.zig"),
.target = target,
.optimize = optimize,
});
// Deployment
const deploy_step = b.step("deploy", "Deploy the function");
var deal_with_iam = true;
if (b.args) |args| {
for (args) |arg| {
if (std.mem.eql(u8, "--role", arg)) {
deal_with_iam = false;
break;
}
}
}
var iam_role: []u8 = &.{};
const iam_step = b.step("iam", "Create/Get IAM role for function");
deploy_step.dependOn(iam_step); // iam_step will either be a noop or all the stuff below
if (deal_with_iam) {
// if someone adds '-- --role arn...' to the command line, we don't
// need to do anything with the iam role. Otherwise, we'll create/
// get the IAM role and stick the name in a file in our destination
// directory to be used later
const iam_role_name_file = b.getInstallPath(exe.install_step.?.dest_dir, "iam_role_name");
iam_role = try std.fmt.allocPrint(b.allocator, "--role $(cat {s})", .{iam_role_name_file});
// defer b.allocator.free(iam_role);
if (!fileExists(iam_role_name_file)) {
// Role get/creation command
const ifstatement_fmt =
\\ if aws iam get-role --role-name lambda_basic_execution 2>&1 |grep -q NoSuchEntity; then aws iam create-role --output text --query Role.Arn --role-name lambda_basic_execution --assume-role-policy-document '{
\\ "Version": "2012-10-17",
\\ "Statement": [
\\ {
\\ "Sid": "",
\\ "Effect": "Allow",
\\ "Principal": {
\\ "Service": "lambda.amazonaws.com"
\\ },
\\ "Action": "sts:AssumeRole"
\\ }
\\ ]}' > /dev/null; fi && \
\\ aws iam attach-role-policy --policy-arn arn:aws:iam::aws:policy/AWSLambdaExecute --role-name lambda_basic_execution && \
\\ aws iam get-role --role-name lambda_basic_execution --query Role.Arn --output text >
;
const run_main_tests = b.addRunArtifact(main_tests);
const ifstatement = try std.mem.concat(b.allocator, u8, &[_][]const u8{ ifstatement_fmt, iam_role_name_file });
defer b.allocator.free(ifstatement);
iam_step.dependOn(&b.addSystemCommand(&.{ "/bin/sh", "-c", ifstatement }).step);
}
}
const function_name = b.option([]const u8, "function-name", "Function name for Lambda [zig-fn]") orelse "zig-fn";
const function_name_file = b.getInstallPath(exe.install_step.?.dest_dir, function_name);
const ifstatement = "if [ ! -f {s} ] || [ {s} -nt {s} ]; then if aws lambda get-function --function-name {s} 2>&1 |grep -q ResourceNotFoundException; then echo not found > /dev/null; {s}; else echo found > /dev/null; {s}; fi; fi";
// The architectures option was introduced in 2.2.43 released 2021-10-01
// We want to use arm64 here because it is both faster and cheaper for most
// workloads. Amazon Linux 2 is the only arm64 supported option
const not_found = "aws lambda create-function --architectures arm64 --runtime provided.al2 --function-name {s} --zip-file fileb://{s} --handler not_applicable {s} && touch {s}";
const not_found_fmt = try std.fmt.allocPrint(b.allocator, not_found, .{ function_name, function_zip, iam_role, function_name_file });
defer b.allocator.free(not_found_fmt);
const found = "aws lambda update-function-code --function-name {s} --zip-file fileb://{s} && touch {s}";
const found_fmt = try std.fmt.allocPrint(b.allocator, found, .{ function_name, function_zip, function_name_file });
defer b.allocator.free(found_fmt);
var found_final: []const u8 = undefined;
var not_found_final: []const u8 = undefined;
if (b.args) |args| {
found_final = try addArgs(b.allocator, found_fmt, args);
not_found_final = try addArgs(b.allocator, not_found_fmt, args);
} else {
found_final = found_fmt;
not_found_final = not_found_fmt;
}
const cmd = try std.fmt.allocPrint(b.allocator, ifstatement, .{
function_name_file,
std.fs.path.dirname(exe.root_src.?.path),
function_name_file,
function_name,
not_found_fmt,
found_fmt,
});
// This creates a build step. It will be visible in the `zig build --help` menu,
// and can be selected like this: `zig build test`
// This will evaluate the `test` step rather than the default, which is "install".
const test_step = b.step("test", "Run library tests");
test_step.dependOn(&run_main_tests.step);
defer b.allocator.free(cmd);
const exe = b.addExecutable(.{
.name = "custom",
.root_source_file = b.path("src/sample-main.zig"),
.target = target,
.optimize = optimize,
});
// std.debug.print("{s}\n", .{cmd});
deploy_step.dependOn(package_step);
deploy_step.dependOn(&b.addSystemCommand(&.{ "/bin/sh", "-c", cmd }).step);
b.installArtifact(exe);
try lambdaBuildOptions(b, exe);
// TODO: Looks like IquanaTLS isn't playing nicely with payloads this small
// const payload = b.option([]const u8, "payload", "Lambda payload [{\"foo\":\"bar\"}]") orelse
// \\ {"foo": "bar"}"
// ;
const payload = b.option([]const u8, "payload", "Lambda payload [{\"foo\":\"bar\", \"baz\": \"qux\"}]") orelse
\\ {"foo": "bar", "baz": "qux"}"
;
const run_script =
\\ f=$(mktemp) && \
\\ logs=$(aws lambda invoke \
\\ --cli-binary-format raw-in-base64-out \
\\ --invocation-type RequestResponse \
\\ --function-name {s} \
\\ --payload '{s}' \
\\ --log-type Tail \
\\ --query LogResult \
\\ --output text "$f" |base64 -d) && \
\\ cat "$f" && rm "$f" && \
\\ echo && echo && echo "$logs"
;
const run_script_fmt = try std.fmt.allocPrint(b.allocator, run_script, .{ function_name, payload });
defer b.allocator.free(run_script_fmt);
const run_cmd = b.addSystemCommand(&.{ "/bin/sh", "-c", run_script_fmt });
run_cmd.step.dependOn(deploy_step);
if (b.args) |args| {
run_cmd.addArgs(args);
}
const run_step = b.step("run", "Run the app");
run_step.dependOn(&run_cmd.step);
}
}
/// lambdaBuildOptions will add four build steps to the build (if compiling
/// the code on a Linux host):
///
/// * package: Packages the function for deployment to Lambda
/// (dependencies are the zip executable and a shell)
/// * iam: Gets an IAM role for the Lambda function, and creates it if it does not exist
/// (dependencies are the AWS CLI, grep and a shell)
/// * deploy: Deploys the lambda function to a live AWS environment
/// (dependencies are the AWS CLI, and a shell)
/// * remoterun: Runs the lambda function in a live AWS environment
/// (dependencies are the AWS CLI, and a shell)
///
/// remoterun depends on deploy
/// deploy depends on iam and package
///
/// iam and package do not have any dependencies
pub fn lambdaBuildOptions(b: *std.Build, exe: *std.Build.Step.Compile) !void {
const function_name = b.option([]const u8, "function-name", "Function name for Lambda [zig-fn]") orelse "zig-fn";
try @import("lambdabuild.zig").configureBuild(b, exe, function_name);
fn fileExists(file_name: []const u8) bool {
const file = std.fs.openFileAbsolute(file_name, .{}) catch return false;
defer file.close();
return true;
}
pub fn configureBuild(b: *std.Build, exe: *std.Build.Step.Compile, function_name: []const u8) !void {
try @import("lambdabuild.zig").configureBuild(b, exe, function_name);
fn addArgs(allocator: *std.mem.Allocator, original: []const u8, args: [][]const u8) ![]const u8 {
var rc = original;
for (args) |arg| {
rc = try std.mem.concat(allocator, u8, &.{ rc, " ", arg });
}
return rc;
}

build.zig.zon

@@ -1,33 +0,0 @@
.{
.name = "lambda-zig",
// This is a [Semantic Version](https://semver.org/).
// In a future version of Zig it will be used for package deduplication.
.version = "0.0.0",
// This field is optional.
// This is currently advisory only; Zig does not yet do anything
// with this value.
.dependencies = .{
.aws = .{
.url = "https://git.lerch.org/api/packages/lobo/generic/aws-sdk-with-models/908c9d2d429b1f38c835363db566aa17bf1742fd/908c9d2d429b1f38c835363db566aa17bf1742fd-with-models.tar.gz",
.hash = "122022770a177afb2ee46632f88ad5468a5dea8df22170d1dea5163890b0a881399d",
},
},
.minimum_zig_version = "0.12.0",
// Specifies the set of files and directories that are included in this package.
// Only files and directories listed here are included in the `hash` that
// is computed for this package.
// Paths are relative to the build root. Use the empty string (`""`) to refer to
// the build root itself.
// A directory listed here means that all files within, recursively, are included.
.paths = .{
"build.zig",
"build.zig.zon",
"lambdabuild.zig",
"src",
"lambdabuild",
"LICENSE",
"README.md",
},
}

gyro.lock (new file, 5 additions)

@@ -0,0 +1,5 @@
pkg default ducdetronquito http 0.1.3
pkg default ducdetronquito h11 0.1.1
github nektro iguanaTLS 953ad821fae6c920fb82399493663668cd91bde7 src/main.zig 953ad821fae6c920fb82399493663668cd91bde7
github MasterQ32 zig-network 15b88658809cac9022ec7d59449b0cd3ebfd0361 network.zig 15b88658809cac9022ec7d59449b0cd3ebfd0361
github elerch requestz 1fa8157641300805b9503f98cd201d0959d19631 src/main.zig 1fa8157641300805b9503f98cd201d0959d19631

gyro.zzz (new file, 7 additions)

@@ -0,0 +1,7 @@
deps:
  requestz:
    src:
      github:
        user: elerch
        repo: requestz
        ref: 1fa8157641300805b9503f98cd201d0959d19631

lambdabuild.zig

@@ -1,168 +0,0 @@
const std = @import("std");
const builtin = @import("builtin");
const Package = @import("lambdabuild/Package.zig");
const Iam = @import("lambdabuild/Iam.zig");
const Deploy = @import("lambdabuild/Deploy.zig");
const Invoke = @import("lambdabuild/Invoke.zig");
fn fileExists(file_name: []const u8) bool {
const file = std.fs.openFileAbsolute(file_name, .{}) catch return false;
defer file.close();
return true;
}
fn addArgs(allocator: std.mem.Allocator, original: []const u8, args: [][]const u8) ![]const u8 {
var rc = original;
for (args) |arg| {
rc = try std.mem.concat(allocator, u8, &.{ rc, " ", arg });
}
return rc;
}
/// lambdaBuildSteps will add four build steps to the build (if compiling
/// the code on a Linux host):
///
/// * awslambda_package: Packages the function for deployment to Lambda
/// (dependencies are the zip executable and a shell)
/// * awslambda_iam: Gets an IAM role for the Lambda function, and creates it if it does not exist
/// (dependencies are the AWS CLI, grep and a shell)
/// * awslambda_deploy: Deploys the lambda function to a live AWS environment
/// (dependencies are the AWS CLI, and a shell)
/// * awslambda_run: Runs the lambda function in a live AWS environment
/// (dependencies are the AWS CLI, and a shell)
///
/// awslambda_run depends on deploy
/// awslambda_deploy depends on iam and package
///
/// iam and package do not have any dependencies
pub fn configureBuild(b: *std.Build, exe: *std.Build.Step.Compile, function_name: []const u8) !void {
// The rest of this function is currently reliant on the use of Linux
// system being used to build the lambda function
//
// It is likely that much of this will work on other Unix-like OSs, but
// we will work this out later
//
// TODO: support other host OSs
if (builtin.os.tag != .linux) return;
@import("aws").aws.globalLogControl(.info, .warn, .info, false);
const package_step = Package.create(b, .{ .exe = exe });
const step = b.step("awslambda_package", "Package the function");
step.dependOn(&package_step.step);
package_step.step.dependOn(b.getInstallStep());
// Doing this will require that the aws dependency be added to the downstream
// build.zig.zon
// const lambdabuild = b.addExecutable(.{
// .name = "lambdabuild",
// .root_source_file = .{
// // we use cwd_relative here because we need to compile this relative
// // to whatever directory this file happens to be. That is likely
// // in a cache directory, not the base of the build.
// .cwd_relative = try std.fs.path.join(b.allocator, &[_][]const u8{
// std.fs.path.dirname(@src().file).?,
// "lambdabuild/src/main.zig",
// }),
// },
// .target = b.host,
// });
// const aws_dep = b.dependency("aws", .{
// .target = b.host,
// .optimize = lambdabuild.root_module.optimize orelse .Debug,
// });
// const aws_module = aws_dep.module("aws");
// lambdabuild.root_module.addImport("aws", aws_module);
//
const iam_role_name = b.option(
[]const u8,
"function-role",
"IAM role name for function (will create if it does not exist) [lambda_basic_execution]",
) orelse "lambda_basic_execution_blah2";
const iam_role_arn = b.option(
[]const u8,
"function-arn",
"Preexisting IAM role arn for function",
);
const iam = Iam.create(b, .{
.role_name = iam_role_name,
.role_arn = iam_role_arn,
});
const iam_step = b.step("awslambda_iam", "Create/Get IAM role for function");
iam_step.dependOn(&iam.step);
const region = try b.allocator.create(@import("lambdabuild/Region.zig"));
region.* = .{
.allocator = b.allocator,
.specified_region = b.option([]const u8, "region", "Region to use [default is autodetect from environment/config]"),
};
// Deployment
const deploy = Deploy.create(b, .{
.name = function_name,
.arch = exe.root_module.resolved_target.?.result.cpu.arch,
.iam_step = iam,
.package_step = package_step,
.region = region,
});
const deploy_step = b.step("awslambda_deploy", "Deploy the function");
deploy_step.dependOn(&deploy.step);
const payload = b.option([]const u8, "payload", "Lambda payload [{\"foo\":\"bar\", \"baz\": \"qux\"}]") orelse
\\ {"foo": "bar", "baz": "qux"}"
;
const invoke = Invoke.create(b, .{
.name = function_name,
.payload = payload,
.region = region,
});
invoke.step.dependOn(&deploy.step);
const run_step = b.step("awslambda_run", "Run the app in AWS lambda");
run_step.dependOn(&invoke.step);
}
// AWS_CONFIG_FILE (default is ~/.aws/config)
// AWS_DEFAULT_REGION
fn findRegionFromSystem(allocator: std.mem.Allocator) ![]const u8 {
const env_map = try std.process.getEnvMap(allocator);
if (env_map.get("AWS_DEFAULT_REGION")) |r| return r;
const config_file_path = env_map.get("AWS_CONFIG_FILE") orelse
try std.fs.path.join(allocator, &[_][]const u8{
env_map.get("HOME") orelse env_map.get("USERPROFILE").?,
".aws",
"config",
});
const config_file = try std.fs.openFileAbsolute(config_file_path, .{});
defer config_file.close();
const config_bytes = try config_file.readToEndAlloc(allocator, 1024 * 1024);
const profile = env_map.get("AWS_PROFILE") orelse "default";
var line_iterator = std.mem.split(u8, config_bytes, "\n");
var in_profile = false;
while (line_iterator.next()) |line| {
const trimmed = std.mem.trim(u8, line, " \t\r");
if (trimmed.len == 0 or trimmed[0] == '#') continue;
if (!in_profile) {
if (trimmed[0] == '[' and trimmed[trimmed.len - 1] == ']') {
// this is a profile directive!
// std.debug.print("profile: {s}, in file: {s}\n", .{ profile, trimmed[1 .. trimmed.len - 1] });
if (std.mem.eql(u8, profile, trimmed[1 .. trimmed.len - 1])) {
in_profile = true;
}
}
continue; // we're only looking for a profile at this point
}
// look for our region directive
if (trimmed[0] == '[' and trimmed[trimmed.len - 1] == ']')
return error.RegionNotFound; // we've hit another profile without getting our region
if (!std.mem.startsWith(u8, trimmed, "region")) continue;
var equalityiterator = std.mem.split(u8, trimmed, "=");
_ = equalityiterator.next() orelse return error.RegionNotFound;
const raw_val = equalityiterator.next() orelse return error.RegionNotFound;
return try allocator.dupe(u8, std.mem.trimLeft(u8, raw_val, " \t"));
}
return error.RegionNotFound;
}

lambdabuild/Deploy.zig

@@ -1,165 +0,0 @@
const std = @import("std");
const Region = @import("Region.zig");
const aws = @import("aws").aws;
const Deploy = @This();
step: std.Build.Step,
options: Options,
const base_id: std.Build.Step.Id = .custom;
pub const Options = struct {
/// Function name to be used for the function
name: []const u8,
/// Architecture for Lambda function
arch: std.Target.Cpu.Arch,
/// Iam step. This will be a dependency of the deployment
iam_step: *@import("Iam.zig"),
/// Packaging step. This will be a dependency of the deployment
package_step: *@import("Package.zig"),
/// Region for deployment
region: *Region,
};
pub fn create(owner: *std.Build, options: Options) *Deploy {
const name = owner.dupe(options.name);
const step_name = owner.fmt("{s} {s}{s}", .{
"aws lambda",
"deploy ",
name,
});
const self = owner.allocator.create(Deploy) catch @panic("OOM");
self.* = .{
.step = std.Build.Step.init(.{
.id = base_id,
.name = step_name,
.owner = owner,
.makeFn = make,
}),
.options = options,
};
self.step.dependOn(&options.iam_step.step);
self.step.dependOn(&options.package_step.step);
return self;
}
/// gets the last time we deployed this function from the name in cache.
/// If not in cache, null is returned. Note that cache is not account specific,
/// so if you're banging around multiple accounts, you'll want to use different
/// local zig caches for each
fn getlastDeployedTime(step: *std.Build.Step, name: []const u8) !?[]const u8 {
try step.owner.cache_root.handle.makePath("deploy");
// we should be able to use the role name, as only the following characters
// are allowed: _+=,.@-.
const cache_file = try std.fmt.allocPrint(
step.owner.allocator,
"deploy{s}{s}",
.{ std.fs.path.sep_str, name },
);
const buff = try step.owner.allocator.alloc(u8, 64);
const time = step.owner.cache_root.handle.readFile(cache_file, buff) catch return null;
return time;
}
fn make(step: *std.Build.Step, node: std.Progress.Node) anyerror!void {
_ = node;
const self: *Deploy = @fieldParentPtr("step", step);
if (self.options.arch != .aarch64 and self.options.arch != .x86_64)
return step.fail("AWS Lambda can only deploy aarch64 and x86_64 functions ({} not allowed)", .{self.options.arch});
const last_packaged_sha256 = blk: {
// file should always be there, but we shouldn't break if the cache doesn't exist
const last_deployed_id_file = std.fs.openFileAbsolute(try self.options.package_step.shasumFilePath(), .{}) catch break :blk null;
defer last_deployed_id_file.close();
break :blk try last_deployed_id_file.readToEndAlloc(step.owner.allocator, 2048);
};
var client = aws.Client.init(self.step.owner.allocator, .{});
defer client.deinit();
const services = aws.Services(.{.lambda}){};
const function = blk: {
var diagnostics = aws.Diagnostics{
.http_code = undefined,
.response_body = undefined,
.allocator = self.step.owner.allocator,
};
const options = aws.Options{
.client = client,
.diagnostics = &diagnostics,
.region = try self.options.region.region(),
};
aws.globalLogControl(.info, .warn, .info, true);
defer aws.globalLogControl(.info, .warn, .info, false);
const call = aws.Request(services.lambda.get_function).call(.{
.function_name = self.options.name,
}, options) catch |e| {
// There seems to be an issue here, but realistically, we have an arena
// so there's no leak leaving this out
defer diagnostics.deinit();
if (diagnostics.http_code == 404) break :blk null;
return step.fail(
"Unknown error {} from Lambda GetFunction. HTTP code {}, message: {s}",
.{ e, diagnostics.http_code, diagnostics.response_body },
);
};
defer call.deinit();
break :blk .{
.last_modified = try step.owner.allocator.dupe(u8, call.response.configuration.?.last_modified.?),
.revision_id = try step.owner.allocator.dupe(u8, call.response.configuration.?.revision_id.?),
.sha256 = try step.owner.allocator.dupe(u8, call.response.configuration.?.code_sha256.?),
};
};
if (last_packaged_sha256) |s|
if (function) |f|
if (std.mem.eql(u8, s, f.sha256)) {
step.result_cached = true;
return;
};
const encoder = std.base64.standard.Encoder;
const file = try std.fs.openFileAbsolute(self.options.package_step.packagedFileLazyPath().getPath2(step.owner, step), .{});
defer file.close();
const bytes = try file.readToEndAlloc(step.owner.allocator, 100 * 1024 * 1024);
const base64_buf = try step.owner.allocator.alloc(u8, encoder.calcSize(bytes.len));
const base64_bytes = encoder.encode(base64_buf, bytes);
const options = aws.Options{
.client = client,
.region = try self.options.region.region(),
};
const arm64_arch = [_][]const u8{"arm64"};
const x86_64_arch = [_][]const u8{"x86_64"};
const architectures = (if (self.options.arch == .aarch64) arm64_arch else x86_64_arch);
const arches: [][]const u8 = @constCast(architectures[0..]);
if (function) |f| {
// TODO: make sure our zipfile newer than the lambda function
const update_call = try aws.Request(services.lambda.update_function_code).call(.{
.function_name = self.options.name,
.architectures = arches,
.revision_id = f.revision_id,
.zip_file = base64_bytes,
}, options);
defer update_call.deinit();
} else {
// New function - we need to create from scratch
const create_call = try aws.Request(services.lambda.create_function).call(.{
.function_name = self.options.name,
.architectures = arches,
.code = .{ .zip_file = base64_bytes },
.handler = "not_applicable",
.package_type = "Zip",
.runtime = "provided.al2",
.role = self.options.iam_step.resolved_arn,
}, options);
defer create_call.deinit();
}
}

lambdabuild/Iam.zig

@@ -1,146 +0,0 @@
const std = @import("std");
const aws = @import("aws").aws;
const Iam = @This();
step: std.Build.Step,
options: Options,
/// resolved_arn will be set only after make is run
resolved_arn: []const u8 = undefined,
arn_buf: [2048]u8 = undefined, // https://docs.aws.amazon.com/IAM/latest/APIReference/API_Role.html has 2k limit
const base_id: std.Build.Step.Id = .custom;
pub const Options = struct {
name: []const u8 = "",
role_name: []const u8,
role_arn: ?[]const u8,
};
pub fn create(owner: *std.Build, options: Options) *Iam {
const name = owner.dupe(options.name);
const step_name = owner.fmt("{s} {s}{s}", .{
"aws lambda",
"iam",
name,
});
const self = owner.allocator.create(Iam) catch @panic("OOM");
self.* = .{
.step = std.Build.Step.init(.{
.id = base_id,
.name = step_name,
.owner = owner,
.makeFn = make,
}),
.options = options,
};
return self;
}
/// gets an IamArn from the name in cache. If not in cache, null is returned
/// Note that cache is not account specific, so if you're banging around multiple
/// accounts, you'll want to use different local zig caches for each
pub fn getIamArnFromName(step: *std.Build.Step, name: []const u8) !?[]const u8 {
try step.owner.cache_root.handle.makePath("iam");
// we should be able to use the role name, as only the following characters
// are allowed: _+=,.@-.
const iam_file = try std.fmt.allocPrint(
step.owner.allocator,
"iam{s}{s}",
.{ std.fs.path.sep_str, name },
);
const buff = try step.owner.allocator.alloc(u8, 64);
const arn = step.owner.cache_root.handle.readFile(iam_file, buff) catch return null;
return arn;
}
fn make(step: *std.Build.Step, node: std.Progress.Node) anyerror!void {
_ = node;
const self: *Iam = @fieldParentPtr("step", step);
if (try getIamArnFromName(step, self.options.role_name)) |a| {
step.result_cached = true;
@memcpy(self.arn_buf[0..a.len], a);
self.resolved_arn = self.arn_buf[0..a.len];
return; // exists in cache - nothing to do
}
var client = aws.Client.init(self.step.owner.allocator, .{});
defer client.deinit();
const services = aws.Services(.{.iam}){};
var arn = blk: {
var diagnostics = aws.Diagnostics{
.http_code = undefined,
.response_body = undefined,
.allocator = self.step.owner.allocator,
};
const options = aws.Options{
.client = client,
.diagnostics = &diagnostics,
};
const call = aws.Request(services.iam.get_role).call(.{
.role_name = self.options.role_name, // TODO: if we have a role_arn, we should use it and skip
}, options) catch |e| {
defer diagnostics.deinit();
if (diagnostics.http_code == 404) break :blk null;
return step.fail(
"Unknown error {} from IAM GetRole. HTTP code {}, message: {s}",
.{ e, diagnostics.http_code, diagnostics.response_body },
);
};
defer call.deinit();
break :blk try step.owner.allocator.dupe(u8, call.response.role.arn);
};
// Now ARN will either be null (does not exist), or a value
if (arn == null) {
// we need to create the role before proceeding
const options = aws.Options{
.client = client,
};
const create_call = try aws.Request(services.iam.create_role).call(.{
.role_name = self.options.role_name,
.assume_role_policy_document =
\\{
\\ "Version": "2012-10-17",
\\ "Statement": [
\\ {
\\ "Sid": "",
\\ "Effect": "Allow",
\\ "Principal": {
\\ "Service": "lambda.amazonaws.com"
\\ },
\\ "Action": "sts:AssumeRole"
\\ }
\\ ]
\\}
,
}, options);
defer create_call.deinit();
arn = try step.owner.allocator.dupe(u8, create_call.response.role.arn);
const attach_call = try aws.Request(services.iam.attach_role_policy).call(.{
.policy_arn = "arn:aws:iam::aws:policy/AWSLambdaExecute",
.role_name = self.options.role_name,
}, options);
defer attach_call.deinit();
}
@memcpy(self.arn_buf[0..arn.?.len], arn.?);
self.resolved_arn = self.arn_buf[0..arn.?.len];
// NOTE: This must match getIamArnFromName
const iam_file = try std.fmt.allocPrint(
step.owner.allocator,
"iam{s}{s}",
.{ std.fs.path.sep_str, self.options.role_name },
);
try step.owner.cache_root.handle.writeFile(.{
.sub_path = iam_file,
.data = arn.?,
});
}

lambdabuild/Invoke.zig

@@ -1,90 +0,0 @@
const std = @import("std");
const aws = @import("aws").aws;
const Region = @import("Region.zig");
const Invoke = @This();
step: std.Build.Step,
options: Options,
const base_id: std.Build.Step.Id = .custom;
pub const Options = struct {
/// Function name to invoke
name: []const u8,
/// Payload to send to the function
payload: []const u8,
/// Region for deployment
region: *Region,
};
pub fn create(owner: *std.Build, options: Options) *Invoke {
const name = owner.dupe(options.name);
const step_name = owner.fmt("{s} {s}{s}", .{
"aws lambda",
"invoke",
name,
});
const self = owner.allocator.create(Invoke) catch @panic("OOM");
self.* = .{
.step = std.Build.Step.init(.{
.id = base_id,
.name = step_name,
.owner = owner,
.makeFn = make,
}),
.options = options,
};
return self;
}
fn make(step: *std.Build.Step, node: std.Progress.Node) anyerror!void {
_ = node;
const self: *Invoke = @fieldParentPtr("step", step);
var client = aws.Client.init(self.step.owner.allocator, .{});
defer client.deinit();
const services = aws.Services(.{.lambda}){};
const options = aws.Options{
.client = client,
.region = try self.options.region.region(),
};
var inx: usize = 10; // up to 10 polling attempts before giving up
while (inx > 0) : (inx -= 1) {
var diagnostics = aws.Diagnostics{
.http_code = undefined,
.response_body = undefined,
.allocator = self.step.owner.allocator,
};
const call = aws.Request(services.lambda.get_function).call(.{
.function_name = self.options.name,
}, options) catch |e| {
// There seems to be an issue here, but realistically, we have an arena
// so there's no leak leaving this out
defer diagnostics.deinit();
if (diagnostics.http_code == 404) continue; // function was just created...it's ok
return step.fail(
"Unknown error {} from Lambda GetFunction. HTTP code {}, message: {s}",
.{ e, diagnostics.http_code, diagnostics.response_body },
);
};
defer call.deinit();
if (!std.mem.eql(u8, "InProgress", call.response.configuration.?.last_update_status.?))
break; // We're ready to invoke!
const ms: usize = if (inx == 5) 500 else 50;
std.time.sleep(ms * std.time.ns_per_ms);
}
if (inx == 0)
return step.fail("Timed out waiting for lambda to update function", .{});
const call = try aws.Request(services.lambda.invoke).call(.{
.function_name = self.options.name,
.payload = self.options.payload,
.log_type = "Tail",
.invocation_type = "RequestResponse",
}, options);
defer call.deinit();
std.debug.print("{?s}\n", .{call.response.payload});
}

lambdabuild/Package.zig

@@ -1,158 +0,0 @@
const std = @import("std");
const Package = @This();
step: std.Build.Step,
options: Options,
/// This is set as part of the make phase, and is the location in the cache
/// for the lambda package. The package will also be copied to the output
/// directory, but this location makes for a good cache key for deployments
zipfile_cache_dest: ?[]const u8 = null,
zipfile_dest: ?[]const u8 = null,
const base_id: std.Build.Step.Id = .install_file;
pub const Options = struct {
name: []const u8 = "",
exe: *std.Build.Step.Compile,
zipfile_name: []const u8 = "function.zip",
};
pub fn create(owner: *std.Build, options: Options) *Package {
const name = owner.dupe(options.name);
const step_name = owner.fmt("{s} {s}{s}", .{
"aws lambda",
"package",
name,
});
const package = owner.allocator.create(Package) catch @panic("OOM");
package.* = .{
.step = std.Build.Step.init(.{
.id = base_id,
.name = step_name,
.owner = owner,
.makeFn = make,
}),
.options = options,
};
return package;
}
pub fn shasumFilePath(self: Package) ![]const u8 {
return try std.fmt.allocPrint(
self.step.owner.allocator,
"{s}{s}{s}",
.{ std.fs.path.dirname(self.zipfile_cache_dest.?).?, std.fs.path.sep_str, "sha256sum.txt" },
);
}
pub fn packagedFilePath(self: Package) []const u8 {
return self.step.owner.getInstallPath(.prefix, self.options.zipfile_name);
}
pub fn packagedFileLazyPath(self: Package) std.Build.LazyPath {
return .{ .src_path = .{
.owner = self.step.owner,
.sub_path = self.step.owner.getInstallPath(.prefix, self.options.zipfile_name),
} };
}
fn make(step: *std.Build.Step, node: std.Progress.Node) anyerror!void {
_ = node;
const self: *Package = @fieldParentPtr("step", step);
// get a hash of the bootstrap and whatever other files we put into the zip
// file (because a zip is not really reproducible). That hash becomes the
// cache directory, similar to the way rest of zig works
//
// Otherwise, create the package in our cache indexed by hash, and copy
// our bootstrap, zip things up and install the file into zig-out
const bootstrap = bootstrapLocation(self.*) catch |e| {
if (@errorReturnTrace()) |trace| {
std.debug.dumpStackTrace(trace.*);
}
return step.fail("Could not copy output to bootstrap: {}", .{e});
};
const bootstrap_dirname = std.fs.path.dirname(bootstrap).?;
const zipfile_src = try std.fs.path.join(step.owner.allocator, &[_][]const u8{ bootstrap_dirname, self.options.zipfile_name });
self.zipfile_cache_dest = zipfile_src;
self.zipfile_dest = self.step.owner.getInstallPath(.prefix, self.options.zipfile_name);
if (std.fs.copyFileAbsolute(zipfile_src, self.zipfile_dest.?, .{})) |_| {
// we're good here. The zip file exists in cache and has been copied
step.result_cached = true;
} else |_| {
// error, but this is actually the normal case. We will zip the file
// using system zip and store that in cache with the output file for later
// use
// TODO: For Windows, tar.exe can actually do zip files.
// tar -a -cf function.zip file1 [file2...]
//
// See: https://superuser.com/questions/201371/create-zip-folder-from-the-command-line-windows#comment2725283_898508
var child = std.process.Child.init(&[_][]const u8{
"zip",
"-qj9X",
zipfile_src,
bootstrap,
}, self.step.owner.allocator);
child.stdout_behavior = .Ignore;
child.stdin_behavior = .Ignore; // we'll allow stderr through
switch (try child.spawnAndWait()) {
.Exited => |rc| if (rc != 0) return step.fail("Non-zero exit code {} from zip", .{rc}),
.Signal, .Stopped, .Unknown => return step.fail("Abnormal termination from zip step", .{}),
}
try std.fs.copyFileAbsolute(zipfile_src, self.zipfile_dest.?, .{}); // It better be there now
// One last thing. We want to get a Sha256 sum of the zip file, and
// store it in cache. This will help the deployment process compare
// to what's out in AWS, since revision id is apparently trash for these
// purposes
const zipfile = try std.fs.openFileAbsolute(zipfile_src, .{});
defer zipfile.close();
const zip_bytes = try zipfile.readToEndAlloc(step.owner.allocator, 100 * 1024 * 1024);
var hash: [std.crypto.hash.sha2.Sha256.digest_length]u8 = undefined;
std.crypto.hash.sha2.Sha256.hash(zip_bytes, &hash, .{});
const base64 = std.base64.standard.Encoder;
var encoded: [base64.calcSize(std.crypto.hash.sha2.Sha256.digest_length)]u8 = undefined;
const shaoutput = try std.fs.createFileAbsolute(try self.shasumFilePath(), .{});
defer shaoutput.close();
try shaoutput.writeAll(base64.encode(encoded[0..], hash[0..]));
}
}
fn bootstrapLocation(package: Package) ![]const u8 {
const output = package.step.owner.getInstallPath(.bin, package.options.exe.out_filename);
// We will always copy the output file, mainly because we also need the hash...
// if (std.mem.eql(u8, "bootstrap", package.options.exe.out_filename))
// return output; // easy path
// Not so easy...read the file, get a hash of contents, see if it's in cache
const output_file = try std.fs.openFileAbsolute(output, .{});
defer output_file.close();
const output_bytes = try output_file.readToEndAlloc(package.step.owner.allocator, 100 * 1024 * 1024); // 100MB file
// std.Build.Cache.Hasher
// std.Build.Cache.hasher_init
var hasher = std.Build.Cache.HashHelper{}; // We'll reuse the same file hasher from cache
hasher.addBytes(output_bytes);
const hash = std.fmt.bytesToHex(hasher.hasher.finalResult(), .lower);
const dest_path = try package.step.owner.cache_root.join(
package.step.owner.allocator,
&[_][]const u8{ "p", hash[0..], "bootstrap" },
);
const dest_file = std.fs.openFileAbsolute(dest_path, .{}) catch null;
if (dest_file) |d| {
d.close();
return dest_path;
}
const pkg_path = try package.step.owner.cache_root.join(
package.step.owner.allocator,
&[_][]const u8{"p"},
);
// Destination file does not exist. Write the bootstrap (after creating the directory)
std.fs.makeDirAbsolute(pkg_path) catch {};
std.fs.makeDirAbsolute(std.fs.path.dirname(dest_path).?) catch {};
const write_file = try std.fs.createFileAbsolute(dest_path, .{});
defer write_file.close();
try write_file.writeAll(output_bytes);
return dest_path;
}

lambdabuild/Region.zig

@@ -1,55 +0,0 @@
const std = @import("std");
specified_region: ?[]const u8,
allocator: std.mem.Allocator,
/// internal state, please do not use
_calculated_region: ?[]const u8 = null,
const Region = @This();
pub fn region(self: *Region) ![]const u8 {
if (self.specified_region) |r| return r; // user specified
if (self._calculated_region) |r| return r; // cached
self._calculated_region = try findRegionFromSystem(self.allocator);
return self._calculated_region.?;
}
// AWS_CONFIG_FILE (default is ~/.aws/config)
// AWS_DEFAULT_REGION
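// For illustration, a config file this function can parse looks like
// (hypothetical example, not part of this repository):
//
//   [default]
//   region = us-east-1
//
// The bracketed profile header is matched verbatim against AWS_PROFILE
// ("default" when unset), and the first "region = ..." line inside that
// profile is returned.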
fn findRegionFromSystem(allocator: std.mem.Allocator) ![]const u8 {
const env_map = try std.process.getEnvMap(allocator);
if (env_map.get("AWS_DEFAULT_REGION")) |r| return r;
const config_file_path = env_map.get("AWS_CONFIG_FILE") orelse
try std.fs.path.join(allocator, &[_][]const u8{
env_map.get("HOME") orelse env_map.get("USERPROFILE").?,
".aws",
"config",
});
const config_file = try std.fs.openFileAbsolute(config_file_path, .{});
defer config_file.close();
const config_bytes = try config_file.readToEndAlloc(allocator, 1024 * 1024);
const profile = env_map.get("AWS_PROFILE") orelse "default";
var line_iterator = std.mem.split(u8, config_bytes, "\n");
var in_profile = false;
while (line_iterator.next()) |line| {
const trimmed = std.mem.trim(u8, line, " \t\r");
if (trimmed.len == 0 or trimmed[0] == '#') continue;
if (!in_profile) {
if (trimmed[0] == '[' and trimmed[trimmed.len - 1] == ']') {
// this is a profile directive!
// std.debug.print("profile: {s}, in file: {s}\n", .{ profile, trimmed[1 .. trimmed.len - 1] });
if (std.mem.eql(u8, profile, trimmed[1 .. trimmed.len - 1])) {
in_profile = true;
}
}
continue; // we're only looking for a profile at this point
}
// look for our region directive
if (trimmed[0] == '[' and trimmed[trimmed.len - 1] == ']')
return error.RegionNotFound; // we've hit another profile without getting our region
if (!std.mem.startsWith(u8, trimmed, "region")) continue;
var equalityiterator = std.mem.split(u8, trimmed, "=");
_ = equalityiterator.next() orelse return error.RegionNotFound;
const raw_val = equalityiterator.next() orelse return error.RegionNotFound;
return try allocator.dupe(u8, std.mem.trimLeft(u8, raw_val, " \t"));
}
return error.RegionNotFound;
}

Binary file not shown.

src/lambda.zig

@@ -1,438 +1,96 @@
const std = @import("std");
const builtin = @import("builtin");
const requestz = @import("requestz");
pub const HandlerFn = *const fn (std.mem.Allocator, []const u8) anyerror![]const u8;
const log = std.log.scoped(.lambda);
var client: ?std.http.Client = null;
const prefix = "http://";
const postfix = "/2018-06-01/runtime/invocation";
pub fn deinit() void {
if (client) |*c| c.deinit();
client = null;
}
/// Starts the lambda framework. Handler will be called when an event is processing
/// If an allocator is not provided, an appropriate allocator will be selected and used
/// This function is intended to loop infinitely. If not used in this manner,
/// make sure to call the deinit() function
pub fn run(allocator: ?std.mem.Allocator, event_handler: HandlerFn) !void { // TODO: remove inferred error set?
const lambda_runtime_uri = std.posix.getenv("AWS_LAMBDA_RUNTIME_API") orelse test_lambda_runtime_uri.?;
// TODO: If this is null, go into single use command line mode
pub fn run(event_handler: fn (*std.mem.Allocator, []const u8) anyerror![]const u8) !void { // TODO: remove inferred error set?
const prefix = "http://";
const postfix = "/2018-06-01/runtime/invocation";
const lambda_runtime_uri = std.os.getenv("AWS_LAMBDA_RUNTIME_API");
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
defer _ = gpa.deinit();
const alloc = allocator orelse gpa.allocator();
const allocator = &gpa.allocator;
const url = try std.fmt.allocPrint(alloc, "{s}{s}{s}/next", .{ prefix, lambda_runtime_uri, postfix });
defer alloc.free(url);
const uri = try std.Uri.parse(url);
const url = try std.fmt.allocPrint(allocator, "{s}{s}{s}/next", .{ prefix, lambda_runtime_uri, postfix });
defer allocator.free(url);
// TODO: Simply adding this line without even using the client is enough
// to cause seg faults!?
// client = client orelse .{ .allocator = alloc };
// so we'll do this instead
if (client != null) return error.MustDeInitBeforeCallingRunAgain;
client = .{ .allocator = alloc };
log.info("tid {d} (lambda): Bootstrap initializing with event url: {s}", .{ std.Thread.getCurrentId(), url });
std.log.notice("Bootstrap initializing with event url: {s}", .{url});
while (lambda_remaining_requests == null or lambda_remaining_requests.? > 0) {
if (lambda_remaining_requests) |*r| {
// we're under test
log.debug("lambda remaining requests: {d}", .{r.*});
r.* -= 1;
}
var req_alloc = std.heap.ArenaAllocator.init(alloc);
while (true) {
var req_alloc = std.heap.ArenaAllocator.init(allocator);
defer req_alloc.deinit();
const req_allocator = req_alloc.allocator();
// Fundamentally we're doing 3 things:
// 1. Get the next event from Lambda (event data and request id)
// 2. Call our handler to get the response
// 3. Post the response back to Lambda
var ev = getEvent(req_allocator, uri) catch |err| {
// Well, at this point all we can do is shout at the void
log.err("Error fetching event details: {}", .{err});
std.posix.exit(1);
// continue;
};
if (ev == null) continue; // this gets logged in getEvent, and without
// a request id, we still can't do anything
// reasonable to report back
const event = ev.?;
defer ev.?.deinit();
const event_response = event_handler(req_allocator, event.event_data) catch |err| {
event.reportError(@errorReturnTrace(), err, lambda_runtime_uri) catch unreachable;
continue;
};
event.postResponse(lambda_runtime_uri, event_response) catch |err| {
event.reportError(@errorReturnTrace(), err, lambda_runtime_uri) catch unreachable;
continue;
};
}
}
const Event = struct {
allocator: std.mem.Allocator,
event_data: []const u8,
request_id: []const u8,
const Self = @This();
pub fn init(allocator: std.mem.Allocator, event_data: []const u8, request_id: []const u8) Self {
return .{
.allocator = allocator,
.event_data = event_data,
.request_id = request_id,
};
}
pub fn deinit(self: *Self) void {
self.allocator.free(self.event_data);
self.allocator.free(self.request_id);
}
fn reportError(
self: Self,
return_trace: ?*std.builtin.StackTrace,
err: anytype,
lambda_runtime_uri: []const u8,
) !void {
// If we fail in this function, we're pretty hosed up
if (return_trace) |rt|
log.err("Caught error: {}. Return Trace: {any}", .{ err, rt })
else
log.err("Caught error: {}. No return trace available", .{err});
const err_url = try std.fmt.allocPrint(
self.allocator,
"{s}{s}{s}/{s}/error",
.{ prefix, lambda_runtime_uri, postfix, self.request_id },
);
defer self.allocator.free(err_url);
const err_uri = try std.Uri.parse(err_url);
const content =
\\{{
\\ "errorMessage": "{s}",
\\ "errorType": "HandlerReturnedError",
\\ "stackTrace": [ "{any}" ]
\\}}
;
const content_fmt = if (return_trace) |rt|
try std.fmt.allocPrint(self.allocator, content, .{ @errorName(err), rt })
else
try std.fmt.allocPrint(self.allocator, content, .{ @errorName(err), "no return trace available" });
defer self.allocator.free(content_fmt);
log.err("Posting to {s}: Data {s}", .{ err_url, content_fmt });
// TODO: There is something up with using a shared client in this way
// so we're taking a perf hit in favor of stability. In a practical
// sense, without making HTTPS connections (lambda environment is
// non-ssl), this shouldn't be a big issue
var cl = std.http.Client{ .allocator = self.allocator };
defer cl.deinit();
const res = cl.fetch(.{
.method = .POST,
.payload = content_fmt,
.location = .{ .uri = err_uri },
.extra_headers = &.{
.{
.name = "Lambda-Runtime-Function-Error-Type",
.value = "HandlerReturned",
},
},
}) catch |post_err| { // Well, at this point all we can do is shout at the void
log.err("Error posting response (start) for request id {s}: {}", .{ self.request_id, post_err });
std.posix.exit(1);
};
// TODO: Determine why this post is not returning
if (res.status != .ok) {
const req_allocator = &req_alloc.allocator;
var client = try requestz.Client.init(req_allocator);
// defer client.deinit();
// Lambda freezes the process at this line of code. During warm start,
// the process will unfreeze and data will be sent in response to client.get
var response = client.get(url, .{}) catch |err| {
std.log.err("Get fail: {}", .{err});
// Documentation says something about "exit immediately". The
// Lambda infrastructure restarts, so it's unclear if that's necessary.
// It seems as though a continue should be fine, and slightly faster
log.err("Post fail: {} {s}", .{
@intFromEnum(res.status),
res.status.phrase() orelse "",
});
std.posix.exit(1);
// std.os.exit(1);
continue;
};
defer response.deinit();
var request_id: ?[]const u8 = null;
for (response.headers.items()) |h| {
if (std.mem.indexOf(u8, h.name.value, "Lambda-Runtime-Aws-Request-Id")) |_|
request_id = h.value;
// TODO: XRay uses an environment variable to do its magic. It's our
// responsibility to set this, but no zig-native setenv(3)/putenv(3)
// exists. I would kind of rather not link in libc for this,
// so we'll hold for now and think on this
// if (std.mem.indexOf(u8, h.name.value, "Lambda-Runtime-Trace-Id")) |_|
// std.process.
// std.os.setenv("AWS_LAMBDA_RUNTIME_API");
}
log.err("Error reporting post complete", .{});
}
if (request_id == null) {
// We can't report back an issue because the runtime error reporting endpoint
// uses request id in its path. So the best we can do is log the error and move
// on here.
std.log.err("Could not find request id: skipping request", .{});
continue;
}
const req_id = request_id.?;
fn postResponse(self: Self, lambda_runtime_uri: []const u8, event_response: []const u8) !void {
const response_url = try std.fmt.allocPrint(
self.allocator,
"{s}{s}{s}/{s}/response",
.{ prefix, lambda_runtime_uri, postfix, self.request_id },
);
defer self.allocator.free(response_url);
var cl = std.http.Client{ .allocator = self.allocator };
defer cl.deinit();
// Lambda does different things, depending on the runtime. Go 1.x takes
// any return value but escapes double quotes. Custom runtimes can
// do whatever they want. node I believe wraps as a json object. We're
// going to leave the return value up to the handler, and they can
// use a separate API for normalization so we're explicit. As a result,
// we can just post event_response completely raw here
const res = try cl.fetch(.{
.method = .POST,
.payload = event_response,
.location = .{ .url = response_url },
});
if (res.status != .ok) return error.UnexpectedStatusFromPostResponse;
}
};
fn getEvent(allocator: std.mem.Allocator, event_data_uri: std.Uri) !?Event {
// TODO: There is something up with using a shared client in this way
// so we're taking a perf hit in favor of stability. In a practical
// sense, without making HTTPS connections (lambda environment is
// non-ssl), this shouldn't be a big issue
var cl = std.http.Client{ .allocator = allocator };
defer cl.deinit();
var response_bytes = std.ArrayList(u8).init(allocator);
defer response_bytes.deinit();
var server_header_buffer: [16 * 1024]u8 = undefined;
// Lambda freezes the process at this line of code. During warm start,
// the process will unfreeze and data will be sent in response to client.get
var res = try cl.fetch(.{
.server_header_buffer = &server_header_buffer,
.location = .{ .uri = event_data_uri },
.response_storage = .{ .dynamic = &response_bytes },
});
if (res.status != .ok) {
// Documentation says something about "exit immediately". The
// Lambda infrastructure restarts, so it's unclear if that's necessary.
// It seems as though a continue should be fine, and slightly faster
// std.os.exit(1);
log.err("Lambda server event response returned bad error code: {} {s}", .{
@intFromEnum(res.status),
res.status.phrase() orelse "",
});
return error.EventResponseNotOkResponse;
}
var request_id: ?[]const u8 = null;
var header_it = std.http.HeaderIterator.init(server_header_buffer[0..]);
while (header_it.next()) |h| {
if (std.ascii.eqlIgnoreCase(h.name, "Lambda-Runtime-Aws-Request-Id"))
request_id = h.value;
// TODO: XRay uses an environment variable to do its magic. It's our
// responsibility to set this, but no zig-native setenv(3)/putenv(3)
// exists. I would kind of rather not link in libc for this,
// so we'll hold for now and think on this
// if (std.mem.indexOf(u8, h.name.value, "Lambda-Runtime-Trace-Id")) |_|
// std.process.
// std.os.setenv("AWS_LAMBDA_RUNTIME_API");
}
if (request_id == null) {
// We can't report back an issue because the runtime error reporting endpoint
// uses request id in its path. So the best we can do is log the error and move
// on here.
log.err("Could not find request id: skipping request", .{});
return null;
}
const req_id = request_id.?;
log.debug("got lambda request with id {s}", .{req_id});
return Event.init(
allocator,
try response_bytes.toOwnedSlice(),
try allocator.dupe(u8, req_id),
);
}
////////////////////////////////////////////////////////////////////////
// All code below this line is for testing
////////////////////////////////////////////////////////////////////////
var server_port: ?u16 = null;
var server_remaining_requests: usize = 0;
var lambda_remaining_requests: ?usize = null;
var server_response: []const u8 = "unset";
var server_request_aka_lambda_response: []u8 = "";
var test_lambda_runtime_uri: ?[]u8 = null;
var server_ready = false;
/// This starts a test server. We're not testing the server itself,
/// so the main tests will start this thing up and create an arena around the
/// whole thing so we can just deallocate everything at once at the end,
/// leaks be damned
fn startServer(allocator: std.mem.Allocator) !std.Thread {
return try std.Thread.spawn(
.{},
threadMain,
.{allocator},
);
}
fn threadMain(allocator: std.mem.Allocator) !void {
const address = try std.net.Address.parseIp("127.0.0.1", 0);
var http_server = try address.listen(.{ .reuse_address = true });
server_port = http_server.listen_address.in.getPort();
test_lambda_runtime_uri = try std.fmt.allocPrint(allocator, "127.0.0.1:{d}", .{server_port.?});
log.debug("server listening at {s}", .{test_lambda_runtime_uri.?});
defer test_lambda_runtime_uri = null;
defer server_port = null;
log.info("starting server thread, tid {d}", .{std.Thread.getCurrentId()});
var arena = std.heap.ArenaAllocator.init(allocator);
defer arena.deinit();
const aa = arena.allocator();
// We're in control of all requests/responses, so this flag will tell us
// when it's time to shut down
while (server_remaining_requests > 0) {
server_remaining_requests -= 1;
// defer {
// if (!arena.reset(.{ .retain_capacity = {} })) {
// // reallocation failed, arena is degraded
// log.warn("Arena reset failed and is degraded. Resetting arena", .{});
// arena.deinit();
// arena = std.heap.ArenaAllocator.init(allocator);
// aa = arena.allocator();
// }
// }
processRequest(aa, &http_server) catch |e| {
log.err("Unexpected error processing request: {any}", .{e});
if (@errorReturnTrace()) |trace| {
std.debug.dumpStackTrace(trace.*);
}
const event_response = event_handler(req_allocator, response.body) catch |err| {
// Stack trace will return null if stripped
const return_trace = @errorReturnTrace();
std.log.err("Caught error: {}. Return Trace: {}", .{ err, return_trace });
const err_url = try std.fmt.allocPrint(req_allocator, "{s}{s}/runtime/invocation/{s}/error", .{ prefix, lambda_runtime_uri, req_id });
defer req_allocator.free(err_url);
const content =
\\ {s}
\\ "errorMessage": "{s}",
\\ "errorType": "HandlerReturnedError",
\\ "stackTrace": [ "{}" ]
\\ {s}
;
const content_fmt = try std.fmt.allocPrint(req_allocator, content, .{ "{", @errorName(err), return_trace, "}" });
defer req_allocator.free(content_fmt);
std.log.err("Posting to {s}: Data {s}", .{ err_url, content_fmt });
var headers = .{.{ "Lambda-Runtime-Function-Error-Type", "HandlerReturned" }};
// TODO: Determine why this post is not returning
var err_resp = client.post(err_url, .{
.content = content_fmt,
.headers = headers,
}) catch |post_err| { // Well, at this point all we can do is shout at the void
std.log.err("Error posting response for request id {s}: {}", .{ req_id, post_err });
std.os.exit(0);
continue;
};
std.log.err("Post complete", .{});
defer err_resp.deinit();
continue;
};
const response_url = try std.fmt.allocPrint(req_allocator, "{s}{s}{s}/{s}/response", .{ prefix, lambda_runtime_uri, postfix, req_id });
// defer req_allocator.free(response_url);
var resp_resp = client.post(response_url, .{ .content = event_response }) catch |err| {
// TODO: report error
std.log.err("Error posting response for request id {s}: {}", .{ req_id, err });
continue;
};
defer resp_resp.deinit();
}
}
fn processRequest(allocator: std.mem.Allocator, server: *std.net.Server) !void {
server_ready = true;
errdefer server_ready = false;
log.debug(
"tid {d} (server): server waiting to accept. requests remaining: {d}",
.{ std.Thread.getCurrentId(), server_remaining_requests + 1 },
);
var connection = try server.accept();
defer connection.stream.close();
server_ready = false;
var read_buffer: [1024 * 16]u8 = undefined;
var http_server = std.http.Server.init(connection, &read_buffer);
if (http_server.state == .ready) {
var request = http_server.receiveHead() catch |err| switch (err) {
error.HttpConnectionClosing => return,
else => {
std.log.err("closing http connection: {s}", .{@errorName(err)});
std.log.debug("Error occurred from this request: \n{s}", .{read_buffer[0..http_server.read_buffer_len]});
return;
},
};
server_request_aka_lambda_response = try (try request.reader()).readAllAlloc(allocator, std.math.maxInt(usize));
var respond_options = std.http.Server.Request.RespondOptions{};
const response_bytes = serve(allocator, request, &respond_options) catch |e| brk: {
respond_options.status = .internal_server_error;
// TODO: more about this particular request
log.err("Unexpected error from executor processing request: {any}", .{e});
if (@errorReturnTrace()) |trace| {
std.debug.dumpStackTrace(trace.*);
}
break :brk "Unexpected error generating request to lambda";
};
try request.respond(response_bytes, respond_options);
log.debug(
"tid {d} (server): sent response: {s}",
.{ std.Thread.getCurrentId(), response_bytes },
);
}
}
fn serve(allocator: std.mem.Allocator, request: std.http.Server.Request, respond_options: *std.http.Server.Request.RespondOptions) ![]const u8 {
_ = allocator;
_ = request;
respond_options.extra_headers = &.{
.{ .name = "Lambda-Runtime-Aws-Request-Id", .value = "69" },
};
return server_response;
}
fn handler(allocator: std.mem.Allocator, event_data: []const u8) ![]const u8 {
_ = allocator;
return event_data;
}
pub fn test_lambda_request(allocator: std.mem.Allocator, request: []const u8, request_count: usize, handler_fn: HandlerFn) ![]u8 {
var arena = std.heap.ArenaAllocator.init(allocator);
defer arena.deinit();
const aa = arena.allocator();
// Setup our server to run, and set the response for the server to the
// request. There is a cognitive disconnect here between mental model and
// physical model.
//
// Mental model:
//
// Lambda request -> λ -> Lambda response
//
// Physical Model:
//
// 1. λ requests instructions from server
// 2. server provides "Lambda request"
// 3. λ posts response back to server
//
// So here we are setting up our server, then our lambda request loop,
// but it all needs to be in separate threads so we can control startup
// and shut down. Both server and Lambda are set up to watch global variable
// booleans to know when to shut down. This function is designed for a
// single request/response pair only
lambda_remaining_requests = request_count;
server_remaining_requests = lambda_remaining_requests.? * 2; // Lambda functions
// fetch from the server,
// then post back. Always
// 2, no more, no less
server_response = request; // set our instructions to lambda, which in our
// physical model above, is the server response
defer server_response = "unset"; // set it back so we don't get confused later
// when subsequent tests fail
const server_thread = try startServer(aa); // start the server, get it ready
while (!server_ready)
std.time.sleep(100);
log.debug("tid {d} (main): server reports ready", .{std.Thread.getCurrentId()});
// we aren't testing the server,
// so we'll use the arena allocator
defer server_thread.join(); // we'll be shutting everything down before we exit
// Now we need to start the lambda framework
try run(allocator, handler_fn); // We want our function under test to report leaks
return try allocator.dupe(u8, server_request_aka_lambda_response);
}
test "basic request" {
// std.testing.log_level = .debug;
const allocator = std.testing.allocator;
const request =
\\{"foo": "bar", "baz": "qux"}
;
// This is what's actually coming back. Is this right?
const expected_response =
\\{"foo": "bar", "baz": "qux"}
;
const lambda_response = try test_lambda_request(allocator, request, 1, handler);
defer deinit();
defer allocator.free(lambda_response);
try std.testing.expectEqualStrings(expected_response, lambda_response);
}
test "several requests do not fail" {
// std.testing.log_level = .debug;
const allocator = std.testing.allocator;
const request =
\\{"foo": "bar", "baz": "qux"}
;
// This is what's actually coming back. Is this right?
const expected_response =
\\{"foo": "bar", "baz": "qux"}
;
const lambda_response = try test_lambda_request(allocator, request, 5, handler);
defer deinit();
defer allocator.free(lambda_response);
try std.testing.expectEqualStrings(expected_response, lambda_response);
}

src/sample-main.zig

@@ -2,10 +2,10 @@ const std = @import("std");
const lambda = @import("lambda.zig");
pub fn main() anyerror!void {
try lambda.run(null, handler);
try lambda.run(handler);
}
fn handler(allocator: std.mem.Allocator, event_data: []const u8) ![]const u8 {
fn handler(allocator: *std.mem.Allocator, event_data: []const u8) ![]const u8 {
_ = allocator;
return event_data;
}