Compare commits

...

9 Commits

4 changed files with 458 additions and 207 deletions

build.zig

@ -1,39 +1,59 @@
const builtin = @import("builtin");
const std = @import("std");
pub fn build(b: *std.build.Builder) !void {
// Standard release options allow the person running `zig build` to select
// between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall.
// const mode = b.standardReleaseOptions();
// Although this function looks imperative, note that its job is to
// declaratively construct a build graph that will be executed by an external
// runner.
pub fn build(b: *std.Build) !void {
// Standard target options allow the person running `zig build` to choose
// what target to build for. Here we do not override the defaults, which
// means any target is allowed, and the default is native. Other options
// for restricting supported target set are available.
const target = b.standardTargetOptions(.{});
// Standard optimization options allow the person running `zig build` to select
// between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall. Here we do not
// set a preferred release mode, allowing the user to decide how to optimize.
const optimize = b.standardOptimizeOption(.{});
const lib = b.addStaticLibrary(.{
.name = "lambda-zig",
// In this case the main source file is merely a path, however, in more
// complicated build scripts, this could be a generated file.
.root_source_file = .{ .path = "src/lambda.zig" },
.target = target,
.optimize = optimize,
});
// This declares intent for the library to be installed into the standard
// location when the user invokes the "install" step (the default step when
// running `zig build`).
b.installArtifact(lib);
// Creates a step for unit testing. This only builds the test executable
// but does not run it.
const main_tests = b.addTest(.{
.root_source_file = .{ .path = "src/lambda.zig" },
.target = target,
.optimize = optimize,
});
const run_main_tests = b.addRunArtifact(main_tests);
// This creates a build step. It will be visible in the `zig build --help` menu,
// and can be selected like this: `zig build test`
// This will evaluate the `test` step rather than the default, which is "install".
const test_step = b.step("test", "Run library tests");
test_step.dependOn(&run_main_tests.step);
var exe = b.addExecutable(.{
.name = "custom",
.root_source_file = .{ .path = "src/main.zig" },
.root_source_file = .{ .path = "src/sample-main.zig" },
.target = target,
.optimize = optimize,
});
b.installArtifact(exe);
try lambdaBuildOptions(b, exe);
// TODO: We can cross-compile of course, but stripping and zip commands
// may vary
// TODO: Add test
}
fn fileExists(file_name: []const u8) bool {
const file = std.fs.openFileAbsolute(file_name, .{}) catch return false;
defer file.close();
return true;
}
fn addArgs(allocator: std.mem.Allocator, original: []const u8, args: [][]const u8) ![]const u8 {
var rc = original;
for (args) |arg| {
rc = try std.mem.concat(allocator, u8, &.{ rc, " ", arg });
}
return rc;
}
/// lambdaBuildOptions will add four build steps to the build (if compiling
@ -53,160 +73,5 @@ fn addArgs(allocator: std.mem.Allocator, original: []const u8, args: [][]const u
///
/// iam and package do not have any dependencies
pub fn lambdaBuildOptions(b: *std.build.Builder, exe: *std.Build.Step.Compile) !void {
// The rest of this function currently relies on a Linux host
// being used to build the lambda function
//
// It is likely that much of this will work on other Unix-like OSs, but
// we will work this out later
//
// TODO: support other host OSs
if (builtin.os.tag != .linux) return;
// Package step
const package_step = b.step("package", "Package the function");
const function_zip = b.getInstallPath(.bin, "function.zip");
// TODO: Avoid use of system-installed zip, maybe using something like
// https://github.com/hdorio/hwzip.zig/blob/master/src/hwzip.zig
const zip = if (std.mem.eql(u8, "bootstrap", exe.out_filename))
try std.fmt.allocPrint(b.allocator,
\\zip -qj9 {s} {s}
, .{
function_zip,
b.getInstallPath(.bin, "bootstrap"),
})
else
// We need to copy stuff around
try std.fmt.allocPrint(b.allocator,
\\cp {s} {s} && \
\\zip -qj9 {s} {s} && \
\\rm {s}
, .{
b.getInstallPath(.bin, exe.out_filename),
b.getInstallPath(.bin, "bootstrap"),
function_zip,
b.getInstallPath(.bin, "bootstrap"),
b.getInstallPath(.bin, "bootstrap"),
});
// std.debug.print("\nzip cmdline: {s}", .{zip});
defer b.allocator.free(zip);
var zip_cmd = b.addSystemCommand(&.{ "/bin/sh", "-c", zip });
zip_cmd.step.dependOn(b.getInstallStep());
package_step.dependOn(&zip_cmd.step);
// Deployment
const deploy_step = b.step("deploy", "Deploy the function");
var deal_with_iam = true;
if (b.args) |args| {
for (args) |arg| {
if (std.mem.eql(u8, "--role", arg)) {
deal_with_iam = false;
break;
}
}
}
// TODO: Allow custom lambda role names
var iam_role: []u8 = &.{};
const iam_step = b.step("iam", "Create/Get IAM role for function");
deploy_step.dependOn(iam_step); // iam_step will either be a noop or all the stuff below
if (deal_with_iam) {
// if someone adds '-- --role arn...' to the command line, we don't
// need to do anything with the iam role. Otherwise, we'll create/
// get the IAM role and stick the name in a file in our destination
// directory to be used later
const iam_role_name_file = b.getInstallPath(.bin, "iam_role_name");
iam_role = try std.fmt.allocPrint(b.allocator, "--role $(cat {s})", .{iam_role_name_file});
// defer b.allocator.free(iam_role);
if (!fileExists(iam_role_name_file)) {
// Role get/creation command
const ifstatement_fmt =
\\ if aws iam get-role --role-name lambda_basic_execution 2>&1 |grep -q NoSuchEntity; then aws iam create-role --output text --query Role.Arn --role-name lambda_basic_execution --assume-role-policy-document '{
\\ "Version": "2012-10-17",
\\ "Statement": [
\\ {
\\ "Sid": "",
\\ "Effect": "Allow",
\\ "Principal": {
\\ "Service": "lambda.amazonaws.com"
\\ },
\\ "Action": "sts:AssumeRole"
\\ }
\\ ]}' > /dev/null; fi && \
\\ aws iam attach-role-policy --policy-arn arn:aws:iam::aws:policy/AWSLambdaExecute --role-name lambda_basic_execution && \
\\ aws iam get-role --role-name lambda_basic_execution --query Role.Arn --output text >
;
const ifstatement = try std.mem.concat(b.allocator, u8, &[_][]const u8{ ifstatement_fmt, iam_role_name_file });
defer b.allocator.free(ifstatement);
iam_step.dependOn(&b.addSystemCommand(&.{ "/bin/sh", "-c", ifstatement }).step);
}
}
const function_name = b.option([]const u8, "function-name", "Function name for Lambda [zig-fn]") orelse "zig-fn";
const function_name_file = b.getInstallPath(.bin, function_name);
const ifstatement = "if [ ! -f {s} ] || [ {s} -nt {s} ]; then if aws lambda get-function --function-name {s} 2>&1 |grep -q ResourceNotFoundException; then echo not found > /dev/null; {s}; else echo found > /dev/null; {s}; fi; fi";
// The --architectures option was introduced in AWS CLI 2.2.43, released 2021-10-01
// We want to use arm64 here because it is both faster and cheaper for most workloads
// Amazon Linux 2 (provided.al2) is the only runtime option that supports arm64
const not_found = "aws lambda create-function --architectures arm64 --runtime provided.al2 --function-name {s} --zip-file fileb://{s} --handler not_applicable {s} && touch {s}";
const not_found_fmt = try std.fmt.allocPrint(b.allocator, not_found, .{ function_name, function_zip, iam_role, function_name_file });
defer b.allocator.free(not_found_fmt);
const found = "aws lambda update-function-code --function-name {s} --zip-file fileb://{s} && touch {s}";
const found_fmt = try std.fmt.allocPrint(b.allocator, found, .{ function_name, function_zip, function_name_file });
defer b.allocator.free(found_fmt);
var found_final: []const u8 = undefined;
var not_found_final: []const u8 = undefined;
if (b.args) |args| {
found_final = try addArgs(b.allocator, found_fmt, args);
not_found_final = try addArgs(b.allocator, not_found_fmt, args);
} else {
found_final = found_fmt;
not_found_final = not_found_fmt;
}
const cmd = try std.fmt.allocPrint(b.allocator, ifstatement, .{
function_name_file,
std.fs.path.dirname(exe.root_src.?.path).?,
function_name_file,
function_name,
not_found_final,
found_final,
});
defer b.allocator.free(cmd);
// std.debug.print("{s}\n", .{cmd});
deploy_step.dependOn(package_step);
deploy_step.dependOn(&b.addSystemCommand(&.{ "/bin/sh", "-c", cmd }).step);
// TODO: Looks like iguanaTLS isn't playing nicely with payloads this small
// const payload = b.option([]const u8, "payload", "Lambda payload [{\"foo\":\"bar\"}]") orelse
// \\ {"foo": "bar"}"
// ;
const payload = b.option([]const u8, "payload", "Lambda payload [{\"foo\":\"bar\", \"baz\": \"qux\"}]") orelse
\\ {"foo": "bar", "baz": "qux"}
;
const run_script =
\\ f=$(mktemp) && \
\\ logs=$(aws lambda invoke \
\\ --cli-binary-format raw-in-base64-out \
\\ --invocation-type RequestResponse \
\\ --function-name {s} \
\\ --payload '{s}' \
\\ --log-type Tail \
\\ --query LogResult \
\\ --output text "$f" |base64 -d) && \
\\ cat "$f" && rm "$f" && \
\\ echo && echo && echo "$logs"
;
const run_script_fmt = try std.fmt.allocPrint(b.allocator, run_script, .{ function_name, payload });
defer b.allocator.free(run_script_fmt);
const run_cmd = b.addSystemCommand(&.{ "/bin/sh", "-c", run_script_fmt });
run_cmd.step.dependOn(deploy_step);
if (b.args) |args| {
run_cmd.addArgs(args);
}
const run_step = b.step("remoterun", "Run the app in AWS lambda");
run_step.dependOn(&run_cmd.step);
try @import("lambdabuild.zig").lambdaBuildOptions(b, exe);
}
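
To see how this ties together: the trimmed build.zig above only declares the library, the tests, and a sample executable, then hands all Lambda-specific steps to lambdabuild.zig. A downstream project that vendors lambdabuild.zig next to its own build script could wire it up the same way. The sketch below follows the Zig 0.11-era std.Build API used in this diff; the executable name and source path are illustrative, not part of this change.

const std = @import("std");

pub fn build(b: *std.Build) !void {
    const target = b.standardTargetOptions(.{});
    const optimize = b.standardOptimizeOption(.{});

    const exe = b.addExecutable(.{
        .name = "custom",
        .root_source_file = .{ .path = "src/main.zig" },
        .target = target,
        .optimize = optimize,
    });
    b.installArtifact(exe);

    // Adds the package, iam, deploy, and remoterun steps (Linux build hosts only,
    // per the builtin.os.tag check in lambdaBuildOptions).
    try @import("lambdabuild.zig").lambdaBuildOptions(b, exe);
}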

lambdabuild.zig (new file)

@ -0,0 +1,190 @@
const std = @import("std");
const builtin = @import("builtin");
fn fileExists(file_name: []const u8) bool {
const file = std.fs.openFileAbsolute(file_name, .{}) catch return false;
defer file.close();
return true;
}
fn addArgs(allocator: std.mem.Allocator, original: []const u8, args: [][]const u8) ![]const u8 {
var rc = original;
for (args) |arg| {
rc = try std.mem.concat(allocator, u8, &.{ rc, " ", arg });
}
return rc;
}
/// lambdaBuildOptions will add four build steps to the build (if compiling
/// the code on a Linux host):
///
/// * package: Packages the function for deployment to Lambda
/// (dependencies are the zip executable and a shell)
/// * iam: Gets an IAM role for the Lambda function, and creates it if it does not exist
/// (dependencies are the AWS CLI, grep and a shell)
/// * deploy: Deploys the lambda function to a live AWS environment
/// (dependencies are the AWS CLI, and a shell)
/// * remoterun: Runs the lambda function in a live AWS environment
/// (dependencies are the AWS CLI, and a shell)
///
/// remoterun depends on deploy
/// deploy depends on iam and package
///
/// iam and package do not have any dependencies
pub fn lambdaBuildOptions(b: *std.build.Builder, exe: *std.Build.Step.Compile) !void {
// The rest of this function currently relies on a Linux host
// being used to build the lambda function
//
// It is likely that much of this will work on other Unix-like OSs, but
// we will work this out later
//
// TODO: support other host OSs
if (builtin.os.tag != .linux) return;
// Package step
const package_step = b.step("package", "Package the function");
const function_zip = b.getInstallPath(.bin, "function.zip");
// TODO: Avoid use of system-installed zip, maybe using something like
// https://github.com/hdorio/hwzip.zig/blob/master/src/hwzip.zig
const zip = if (std.mem.eql(u8, "bootstrap", exe.out_filename))
try std.fmt.allocPrint(b.allocator,
\\zip -qj9 {s} {s}
, .{
function_zip,
b.getInstallPath(.bin, "bootstrap"),
})
else
// We need to copy stuff around
try std.fmt.allocPrint(b.allocator,
\\cp {s} {s} && \
\\zip -qj9 {s} {s} && \
\\rm {s}
, .{
b.getInstallPath(.bin, exe.out_filename),
b.getInstallPath(.bin, "bootstrap"),
function_zip,
b.getInstallPath(.bin, "bootstrap"),
b.getInstallPath(.bin, "bootstrap"),
});
// std.debug.print("\nzip cmdline: {s}", .{zip});
defer b.allocator.free(zip);
var zip_cmd = b.addSystemCommand(&.{ "/bin/sh", "-c", zip });
zip_cmd.step.dependOn(b.getInstallStep());
package_step.dependOn(&zip_cmd.step);
// Deployment
const deploy_step = b.step("deploy", "Deploy the function");
var deal_with_iam = true;
if (b.args) |args| {
for (args) |arg| {
if (std.mem.eql(u8, "--role", arg)) {
deal_with_iam = false;
break;
}
}
}
// TODO: Allow custom lambda role names
var iam_role: []u8 = &.{};
const iam_step = b.step("iam", "Create/Get IAM role for function");
deploy_step.dependOn(iam_step); // iam_step will either be a noop or all the stuff below
if (deal_with_iam) {
// if someone adds '-- --role arn...' to the command line, we don't
// need to do anything with the iam role. Otherwise, we'll create/
// get the IAM role and stick the name in a file in our destination
// directory to be used later
const iam_role_name_file = b.getInstallPath(.bin, "iam_role_name");
iam_role = try std.fmt.allocPrint(b.allocator, "--role $(cat {s})", .{iam_role_name_file});
// defer b.allocator.free(iam_role);
if (!fileExists(iam_role_name_file)) {
// Role get/creation command
const ifstatement_fmt =
\\ if aws iam get-role --role-name lambda_basic_execution 2>&1 |grep -q NoSuchEntity; then aws iam create-role --output text --query Role.Arn --role-name lambda_basic_execution --assume-role-policy-document '{
\\ "Version": "2012-10-17",
\\ "Statement": [
\\ {
\\ "Sid": "",
\\ "Effect": "Allow",
\\ "Principal": {
\\ "Service": "lambda.amazonaws.com"
\\ },
\\ "Action": "sts:AssumeRole"
\\ }
\\ ]}' > /dev/null; fi && \
\\ aws iam attach-role-policy --policy-arn arn:aws:iam::aws:policy/AWSLambdaExecute --role-name lambda_basic_execution && \
\\ aws iam get-role --role-name lambda_basic_execution --query Role.Arn --output text >
;
const ifstatement = try std.mem.concat(b.allocator, u8, &[_][]const u8{ ifstatement_fmt, iam_role_name_file });
defer b.allocator.free(ifstatement);
iam_step.dependOn(&b.addSystemCommand(&.{ "/bin/sh", "-c", ifstatement }).step);
}
}
const function_name = b.option([]const u8, "function-name", "Function name for Lambda [zig-fn]") orelse "zig-fn";
const function_name_file = b.getInstallPath(.bin, function_name);
const ifstatement = "if [ ! -f {s} ] || [ {s} -nt {s} ]; then if aws lambda get-function --function-name {s} 2>&1 |grep -q ResourceNotFoundException; then echo not found > /dev/null; {s}; else echo found > /dev/null; {s}; fi; fi";
// The --architectures option was introduced in AWS CLI 2.2.43, released 2021-10-01
// We want to use arm64 here because it is both faster and cheaper for most workloads
// Amazon Linux 2 (provided.al2) is the only runtime option that supports arm64
const not_found = "aws lambda create-function --architectures arm64 --runtime provided.al2 --function-name {s} --zip-file fileb://{s} --handler not_applicable {s} && touch {s}";
const not_found_fmt = try std.fmt.allocPrint(b.allocator, not_found, .{ function_name, function_zip, iam_role, function_name_file });
defer b.allocator.free(not_found_fmt);
const found = "aws lambda update-function-code --function-name {s} --zip-file fileb://{s} && touch {s}";
const found_fmt = try std.fmt.allocPrint(b.allocator, found, .{ function_name, function_zip, function_name_file });
defer b.allocator.free(found_fmt);
var found_final: []const u8 = undefined;
var not_found_final: []const u8 = undefined;
if (b.args) |args| {
found_final = try addArgs(b.allocator, found_fmt, args);
not_found_final = try addArgs(b.allocator, not_found_fmt, args);
} else {
found_final = found_fmt;
not_found_final = not_found_fmt;
}
const cmd = try std.fmt.allocPrint(b.allocator, ifstatement, .{
function_name_file,
std.fs.path.dirname(exe.root_src.?.path).?,
function_name_file,
function_name,
not_found_final,
found_final,
});
defer b.allocator.free(cmd);
// std.debug.print("{s}\n", .{cmd});
deploy_step.dependOn(package_step);
deploy_step.dependOn(&b.addSystemCommand(&.{ "/bin/sh", "-c", cmd }).step);
// TODO: Looks like iguanaTLS isn't playing nicely with payloads this small
// const payload = b.option([]const u8, "payload", "Lambda payload [{\"foo\":\"bar\"}]") orelse
// \\ {"foo": "bar"}"
// ;
const payload = b.option([]const u8, "payload", "Lambda payload [{\"foo\":\"bar\", \"baz\": \"qux\"}]") orelse
\\ {"foo": "bar", "baz": "qux"}
;
const run_script =
\\ f=$(mktemp) && \
\\ logs=$(aws lambda invoke \
\\ --cli-binary-format raw-in-base64-out \
\\ --invocation-type RequestResponse \
\\ --function-name {s} \
\\ --payload '{s}' \
\\ --log-type Tail \
\\ --query LogResult \
\\ --output text "$f" |base64 -d) && \
\\ cat "$f" && rm "$f" && \
\\ echo && echo && echo "$logs"
;
const run_script_fmt = try std.fmt.allocPrint(b.allocator, run_script, .{ function_name, payload });
defer b.allocator.free(run_script_fmt);
const run_cmd = b.addSystemCommand(&.{ "/bin/sh", "-c", run_script_fmt });
run_cmd.step.dependOn(deploy_step);
if (b.args) |args| {
run_cmd.addArgs(args);
}
const run_step = b.step("remoterun", "Run the app in AWS lambda");
run_step.dependOn(&run_cmd.step);
}
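
One packaging detail worth noting: lambdaBuildOptions compares exe.out_filename against "bootstrap", the executable name Lambda's provided.al2 custom runtime expects inside the deployment zip, and only falls back to the cp/zip/rm pipeline when the name differs. A consumer can skip that copy entirely by naming the artifact accordingly; a fragment along these lines would do it, assuming b, target, and optimize are in scope as in build.zig's build function above:

const exe = b.addExecutable(.{
    // "bootstrap" matches the out_filename check in the package step,
    // so the artifact is zipped in place instead of being copied first.
    .name = "bootstrap",
    .root_source_file = .{ .path = "src/main.zig" },
    .target = target,
    .optimize = optimize,
});
b.installArtifact(exe);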

src/lambda.zig

@ -1,36 +1,46 @@
const std = @import("std");
const builtin = @import("builtin");
const HandlerFn = *const fn (std.mem.Allocator, []const u8) anyerror![]const u8;
pub fn run(event_handler: HandlerFn) !void { // TODO: remove inferred error set?
const log = std.log.scoped(.lambda);
/// Starts the lambda framework. The handler will be called each time an event is processed.
/// If an allocator is not provided, an appropriate allocator will be selected and used.
pub fn run(allocator: ?std.mem.Allocator, event_handler: HandlerFn) !void { // TODO: remove inferred error set?
const prefix = "http://";
const postfix = "/2018-06-01/runtime/invocation";
const lambda_runtime_uri = std.os.getenv("AWS_LAMBDA_RUNTIME_API").?;
const lambda_runtime_uri = std.os.getenv("AWS_LAMBDA_RUNTIME_API") orelse test_lambda_runtime_uri.?;
// TODO: If this is null, go into single use command line mode
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
defer _ = gpa.deinit();
const allocator = gpa.allocator();
const alloc = allocator orelse gpa.allocator();
const url = try std.fmt.allocPrint(allocator, "{s}{s}{s}/next", .{ prefix, lambda_runtime_uri, postfix });
defer allocator.free(url);
const url = try std.fmt.allocPrint(alloc, "{s}{s}{s}/next", .{ prefix, lambda_runtime_uri, postfix });
defer alloc.free(url);
const uri = try std.Uri.parse(url);
var client: std.http.Client = .{ .allocator = allocator };
var client: std.http.Client = .{ .allocator = alloc };
defer client.deinit();
var empty_headers = std.http.Headers.init(allocator);
var empty_headers = std.http.Headers.init(alloc);
defer empty_headers.deinit();
std.log.info("Bootstrap initializing with event url: {s}", .{url});
log.info("tid {d} (lambda): Bootstrap initializing with event url: {s}", .{ std.Thread.getCurrentId(), url });
while (true) {
var req_alloc = std.heap.ArenaAllocator.init(allocator);
while (lambda_remaining_requests == null or lambda_remaining_requests.? > 0) {
if (lambda_remaining_requests) |*r| {
// we're under test
log.debug("lambda remaining requests: {d}", .{r.*});
r.* -= 1;
}
var req_alloc = std.heap.ArenaAllocator.init(alloc);
defer req_alloc.deinit();
const req_allocator = req_alloc.allocator();
var req = try client.request(.GET, uri, empty_headers, .{});
defer req.deinit();
req.start() catch |err| { // Well, at this point all we can do is shout at the void
std.log.err("Get fail (start): {}", .{err});
log.err("Get fail (start): {}", .{err});
std.os.exit(0);
continue;
};
@ -38,7 +48,7 @@ pub fn run(event_handler: HandlerFn) !void { // TODO: remove inferred error set?
// Lambda freezes the process at this line of code. During warm start,
// the process will unfreeze and data will be sent in response to client.get
req.wait() catch |err| { // Well, at this point all we can do is shout at the void
std.log.err("Get fail (wait): {}", .{err});
log.err("Get fail (wait): {}", .{err});
std.os.exit(0);
continue;
};
@ -47,7 +57,7 @@ pub fn run(event_handler: HandlerFn) !void { // TODO: remove inferred error set?
// Lambda infrastructure restarts, so it's unclear if that's necessary.
// It seems as though a continue should be fine, and slightly faster
// std.os.exit(1);
std.log.err("Get fail: {} {s}", .{
log.err("Get fail: {} {s}", .{
@intFromEnum(req.response.status),
req.response.status.phrase() orelse "",
});
@ -56,13 +66,13 @@ pub fn run(event_handler: HandlerFn) !void { // TODO: remove inferred error set?
var request_id: ?[]const u8 = null;
var content_length: ?usize = null;
for (req.headers.list.items) |h| {
for (req.response.headers.list.items) |h| {
if (std.ascii.eqlIgnoreCase(h.name, "Lambda-Runtime-Aws-Request-Id"))
request_id = h.value;
if (std.ascii.eqlIgnoreCase(h.name, "Content-Length")) {
content_length = std.fmt.parseUnsigned(usize, h.value, 10) catch null;
if (content_length == null)
std.log.warn("Error parsing content length value: '{s}'", .{h.value});
log.warn("Error parsing content length value: '{s}'", .{h.value});
}
// TODO: XRay uses an environment variable to do its magic. It's our
// responsibility to set this, but no zig-native setenv(3)/putenv(3)
@ -76,10 +86,11 @@ pub fn run(event_handler: HandlerFn) !void { // TODO: remove inferred error set?
// We can't report back an issue because the runtime error reporting endpoint
// uses request id in its path. So the best we can do is log the error and move
// on here.
std.log.err("Could not find request id: skipping request", .{});
log.err("Could not find request id: skipping request", .{});
continue;
}
const req_id = request_id.?;
log.debug("got lambda request with id {s}", .{req_id});
const reader = req.reader();
var buf: [65535]u8 = undefined;
@ -87,7 +98,7 @@ pub fn run(event_handler: HandlerFn) !void { // TODO: remove inferred error set?
var resp_payload = std.ArrayList(u8).init(req_allocator);
if (content_length) |len| {
resp_payload.ensureTotalCapacity(len) catch {
std.log.err("Could not allocate memory for body of request id: {s}", .{request_id.?});
log.err("Could not allocate memory for body of request id: {s}", .{request_id.?});
continue;
};
}
@ -103,9 +114,9 @@ pub fn run(event_handler: HandlerFn) !void { // TODO: remove inferred error set?
// Stack trace will return null if stripped
const return_trace = @errorReturnTrace();
if (return_trace) |rt|
std.log.err("Caught error: {}. Return Trace: {any}", .{ err, rt })
log.err("Caught error: {}. Return Trace: {any}", .{ err, rt })
else
std.log.err("Caught error: {}. No return trace available", .{err});
log.err("Caught error: {}. No return trace available", .{err});
const err_url = try std.fmt.allocPrint(req_allocator, "{s}{s}/runtime/invocation/{s}/error", .{ prefix, lambda_runtime_uri, req_id });
defer req_allocator.free(err_url);
const err_uri = try std.Uri.parse(err_url);
@ -121,7 +132,7 @@ pub fn run(event_handler: HandlerFn) !void { // TODO: remove inferred error set?
else
try std.fmt.allocPrint(req_allocator, content, .{ "{", @errorName(err), "no return trace available", "}" });
defer req_allocator.free(content_fmt);
std.log.err("Posting to {s}: Data {s}", .{ err_url, content_fmt });
log.err("Posting to {s}: Data {s}", .{ err_url, content_fmt });
var err_headers = std.http.Headers.init(req_allocator);
defer err_headers.deinit();
@ -129,20 +140,20 @@ pub fn run(event_handler: HandlerFn) !void { // TODO: remove inferred error set?
"Lambda-Runtime-Function-Error-Type",
"HandlerReturned",
) catch |append_err| {
std.log.err("Error appending error header to post response for request id {s}: {}", .{ req_id, append_err });
log.err("Error appending error header to post response for request id {s}: {}", .{ req_id, append_err });
std.os.exit(0);
continue;
};
var err_req = try client.request(.POST, err_uri, empty_headers, .{});
defer err_req.deinit();
err_req.start() catch |post_err| { // Well, at this point all we can do is shout at the void
std.log.err("Error posting response for request id {s}: {}", .{ req_id, post_err });
log.err("Error posting response for request id {s}: {}", .{ req_id, post_err });
std.os.exit(0);
continue;
};
err_req.wait() catch |post_err| { // Well, at this point all we can do is shout at the void
std.log.err("Error posting response for request id {s}: {}", .{ req_id, post_err });
log.err("Error posting response for request id {s}: {}", .{ req_id, post_err });
std.os.exit(0);
continue;
};
@ -152,29 +163,214 @@ pub fn run(event_handler: HandlerFn) !void { // TODO: remove inferred error set?
// Lambda infrastructure restarts, so it's unclear if that's necessary.
// It seems as though a continue should be fine, and slightly faster
// std.os.exit(1);
std.log.err("Get fail: {} {s}", .{
log.err("Get fail: {} {s}", .{
@intFromEnum(err_req.response.status),
err_req.response.status.phrase() orelse "",
});
continue;
}
std.log.err("Post complete", .{});
log.err("Post complete", .{});
continue;
};
// TODO: We should catch these potential alloc errors too
// TODO: This whole loop should be in another function so we can catch everything at once
const response_url = try std.fmt.allocPrint(req_allocator, "{s}{s}{s}/{s}/response", .{ prefix, lambda_runtime_uri, postfix, req_id });
defer allocator.free(response_url);
defer alloc.free(response_url);
const response_uri = try std.Uri.parse(response_url);
const response_content = try std.fmt.allocPrint(req_allocator, "{s} \"content\": \"{s}\" {s}", .{ "{", event_response, "}" });
var resp_req = try client.request(.POST, response_uri, empty_headers, .{});
defer resp_req.deinit();
resp_req.transfer_encoding = .{ .content_length = response_content.len };
try resp_req.start();
try resp_req.writeAll(response_content); // TODO: AllocPrint + writeAll makes no sense
try resp_req.finish();
resp_req.wait() catch |err| {
// TODO: report error
std.log.err("Error posting response for request id {s}: {}", .{ req_id, err });
log.err("Error posting response for request id {s}: {}", .{ req_id, err });
continue;
};
}
}
////////////////////////////////////////////////////////////////////////
// All code below this line is for testing
////////////////////////////////////////////////////////////////////////
var server_port: ?u16 = null;
var server_remaining_requests: usize = 0;
var lambda_remaining_requests: ?usize = null;
var server_response: []const u8 = "unset";
var server_request_aka_lambda_response: []u8 = "";
var test_lambda_runtime_uri: ?[]u8 = null;
var server_ready = false;
/// This starts a test server. We're not testing the server itself,
/// so the main tests will start this thing up and create an arena around the
/// whole thing so we can just deallocate everything at once at the end,
/// leaks be damned
fn startServer(allocator: std.mem.Allocator) !std.Thread {
return try std.Thread.spawn(
.{},
threadMain,
.{allocator},
);
}
fn threadMain(allocator: std.mem.Allocator) !void {
var server = std.http.Server.init(allocator, .{ .reuse_address = true });
// defer server.deinit();
const address = try std.net.Address.parseIp("127.0.0.1", 0);
try server.listen(address);
server_port = server.socket.listen_address.in.getPort();
test_lambda_runtime_uri = try std.fmt.allocPrint(allocator, "127.0.0.1:{d}", .{server_port.?});
log.debug("server listening at {s}", .{test_lambda_runtime_uri.?});
defer server.deinit();
defer test_lambda_runtime_uri = null;
defer server_port = null;
log.info("starting server thread, tid {d}", .{std.Thread.getCurrentId()});
var arena = std.heap.ArenaAllocator.init(allocator);
defer arena.deinit();
var aa = arena.allocator();
// We're in control of all requests/responses, so this counter will tell us
// when it's time to shut down
while (server_remaining_requests > 0) {
server_remaining_requests -= 1;
// defer {
// if (!arena.reset(.{ .retain_capacity = {} })) {
// // reallocation failed, arena is degraded
// log.warn("Arena reset failed and is degraded. Resetting arena", .{});
// arena.deinit();
// arena = std.heap.ArenaAllocator.init(allocator);
// aa = arena.allocator();
// }
// }
processRequest(aa, &server) catch |e| {
log.err("Unexpected error processing request: {any}", .{e});
if (@errorReturnTrace()) |trace| {
std.debug.dumpStackTrace(trace.*);
}
};
}
}
fn processRequest(allocator: std.mem.Allocator, server: *std.http.Server) !void {
server_ready = true;
errdefer server_ready = false;
log.debug(
"tid {d} (server): server waiting to accept. requests remaining: {d}",
.{ std.Thread.getCurrentId(), server_remaining_requests + 1 },
);
var res = try server.accept(.{ .allocator = allocator });
server_ready = false;
defer res.deinit();
defer _ = res.reset();
try res.wait(); // wait for client to send a complete request head
const errstr = "Internal Server Error\n";
var errbuf: [errstr.len]u8 = undefined;
@memcpy(&errbuf, errstr);
var response_bytes: []const u8 = errbuf[0..];
if (res.request.content_length) |l|
server_request_aka_lambda_response = try res.reader().readAllAlloc(allocator, @as(usize, l));
log.debug(
"tid {d} (server): {d} bytes read from request",
.{ std.Thread.getCurrentId(), server_request_aka_lambda_response.len },
);
// try response.headers.append("content-type", "text/plain");
response_bytes = serve(allocator, &res) catch |e| brk: {
res.status = .internal_server_error;
// TODO: more about this particular request
log.err("Unexpected error from executor processing request: {any}", .{e});
if (@errorReturnTrace()) |trace| {
std.debug.dumpStackTrace(trace.*);
}
break :brk "Unexpected error generating request to lambda";
};
res.transfer_encoding = .{ .content_length = response_bytes.len };
try res.do();
_ = try res.writer().writeAll(response_bytes);
try res.finish();
log.debug(
"tid {d} (server): sent response",
.{std.Thread.getCurrentId()},
);
}
fn serve(allocator: std.mem.Allocator, res: *std.http.Server.Response) ![]const u8 {
_ = allocator;
// try res.headers.append("content-length", try std.fmt.allocPrint(allocator, "{d}", .{server_response.len}));
try res.headers.append("Lambda-Runtime-Aws-Request-Id", "69");
return server_response;
}
fn handler(allocator: std.mem.Allocator, event_data: []const u8) ![]const u8 {
_ = allocator;
return event_data;
}
fn test_run(allocator: std.mem.Allocator, event_handler: HandlerFn) !std.Thread {
return try std.Thread.spawn(
.{},
run,
.{ allocator, event_handler },
);
}
fn lambda_request(allocator: std.mem.Allocator, request: []const u8) ![]u8 {
var arena = std.heap.ArenaAllocator.init(allocator);
defer arena.deinit();
var aa = arena.allocator();
// Setup our server to run, and set the response for the server to the
// request. There is a cognitive disconnect here between mental model and
// physical model.
//
// Mental model:
//
// Lambda request -> λ -> Lambda response
//
// Physical Model:
//
// 1. λ requests instructions from server
// 2. server provides "Lambda request"
// 3. λ posts response back to server
//
// So here we are setting up our server, then our lambda request loop,
// but it all needs to be in separate threads so we can control startup
// and shut down. Both server and Lambda are set up to watch global variable
// booleans to know when to shut down. This function is designed for a
// single request/response pair only
server_remaining_requests = 2; // Tell our server to run for just two requests
server_response = request; // set our instructions to lambda, which in our
// physical model above, is the server response
defer server_response = "unset"; // set it back so we don't get confused later
// when subsequent tests fail
const server_thread = try startServer(aa); // start the server, get it ready
while (!server_ready)
std.time.sleep(100);
log.debug("tid {d} (main): server reports ready", .{std.Thread.getCurrentId()});
// we aren't testing the server,
// so we'll use the arena allocator
defer server_thread.join(); // we'll be shutting everything down before we exit
// Now we need to start the lambda framework, following a similar pattern
lambda_remaining_requests = 1; // in case anyone messed with this, we will make sure we start
const lambda_thread = try test_run(allocator, handler); // We want our function under test to report leaks
lambda_thread.join();
return server_request_aka_lambda_response;
}
test "basic request" {
// std.testing.log_level = .debug;
const allocator = std.testing.allocator;
const request =
\\{"foo": "bar", "baz": "qux"}
;
const lambda_response = try lambda_request(allocator, request);
try std.testing.expectEqualStrings(lambda_response, request);
}
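
The new run signature makes the allocator optional: passing null falls back to the GeneralPurposeAllocator created inside run, as the sample main below does, while a caller can also supply its own allocator (the test harness above passes std.testing.allocator so leaks get reported). A minimal sketch of the explicit-allocator form, with an illustrative handler that builds its response using the allocator handed to it:

const std = @import("std");
const lambda = @import("lambda.zig");

fn echoHandler(allocator: std.mem.Allocator, event_data: []const u8) ![]const u8 {
    // Wrap the incoming event in a small JSON envelope using the
    // allocator the framework hands to the handler.
    return try std.fmt.allocPrint(allocator, "{{ \"echo\": {s} }}", .{event_data});
}

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    try lambda.run(gpa.allocator(), echoHandler);
}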


@ -2,7 +2,7 @@ const std = @import("std");
const lambda = @import("lambda.zig");
pub fn main() anyerror!void {
try lambda.run(handler);
try lambda.run(null, handler);
}
fn handler(allocator: std.mem.Allocator, event_data: []const u8) ![]const u8 {