implement package caching
All checks were successful
Generic zig build / build (push) Successful in 52s

Emil Lerch 2024-08-28 15:15:19 -07:00
parent 8a70f19ae5
commit 12dd33db15
Signed by: lobo
GPG Key ID: A7B62D657EF764F8
3 changed files with 46 additions and 20 deletions

View File

@@ -102,12 +102,11 @@ pub fn configureBuild(b: *std.Build, exe: *std.Build.Step.Compile, function_name
     // Deployment
     const deploy = Deploy.create(b, .{
         .name = function_name,
-        .package = package_step.packagedFileLazyPath(),
         .arch = exe.root_module.resolved_target.?.result.cpu.arch,
         .iam_step = iam,
+        .package_step = package_step,
         .region = region,
     });
-    deploy.step.dependOn(&package_step.step);
     const deploy_step = b.step("awslambda_deploy", "Deploy the function");
     deploy_step.dependOn(&deploy.step);
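The net effect of this hunk is that the packaging step is handed to Deploy.create as an option and the deploy step wires up its own dependency on it (see the Deploy.zig change below), so the explicit deploy.step.dependOn(&package_step.step) call goes away. A minimal build.zig sketch of that pattern, using only plain std.Build steps and hypothetical names (createDeployLike and the stand-in steps are not this repo's actual Package/Iam/Deploy types):

// build.zig sketch (hypothetical, Zig 0.13 std.Build API): a step creator that
// accepts its prerequisite in the options struct and wires the dependency itself,
// mirroring how Deploy.create now depends on options.package_step internally.
const std = @import("std");

const DeployLikeOptions = struct {
    // hypothetical stand-ins for the real Package/Iam steps
    package_step: *std.Build.Step,
    iam_step: *std.Build.Step,
};

fn createDeployLike(b: *std.Build, options: DeployLikeOptions) *std.Build.Step {
    const step = b.step("awslambda_deploy_sketch", "Deploy the function (sketch)");
    // the deploy step owns its prerequisites, so callers no longer need an
    // explicit deploy.step.dependOn(&package_step.step)
    step.dependOn(options.package_step);
    step.dependOn(options.iam_step);
    return step;
}

pub fn build(b: *std.Build) void {
    const package = b.step("package_sketch", "stand-in for the Package step");
    const iam = b.step("iam_sketch", "stand-in for the Iam step");
    _ = createDeployLike(b, .{ .package_step = package, .iam_step = iam });
}

Pushing the dependOn call into create() keeps the caller's build graph wiring in one place and makes it impossible to forget the prerequisite.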

View File

@@ -13,15 +13,15 @@ pub const Options = struct {
     /// Function name to be used for the function
     name: []const u8,
-    /// LazyPath for the function package (zip file)
-    package: std.Build.LazyPath,
     /// Architecture for Lambda function
     arch: std.Target.Cpu.Arch,
     /// Iam step. This will be a dependency of the deployment
     iam_step: *@import("Iam.zig"),
+    /// Packaging step. This will be a dependency of the deployment
+    package_step: *@import("Package.zig"),
     /// Region for deployment
     region: *Region,
 };
@@ -30,7 +30,7 @@ pub fn create(owner: *std.Build, options: Options) *Deploy {
     const name = owner.dupe(options.name);
     const step_name = owner.fmt("{s} {s}{s}", .{
         "aws lambda",
-        "deploy",
+        "deploy ",
         name,
     });
     const self = owner.allocator.create(Deploy) catch @panic("OOM");
@@ -45,6 +45,7 @@ pub fn create(owner: *std.Build, options: Options) *Deploy {
     };
     self.step.dependOn(&options.iam_step.step);
+    self.step.dependOn(&options.package_step.step);
     return self;
 }
@@ -73,13 +74,12 @@ fn make(step: *std.Build.Step, node: std.Progress.Node) anyerror!void {
     if (self.options.arch != .aarch64 and self.options.arch != .x86_64)
         return step.fail("AWS Lambda can only deploy aarch64 and x86_64 functions ({} not allowed)", .{self.options.arch});
-    // TODO: Work out cache. HOWEVER...this cannot be done until the caching
-    // for the Deploy command works properly. Right now, it regenerates
-    // the zip file every time
-    // if (try getIamArnFromName(step, self.options.role_name)) |_| {
-    //     step.result_cached = true;
-    //     return; // exists in cache - nothing to do
-    // }
+    const last_packaged_sha256 = blk: {
+        // file should always be there, but we shouldn't break if the cache doesn't exist
+        const last_deployed_id_file = std.fs.openFileAbsolute(try self.options.package_step.shasumFilePath(), .{}) catch break :blk null;
+        defer last_deployed_id_file.close();
+        break :blk try last_deployed_id_file.readToEndAlloc(step.owner.allocator, 2048);
+    };
     var client = aws.Client.init(self.step.owner.allocator, .{});
     defer client.deinit();
@@ -112,17 +112,22 @@ fn make(step: *std.Build.Step, node: std.Progress.Node) anyerror!void {
         };
         defer call.deinit();
-        // TODO: Write call.response.configuration.last_modified to cache
-        // std.debug.print("Function found. Last modified: {s}, revision id: {s}\n", .{ call.response.configuration.?.last_modified.?, call.response.configuration.?.revision_id.? });
         break :blk .{
             .last_modified = try step.owner.allocator.dupe(u8, call.response.configuration.?.last_modified.?),
             .revision_id = try step.owner.allocator.dupe(u8, call.response.configuration.?.revision_id.?),
+            .sha256 = try step.owner.allocator.dupe(u8, call.response.configuration.?.code_sha256.?),
         };
     };
+    if (last_packaged_sha256) |s|
+        if (function) |f|
+            if (std.mem.eql(u8, s, f.sha256)) {
+                step.result_cached = true;
+                return;
+            };
     const encoder = std.base64.standard.Encoder;
-    const file = try std.fs.openFileAbsolute(self.options.package.getPath2(step.owner, step), .{});
+    const file = try std.fs.openFileAbsolute(self.options.package_step.packagedFileLazyPath().getPath2(step.owner, step), .{});
     defer file.close();
     const bytes = try file.readToEndAlloc(step.owner.allocator, 100 * 1024 * 1024);
     const base64_buf = try step.owner.allocator.alloc(u8, encoder.calcSize(bytes.len));
@@ -144,8 +149,6 @@ fn make(step: *std.Build.Step, node: std.Progress.Node) anyerror!void {
             .zip_file = base64_bytes,
         }, options);
         defer update_call.deinit();
-        // TODO: Write call.response.last_modified to cache
-        // TODO: Write call.response.revision_id to cache?
     } else {
         // New function - we need to create from scratch
         const create_call = try aws.Request(services.lambda.create_function).call(.{
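The cache check above works because Lambda's get_function reports code_sha256 as the base64-encoded SHA-256 digest of the deployment package zip, which is exactly the string the Package step now writes to sha256sum.txt. A standalone sketch of computing that comparable value locally, using the same Zig 0.13 std APIs as the diff (the file name function.zip is a placeholder):

// Sketch: compute the base64-encoded SHA-256 that Lambda reports as CodeSha256.
// "function.zip" is a hypothetical path, not part of this repo.
const std = @import("std");

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    const allocator = gpa.allocator();

    const zip_bytes = try std.fs.cwd().readFileAlloc(allocator, "function.zip", 100 * 1024 * 1024);
    defer allocator.free(zip_bytes);

    var hash: [std.crypto.hash.sha2.Sha256.digest_length]u8 = undefined;
    std.crypto.hash.sha2.Sha256.hash(zip_bytes, &hash, .{});

    const encoder = std.base64.standard.Encoder;
    var encoded: [encoder.calcSize(std.crypto.hash.sha2.Sha256.digest_length)]u8 = undefined;
    const code_sha256 = encoder.encode(encoded[0..], hash[0..]);

    // this is the value to compare against GetFunction's Configuration.CodeSha256
    std.debug.print("{s}\n", .{code_sha256});
}

If this value matches both the stored sha256sum.txt and the CodeSha256 returned by GetFunction, the deploy step can mark itself cached and skip the upload entirely.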

View File

@@ -8,6 +8,8 @@ options: Options,
 /// This is set as part of the make phase, and is the location in the cache
 /// for the lambda package. The package will also be copied to the output
 /// directory, but this location makes for a good cache key for deployments
+zipfile_cache_dest: ?[]const u8 = null,
 zipfile_dest: ?[]const u8 = null,

 const base_id: std.Build.Step.Id = .install_file;
@@ -38,7 +40,13 @@ pub fn create(owner: *std.Build, options: Options) *Package {
     return package;
 }

+pub fn shasumFilePath(self: Package) ![]const u8 {
+    return try std.fmt.allocPrint(
+        self.step.owner.allocator,
+        "{s}{s}{s}",
+        .{ std.fs.path.dirname(self.zipfile_cache_dest.?).?, std.fs.path.sep_str, "sha256sum.txt" },
+    );
+}
+
 pub fn packagedFilePath(self: Package) []const u8 {
     return self.step.owner.getInstallPath(.prefix, self.options.zipfile_name);
 }
@@ -66,6 +74,7 @@ fn make(step: *std.Build.Step, node: std.Progress.Node) anyerror!void {
     };
     const bootstrap_dirname = std.fs.path.dirname(bootstrap).?;
     const zipfile_src = try std.fs.path.join(step.owner.allocator, &[_][]const u8{ bootstrap_dirname, self.options.zipfile_name });
+    self.zipfile_cache_dest = zipfile_src;
     self.zipfile_dest = self.step.owner.getInstallPath(.prefix, self.options.zipfile_name);
     if (std.fs.copyFileAbsolute(zipfile_src, self.zipfile_dest.?, .{})) |_| {
         // we're good here. The zip file exists in cache and has been copied
@@ -93,6 +102,21 @@ fn make(step: *std.Build.Step, node: std.Progress.Node) anyerror!void {
         }
         try std.fs.copyFileAbsolute(zipfile_src, self.zipfile_dest.?, .{}); // It better be there now
+
+        // One last thing. We want to get a Sha256 sum of the zip file, and
+        // store it in cache. This will help the deployment process compare
+        // to what's out in AWS, since revision id is apparently trash for these
+        // purposes
+        const zipfile = try std.fs.openFileAbsolute(zipfile_src, .{});
+        defer zipfile.close();
+        const zip_bytes = try zipfile.readToEndAlloc(step.owner.allocator, 100 * 1024 * 1024);
+        var hash: [std.crypto.hash.sha2.Sha256.digest_length]u8 = undefined;
+        std.crypto.hash.sha2.Sha256.hash(zip_bytes, &hash, .{});
+        const base64 = std.base64.standard.Encoder;
+        var encoded: [base64.calcSize(std.crypto.hash.sha2.Sha256.digest_length)]u8 = undefined;
+        const shaoutput = try std.fs.createFileAbsolute(try self.shasumFilePath(), .{});
+        defer shaoutput.close();
+        try shaoutput.writeAll(base64.encode(encoded[0..], hash[0..]));
     }
 }
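With this change the checksum lives next to the cached zip as sha256sum.txt, which is what shasumFilePath above points at. A small sketch of reading it back the way the new Deploy code does, where a missing file simply means "no cache" rather than an error (the helper name readCachedSha256 is hypothetical, and zipfile_cache_dest is assumed to be an absolute path as in the Package step):

// Sketch (Zig 0.13 std): fetch the cached base64 SHA-256, or null if the cache
// file does not exist yet.
const std = @import("std");

fn readCachedSha256(allocator: std.mem.Allocator, zipfile_cache_dest: []const u8) !?[]u8 {
    // sha256sum.txt sits next to the cached zip, as written by the Package step
    const dir = std.fs.path.dirname(zipfile_cache_dest) orelse return null;
    const sum_path = try std.fs.path.join(allocator, &[_][]const u8{ dir, "sha256sum.txt" });
    defer allocator.free(sum_path);

    const file = std.fs.openFileAbsolute(sum_path, .{}) catch return null;
    defer file.close();
    return try file.readToEndAlloc(allocator, 2048);
}

The returned string can be compared directly (std.mem.eql) against the code_sha256 that GetFunction reports, which is what lets the deploy step set result_cached and return early.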