better codegen but without caching
Some checks failed
AWS-Zig Build / build-zig-0.11.0-amd64-host (push) Failing after 2m4s

Emil Lerch 2023-08-14 22:38:37 -07:00
parent 6b97fed499
commit 3b249d62b9
Signed by: lobo
GPG Key ID: A7B62D657EF764F8
2 changed files with 103 additions and 44 deletions

build.zig

@@ -35,14 +35,19 @@ pub fn build(b: *Builder) !void {
         .optimize = optimize,
     });
     exe.addModule("smithy", smithy_dep.module("smithy"));
-    // TODO: Smithy needs to be in a different repo
-    // https://github.com/ziglang/zig/issues/855
-    // exe.addModulePath("smithy", "smithy/src/smithy.zig");
-    if (target.getOs().tag != .macos) exe.linkage = .static;
-    // Strip is controlled by optimize options
-    // exe.strip = b.option(bool, "strip", "strip exe [true]") orelse true;
+    // TODO: This does not work correctly due to https://github.com/ziglang/zig/issues/16354
+    //
+    // We are working here with kind of a weird dependency though. So we can do this
+    // another way
+    //
+    // TODO: These target/optimize are not correct, as we need to run the thing
+    // const codegen = b.anonymousDependency("codegen/", @import("codegen/build.zig"), .{
+    //     .target = target,
+    //     .optimize = optimize,
+    // });
+    // const codegen_cmd = b.addRunArtifact(codegen.artifact("codegen"));
+    // exe.step.dependOn(&codegen_cmd.step);

     const run_cmd = b.addRunArtifact(exe);
     run_cmd.step.dependOn(b.getInstallStep());
@@ -53,14 +58,23 @@ pub fn build(b: *Builder) !void {
     const run_step = b.step("run", "Run the app");
     run_step.dependOn(&run_cmd.step);
-    // TODO: Proper testing
-    var codegen: ?*std.build.Step = null;
-    if (target.getOs().tag == .linux and false) {
-        // TODO: Support > linux with RunStep
-        // std.build.RunStep.create(null,null).cwd(std.fs.path.resolve(b.build_root, "codegen")).addArgs(...)
-        codegen = b.step("gen", "Generate zig service code from smithy models");
-        const cg = codegen.?;
+    {
+        const cg = b.step("gen", "Generate zig service code from smithy models");
+        const cg_exe = b.addExecutable(.{
+            .name = "codegen",
+            .root_source_file = .{ .path = "codegen/src/main.zig" },
+            // We need this generated for the host, not the real target
+            // .target = target,
+            // .optimize = optimize,
+        });
+        cg_exe.addModule("smithy", smithy_dep.module("smithy"));
+        var cg_cmd = b.addRunArtifact(cg_exe);
+        cg_cmd.addArg("--models");
+        cg_cmd.addDirectoryArg(std.Build.FileSource.relative("codegen/models"));
+        cg_cmd.addArg("--output");
+        cg_cmd.addDirectoryArg(std.Build.FileSource.relative("src/models"));
         // TODO: this should use zig_exe from std.Build
         // codegen should store a hash in a comment
         // this would be hash of the exe that created the file
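For orientation (not something this commit adds): once cg_exe is built, the run artifact above amounts to invoking the generator roughly as

    codegen --models codegen/models --output src/models

with both directory arguments resolved against the build root, so running "zig build gen" regenerates the Zig service code under src/models.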
@@ -78,21 +92,14 @@ pub fn build(b: *Builder) !void {
         // this scheme would permit cross plat codegen and maybe
         // we can have codegen added in a seperate repo,
         // though not sure how necessary that is
-        cg.dependOn(&b.addSystemCommand(&.{ "/bin/sh", "-c", "cd codegen && zig build" }).step);
-        // triggering the re-gen
-        cg.dependOn(&b.addSystemCommand(&.{
-            "/bin/sh", "-c",
-            \\ [ ! -f src/models/service_manifest.zig ] || \
-            \\ [ $(find codegen -type f -newer src/models/service_manifest.zig -print -quit |wc -c) = '0' ] || \
-            \\ rm src/models/service_manifest.zig
-        }).step);
-        cg.dependOn(&b.addSystemCommand(&.{
-            "/bin/sh", "-c",
-            \\ mkdir -p src/models/ && \
-            \\ [ -f src/models/service_manifest.zig ] || \
-            \\ ( cd codegen/models && ../codegen *.json && mv *.zig ../../src/models )
-        }).step);
+        // cg.dependOn(&b.addSystemCommand(&.{
+        //     b.zig_exe,
+        //     "build",
+        //     "run",
+        //     "-Doptimize=ReleaseSafe",
+        // }).step);
+        cg.dependOn(&cg_cmd.step);

         exe.step.dependOn(cg);
     }
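Because exe.step depends on the gen step and nothing records whether the models or the generator changed, every zig build currently re-runs codegen; that is the missing caching the commit title refers to. The retained comments sketch the intended fix: have the generator stamp each output file with a hash (of the generator binary, and presumably of the model inputs) in a leading comment, so an unchanged hash lets the build skip regeneration. A minimal sketch of that idea, assuming SHA-256 of the generator binary and a hypothetical "// codegen-hash:" comment format (neither is in this repository):

    // Hypothetical sketch, not part of this commit: skip regeneration when the
    // previously generated file starts with a matching hash comment.
    const std = @import("std");

    fn upToDate(allocator: std.mem.Allocator, generated_path: []const u8, codegen_exe_path: []const u8) bool {
        // Hash the generator executable that would produce the file
        const exe_bytes = std.fs.cwd().readFileAlloc(allocator, codegen_exe_path, 100 * 1024 * 1024) catch return false;
        defer allocator.free(exe_bytes);
        var digest: [std.crypto.hash.sha2.Sha256.digest_length]u8 = undefined;
        std.crypto.hash.sha2.Sha256.hash(exe_bytes, &digest, .{});
        const hex = std.fmt.bytesToHex(digest, .lower);

        // Compare against the first line of the existing generated file
        const expected = std.fmt.allocPrint(allocator, "// codegen-hash: {s}", .{hex}) catch return false;
        defer allocator.free(expected);
        const file = std.fs.cwd().openFile(generated_path, .{}) catch return false;
        defer file.close();
        var buf: [256]u8 = undefined;
        const first_line = (file.reader().readUntilDelimiterOrEof(&buf, '\n') catch return false) orelse return false;
        return std.mem.eql(u8, first_line, expected);
    }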

codegen/src/main.zig

@@ -11,27 +11,74 @@ pub fn main() anyerror!void {
     const args = try std.process.argsAlloc(allocator);
     defer std.process.argsFree(allocator, args);
     const stdout = std.io.getStdOut().writer();
-    const json_file = try std.fs.cwd().createFile("json.zig", .{});
+    var output_dir = std.fs.cwd();
+    defer output_dir.close();
+    var models_dir: ?std.fs.IterableDir = null;
+    defer if (models_dir) |*m| m.close();
+    for (args, 0..) |arg, i| {
+        if (std.mem.eql(u8, "--help", arg) or
+            std.mem.eql(u8, "-h", arg))
+        {
+            try stdout.print("usage: {s} [--models dir] [--output dir] [file...]\n\n", .{args[0]});
+            try stdout.print(" --models specifies a directory with all model files (do not specify files if --models is used)\n", .{});
+            try stdout.print(" --output specifies an output directory, otherwise the current working directory will be used\n", .{});
+            std.process.exit(0);
+        }
+        if (std.mem.eql(u8, "--output", arg))
+            output_dir = try output_dir.openDir(args[i + 1], .{});
+        if (std.mem.eql(u8, "--models", arg))
+            models_dir = try std.fs.cwd().openIterableDir(args[i + 1], .{});
+    }
+
+    // TODO: Seems like we should remove this in favor of a package
+    const json_file = try output_dir.createFile("json.zig", .{});
     defer json_file.close();
     try json_file.writer().writeAll(json_zig);
-    const manifest_file = try std.fs.cwd().createFile("service_manifest.zig", .{});
+    const manifest_file = try output_dir.createFile("service_manifest.zig", .{});
     defer manifest_file.close();
     const manifest = manifest_file.writer();
-    var inx: u32 = 0;
+    var files_processed: usize = 0;
+    var skip_next = true;
     for (args) |arg| {
-        if (inx == 0) {
-            inx = inx + 1;
+        if (skip_next) {
+            skip_next = false;
             continue;
         }
-        try processFile(arg, stdout, manifest);
-        inx = inx + 1;
+        if (std.mem.eql(u8, "--models", arg) or
+            std.mem.eql(u8, "--output", arg))
+        {
+            skip_next = true;
+            continue;
+        }
+        try processFile(arg, stdout, output_dir, manifest);
+        files_processed += 1;
+    }
+    if (files_processed == 0) {
+        // no files specified, look for json files in models directory or cwd
+        if (models_dir) |m| {
+            var cwd = try std.fs.cwd().openDir(".", .{});
+            defer cwd.close();
+            defer cwd.setAsCwd() catch unreachable;
+            try stdout.print("orig cwd: {any}\n", .{cwd});
+            try m.dir.setAsCwd();
+            try stdout.print("cwd: {any}\n", .{m.dir});
+            // TODO: this is throwing an error?
+            // _ = cwd;
+            var mi = m.iterate();
+            while (try mi.next()) |e| {
+                if ((e.kind == .file or e.kind == .sym_link) and
+                    std.mem.endsWith(u8, e.name, ".json"))
+                    try processFile(e.name, stdout, output_dir, manifest);
+            }
+        }
     }
     if (args.len == 0)
         _ = try generateServices(allocator, ";", std.io.getStdIn(), stdout);
 }

-fn processFile(arg: []const u8, stdout: anytype, manifest: anytype) !void {
+fn processFile(file_name: []const u8, stdout: anytype, output_dir: std.fs.Dir, manifest: anytype) !void {
     // It's probably best to create our own allocator here so we can deint at the end and
     // toss all allocations related to the services in this file
     // I can't guarantee we're not leaking something, and at the end of the
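With the argument handling above, the generator can be pointed at a directory of models or at explicit files; hypothetical invocations (file names are illustrative) look like

    codegen --models codegen/models --output src/models
    codegen sts.json ec2.json --output src/models

The first form iterates every .json file in the models directory; the second processes the named files relative to the current directory. In both cases json.zig and service_manifest.zig are (re)created in the --output directory, or in the current directory when --output is omitted.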
@@ -41,17 +88,17 @@ fn processFile(file_name: []const u8, stdout: anytype, output_dir: std.fs.Dir, manifest: anytype) !void {
     const allocator = arena.allocator();
     var writer = &stdout;
     var file: std.fs.File = undefined;
-    const filename = try std.fmt.allocPrint(allocator, "{s}.zig", .{arg});
-    defer allocator.free(filename);
-    file = try std.fs.cwd().createFile(filename, .{ .truncate = true });
+    const output_file_name = try std.fmt.allocPrint(allocator, "{s}.zig", .{file_name});
+    defer allocator.free(output_file_name);
+    file = try output_dir.createFile(output_file_name, .{ .truncate = true });
     errdefer file.close();
     writer = &file.writer();
     _ = try writer.write("const std = @import(\"std\");\n");
     _ = try writer.write("const serializeMap = @import(\"json.zig\").serializeMap;\n");
     _ = try writer.write("const smithy = @import(\"smithy\");\n\n");
-    std.log.info("Processing file: {s}", .{arg});
-    const service_names = generateServicesForFilePath(allocator, ";", arg, writer) catch |err| {
-        std.log.err("Error processing file: {s}", .{arg});
+    std.log.info("Processing file: {s}", .{file_name});
+    const service_names = generateServicesForFilePath(allocator, ";", file_name, writer) catch |err| {
+        std.log.err("Error processing file: {s}", .{file_name});
         return err;
     };
     defer {
@@ -60,11 +107,16 @@ fn processFile(file_name: []const u8, stdout: anytype, output_dir: std.fs.Dir, manifest: anytype) !void {
     }
     file.close();
     for (service_names) |name| {
-        try manifest.print("pub const {s} = @import(\"{s}\");\n", .{ name, std.fs.path.basename(filename) });
+        try manifest.print("pub const {s} = @import(\"{s}\");\n", .{ name, std.fs.path.basename(output_file_name) });
     }
 }

-fn generateServicesForFilePath(allocator: std.mem.Allocator, comptime terminator: []const u8, path: []const u8, writer: anytype) ![][]const u8 {
+fn generateServicesForFilePath(
+    allocator: std.mem.Allocator,
+    comptime terminator: []const u8,
+    path: []const u8,
+    writer: anytype,
+) ![][]const u8 {
     const file = try std.fs.cwd().openFile(path, .{});
     defer file.close();
     return try generateServices(allocator, terminator, file, writer);
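For reference, the manifest written by processFile ends up as service_manifest.zig in the output directory, one pub const line per generated service in the @import form printed above. With hypothetical models sts.json and ec2.json (service and file names are illustrative, not taken from this repository) it would look roughly like

    // src/models/service_manifest.zig (hypothetical contents)
    pub const sts = @import("sts.json.zig");
    pub const ec2 = @import("ec2.json.zig");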