Merge cli progress bar for codegen (PR #22)
Some checks failed
AWS-Zig Build / build-zig-amd64-host (push) Has been cancelled

This commit is contained in:
Emil Lerch 2025-05-29 14:50:20 -07:00
commit d6be1d4c5c
Signed by: lobo
GPG key ID: A7B62D657EF764F8
2 changed files with 41 additions and 17 deletions

View file

@@ -92,8 +92,12 @@ pub fn build(b: *Builder) !void {
     ));
     cg_cmd.addArg("--output");
     const cg_output_dir = cg_cmd.addOutputDirectoryArg("src/models");
-    if (b.verbose)
+    if (b.verbose) {
         cg_cmd.addArg("--verbose");
+    }
+    if (!no_bin) {
+        b.installArtifact(cg_exe);
+    }
     // cg_cmd.step.dependOn(&fetch_step.step);
     // TODO: this should use zig_exe from std.Build
     // codegen should store a hash in a comment

View file

@@ -6,6 +6,9 @@ const case = @import("case");
 var verbose = false;
 pub fn main() anyerror!void {
+    const root_progress_node = std.Progress.start(.{});
+    defer root_progress_node.end();
+
     var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
     defer arena.deinit();
     const allocator = arena.allocator();
@@ -67,13 +70,14 @@ pub fn main() anyerror!void {
     // no files specified, look for json files in models directory or cwd
     // this is our normal mode of operation and where initial optimizations
     // can be made
     if (models_dir) |m| {
         var cwd = try std.fs.cwd().openDir(".", .{});
         defer cwd.close();
         defer cwd.setAsCwd() catch unreachable;
         try m.setAsCwd();
-        try processDirectories(m, output_dir);
+        try processDirectories(m, output_dir, &root_progress_node);
     }
 }
@@ -85,7 +89,7 @@ const OutputManifest = struct {
     model_dir_hash_digest: [Hasher.hex_multihash_len]u8,
     output_dir_hash_digest: [Hasher.hex_multihash_len]u8,
 };
-fn processDirectories(models_dir: std.fs.Dir, output_dir: std.fs.Dir) !void {
+fn processDirectories(models_dir: std.fs.Dir, output_dir: std.fs.Dir, parent_progress: *const std.Progress.Node) !void {
     // Let's get ready to hash!!
     var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
     defer arena.deinit();
@@ -93,7 +97,8 @@ fn processDirectories(models_dir: std.fs.Dir, output_dir: std.fs.Dir) !void {
     var thread_pool: std.Thread.Pool = undefined;
    try thread_pool.init(.{ .allocator = allocator });
     defer thread_pool.deinit();
-    var calculated_manifest = try calculateDigests(models_dir, output_dir, &thread_pool);
+    const count, var calculated_manifest = try calculateDigests(models_dir, output_dir, &thread_pool);
     const output_stored_manifest = output_dir.readFileAlloc(allocator, "output_manifest.json", std.math.maxInt(usize)) catch null;
     if (output_stored_manifest) |o| {
         // we have a stored manifest. Parse it and compare to our calculations
@@ -113,14 +118,19 @@ fn processDirectories(models_dir: std.fs.Dir, output_dir: std.fs.Dir) !void {
     defer manifest_file.close();
     const manifest = manifest_file.writer();
     var mi = models_dir.iterate();
+    const generating_models_progress = parent_progress.start("generating models", count);
+    defer generating_models_progress.end();
     while (try mi.next()) |e| {
-        if ((e.kind == .file or e.kind == .sym_link) and
-            std.mem.endsWith(u8, e.name, ".json"))
+        if ((e.kind == .file or e.kind == .sym_link) and std.mem.endsWith(u8, e.name, ".json")) {
             try processFile(e.name, output_dir, manifest);
+            generating_models_progress.completeOne();
+        }
     }
     // re-calculate so we can store the manifest
     model_digest = calculated_manifest.model_dir_hash_digest;
-    calculated_manifest = try calculateDigests(models_dir, output_dir, &thread_pool);
+    _, calculated_manifest = try calculateDigests(models_dir, output_dir, &thread_pool);
     try output_dir.writeFile(.{ .sub_path = "output_manifest.json", .data = try std.json.stringifyAlloc(
         allocator,
         calculated_manifest,
@@ -129,13 +139,18 @@ fn processDirectories(models_dir: std.fs.Dir, output_dir: std.fs.Dir) !void {
 }
 var model_digest: ?[Hasher.hex_multihash_len]u8 = null;
-fn calculateDigests(models_dir: std.fs.Dir, output_dir: std.fs.Dir, thread_pool: *std.Thread.Pool) !OutputManifest {
+fn calculateDigests(models_dir: std.fs.Dir, output_dir: std.fs.Dir, thread_pool: *std.Thread.Pool) !struct { usize, OutputManifest } {
+    const Include = struct {
+        threadlocal var count: usize = 0;
+        pub fn include(entry: std.fs.Dir.Walker.Entry) bool {
+            const included = std.mem.endsWith(u8, entry.basename, ".json");
+            if (included) count += 1;
+            return included;
+        }
+    };
     const model_hash = if (model_digest) |m| m[0..Hasher.digest_len].* else try Hasher.computeDirectoryHash(thread_pool, models_dir, @constCast(&Hasher.ComputeDirectoryOptions{
-        .isIncluded = struct {
-            pub fn include(entry: std.fs.Dir.Walker.Entry) bool {
-                return std.mem.endsWith(u8, entry.basename, ".json");
-            }
-        }.include,
+        .isIncluded = Include.include,
         .isExcluded = struct {
             pub fn exclude(entry: std.fs.Dir.Walker.Entry) bool {
                 _ = entry;
@@ -162,8 +177,10 @@ fn calculateDigests(models_dir: std.fs.Dir, output_dir: std.fs.Dir, thread_pool:
     }));
     if (verbose) std.log.info("Output directory hash: {s}", .{Hasher.hexDigest(output_hash)});
     return .{
-        .model_dir_hash_digest = model_digest orelse Hasher.hexDigest(model_hash),
-        .output_dir_hash_digest = Hasher.hexDigest(output_hash),
+        Include.count,
+        .{
+            .model_dir_hash_digest = model_digest orelse Hasher.hexDigest(model_hash),
+            .output_dir_hash_digest = Hasher.hexDigest(output_hash),
+        },
     };
 }
 fn processFile(file_name: []const u8, output_dir: std.fs.Dir, manifest: anytype) !void {
@@ -729,7 +746,10 @@ fn generateTypeFor(shape_id: []const u8, writer: anytype, state: GenerationState
         // must be blocking deep recursion somewhere or this would be a great
         // DOS attack
         try generateSimpleTypeFor("nothing", "[]const u8", writer);
-        std.log.warn("Type cycle detected, limiting depth. Type: {s}", .{shape_id});
+        if (verbose) {
+            std.log.warn("Type cycle detected, limiting depth. Type: {s}", .{shape_id});
+        }
         // if (std.mem.eql(u8, "com.amazonaws.workmail#Timestamp", shape_id)) {
         //     std.log.info("  Type stack:\n", .{});
         //     for (state.type_stack.items) |i|
@@ -884,7 +904,7 @@ fn generateComplexTypeFor(shape_id: []const u8, members: []smithy.TypeMember, ty
                 // Don't assert as that will be optimized for Release* builds
                 // We'll continue here and treat the above as a warning
                 if (payload) |first| {
-                    std.log.err("Found multiple httpPayloads in violation of smithy spec! Ignoring '{s}' and using '{s}'", .{ first, snake_case_member });
+                    std.log.warn("Found multiple httpPayloads in violation of smithy spec! Ignoring '{s}' and using '{s}'", .{ first, snake_case_member });
                 }
                 payload = try allocator.dupe(u8, snake_case_member);
             },