Merge cli progress bar for codegen (PR #22)
Some checks failed: AWS-Zig Build / build-zig-amd64-host (push) has been cancelled
Commit d6be1d4c5c · 2 changed files with 41 additions and 17 deletions
@@ -92,8 +92,12 @@ pub fn build(b: *Builder) !void {
     ));
     cg_cmd.addArg("--output");
     const cg_output_dir = cg_cmd.addOutputDirectoryArg("src/models");
-    if (b.verbose)
+    if (b.verbose) {
         cg_cmd.addArg("--verbose");
+    }
+    if (!no_bin) {
+        b.installArtifact(cg_exe);
+    }
     // cg_cmd.step.dependOn(&fetch_step.step);
     // TODO: this should use zig_exe from std.Build
     // codegen should store a hash in a comment
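The new `if (!no_bin)` guard implies a boolean build option that lets the codegen binary be compiled without being installed. A minimal sketch of how such an option is typically wired up; the `no_bin` declaration, the `-Dno-bin` flag name, and the `demo` executable are assumptions, since the declaration sits outside this hunk:

const std = @import("std");

pub fn build(b: *std.Build) void {
    // Assumed declaration: `zig build -Dno-bin` still compiles everything,
    // keeping the build useful as a fast type-check, but installs nothing.
    const no_bin = b.option(bool, "no-bin", "compile but do not install binaries") orelse false;

    const exe = b.addExecutable(.{
        .name = "demo",
        .root_source_file = b.path("src/main.zig"),
        .target = b.standardTargetOptions(.{}),
        .optimize = b.standardOptimizeOption(.{}),
    });

    if (!no_bin) b.installArtifact(exe);
}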
@@ -6,6 +6,9 @@ const case = @import("case");
 var verbose = false;
 
 pub fn main() anyerror!void {
+    const root_progress_node = std.Progress.start(.{});
+    defer root_progress_node.end();
+
     var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
     defer arena.deinit();
     const allocator = arena.allocator();
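The progress bar rests on the std.Progress API (the redesign that landed around Zig 0.13, matching the `std.Progress.start(.{})` call above): one root node per process, child nodes created with an estimated item count, and `completeOne()` advancing the terminal bar. A minimal self-contained sketch; the node name and loop are illustrative:

const std = @import("std");

pub fn main() !void {
    // One root node per process; .{} accepts defaults such as root_name.
    const root = std.Progress.start(.{});
    defer root.end();

    // A child node sized with an estimated total renders "n/10"-style
    // progress; the name appears beside the bar.
    const node = root.start("generating models", 10);
    defer node.end();

    for (0..10) |_| {
        std.time.sleep(50 * std.time.ns_per_ms); // stand-in for real work
        node.completeOne();
    }
}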
@@ -67,13 +70,14 @@ pub fn main() anyerror!void {
     // no files specified, look for json files in models directory or cwd
     // this is our normal mode of operation and where initial optimizations
     // can be made
 
     if (models_dir) |m| {
         var cwd = try std.fs.cwd().openDir(".", .{});
         defer cwd.close();
         defer cwd.setAsCwd() catch unreachable;
 
         try m.setAsCwd();
-        try processDirectories(m, output_dir);
+        try processDirectories(m, output_dir, &root_progress_node);
     }
 }
@@ -85,7 +89,7 @@ const OutputManifest = struct {
     model_dir_hash_digest: [Hasher.hex_multihash_len]u8,
     output_dir_hash_digest: [Hasher.hex_multihash_len]u8,
 };
-fn processDirectories(models_dir: std.fs.Dir, output_dir: std.fs.Dir) !void {
+fn processDirectories(models_dir: std.fs.Dir, output_dir: std.fs.Dir, parent_progress: *const std.Progress.Node) !void {
     // Let's get ready to hash!!
     var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena.deinit();
@@ -93,7 +97,8 @@ fn processDirectories(models_dir: std.fs.Dir, output_dir: std.fs.Dir) !void {
     var thread_pool: std.Thread.Pool = undefined;
     try thread_pool.init(.{ .allocator = allocator });
     defer thread_pool.deinit();
-    var calculated_manifest = try calculateDigests(models_dir, output_dir, &thread_pool);
+
+    const count, var calculated_manifest = try calculateDigests(models_dir, output_dir, &thread_pool);
     const output_stored_manifest = output_dir.readFileAlloc(allocator, "output_manifest.json", std.math.maxInt(usize)) catch null;
     if (output_stored_manifest) |o| {
         // we have a stored manifest. Parse it and compare to our calculations
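`const count, var calculated_manifest = ...` uses Zig's destructuring of anonymous tuple types (calculateDigests now returns `struct { usize, OutputManifest }`, per the hunk below). A minimal sketch of the pattern with hypothetical names:

const std = @import("std");

// Returning an anonymous tuple hands back two values without declaring
// a named result struct.
fn countAndSum(items: []const u32) struct { usize, u32 } {
    var sum: u32 = 0;
    for (items) |i| sum += i;
    return .{ items.len, sum };
}

test "tuple destructuring" {
    // Fresh bindings:
    const count, var sum = countAndSum(&.{ 1, 2, 3 });
    try std.testing.expectEqual(@as(usize, 3), count);

    // Reassignment form, discarding the first element with `_` --
    // the same shape as `_, calculated_manifest = ...` in this diff.
    _, sum = countAndSum(&.{ 4, 5 });
    try std.testing.expectEqual(@as(u32, 9), sum);
}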
@@ -113,14 +118,19 @@ fn processDirectories(models_dir: std.fs.Dir, output_dir: std.fs.Dir) !void {
         defer manifest_file.close();
         const manifest = manifest_file.writer();
         var mi = models_dir.iterate();
+
+        const generating_models_progress = parent_progress.start("generating models", count);
+        defer generating_models_progress.end();
+
         while (try mi.next()) |e| {
-            if ((e.kind == .file or e.kind == .sym_link) and
-                std.mem.endsWith(u8, e.name, ".json"))
+            if ((e.kind == .file or e.kind == .sym_link) and std.mem.endsWith(u8, e.name, ".json")) {
                 try processFile(e.name, output_dir, manifest);
+                generating_models_progress.completeOne();
+            }
         }
         // re-calculate so we can store the manifest
         model_digest = calculated_manifest.model_dir_hash_digest;
-        calculated_manifest = try calculateDigests(models_dir, output_dir, &thread_pool);
+        _, calculated_manifest = try calculateDigests(models_dir, output_dir, &thread_pool);
         try output_dir.writeFile(.{ .sub_path = "output_manifest.json", .data = try std.json.stringifyAlloc(
             allocator,
             calculated_manifest,
@@ -129,13 +139,18 @@ fn processDirectories(models_dir: std.fs.Dir, output_dir: std.fs.Dir) !void {
 }
 
 var model_digest: ?[Hasher.hex_multihash_len]u8 = null;
-fn calculateDigests(models_dir: std.fs.Dir, output_dir: std.fs.Dir, thread_pool: *std.Thread.Pool) !OutputManifest {
+fn calculateDigests(models_dir: std.fs.Dir, output_dir: std.fs.Dir, thread_pool: *std.Thread.Pool) !struct { usize, OutputManifest } {
+    const Include = struct {
+        threadlocal var count: usize = 0;
+        pub fn include(entry: std.fs.Dir.Walker.Entry) bool {
+            const included = std.mem.endsWith(u8, entry.basename, ".json");
+            if (included) count += 1;
+            return included;
+        }
+    };
+
     const model_hash = if (model_digest) |m| m[0..Hasher.digest_len].* else try Hasher.computeDirectoryHash(thread_pool, models_dir, @constCast(&Hasher.ComputeDirectoryOptions{
-        .isIncluded = struct {
-            pub fn include(entry: std.fs.Dir.Walker.Entry) bool {
-                return std.mem.endsWith(u8, entry.basename, ".json");
-            }
-        }.include,
+        .isIncluded = Include.include,
         .isExcluded = struct {
             pub fn exclude(entry: std.fs.Dir.Walker.Entry) bool {
                 _ = entry;
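Both the old inline struct and the new named `Include` struct use the same Zig idiom: a struct acting as a namespace whose function is passed as a callback, with the named form also able to carry companion state (here the threadlocal match counter read back as `Include.count`). A runnable sketch of the stateless variant of the idiom, with hypothetical `Options`/`countMatches` names standing in for `Hasher.ComputeDirectoryOptions`:

const std = @import("std");

const Options = struct {
    isIncluded: *const fn (basename: []const u8) bool,
};

fn countMatches(names: []const []const u8, options: Options) usize {
    var matched: usize = 0;
    for (names) |name| {
        if (options.isIncluded(name)) matched += 1;
    }
    return matched;
}

test "struct-as-namespace callback" {
    // Inline anonymous struct: fine while the predicate is stateless.
    const n = countMatches(&.{ "a.json", "b.txt" }, .{
        .isIncluded = struct {
            pub fn include(basename: []const u8) bool {
                return std.mem.endsWith(u8, basename, ".json");
            }
        }.include,
    });
    try std.testing.expectEqual(@as(usize, 1), n);
}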
@@ -162,8 +177,10 @@ fn calculateDigests(models_dir: std.fs.Dir, output_dir: std.fs.Dir, thread_pool:
     }));
     if (verbose) std.log.info("Output directory hash: {s}", .{Hasher.hexDigest(output_hash)});
     return .{
-        .model_dir_hash_digest = model_digest orelse Hasher.hexDigest(model_hash),
-        .output_dir_hash_digest = Hasher.hexDigest(output_hash),
+        Include.count, .{
+            .model_dir_hash_digest = model_digest orelse Hasher.hexDigest(model_hash),
+            .output_dir_hash_digest = Hasher.hexDigest(output_hash),
+        },
     };
 }
 fn processFile(file_name: []const u8, output_dir: std.fs.Dir, manifest: anytype) !void {
@@ -729,7 +746,10 @@ fn generateTypeFor(shape_id: []const u8, writer: anytype, state: GenerationState
         // must be blocking deep recursion somewhere or this would be a great
         // DOS attack
         try generateSimpleTypeFor("nothing", "[]const u8", writer);
-        std.log.warn("Type cycle detected, limiting depth. Type: {s}", .{shape_id});
+
+        if (verbose) {
+            std.log.warn("Type cycle detected, limiting depth. Type: {s}", .{shape_id});
+        }
         // if (std.mem.eql(u8, "com.amazonaws.workmail#Timestamp", shape_id)) {
         //     std.log.info(" Type stack:\n", .{});
        //     for (state.type_stack.items) |i|
@@ -884,7 +904,7 @@ fn generateComplexTypeFor(shape_id: []const u8, members: []smithy.TypeMember, ty
                 // Don't assert as that will be optimized for Release* builds
                 // We'll continue here and treat the above as a warning
                 if (payload) |first| {
-                    std.log.err("Found multiple httpPayloads in violation of smithy spec! Ignoring '{s}' and using '{s}'", .{ first, snake_case_member });
+                    std.log.warn("Found multiple httpPayloads in violation of smithy spec! Ignoring '{s}' and using '{s}'", .{ first, snake_case_member });
                 }
                 payload = try allocator.dupe(u8, snake_case_member);
             },