Compare commits

...

1 commit

Author SHA1 Message Date
512eab0db0
upgrade to zig 0.16.0
All checks were successful
Generic zig build / build (push) Successful in 25s
2026-04-14 14:30:55 -07:00
5 changed files with 38 additions and 34 deletions

View file

@@ -1,5 +1,5 @@
[tools] [tools]
prek = "0.3.1" prek = "0.3.1"
"ubi:DonIsaac/zlint" = "0.7.9" "ubi:DonIsaac/zlint" = "0.7.9"
zig = "0.15.2" zig = "0.16.0"
zls = "0.15.1" zls = "0.15.1"

View file

@@ -250,6 +250,7 @@ const BenchmarkStep = struct {
const b = step.owner; const b = step.owner;
const self: *BenchmarkStep = @fieldParentPtr("step", step); const self: *BenchmarkStep = @fieldParentPtr("step", step);
const io = b.graph.io;
const gen_path = b.getInstallPath(.bin, self.gen_exe.name); const gen_path = b.getInstallPath(.bin, self.gen_exe.name);
const exe_path = b.getInstallPath(.bin, self.srf_exe.name); const exe_path = b.getInstallPath(.bin, self.srf_exe.name);
const count_str = b.fmt("{d}", .{self.record_count}); const count_str = b.fmt("{d}", .{self.record_count});
@@ -271,29 +272,32 @@ const BenchmarkStep = struct {
const hash_str = b.fmt("{x}", .{hash}); const hash_str = b.fmt("{x}", .{hash});
const cache_dir = b.cache_root.join(b.allocator, &.{ "o", hash_str }) catch @panic("OOM"); const cache_dir = b.cache_root.join(b.allocator, &.{ "o", hash_str }) catch @panic("OOM");
std.fs.cwd().makePath(cache_dir) catch {}; b.cache_root.handle.createDirPath(io, cache_dir) catch @panic("Could not create cache path");
const filename = b.fmt("test-{s}.{s}", .{ fmt.name, fmt.ext }); const filename = b.fmt("test-{s}.{s}", .{ fmt.name, fmt.ext });
const filepath = b.pathJoin(&.{ cache_dir, filename }); const filepath = b.pathJoin(&.{ cache_dir, filename });
test_files[i] = filepath; test_files[i] = filepath;
// Check if file exists // Check if file exists
if (std.fs.cwd().access(filepath, .{})) { if (b.cache_root.handle.access(io, filepath, .{})) {
continue; // File exists, skip generation continue; // File exists, skip generation
} else |_| {} } else |_| {}
// Generate file // Generate file
var child = std.process.Child.init(&.{ gen_path, fmt.name, count_str }, b.allocator); var child = try std.process.spawn(io, .{
child.stdout_behavior = .Pipe; .argv = &.{ gen_path, fmt.name, count_str },
try child.spawn(); .stdout = .pipe,
});
const output = try child.stdout.?.readToEndAlloc(b.allocator, 100 * 1024 * 1024); var buf: [4096]u8 = undefined;
var file_reader = child.stdout.?.reader(io, &buf);
var reader = &file_reader.interface;
const output = try reader.allocRemaining(b.allocator, .unlimited);
defer b.allocator.free(output); defer b.allocator.free(output);
const term = try child.wait(io);
if (term != .exited or term.exited != 0) return error.GenerationFailed;
const term = try child.wait(); try b.cache_root.handle.writeFile(io, .{ .sub_path = filepath, .data = output });
if (term != .Exited or term.Exited != 0) return error.GenerationFailed;
try std.fs.cwd().writeFile(.{ .sub_path = filepath, .data = output });
} }
// Run hyperfine // Run hyperfine
@@ -308,16 +312,19 @@ const BenchmarkStep = struct {
try argv.append(b.allocator, b.fmt("{s} jsonl <{s}", .{ exe_path, test_files[2] })); try argv.append(b.allocator, b.fmt("{s} jsonl <{s}", .{ exe_path, test_files[2] }));
} }
var child = std.process.Child.init(argv.items, b.allocator);
// We need to lock stderror so hyperfine can output progress in place // We need to lock stderror so hyperfine can output progress in place
std.debug.lockStdErr(); // SAFETY: buffer for locking
defer std.debug.unlockStdErr(); var buf: [1024]u8 = undefined; // I have no idea what the right size buffer should be
_ = try io.lockStderr(&buf, null);
defer io.unlockStderr();
try child.spawn(); var child = try std.process.spawn(io, .{
const term = try child.wait(); .argv = argv.items,
});
if (term != .Exited or term.Exited != 0) const term = try child.wait(io);
if (term != .exited or term.exited != 0)
return error.BenchmarkFailed; return error.BenchmarkFailed;
} }
}; };

View file

@@ -25,7 +25,7 @@
.fingerprint = 0x102ed002eff998a9, // Changing this has security and trust implications. .fingerprint = 0x102ed002eff998a9, // Changing this has security and trust implications.
// Tracks the earliest Zig version that the package considers to be a // Tracks the earliest Zig version that the package considers to be a
// supported use case. // supported use case.
.minimum_zig_version = "0.15.2", .minimum_zig_version = "0.16.0",
// This field is optional. // This field is optional.
// Each dependency must either provide a `url` and `hash`, or a `path`. // Each dependency must either provide a `url` and `hash`, or a `path`.
// `zig build --fetch` can be used to fetch all dependencies of a package, recursively. // `zig build --fetch` can be used to fetch all dependencies of a package, recursively.

View file

@@ -46,13 +46,10 @@ const CountingAllocator = struct {
} }
}; };
pub fn main() !void { pub fn main(init: std.process.Init) !void {
var gpa = std.heap.GeneralPurposeAllocator(.{}){}; const gpa = init.gpa;
defer _ = gpa.deinit();
const base_allocator = gpa.allocator();
const args = try std.process.argsAlloc(base_allocator); const args = try init.minimal.args.toSlice(init.arena.allocator());
defer std.process.argsFree(base_allocator, args);
if (args.len < 2) { if (args.len < 2) {
std.debug.print("Usage: {s} <srf|json|jsonl>\n", .{args[0]}); std.debug.print("Usage: {s} <srf|json|jsonl>\n", .{args[0]});
@@ -61,19 +58,19 @@ pub fn main() !void {
const format = args[1]; const format = args[1];
const debug_allocs = std.process.hasEnvVarConstant("DEBUG_ALLOCATIONS"); const debug_allocs = init.environ_map.contains("DEBUG_ALLOCATIONS");
var counting = CountingAllocator{ .child_allocator = base_allocator }; var counting = CountingAllocator{ .child_allocator = gpa };
const allocator = if (debug_allocs) counting.allocator() else base_allocator; const allocator = if (debug_allocs) counting.allocator() else gpa;
var stdin_buffer: [1024]u8 = undefined; var stdin_buffer: [1024]u8 = undefined;
var stdin_reader = std.fs.File.stdin().reader(&stdin_buffer); var stdin_reader = std.Io.File.stdin().reader(init.io, &stdin_buffer);
const stdin = &stdin_reader.interface; const stdin = &stdin_reader.interface;
// Load all data into memory first for fair comparison // Load all data into memory first for fair comparison
var data: std.ArrayList(u8) = .empty; var data: std.ArrayList(u8) = .empty;
defer data.deinit(base_allocator); defer data.deinit(gpa);
try stdin.appendRemaining(base_allocator, &data, @enumFromInt(100 * 1024 * 1024)); try stdin.appendRemaining(gpa, &data, @enumFromInt(100 * 1024 * 1024));
if (std.mem.eql(u8, format, "srf")) { if (std.mem.eql(u8, format, "srf")) {
var reader = std.Io.Reader.fixed(data.items); var reader = std.Io.Reader.fixed(data.items);

View file

@@ -1018,9 +1018,9 @@ pub const RecordIterator = struct {
/// use or refresh cached data. Note that data will be returned by parse/ /// use or refresh cached data. Note that data will be returned by parse/
/// iterator regardless of freshness. This enables callers to use cached /// iterator regardless of freshness. This enables callers to use cached
/// data temporarily while refreshing it /// data temporarily while refreshing it
pub fn isFresh(self: RecordIterator) bool { pub fn isFresh(self: RecordIterator, io: std.Io) bool {
if (self.expires) |exp| if (self.expires) |exp|
return std.time.timestamp() < exp; return std.Io.Timestamp.now(io, .real).toSeconds() < exp;
// no expiry: always fresh, never frozen // no expiry: always fresh, never frozen
return true; return true;
@@ -1038,7 +1038,7 @@ pub const RecordIterator = struct {
defer ri.deinit(); defer ri.deinit();
// No expiry set, so always fresh // No expiry set, so always fresh
try std.testing.expect(ri.isFresh()); try std.testing.expect(ri.isFresh(std.testing.io));
} }
}; };