upgrade to zig 0.15.2

This commit is contained in:
Emil Lerch 2025-10-15 12:27:04 -07:00
parent 1cb86f085d
commit 3c5edacf26
Signed by: lobo
GPG key ID: A7B62D657EF764F8
7 changed files with 164 additions and 38 deletions

View file

@ -1,6 +1,8 @@
[tools]
zig = "0.14.1"
zls = "0.14.0"
pre-commit = "latest"
"ubi:DonIsaac/zlint" = "latest"
zig = "0.15.2"
zls = "0.15.0"
[hooks]
enter = 'echo use "nix develop" if you want to build'

30
.pre-commit-config.yaml Normal file
View file

@ -0,0 +1,30 @@
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v3.2.0
hooks:
- id: trailing-whitespace
- id: end-of-file-fixer
- id: check-yaml
- id: check-added-large-files
- repo: https://github.com/batmac/pre-commit-zig
rev: v0.3.0
hooks:
- id: zig-fmt
- id: zig-build
- repo: local
hooks:
- id: smoke-test
name: Run zig build test
entry: zig
args: ["build", "--verbose", "test"]
language: system
types: [file]
pass_filenames: false
- id: zlint
name: Run zlint
entry: zlint
args: ["--deny-warnings", "--fix"]
language: system
types: [zig]

80
PLAN.md Normal file
View file

@ -0,0 +1,80 @@
# Zetviel Development Plan
## Project Rules
1. **Always run `zig fmt .` after any change to a zig file**
2. **Before considering a task complete: `zig build` must have no errors/output**
3. **Before considering a task complete: all tests must pass with `zig build test`**
## Goal
Create a netviel clone with improvements:
- Visual indication that server is working
- URL changes with UI state for deep linking
- Custom frontend (not copying netviel's JavaScript)
## Phase 1: Upgrade Zig ✅ COMPLETE
- [x] Update `build.zig.zon` to Zig 0.15.2
- [x] Update `.mise.toml` to use Zig 0.15.2
- [x] Fix breaking changes in `build.zig` (Module API, alignment issues)
- [x] Fix breaking changes in `src/main.zig` (stdout API)
- [x] Fix JSON API changes in `src/root.zig` (converted OutOfMemory to WriteFailed)
- [x] Verify all tests pass
- [x] Run `zig fmt .`
## Phase 2: Complete Email Parsing API
- [ ] Finish `Email.zig` implementation:
- [ ] Extract HTML/plain text content with preference (html > plain)
- [ ] Parse and list attachments (filename, content-type)
- [ ] Extract all standard headers (from, to, cc, bcc, date, subject)
- [ ] Add attachment retrieval by index
- [ ] Integrate Email parsing into `root.zig` Thread API (uncomment TODOs)
- [ ] Add HTML sanitization (simple allowlist approach)
- [ ] Add tests for new functionality
- [ ] Run `zig fmt .`
## Phase 3: HTTP Server & REST API
- [ ] Research and choose HTTP framework (defer decision)
- [ ] Add HTTP server dependency
- [ ] Implement REST endpoints:
- [ ] `GET /api/query/<query_string>` - search threads
- [ ] `GET /api/thread/<thread_id>` - get thread messages
- [ ] `GET /api/attachment/<message_id>/<num>` - download attachment
- [ ] `GET /api/message/<message_id>` - download raw .eml file
- [ ] Complete JSON serialization (extend existing in root.zig)
- [ ] Add security headers (CORS, X-Frame-Options, etc.)
- [ ] Add tests for API endpoints
- [ ] Run `zig fmt .`
## Phase 4: Static File Serving
- [ ] Implement static file serving:
- [ ] Serve `index.html` at `/`
- [ ] Serve static assets (JS, CSS)
- [ ] Handle SPA routing (all paths → index.html)
- [ ] Add `--port` CLI argument
- [ ] Run `zig fmt .`
## Phase 5: Frontend Development
- [ ] Design minimal UI (list threads, view messages, search)
- [ ] Implement frontend features:
- [ ] Thread list view
- [ ] Message detail view
- [ ] Search functionality
- [ ] Visual server status indicator
- [ ] URL-based routing for deep linking
- [ ] Attachment download links
- [ ] Ensure API compatibility
## Phase 6: Polish
- [ ] Add proper error handling throughout
- [ ] Add logging
- [ ] Update README with usage instructions
- [ ] Add configuration options (NOTMUCH_PATH env var)
- [ ] Security audit and warnings (local-only usage)
- [ ] Run `zig fmt .`
## Notes
- Frontend will be custom-built, not copied from netviel
- HTTP framework choice deferred to Phase 3
- HTML sanitization will use simple allowlist approach (not porting bleach)
## Current Status
Phase 1 (Zig upgrade to 0.15.2) is complete. Ready to begin Phase 2: Complete Email Parsing API.

View file

@ -19,27 +19,34 @@ pub fn build(b: *std.Build) !void {
// set a preferred release mode, allowing the user to decide how to optimize.
const optimize = b.standardOptimizeOption(.{});
const lib = b.addStaticLibrary(.{
.name = "zetviel",
// In this case the main source file is merely a path, however, in more
// complicated build scripts, this could be a generated file.
const lib_module = b.createModule(.{
.root_source_file = b.path("src/root.zig"),
.target = target,
.optimize = optimize,
});
const lib = b.addLibrary(.{
.name = "zetviel",
.linkage = .static,
.root_module = lib_module,
});
// This declares intent for the library to be installed into the standard
// location when the user invokes the "install" step (the default step when
// running `zig build`).
b.installArtifact(lib);
const exe = b.addExecutable(.{
.name = "zetviel",
const exe_module = b.createModule(.{
.root_source_file = b.path("src/main.zig"),
.target = target,
.optimize = optimize,
});
const exe = b.addExecutable(.{
.name = "zetviel",
.root_module = exe_module,
});
configure(exe, paths, reload_discovered_native_paths);
// This declares intent for the executable to be installed into the
// standard location when the user invokes the "install" step (the default
@ -71,20 +78,28 @@ pub fn build(b: *std.Build) !void {
// Creates a step for unit testing. This only builds the test executable
// but does not run it.
const lib_unit_tests = b.addTest(.{
const lib_test_module = b.createModule(.{
.root_source_file = b.path("src/root.zig"),
.target = target,
.optimize = optimize,
});
const lib_unit_tests = b.addTest(.{
.root_module = lib_test_module,
});
configure(lib_unit_tests, paths, reload_discovered_native_paths);
const run_lib_unit_tests = b.addRunArtifact(lib_unit_tests);
const exe_unit_tests = b.addTest(.{
const exe_test_module = b.createModule(.{
.root_source_file = b.path("src/main.zig"),
.target = target,
.optimize = optimize,
});
const exe_unit_tests = b.addTest(.{
.root_module = exe_test_module,
});
const valgrind = b.option(bool, "valgrind", "Check for leaks with valgrind") orelse false;
if (valgrind)
exe_unit_tests.setExecCmd(&[_]?[]const u8{
@ -123,7 +138,7 @@ fn configure(compile: *std.Build.Step.Compile, paths: std.zig.system.NativePaths
fn checkNix(b: *std.Build, target_query: *std.Target.Query) !std.zig.system.NativePaths {
const native_result = b.resolveTargetQuery(target_query.*);
const paths = try std.zig.system.NativePaths.detect(b.allocator, native_result.result);
const paths = try std.zig.system.NativePaths.detect(b.allocator, &native_result.result);
// If we are not using nix, we can build anywhere provided the system dependencies exist
if (!std.process.hasEnvVarConstant("NIX_BINTOOLS")) return paths;
@ -181,19 +196,19 @@ fn getDynamicLinker(elf_path: []const u8) !std.Target.DynamicLinker {
return error.FileNotExpectedElf;
}
// Section header table
const e_shoff = std.mem.littleToNative(u64, @as(*u64, @ptrFromInt(@intFromPtr(file_contents[0x28 .. 0x29 + 8]))).*); // E8 9D 00 00 00 00 00 00
const e_shoff = std.mem.readInt(u64, file_contents[0x28..][0..8], .little); // E8 9D 00 00 00 00 00 00
// Number of sections
const e_shnum = std.mem.littleToNative(u16, @as(*u16, @ptrFromInt(@intFromPtr(file_contents[0x3c .. 0x3d + 2]))).*); // 1d
const e_shnum = std.mem.readInt(u16, file_contents[0x3c..][0..2], .little); // 1d
// Index of section header that contains section header names
const e_shstrndx = std.mem.littleToNative(u16, @as(*u16, @ptrFromInt(@intFromPtr(file_contents[0x3e .. 0x3f + 2]))).*); // 1c
const e_shstrndx = std.mem.readInt(u16, file_contents[0x3e..][0..2], .little); // 1c
// Beginning of section 0x1c (28) that contains header names
const e_shstroff = e_shoff + (64 * e_shstrndx); // 0xa4e8
const shstrtab_contents = file_contents[e_shstroff .. e_shstroff + 1 + (e_shnum * 64)];
// Offset for my set of null terminated strings
const shstrtab_sh_offset = std.mem.littleToNative(u64, @as(*u64, @ptrFromInt(@intFromPtr(shstrtab_contents[0x18 .. 0x19 + 8]))).*); // 0x9cec
const shstrtab_sh_offset = std.mem.readInt(u64, shstrtab_contents[0x18..][0..8], .little); // 0x9cec
// Total size of section
const shstrtab_sh_size = std.mem.littleToNative(u64, @as(*u64, @ptrFromInt(@intFromPtr(shstrtab_contents[0x20 .. 0x21 + 8]))).*); // 250
const shstrtab_sh_size = std.mem.readInt(u64, shstrtab_contents[0x20..][0..8], .little); // 250
// std.debug.print("e_shoff: {x}, e_shstrndx: {x}, e_shstroff: {x}, e_shnum: {x}, shstrtab_sh_offset: {x}, shstrtab_sh_size: {}\n", .{ e_shoff, e_shstrndx, e_shstroff, e_shnum, shstrtab_sh_offset, shstrtab_sh_size });
const shstrtab_strings = file_contents[shstrtab_sh_offset .. shstrtab_sh_offset + 1 + shstrtab_sh_size];
var interp: ?[]const u8 = null;
@ -201,10 +216,10 @@ fn getDynamicLinker(elf_path: []const u8) !std.Target.DynamicLinker {
// get section offset. Look for type == SHT_PROGBITS, then go fetch name
const sh_off = e_shoff + (64 * shndx);
const sh_contents = file_contents[sh_off .. sh_off + 1 + 64];
const sh_type = std.mem.littleToNative(u16, @as(*u16, @ptrFromInt(@intFromPtr(sh_contents[0x04 .. 0x05 + 2]))).*);
const sh_type = std.mem.readInt(u16, sh_contents[0x04..][0..2], .little);
if (sh_type != 0x01) continue;
// This is an offset to the null terminated string in our string content
const sh_name_offset = std.mem.littleToNative(u16, @as(*u16, @ptrFromInt(@intFromPtr(sh_contents[0x00 .. 0x01 + 2]))).*);
const sh_name_offset = std.mem.readInt(u16, sh_contents[0x00..][0..2], .little);
const sentinel = std.mem.indexOfScalar(u8, shstrtab_strings[sh_name_offset..], 0);
if (sentinel == null) {
std.log.err("Invalid ELF file", .{});
@ -214,8 +229,8 @@ fn getDynamicLinker(elf_path: []const u8) !std.Target.DynamicLinker {
// std.debug.print("section name: {s}\n", .{sh_name});
if (std.mem.eql(u8, ".interp", sh_name)) {
// found interpreter
const interp_offset = std.mem.littleToNative(u64, @as(*u64, @ptrFromInt(@intFromPtr(sh_contents[0x18 .. 0x19 + 8]))).*); // 0x9218
const interp_size = std.mem.littleToNative(u64, @as(*u64, @ptrFromInt(@intFromPtr(sh_contents[0x20 .. 0x21 + 8]))).*); // 2772
const interp_offset = std.mem.readInt(u64, sh_contents[0x18..][0..8], .little); // 0x9218
const interp_size = std.mem.readInt(u64, sh_contents[0x20..][0..8], .little); // 2772
// std.debug.print("Found interpreter at {x}, size: {}\n", .{ interp_offset, interp_size });
interp = file_contents[interp_offset .. interp_offset + interp_size];
// std.debug.print("Interp: {s}\n", .{interp});
@ -226,7 +241,11 @@ fn getDynamicLinker(elf_path: []const u8) !std.Target.DynamicLinker {
return error.CouldNotLocateInterpreter;
}
var dl = std.Target.DynamicLinker{ .buffer = undefined, .len = 0 };
// SAFETY: buffer is set shortly afterward by the dl.set() call
var dl = std.Target.DynamicLinker{
.buffer = undefined,
.len = 0,
};
// The .interp section contains a null-terminated string, so we need to trim the null terminator
const trimmed_interp = std.mem.trimRight(u8, interp.?, &[_]u8{0});
dl.set(trimmed_interp);

View file

@ -7,7 +7,7 @@
// This field is optional.
// This is currently advisory only; Zig does not yet do anything
// with this value.
.minimum_zig_version = "0.14.0",
.minimum_zig_version = "0.15.2",
.fingerprint = 0xd4c335836acc5e4e,

View file

@ -8,11 +8,8 @@ pub fn main() !void {
// stdout is for the actual output of your application, for example if you
// are implementing gzip, then only the compressed bytes should be sent to
// stdout, not any debugging messages.
const stdout_file = std.io.getStdOut().writer();
var bw = std.io.bufferedWriter(stdout_file);
const stdout = bw.writer();
try stdout.print("Run `zig build test` to run the tests.\n", .{});
const stdout_file = std.fs.File{ .handle = std.posix.STDOUT_FILENO };
try stdout_file.writeAll("Run `zig build test` to run the tests.\n");
// Example of using the root.zig functionality
const allocator = std.heap.page_allocator;
@ -23,6 +20,4 @@ pub fn main() !void {
defer db_result.close();
std.debug.print("Successfully opened notmuch database at: {s}\n", .{db_result.path});
try bw.flush(); // don't forget to flush!
}

View file

@ -30,21 +30,21 @@ pub const Thread = struct {
// }
//]
try jws.beginArray();
var mi = self.thread.getMessages() catch return error.OutOfMemory;
var mi = self.thread.getMessages() catch return error.WriteFailed;
while (mi.next()) |m| {
try jws.beginObject();
try jws.objectField("from");
try jws.write(m.getHeader("from") catch return error.OutOfMemory);
try jws.write(m.getHeader("from") catch return error.WriteFailed);
try jws.objectField("to");
try jws.write(m.getHeader("to") catch return error.OutOfMemory);
try jws.write(m.getHeader("to") catch return error.WriteFailed);
try jws.objectField("cc");
try jws.write(m.getHeader("cc") catch return error.OutOfMemory);
try jws.write(m.getHeader("cc") catch return error.WriteFailed);
try jws.objectField("bcc");
try jws.write(m.getHeader("bcc") catch return error.OutOfMemory);
try jws.write(m.getHeader("bcc") catch return error.WriteFailed);
try jws.objectField("date");
try jws.write(m.getHeader("date") catch return error.OutOfMemory);
try jws.write(m.getHeader("date") catch return error.WriteFailed);
try jws.objectField("subject");
try jws.write(m.getHeader("subject") catch return error.OutOfMemory);
try jws.write(m.getHeader("subject") catch return error.WriteFailed);
// content, content-type, and attachments are all based on the file itself
// TODO: init shouldn't fail
// var message = try Message.init(self.allocator, m.getFilename());
@ -146,7 +146,7 @@ pub const Threads = struct {
try jws.objectField("subject");
try jws.write(t.getSubject());
try jws.objectField("tags");
var tags = t.getTags() catch return error.OutOfMemory;
var tags = t.getTags() catch return error.WriteFailed;
try tags.jsonStringify(jws);
try jws.objectField("thread_id");
try jws.write(t.getThreadId());
@ -254,7 +254,7 @@ test "can stringify general queries" {
defer db.close();
var threads = try db.search("Tablets");
defer threads.deinit();
const actual = try std.json.stringifyAlloc(allocator, threads, .{ .whitespace = .indent_2 });
const actual = try std.fmt.allocPrint(allocator, "{f}", .{std.json.fmt(threads, .{ .whitespace = .indent_2 })});
defer allocator.free(actual);
try std.testing.expectEqualStrings(
\\[