upgrade to zig 0.11.0-dev.3312+ab37ab33c

This commit is contained in:
Emil Lerch 2023-05-31 16:19:31 -07:00
parent df5cb27e8e
commit 7b271a0698
Signed by: lobo
GPG Key ID: A7B62D657EF764F8
2 changed files with 7 additions and 8 deletions

View File

@@ -73,12 +73,12 @@ pub fn startWatch(self: *Self) void {
self.watch_started = true; self.watch_started = true;
var fds = if (self.inotify_fd == null) var fds = if (self.inotify_fd == null)
&[_]std.os.pollfd{.{ .fd = self.control_socket.?, .events = std.os.POLL.IN, .revents = undefined }} @constCast(&[_]std.os.pollfd{.{ .fd = self.control_socket.?, .events = std.os.POLL.IN, .revents = undefined }})
else else
&[_]std.os.pollfd{ @constCast(&[_]std.os.pollfd{
.{ .fd = self.control_socket.?, .events = std.os.POLL.IN, .revents = undefined }, .{ .fd = self.control_socket.?, .events = std.os.POLL.IN, .revents = undefined },
.{ .fd = self.inotify_fd.?, .events = std.os.POLL.IN, .revents = undefined }, .{ .fd = self.inotify_fd.?, .events = std.os.POLL.IN, .revents = undefined },
}; });
const control_fd_inx = 0; const control_fd_inx = 0;
const inotify_fd_inx = 1; const inotify_fd_inx = 1;

View File

@@ -392,12 +392,11 @@ fn loadConfig(allocator: std.mem.Allocator) ![]Executor {
/// with logs, connection accounting, etc. The work dealing with the request /// with logs, connection accounting, etc. The work dealing with the request
/// itself is delegated to the serve function to work with the executor /// itself is delegated to the serve function to work with the executor
fn processRequest(allocator: *std.mem.Allocator, server: *std.http.Server, writer: anytype) !void { fn processRequest(allocator: *std.mem.Allocator, server: *std.http.Server, writer: anytype) !void {
const max_header_size = 8192;
if (timer == null) timer = try std.time.Timer.start(); if (timer == null) timer = try std.time.Timer.start();
var tm = timer.?; var tm = timer.?;
const res = try server.accept(.{ .dynamic = max_header_size }); var res = try server.accept(.{ .allocator = allocator.* });
defer res.deinit(); defer res.deinit();
defer res.reset(); defer _ = res.reset();
try res.wait(); // wait for client to send a complete request head try res.wait(); // wait for client to send a complete request head
// I believe it's fair to start our timer after this is done // I believe it's fair to start our timer after this is done
tm.reset(); tm.reset();
@@ -415,7 +414,7 @@ fn processRequest(allocator: *std.mem.Allocator, server: *std.http.Server, writer: anytype) !void {
var errbuf: [errstr.len]u8 = undefined; var errbuf: [errstr.len]u8 = undefined;
var response_bytes = try std.fmt.bufPrint(&errbuf, errstr, .{}); var response_bytes = try std.fmt.bufPrint(&errbuf, errstr, .{});
var full_response = serve(allocator, res) catch |e| brk: { var full_response = serve(allocator, &res) catch |e| brk: {
res.status = .internal_server_error; res.status = .internal_server_error;
// TODO: more about this particular request // TODO: more about this particular request
log.err("Unexpected error from executor processing request: {any}", .{e}); log.err("Unexpected error from executor processing request: {any}", .{e});
@@ -435,7 +434,7 @@ fn processRequest(allocator: *std.mem.Allocator, server: *std.http.Server, writer: anytype) !void {
res.transfer_encoding = .{ .content_length = response_bytes.len }; res.transfer_encoding = .{ .content_length = response_bytes.len };
try res.headers.append("connection", "close"); try res.headers.append("connection", "close");
try writer.print(" {d} ttfb {d:.3}ms", .{ @enumToInt(res.status), @intToFloat(f64, tm.read()) / std.time.ns_per_ms }); try writer.print(" {d} ttfb {d:.3}ms", .{ @enumToInt(res.status), @intToFloat(f64, tm.read()) / std.time.ns_per_ms });
if (builtin.is_test) writeToTestBuffers(response_bytes, res); if (builtin.is_test) writeToTestBuffers(response_bytes, &res);
try res.do(); try res.do();
_ = try res.writer().writeAll(response_bytes); _ = try res.writer().writeAll(response_bytes);
try res.finish(); try res.finish();