diff --git a/lib/compiler/std-docs.zig b/lib/compiler/std-docs.zig
index ec3e3ab945..76da5c98c5 100644
--- a/lib/compiler/std-docs.zig
+++ b/lib/compiler/std-docs.zig
@@ -106,9 +106,19 @@ fn accept(context: *Context, connection: std.net.Server.Connection) void {
                 return;
             },
         };
-        serveRequest(&request, context) catch |err| {
-            std.log.err("unable to serve {s}: {s}", .{ request.head.target, @errorName(err) });
-            return;
+        serveRequest(&request, context) catch |err| switch (err) {
+            error.WriteFailed => {
+                if (conn_writer.err) |e| {
+                    std.log.err("unable to serve {s}: {s}", .{ request.head.target, @errorName(e) });
+                } else {
+                    std.log.err("unable to serve {s}: {s}", .{ request.head.target, @errorName(err) });
+                }
+                return;
+            },
+            else => {
+                std.log.err("unable to serve {s}: {s}", .{ request.head.target, @errorName(err) });
+                return;
+            },
         };
     }
 }
diff --git a/lib/std/http.zig b/lib/std/http.zig
index 640ac2e208..b06ee245a8 100644
--- a/lib/std/http.zig
+++ b/lib/std/http.zig
@@ -760,22 +760,14 @@ pub const BodyWriter = struct {
         /// As a debugging utility, counts down to zero as bytes are written.
         content_length: u64,
         /// Each chunk is wrapped in a header and trailer.
-        chunked: Chunked,
+        /// This length is the number of bytes to be written before the
+        /// next header. This includes +2 for the `\r\n` trailer and is zero
+        /// for the beginning of the stream.
+        chunk_len: usize,
         /// Cleanly finished stream; connection can be reused.
         end,
 
-        pub const Chunked = union(enum) {
-            /// Index to the start of the hex-encoded chunk length in the chunk
-            /// header within the buffer of `BodyWriter.http_protocol_output`.
-            /// Buffered chunk data starts here plus length of `chunk_header_template`.
-            offset: usize,
-            /// We are in the middle of a chunk and this is how many bytes are
-            /// left until the next header. This includes +2 for "\r\n", and
-            /// is zero for the beginning of the stream.
-            chunk_len: usize,
-
-            pub const init: Chunked = .{ .chunk_len = 0 };
-        };
+        pub const init_chunked: State = .{ .chunk_len = 0 };
     };
 
     pub fn isEliding(w: *const BodyWriter) bool {
@@ -786,21 +778,7 @@ pub const BodyWriter = struct {
     pub fn flush(w: *BodyWriter) Error!void {
         const out = w.http_protocol_output;
         switch (w.state) {
-            .end, .none, .content_length => return out.flush(),
-            .chunked => |*chunked| switch (chunked.*) {
-                .offset => |offset| {
-                    const chunk_len = out.end - offset - chunk_header_template.len;
-                    if (chunk_len > 0) {
-                        writeHex(out.buffer[offset..][0..chunk_len_digits], chunk_len);
-                        chunked.* = .{ .chunk_len = 2 };
-                    } else {
-                        out.end = offset;
-                        chunked.* = .{ .chunk_len = 0 };
-                    }
-                    try out.flush();
-                },
-                .chunk_len => return out.flush(),
-            },
+            .end, .none, .content_length, .chunk_len => return out.flush(),
         }
     }
 
@@ -843,7 +821,7 @@ pub const BodyWriter = struct {
                 w.state = .end;
             },
             .none => {},
-            .chunked => return endChunkedUnflushed(w, .{}),
+            .chunk_len => return endChunkedUnflushed(w, .{}),
         }
     }
 
@@ -879,24 +857,16 @@ pub const BodyWriter = struct {
     /// * `endUnflushed`
     /// * `end`
     pub fn endChunkedUnflushed(w: *BodyWriter, options: EndChunkedOptions) Error!void {
-        const chunked = &w.state.chunked;
         if (w.isEliding()) {
             w.state = .end;
             return;
         }
         const bw = w.http_protocol_output;
-        switch (chunked.*) {
-            .offset => |offset| {
-                const chunk_len = bw.end - offset - chunk_header_template.len;
-                writeHex(bw.buffer[offset..][0..chunk_len_digits], chunk_len);
-                try bw.writeAll("\r\n");
-            },
-            .chunk_len => |chunk_len| switch (chunk_len) {
-                0 => {},
-                1 => try bw.writeByte('\n'),
-                2 => try bw.writeAll("\r\n"),
-                else => unreachable, // An earlier write call indicated more data would follow.
-            },
+        switch (w.state.chunk_len) {
+            0 => {},
+            1 => try bw.writeByte('\n'),
+            2 => try bw.writeAll("\r\n"),
+            else => unreachable, // An earlier write call indicated more data would follow.
         }
         try bw.writeAll("0\r\n");
         for (options.trailers) |trailer| {
@@ -993,44 +963,32 @@ pub const BodyWriter = struct {
             return error.Unimplemented;
         };
         const out = bw.http_protocol_output;
-        const chunked = &bw.state.chunked;
-        state: switch (chunked.*) {
-            .offset => |off| {
-                // TODO: is it better perf to read small files into the buffer?
-                const buffered_len = out.end - off - chunk_header_template.len;
-                const chunk_len = data_len + buffered_len;
-                writeHex(out.buffer[off..][0..chunk_len_digits], chunk_len);
+        switch (bw.state.chunk_len) {
+            0 => {
+                const header_buf = try out.writableArray(chunk_header_template.len);
+                @memcpy(header_buf, chunk_header_template);
+                writeHex(header_buf[0..chunk_len_digits], data_len);
                 const n = try out.sendFileHeader(w.buffered(), file_reader, limit);
-                chunked.* = .{ .chunk_len = data_len + 2 - n };
-                return w.consume(n);
+                bw.state.chunk_len = data_len + 2 - n;
+                const ret = w.consume(n);
+                return ret;
             },
-            .chunk_len => |chunk_len| l: switch (chunk_len) {
-                0 => {
-                    const off = out.end;
-                    const header_buf = try out.writableArray(chunk_header_template.len);
-                    @memcpy(header_buf, chunk_header_template);
-                    chunked.* = .{ .offset = off };
-                    continue :state .{ .offset = off };
-                },
-                1 => {
-                    try out.writeByte('\n');
-                    chunked.chunk_len = 0;
-                    continue :l 0;
-                },
-                2 => {
-                    try out.writeByte('\r');
-                    chunked.chunk_len = 1;
-                    continue :l 1;
-                },
-                else => {
-                    const chunk_limit: std.Io.Limit = .limited(chunk_len - 2);
-                    const n = if (chunk_limit.subtract(w.buffered().len)) |sendfile_limit|
-                        try out.sendFileHeader(w.buffered(), file_reader, sendfile_limit.min(limit))
-                    else
-                        try out.write(chunk_limit.slice(w.buffered()));
-                    chunked.chunk_len = chunk_len - n;
-                    return w.consume(n);
-                },
+            1 => unreachable,
+            2 => {
+                try out.writeAll("\r\n");
+                bw.state.chunk_len = 0;
+                assert(file_reader.atEnd());
+                return error.EndOfStream;
+            },
+            else => {
+                const chunk_limit: std.Io.Limit = .limited(bw.state.chunk_len - 2);
+                const n = if (chunk_limit.subtract(w.buffered().len)) |sendfile_limit|
+                    try out.sendFileHeader(w.buffered(), file_reader, sendfile_limit.min(limit))
+                else
+                    try out.write(chunk_limit.slice(w.buffered()));
+                bw.state.chunk_len -= n;
+                const ret = w.consume(n);
+                return ret;
             },
         }
     }
@@ -1040,42 +998,25 @@ pub const BodyWriter = struct {
         assert(!bw.isEliding());
         const out = bw.http_protocol_output;
         const data_len = w.end + Writer.countSplat(data, splat);
-        const chunked = &bw.state.chunked;
-        state: switch (chunked.*) {
-            .offset => |offset| {
-                if (out.unusedCapacityLen() >= data_len) {
-                    return w.consume(out.writeSplatHeader(w.buffered(), data, splat) catch unreachable);
-                }
-                const buffered_len = out.end - offset - chunk_header_template.len;
-                const chunk_len = data_len + buffered_len;
-                writeHex(out.buffer[offset..][0..chunk_len_digits], chunk_len);
+        l: switch (bw.state.chunk_len) {
+            0 => {
+                const header_buf = try out.writableArray(chunk_header_template.len);
+                @memcpy(header_buf, chunk_header_template);
+                writeHex(header_buf[0..chunk_len_digits], data_len);
                 const n = try out.writeSplatHeader(w.buffered(), data, splat);
-                chunked.* = .{ .chunk_len = data_len + 2 - n };
+                bw.state.chunk_len = data_len + 2 - n;
                 return w.consume(n);
             },
-            .chunk_len => |chunk_len| l: switch (chunk_len) {
-                0 => {
-                    const offset = out.end;
-                    const header_buf = try out.writableArray(chunk_header_template.len);
-                    @memcpy(header_buf, chunk_header_template);
-                    chunked.* = .{ .offset = offset };
-                    continue :state .{ .offset = offset };
-                },
-                1 => {
-                    try out.writeByte('\n');
-                    chunked.chunk_len = 0;
-                    continue :l 0;
-                },
-                2 => {
-                    try out.writeByte('\r');
-                    chunked.chunk_len = 1;
-                    continue :l 1;
-                },
-                else => {
-                    const n = try out.writeSplatHeaderLimit(w.buffered(), data, splat, .limited(chunk_len - 2));
-                    chunked.chunk_len = chunk_len - n;
-                    return w.consume(n);
-                },
+            1 => unreachable,
+            2 => {
+                try out.writeAll("\r\n");
+                bw.state.chunk_len = 0;
+                continue :l 0;
+            },
+            else => {
+                const n = try out.writeSplatHeaderLimit(w.buffered(), data, splat, .limited(bw.state.chunk_len - 2));
+                bw.state.chunk_len -= n;
+                return w.consume(n);
            },
         }
     }
diff --git a/lib/std/http/Client.zig b/lib/std/http/Client.zig
index f052943816..626036234c 100644
--- a/lib/std/http/Client.zig
+++ b/lib/std/http/Client.zig
@@ -912,7 +912,7 @@ pub const Request = struct {
         return switch (r.transfer_encoding) {
             .chunked => .{
                 .http_protocol_output = http_protocol_output,
-                .state = .{ .chunked = .init },
+                .state = .init_chunked,
                 .writer = .{
                     .buffer = buffer,
                     .vtable = &.{
diff --git a/lib/std/http/Server.zig b/lib/std/http/Server.zig
index c62906827a..b64253f975 100644
--- a/lib/std/http/Server.zig
+++ b/lib/std/http/Server.zig
@@ -450,11 +450,11 @@ pub const Request = struct {
         try out.writeAll("\r\n");
         const elide_body = request.head.method == .HEAD;
         const state: http.BodyWriter.State = if (o.transfer_encoding) |te| switch (te) {
-            .chunked => .{ .chunked = .init },
+            .chunked => .init_chunked,
             .none => .none,
         } else if (options.content_length) |len| .{
            .content_length = len,
-        } else .{ .chunked = .init };
+        } else .init_chunked;
 
         return if (elide_body) .{
             .http_protocol_output = request.server.out,
@@ -480,7 +480,7 @@ pub const Request = struct {
                     .drain = http.BodyWriter.contentLengthDrain,
                     .sendFile = http.BodyWriter.contentLengthSendFile,
                 },
-                .chunked => &.{
+                .chunk_len => &.{
                     .drain = http.BodyWriter.chunkedDrain,
                     .sendFile = http.BodyWriter.chunkedSendFile,
                 },
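Note (not part of the patch): the framing that the new `State.chunk_len` counter tracks is plain HTTP/1.1 chunked encoding. After the header for an N-byte chunk is written, `chunk_len` holds the remaining payload bytes plus 2 for the chunk's trailing "\r\n", counting down to zero before the next header (or the terminating "0\r\n" emitted by `endChunkedUnflushed`) is due. The standalone Zig sketch below only illustrates that wire format; it does not use the std.http internals touched by this diff.

const std = @import("std");

pub fn main() !void {
    var buf: [64]u8 = undefined;
    const payload = "hello";
    // One chunk: hex length header, payload, then the "\r\n" trailer that
    // accounts for the "+2" in chunk_len, followed by the terminating chunk.
    const framed = try std.fmt.bufPrint(&buf, "{x}\r\n{s}\r\n0\r\n\r\n", .{ payload.len, payload });
    std.debug.print("{s}", .{framed}); // 5\r\nhello\r\n0\r\n\r\n
}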