http.BodyWriter: handle EOF in chunkedSendFile, simplify

The two-variant `Chunked` state (`offset`/`chunk_len`) is collapsed into a
single `chunk_len` countdown, and `chunkedSendFile` now flushes the pending
chunk trailer and returns `error.EndOfStream` at end of file instead of
hitting `unreachable`.

With these changes, the `zig std` command works again: it no longer triggers
assertion failures or corrupts the chunked transfer encoding.
Author: Isaac Freund
Date:   2025-08-16 13:11:19 +02:00
Commit: 0cfd07bc86 (parent: ce4e8a991f)
3 changed files with 56 additions and 115 deletions
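
Context for the diffs below: HTTP/1.1 chunked transfer encoding frames each chunk as a hex-encoded payload length, CRLF, the payload, and a trailing CRLF; a zero-length chunk ends the stream. A minimal standalone sketch of that wire format (illustrative only, not the std implementation):

```zig
const std = @import("std");

/// One HTTP/1.1 chunk: hex payload length, CRLF, payload, CRLF.
fn writeChunk(buf: []u8, payload: []const u8) ![]const u8 {
    return std.fmt.bufPrint(buf, "{x}\r\n{s}\r\n", .{ payload.len, payload });
}

test "chunked transfer encoding wire format" {
    var buf: [32]u8 = undefined;
    try std.testing.expectEqualStrings("5\r\nhello\r\n", try writeChunk(&buf, "hello"));
    // A zero-length chunk (with an empty trailer section) terminates the stream.
    try std.testing.expectEqualStrings("0\r\n\r\n", try writeChunk(&buf, ""));
}
```

The 2-byte `\r\n` chunk trailer is why the patch adds 2 to every chunk length below.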

lib/std/http.zig

@@ -758,22 +758,14 @@ pub const BodyWriter = struct
         /// As a debugging utility, counts down to zero as bytes are written.
         content_length: u64,
         /// Each chunk is wrapped in a header and trailer.
-        chunked: Chunked,
+        /// This length is the number of bytes to be written before the
+        /// next header. This includes +2 for the `\r\n` trailer and is zero
+        /// for the beginning of the stream.
+        chunk_len: usize,
         /// Cleanly finished stream; connection can be reused.
         end,
 
-        pub const Chunked = union(enum) {
-            /// Index to the start of the hex-encoded chunk length in the chunk
-            /// header within the buffer of `BodyWriter.http_protocol_output`.
-            /// Buffered chunk data starts here plus length of `chunk_header_template`.
-            offset: usize,
-            /// We are in the middle of a chunk and this is how many bytes are
-            /// left until the next header. This includes +2 for "\r\n", and
-            /// is zero for the beginning of the stream.
-            chunk_len: usize,
-
-            pub const init: Chunked = .{ .chunk_len = 0 };
-        };
+        pub const init_chunked: State = .{ .chunk_len = 0 };
     };
 
     pub fn isEliding(w: *const BodyWriter) bool {
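
The toy test below spells out the counting convention documented on the new `chunk_len` field; the variable names are hypothetical and only the arithmetic mirrors the patch:

```zig
const std = @import("std");

test "chunk_len counts payload plus the CRLF trailer" {
    const payload = "hello";
    var chunk_len: usize = payload.len + 2; // header for a 5-byte chunk just written
    chunk_len -= payload.len; // payload sent
    try std.testing.expectEqual(@as(usize, 2), chunk_len); // only "\r\n" pending
    chunk_len -= 2; // trailer sent
    try std.testing.expectEqual(@as(usize, 0), chunk_len); // chunk boundary, i.e. `init_chunked`
}
```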
@@ -784,21 +776,7 @@ pub const BodyWriter = struct
     pub fn flush(w: *BodyWriter) Error!void {
         const out = w.http_protocol_output;
         switch (w.state) {
-            .end, .none, .content_length => return out.flush(),
-            .chunked => |*chunked| switch (chunked.*) {
-                .offset => |offset| {
-                    const chunk_len = out.end - offset - chunk_header_template.len;
-                    if (chunk_len > 0) {
-                        writeHex(out.buffer[offset..][0..chunk_len_digits], chunk_len);
-                        chunked.* = .{ .chunk_len = 2 };
-                    } else {
-                        out.end = offset;
-                        chunked.* = .{ .chunk_len = 0 };
-                    }
-                    try out.flush();
-                },
-                .chunk_len => return out.flush(),
-            },
+            .end, .none, .content_length, .chunk_len => return out.flush(),
         }
     }
@@ -841,7 +819,7 @@ pub const BodyWriter = struct
                 w.state = .end;
             },
             .none => {},
-            .chunked => return endChunkedUnflushed(w, .{}),
+            .chunk_len => return endChunkedUnflushed(w, .{}),
         }
     }
@@ -877,24 +855,16 @@ pub const BodyWriter = struct
     /// * `endUnflushed`
     /// * `end`
     pub fn endChunkedUnflushed(w: *BodyWriter, options: EndChunkedOptions) Error!void {
-        const chunked = &w.state.chunked;
         if (w.isEliding()) {
             w.state = .end;
             return;
         }
         const bw = w.http_protocol_output;
-        switch (chunked.*) {
-            .offset => |offset| {
-                const chunk_len = bw.end - offset - chunk_header_template.len;
-                writeHex(bw.buffer[offset..][0..chunk_len_digits], chunk_len);
-                try bw.writeAll("\r\n");
-            },
-            .chunk_len => |chunk_len| switch (chunk_len) {
-                0 => {},
-                1 => try bw.writeByte('\n'),
-                2 => try bw.writeAll("\r\n"),
-                else => unreachable, // An earlier write call indicated more data would follow.
-            },
+        switch (w.state.chunk_len) {
+            0 => {},
+            1 => try bw.writeByte('\n'),
+            2 => try bw.writeAll("\r\n"),
+            else => unreachable, // An earlier write call indicated more data would follow.
         }
         try bw.writeAll("0\r\n");
         for (options.trailers) |trailer| {
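
For reference, the terminating sequence `endChunkedUnflushed` goes on to emit is the zero-length chunk, any trailer fields, and a final CRLF. A small sketch of that layout with a made-up trailer name:

```zig
const std = @import("std");

test "terminating chunk with one trailer field" {
    var buf: [48]u8 = undefined;
    // "X-Example" is a hypothetical trailer, present only to show the layout.
    const s = try std.fmt.bufPrint(&buf, "0\r\n{s}: {s}\r\n\r\n", .{ "X-Example", "ok" });
    try std.testing.expectEqualStrings("0\r\nX-Example: ok\r\n\r\n", s);
}
```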
@@ -991,44 +961,32 @@ pub const BodyWriter = struct
             return error.Unimplemented;
         };
         const out = bw.http_protocol_output;
-        const chunked = &bw.state.chunked;
-        state: switch (chunked.*) {
-            .offset => |off| {
-                // TODO: is it better perf to read small files into the buffer?
-                const buffered_len = out.end - off - chunk_header_template.len;
-                const chunk_len = data_len + buffered_len;
-                writeHex(out.buffer[off..][0..chunk_len_digits], chunk_len);
+        switch (bw.state.chunk_len) {
+            0 => {
+                const header_buf = try out.writableArray(chunk_header_template.len);
+                @memcpy(header_buf, chunk_header_template);
+                writeHex(header_buf[0..chunk_len_digits], data_len);
                 const n = try out.sendFileHeader(w.buffered(), file_reader, limit);
-                chunked.* = .{ .chunk_len = data_len + 2 - n };
-                return w.consume(n);
+                bw.state.chunk_len = data_len + 2 - n;
+                const ret = w.consume(n);
+                return ret;
             },
-            .chunk_len => |chunk_len| l: switch (chunk_len) {
-                0 => {
-                    const off = out.end;
-                    const header_buf = try out.writableArray(chunk_header_template.len);
-                    @memcpy(header_buf, chunk_header_template);
-                    chunked.* = .{ .offset = off };
-                    continue :state .{ .offset = off };
-                },
-                1 => {
-                    try out.writeByte('\n');
-                    chunked.chunk_len = 0;
-                    continue :l 0;
-                },
-                2 => {
-                    try out.writeByte('\r');
-                    chunked.chunk_len = 1;
-                    continue :l 1;
-                },
-                else => {
-                    const chunk_limit: std.Io.Limit = .limited(chunk_len - 2);
-                    const n = if (chunk_limit.subtract(w.buffered().len)) |sendfile_limit|
-                        try out.sendFileHeader(w.buffered(), file_reader, sendfile_limit.min(limit))
-                    else
-                        try out.write(chunk_limit.slice(w.buffered()));
-                    chunked.chunk_len = chunk_len - n;
-                    return w.consume(n);
-                },
-            },
+            1 => unreachable,
+            2 => {
+                try out.writeAll("\r\n");
+                bw.state.chunk_len = 0;
+                assert(file_reader.atEnd());
+                return error.EndOfStream;
+            },
+            else => {
+                const chunk_limit: std.Io.Limit = .limited(bw.state.chunk_len - 2);
+                const n = if (chunk_limit.subtract(w.buffered().len)) |sendfile_limit|
+                    try out.sendFileHeader(w.buffered(), file_reader, sendfile_limit.min(limit))
+                else
+                    try out.write(chunk_limit.slice(w.buffered()));
+                bw.state.chunk_len -= n;
+                const ret = w.consume(n);
+                return ret;
+            },
         }
     }
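
This hunk is the EOF handling the commit title refers to: when only the 2-byte chunk trailer is pending, a further `sendFile` call can only mean the file is exhausted, so the writer flushes the trailer and reports `error.EndOfStream` instead of reaching the old `unreachable`. A toy mirror of that dispatch (hypothetical function; the real std.Io plumbing is omitted):

```zig
const std = @import("std");

fn toySendFile(chunk_len: *usize, bytes_left_in_file: usize) error{EndOfStream}!usize {
    switch (chunk_len.*) {
        0 => return 0, // would emit a fresh chunk header first
        1 => unreachable, // never exposed mid-trailer
        2 => {
            // Only "\r\n" remains: flush it and signal end of stream.
            chunk_len.* = 0;
            std.debug.assert(bytes_left_in_file == 0);
            return error.EndOfStream;
        },
        else => {
            // Payload bytes remain: send them and count down.
            const n = @min(chunk_len.* - 2, bytes_left_in_file);
            chunk_len.* -= n;
            return n;
        },
    }
}

test "sendFile at a finished chunk signals end of stream" {
    var chunk_len: usize = 7; // 5 payload bytes + 2 trailer bytes pending
    try std.testing.expectEqual(@as(usize, 5), try toySendFile(&chunk_len, 5));
    try std.testing.expectError(error.EndOfStream, toySendFile(&chunk_len, 0));
    try std.testing.expectEqual(@as(usize, 0), chunk_len);
}
```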
@@ -1038,42 +996,25 @@ pub const BodyWriter = struct
         assert(!bw.isEliding());
         const out = bw.http_protocol_output;
         const data_len = w.end + Writer.countSplat(data, splat);
-        const chunked = &bw.state.chunked;
-        state: switch (chunked.*) {
-            .offset => |offset| {
-                if (out.unusedCapacityLen() >= data_len) {
-                    return w.consume(out.writeSplatHeader(w.buffered(), data, splat) catch unreachable);
-                }
-                const buffered_len = out.end - offset - chunk_header_template.len;
-                const chunk_len = data_len + buffered_len;
-                writeHex(out.buffer[offset..][0..chunk_len_digits], chunk_len);
+        l: switch (bw.state.chunk_len) {
+            0 => {
+                const header_buf = try out.writableArray(chunk_header_template.len);
+                @memcpy(header_buf, chunk_header_template);
+                writeHex(header_buf[0..chunk_len_digits], data_len);
                 const n = try out.writeSplatHeader(w.buffered(), data, splat);
-                chunked.* = .{ .chunk_len = data_len + 2 - n };
+                bw.state.chunk_len = data_len + 2 - n;
                 return w.consume(n);
             },
-            .chunk_len => |chunk_len| l: switch (chunk_len) {
-                0 => {
-                    const offset = out.end;
-                    const header_buf = try out.writableArray(chunk_header_template.len);
-                    @memcpy(header_buf, chunk_header_template);
-                    chunked.* = .{ .offset = offset };
-                    continue :state .{ .offset = offset };
-                },
-                1 => {
-                    try out.writeByte('\n');
-                    chunked.chunk_len = 0;
-                    continue :l 0;
-                },
-                2 => {
-                    try out.writeByte('\r');
-                    chunked.chunk_len = 1;
-                    continue :l 1;
-                },
-                else => {
-                    const n = try out.writeSplatHeaderLimit(w.buffered(), data, splat, .limited(chunk_len - 2));
-                    chunked.chunk_len = chunk_len - n;
-                    return w.consume(n);
-                },
-            },
+            1 => unreachable,
+            2 => {
+                try out.writeAll("\r\n");
+                bw.state.chunk_len = 0;
+                continue :l 0;
+            },
+            else => {
+                const n = try out.writeSplatHeaderLimit(w.buffered(), data, splat, .limited(bw.state.chunk_len - 2));
+                bw.state.chunk_len -= n;
+                return w.consume(n);
+            },
         }
     }
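
Taken together, the drain path is now a single countdown switch over `chunk_len`. A self-contained toy encoder following the same 0 / 1 / 2 / else dispatch (`ToyChunker` is illustrative, not the std type):

```zig
const std = @import("std");

const ToyChunker = struct {
    buf: [64]u8 = undefined,
    end: usize = 0,
    /// 0 at a chunk boundary; otherwise payload bytes left + 2 for "\r\n".
    chunk_len: usize = 0,

    fn push(c: *ToyChunker, bytes: []const u8) void {
        @memcpy(c.buf[c.end..][0..bytes.len], bytes);
        c.end += bytes.len;
    }

    fn write(c: *ToyChunker, data: []const u8) void {
        switch (c.chunk_len) {
            0 => {
                // Chunk boundary: emit "<hex len>\r\n", then arm the countdown.
                var header: [18]u8 = undefined;
                const h = std.fmt.bufPrint(&header, "{x}\r\n", .{data.len}) catch unreachable;
                c.push(h);
                c.chunk_len = data.len + 2;
                c.write(data);
            },
            1 => unreachable, // a half-written trailer never reaches this API
            2 => {
                // Only the "\r\n" trailer remains; emit it and return to a boundary.
                std.debug.assert(data.len == 0);
                c.push("\r\n");
                c.chunk_len = 0;
            },
            else => {
                // Payload bytes remain: send them and count down.
                std.debug.assert(data.len + 2 <= c.chunk_len);
                c.push(data);
                c.chunk_len -= data.len;
            },
        }
    }
};

test "toy chunked encoder" {
    var c: ToyChunker = .{};
    c.write("hello"); // "5\r\n" + "hello"; trailer still pending
    c.write(""); // countdown at 2: flushes the chunk's "\r\n"
    c.write(""); // empty chunk at a boundary: the "0\r\n\r\n" terminator
    try std.testing.expectEqualStrings("5\r\nhello\r\n0\r\n\r\n", c.buf[0..c.end]);
}
```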

lib/std/http/Client.zig

@@ -907,7 +907,7 @@ pub const Request = struct
         return switch (r.transfer_encoding) {
             .chunked => .{
                 .http_protocol_output = http_protocol_output,
-                .state = .{ .chunked = .init },
+                .state = .init_chunked,
                 .writer = .{
                     .buffer = buffer,
                     .vtable = &.{

lib/std/http/Server.zig

@@ -448,11 +448,11 @@ pub const Request = struct
         try out.writeAll("\r\n");
         const elide_body = request.head.method == .HEAD;
         const state: http.BodyWriter.State = if (o.transfer_encoding) |te| switch (te) {
-            .chunked => .{ .chunked = .init },
+            .chunked => .init_chunked,
             .none => .none,
         } else if (options.content_length) |len| .{
             .content_length = len,
-        } else .{ .chunked = .init };
+        } else .init_chunked;
 
         return if (elide_body) .{
             .http_protocol_output = request.server.out,
@@ -478,7 +478,7 @@ pub const Request = struct
                     .drain = http.BodyWriter.contentLengthDrain,
                     .sendFile = http.BodyWriter.contentLengthSendFile,
                 },
-                .chunked => &.{
+                .chunk_len => &.{
                     .drain = http.BodyWriter.chunkedDrain,
                     .sendFile = http.BodyWriter.chunkedSendFile,
                 },
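
One more note on the Server hunks above: when the handler specifies neither a transfer encoding nor a content length, chunked is the fallback. A toy mirror of that selection logic (hypothetical `ToyState` and `pickState`; only the control flow matches the patch):

```zig
const std = @import("std");

const TransferEncoding = enum { chunked, none };

const ToyState = union(enum) {
    none,
    content_length: u64,
    chunk_len: usize,

    const init_chunked: ToyState = .{ .chunk_len = 0 };
};

fn pickState(te: ?TransferEncoding, content_length: ?u64) ToyState {
    return if (te) |t| switch (t) {
        .chunked => .init_chunked,
        .none => .none,
    } else if (content_length) |len| .{
        .content_length = len,
    } else .init_chunked;
}

test "chunked is the fallback when no length is known" {
    try std.testing.expect(pickState(null, null) == .chunk_len);
    try std.testing.expect(pickState(null, 42) == .content_length);
    try std.testing.expect(pickState(.none, null) == .none);
}
```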