std: update some http to new reader/writer

This commit is contained in:
Andrew Kelley 2025-04-21 21:15:54 -07:00
parent 4004c8058e
commit d9b9e3c272
10 changed files with 223 additions and 123 deletions

View File

@ -236,7 +236,7 @@ pub fn addCertsFromFile(cb: *Bundle, gpa: Allocator, file: fs.File) AddCertsFrom
try cb.bytes.ensureUnusedCapacity(gpa, needed_capacity);
const end_reserved: u32 = @intCast(cb.bytes.items.len + decoded_size_upper_bound);
const buffer = cb.bytes.allocatedSlice()[end_reserved..];
const end_index = try file.readAll(buffer);
const end_index = try file.readShort(buffer);
const encoded_bytes = buffer[0..end_index];
const begin_marker = "-----BEGIN CERTIFICATE-----";

View File

@ -916,7 +916,7 @@ pub fn writer(c: *Client) std.io.Writer {
.context = c,
.vtable = &.{
.writeSplat = writeSplat,
.writeFile = std.io.Writer.unimplemented_writeFile,
.writeFile = std.io.Writer.unimplementedWriteFile,
},
};
}

View File

@ -839,6 +839,20 @@ pub fn read(self: File, buffer: []u8) ReadError!usize {
return posix.read(self.handle, buffer);
}
/// One-shot alternative to `std.io.BufferedReader.readShort` via `reader`.
///
/// Returns the number of bytes read. If the number read is smaller than `buffer.len`, it
/// means the file reached the end.
pub fn readShort(self: File, buffer: []u8) ReadError!usize {
    // Keep issuing reads until the buffer is full or the file signals
    // end-of-stream with a zero-length read.
    var total: usize = 0;
    while (total < buffer.len) {
        const bytes_read = try self.read(buffer[total..]);
        if (bytes_read == 0) return total;
        total += bytes_read;
    }
    return total;
}
/// On Windows, this function currently does alter the file pointer.
/// https://github.com/ziglang/zig/issues/12783
pub fn pread(self: File, buffer: []u8, offset: u64) PReadError!usize {
@ -884,7 +898,7 @@ pub fn write(self: File, bytes: []const u8) WriteError!usize {
return posix.write(self.handle, bytes);
}
/// One-shot alternative to `writer`.
/// One-shot alternative to `std.io.BufferedWriter.writeAll` via `writer`.
pub fn writeAll(self: File, bytes: []const u8) WriteError!void {
var index: usize = 0;
while (index < bytes.len) {

View File

@ -38,9 +38,12 @@ next_https_rescan_certs: bool = true,
/// The pool of connections that can be reused (and currently in use).
connection_pool: ConnectionPool = .{},
/// Each `Connection` allocates this amount for the reader buffer.
read_buffer_size: usize,
///
/// If the entire HTTP header cannot fit in this amount of bytes,
/// `error.HttpHeadersOversize` will be returned from `Request.wait`.
read_buffer_size: usize = 4096,
/// Each `Connection` allocates this amount for the writer buffer.
write_buffer_size: usize,
write_buffer_size: usize = 1024,
/// If populated, all http traffic travels through this third party.
/// This field cannot be modified while the client has active connections.
@ -177,23 +180,21 @@ pub const ConnectionPool = struct {
/// All future operations on the connection pool will deadlock.
///
/// Threadsafe.
pub fn deinit(pool: *ConnectionPool, allocator: Allocator) void {
pub fn deinit(pool: *ConnectionPool) void {
pool.mutex.lock();
var next = pool.free.first;
while (next) |node| {
const connection: *Connection = @fieldParentPtr("pool_node", node);
next = node.next;
connection.close(allocator);
allocator.destroy(connection);
connection.destroy();
}
next = pool.used.first;
while (next) |node| {
const connection: *Connection = @fieldParentPtr("pool_node", node);
next = node.next;
connection.close(allocator);
allocator.destroy(node);
connection.destroy();
}
pool.* = undefined;
@ -324,8 +325,9 @@ pub const Connection = struct {
return tls;
}
fn destroy(tls: *Tls, gpa: Allocator) void {
fn destroy(tls: *Tls) void {
const c = &tls.connection;
const gpa = c.client.allocator;
const base: [*]u8 = @ptrCast(tls);
gpa.free(base[0..allocLen(c.client, c.host_len)]);
}
@ -1262,10 +1264,8 @@ pub const Proxy = struct {
pub fn deinit(client: *Client) void {
assert(client.connection_pool.used.first == null); // There are still active requests.
client.connection_pool.deinit(client.allocator);
if (!disable_tls)
client.ca_bundle.deinit(client.allocator);
client.connection_pool.deinit();
if (!disable_tls) client.ca_bundle.deinit(client.allocator);
client.* = undefined;
}
@ -1574,8 +1574,7 @@ pub fn connect(
/// TODO collapse each error set into its own meta error code, and store
/// the underlying error code as a field on Request
pub const RequestError = ConnectTcpError || ConnectErrorPartial || std.io.Writer.Error ||
std.fmt.ParseIntError || Connection.WriteError ||
pub const RequestError = ConnectTcpError || ConnectErrorPartial || std.io.Writer.Error || std.fmt.ParseIntError ||
error{
UnsupportedUriScheme,
UriMissingHost,

View File

@ -20,7 +20,8 @@ out: *std.io.BufferedWriter,
/// same connection, and makes invalid API usage cause assertion failures
/// rather than HTTP protocol violations.
state: State,
head_parse_err: Request.Head.ParseError,
/// Populated when `receiveHead` returns `ReceiveHeadError.HttpHeadersInvalid`.
head_parse_err: ?Request.Head.ParseError = null,
pub const State = enum {
/// The connection is available to be used for the first time, or reused.
@ -46,7 +47,6 @@ pub fn init(in: *std.io.BufferedReader, out: *std.io.BufferedWriter) Server {
.in = in,
.out = out,
.state = .ready,
.head_parse_err = undefined,
};
}
@ -92,8 +92,6 @@ pub fn receiveHead(s: *Server) ReceiveHeadError!Request {
if (hp.state == .finished) return .{
.server = s,
.head_end = head_end,
.trailers_len = 0,
.read_err = null,
.head = Request.Head.parse(buf[0..head_end]) catch |err| {
s.head_parse_err = err;
return error.HttpHeadersInvalid;
@ -109,13 +107,13 @@ pub const Request = struct {
head_end: usize,
/// Number of bytes of HTTP trailers. These are at the end of a
/// transfer-encoding: chunked message.
trailers_len: usize,
trailers_len: usize = 0,
head: Head,
reader_state: union {
remaining_content_length: u64,
remaining_chunk_len: RemainingChunkLen,
},
read_err: ?ReadError,
read_err: ?ReadError = null,
pub const ReadError = error{
HttpChunkInvalid,
@ -330,12 +328,12 @@ pub const Request = struct {
.in = &br,
.out = undefined,
.state = .ready,
.in_err = undefined,
};
var request: Request = .{
.server = &server,
.head_end = request_bytes.len,
.trailers_len = 0,
.head = undefined,
.reader_state = undefined,
};
@ -513,15 +511,16 @@ pub const Request = struct {
respond_options: RespondOptions = .{},
};
/// The header is buffered but not sent until `Response.flush` is called.
/// The header is not guaranteed to be sent until `Response.flush` is called.
///
/// If the request contains a body and the connection is to be reused,
/// discards the request body, leaving the Server in the `ready` state. If
/// this discarding fails, the connection is marked as not to be reused and
/// no error is surfaced.
///
/// HEAD requests are handled transparently by setting a flag on the
/// returned Response to omit the body. However it may be worth noticing
/// HEAD requests are handled transparently by setting the
/// `Response.elide_body` flag on the returned `Response`, causing
/// the response stream to omit the body. However, it may be worth noticing
/// that flag and skipping any expensive work that would otherwise need to
/// be done to satisfy the request.
///
@ -922,6 +921,9 @@ pub const Response = struct {
///
/// This is the underlying stream; use `buffered` to create a
/// `BufferedWriter` for this `Response`.
///
/// Until the lifetime of `Response` ends, it is illegal to modify the
/// state of this other than via methods of `Response`.
server_output: *std.io.BufferedWriter,
/// `null` means transfer-encoding: chunked.
/// As a debugging utility, counts down to zero as bytes are written.
@ -966,12 +968,30 @@ pub const Response = struct {
};
};
/// When using content-length, asserts that the amount of data sent matches
/// the value sent in the header, then calls `flush`.
/// Sends all buffered data across `Response.server_output`.
///
/// Otherwise, transfer-encoding: chunked is being used, and it writes the
/// end-of-stream message with empty trailers, then flushes the stream to
/// the system.
/// Some buffered data will remain if transfer-encoding is chunked and the
/// response is mid-chunk.
pub fn flush(r: *Response) WriteError!void {
// Sends buffered response bytes across `server_output`.
switch (r.transfer_encoding) {
// Not chunked: every buffered byte is fully framed, so flush it all.
.none, .content_length => return r.server_output.flush(),
.chunked => |*chunked| switch (chunked.*) {
.offset => |*offset| {
// Mid-chunk: flush only `end - offset` bytes, leaving `offset`
// bytes buffered, then rebase the offset to zero.
// NOTE(review): presumably `offset` tracks the in-progress chunk
// region of the buffer — confirm against the chunked-writer state.
try r.server_output.flushLimit(.limited(r.server_output.end - offset.*));
offset.* = 0;
},
// Between chunks: all buffered data is complete; a full flush is safe.
.chunk_len => return r.server_output.flush(),
},
}
}
When using content-length, asserts that the number of bytes sent matches
the content-length value provided in the HTTP header, then flushes.
///
/// When using transfer-encoding: chunked, writes the end-of-stream message
/// with empty trailers, then flushes the stream to the system. Asserts any
/// started chunk has been completely finished.
///
/// Respects the value of `elide_body` to omit all data after the headers.
///

View File

@ -11,13 +11,18 @@ const expectError = std.testing.expectError;
test "trailers" {
const test_server = try createTestServer(struct {
fn run(net_server: *std.net.Server) anyerror!void {
var header_buffer: [1024]u8 = undefined;
var recv_buffer: [1024]u8 = undefined;
var send_buffer: [1024]u8 = undefined;
var remaining: usize = 1;
while (remaining != 0) : (remaining -= 1) {
const conn = try net_server.accept();
defer conn.stream.close();
const connection = try net_server.accept();
defer connection.stream.close();
var server = http.Server.init(conn, &header_buffer);
var stream_reader = connection.stream.reader();
var stream_writer = connection.stream.writer();
var connection_br = stream_reader.interface().buffered(&recv_buffer);
var connection_bw = stream_writer.interface().buffered(&send_buffer);
var server = http.Server.init(&connection_br, &connection_bw);
try expectEqual(.ready, server.state);
var request = try server.receiveHead();
@ -29,13 +34,11 @@ test "trailers" {
fn serve(request: *http.Server.Request) !void {
try expectEqualStrings(request.head.target, "/trailer");
var send_buffer: [1024]u8 = undefined;
var response = request.respondStreaming(.{
.send_buffer = &send_buffer,
});
try response.writeAll("Hello, ");
var response = try request.respondStreaming(.{});
var bw = response.writer().unbuffered();
try bw.writeAll("Hello, ");
try response.flush();
try response.writeAll("World!\n");
try bw.writeAll("World!\n");
try response.flush();
try response.endChunked(.{
.trailers = &.{
@ -95,11 +98,16 @@ test "trailers" {
test "HTTP server handles a chunked transfer coding request" {
const test_server = try createTestServer(struct {
fn run(net_server: *std.net.Server) !void {
var header_buffer: [8192]u8 = undefined;
const conn = try net_server.accept();
defer conn.stream.close();
var recv_buffer: [8192]u8 = undefined;
var send_buffer: [500]u8 = undefined;
const connection = try net_server.accept();
defer connection.stream.close();
var server = http.Server.init(conn, &header_buffer);
var stream_reader = connection.stream.reader();
var stream_writer = connection.stream.writer();
var connection_br = stream_reader.interface().buffered(&recv_buffer);
var connection_bw = stream_writer.interface().buffered(&send_buffer);
var server = http.Server.init(&connection_br, &connection_bw);
var request = try server.receiveHead();
try expect(request.head.transfer_encoding == .chunked);
@ -153,13 +161,18 @@ test "HTTP server handles a chunked transfer coding request" {
test "echo content server" {
const test_server = try createTestServer(struct {
fn run(net_server: *std.net.Server) anyerror!void {
var read_buffer: [1024]u8 = undefined;
var recv_buffer: [1024]u8 = undefined;
var send_buffer: [100]u8 = undefined;
accept: while (true) {
const conn = try net_server.accept();
defer conn.stream.close();
const connection = try net_server.accept();
defer connection.stream.close();
var http_server = http.Server.init(conn, &read_buffer);
var stream_reader = connection.stream.reader();
var stream_writer = connection.stream.writer();
var connection_br = stream_reader.interface().buffered(&recv_buffer);
var connection_bw = stream_writer.interface().buffered(&send_buffer);
var http_server = http.Server.init(&connection_br, &connection_bw);
while (http_server.state == .ready) {
var request = http_server.receiveHead() catch |err| switch (err) {
@ -200,9 +213,7 @@ test "echo content server" {
try expectEqualStrings("Hello, World!\n", body);
try expectEqualStrings("text/plain", request.head.content_type.?);
var send_buffer: [100]u8 = undefined;
var response = request.respondStreaming(.{
.send_buffer = &send_buffer,
var response = try request.respondStreaming(.{
.content_length = switch (request.head.transfer_encoding) {
.chunked => null,
.none => len: {
@ -211,11 +222,10 @@ test "echo content server" {
},
},
});
try response.flush(); // Test an early flush to send the HTTP headers before the body.
const w = response.writer();
try w.writeAll("Hello, ");
try w.writeAll("World!\n");
var bw = response.writer().unbuffered();
try bw.writeAll("Hello, ");
try bw.writeAll("World!\n");
try response.end();
//std.debug.print(" server finished responding\n", .{});
}
@ -240,32 +250,33 @@ test "Server.Request.respondStreaming non-chunked, unknown content-length" {
// closed, indicating the end of the body.
const test_server = try createTestServer(struct {
fn run(net_server: *std.net.Server) anyerror!void {
var header_buffer: [1000]u8 = undefined;
var recv_buffer: [1000]u8 = undefined;
var send_buffer: [500]u8 = undefined;
var remaining: usize = 1;
while (remaining != 0) : (remaining -= 1) {
const conn = try net_server.accept();
defer conn.stream.close();
const connection = try net_server.accept();
defer connection.stream.close();
var server = http.Server.init(conn, &header_buffer);
var stream_reader = connection.stream.reader();
var stream_writer = connection.stream.writer();
var connection_br = stream_reader.interface().buffered(&recv_buffer);
var connection_bw = stream_writer.interface().buffered(&send_buffer);
var server = http.Server.init(&connection_br, &connection_bw);
try expectEqual(.ready, server.state);
var request = try server.receiveHead();
try expectEqualStrings(request.head.target, "/foo");
var send_buffer: [500]u8 = undefined;
var response = request.respondStreaming(.{
.send_buffer = &send_buffer,
var response = try request.respondStreaming(.{
.respond_options = .{
.transfer_encoding = .none,
},
});
var total: usize = 0;
var buf: [30]u8 = undefined;
var bw = response.writer().buffered(&buf);
for (0..500) |i| {
var buf: [30]u8 = undefined;
const line = try std.fmt.bufPrint(&buf, "{d}, ah ha ha!\n", .{i});
try response.writeAll(line);
total += line.len;
try bw.print("{d}, ah ha ha!\n", .{i});
}
try expectEqual(7390, total);
try expectEqual(7390, bw.count);
try response.end();
try expectEqual(.closing, server.state);
}
@ -305,13 +316,19 @@ test "Server.Request.respondStreaming non-chunked, unknown content-length" {
test "receiving arbitrary http headers from the client" {
const test_server = try createTestServer(struct {
fn run(net_server: *std.net.Server) anyerror!void {
var read_buffer: [666]u8 = undefined;
var recv_buffer: [666]u8 = undefined;
var send_buffer: [777]u8 = undefined;
var remaining: usize = 1;
while (remaining != 0) : (remaining -= 1) {
const conn = try net_server.accept();
defer conn.stream.close();
const connection = try net_server.accept();
defer connection.stream.close();
var stream_reader = connection.stream.reader();
var stream_writer = connection.stream.writer();
var connection_br = stream_reader.interface().buffered(&recv_buffer);
var connection_bw = stream_writer.interface().buffered(&send_buffer);
var server = http.Server.init(&connection_br, &connection_bw);
var server = http.Server.init(conn, &read_buffer);
try expectEqual(.ready, server.state);
var request = try server.receiveHead();
try expectEqualStrings("/bar", request.head.target);
@ -341,7 +358,8 @@ test "receiving arbitrary http headers from the client" {
const gpa = std.testing.allocator;
const stream = try std.net.tcpConnectToHost(gpa, "127.0.0.1", test_server.port());
defer stream.close();
var writer = stream.writer().unbuffered();
var stream_writer = stream.writer();
var writer = stream_writer.interface().unbuffered();
try writer.writeAll(request_bytes);
const response = try stream.reader().readAllAlloc(gpa, 8192);
@ -367,12 +385,18 @@ test "general client/server API coverage" {
};
const test_server = try createTestServer(struct {
fn run(net_server: *std.net.Server) anyerror!void {
var client_header_buffer: [1024]u8 = undefined;
var recv_buffer: [1024]u8 = undefined;
var send_buffer: [100]u8 = undefined;
outer: while (global.handle_new_requests) {
var connection = try net_server.accept();
defer connection.stream.close();
var http_server = http.Server.init(connection, &client_header_buffer);
var stream_reader = connection.stream.reader();
var stream_writer = connection.stream.writer();
var connection_br = stream_reader.interface().buffered(&recv_buffer);
var connection_bw = stream_writer.interface().buffered(&send_buffer);
var http_server = http.Server.init(&connection_br, &connection_bw);
while (http_server.state == .ready) {
var request = http_server.receiveHead() catch |err| switch (err) {
@ -398,11 +422,8 @@ test "general client/server API coverage" {
const body = try (try request.reader()).readAllAlloc(gpa, 8192);
defer gpa.free(body);
var send_buffer: [100]u8 = undefined;
if (mem.startsWith(u8, request.head.target, "/get")) {
var response = request.respondStreaming(.{
.send_buffer = &send_buffer,
var response = try request.respondStreaming(.{
.content_length = if (mem.indexOf(u8, request.head.target, "?chunked") == null)
14
else
@ -413,37 +434,35 @@ test "general client/server API coverage" {
},
},
});
const w = response.writer();
try w.writeAll("Hello, ");
try w.writeAll("World!\n");
var bw = response.writer().unbuffered();
try bw.writeAll("Hello, ");
try bw.writeAll("World!\n");
try response.end();
// Writing again would cause an assertion failure.
} else if (mem.startsWith(u8, request.head.target, "/large")) {
var response = request.respondStreaming(.{
.send_buffer = &send_buffer,
var response = try request.respondStreaming(.{
.content_length = 14 * 1024 + 14 * 10,
});
try response.flush(); // Test an early flush to send the HTTP headers before the body.
const w = response.writer();
var bw = response.writer().unbuffered();
var i: u32 = 0;
while (i < 5) : (i += 1) {
try w.writeAll("Hello, World!\n");
try bw.writeAll("Hello, World!\n");
}
try w.writeAll("Hello, World!\n" ** 1024);
try bw.writeAll("Hello, World!\n" ** 1024);
i = 0;
while (i < 5) : (i += 1) {
try w.writeAll("Hello, World!\n");
try bw.writeAll("Hello, World!\n");
}
try response.end();
} else if (mem.eql(u8, request.head.target, "/redirect/1")) {
var response = request.respondStreaming(.{
.send_buffer = &send_buffer,
var response = try request.respondStreaming(.{
.respond_options = .{
.status = .found,
.extra_headers = &.{
@ -452,9 +471,9 @@ test "general client/server API coverage" {
},
});
const w = response.writer();
try w.writeAll("Hello, ");
try w.writeAll("Redirected!\n");
var bw = response.writer().unbuffered();
try bw.writeAll("Hello, ");
try bw.writeAll("Redirected!\n");
try response.end();
} else if (mem.eql(u8, request.head.target, "/redirect/2")) {
try request.respond("Hello, Redirected!\n", .{
@ -914,31 +933,36 @@ test "general client/server API coverage" {
test "Server streams both reading and writing" {
const test_server = try createTestServer(struct {
fn run(net_server: *std.net.Server) anyerror!void {
var header_buffer: [1024]u8 = undefined;
const conn = try net_server.accept();
defer conn.stream.close();
var server = http.Server.init(conn, &header_buffer);
var request = try server.receiveHead();
const reader = try request.reader();
var recv_buffer: [1024]u8 = undefined;
var send_buffer: [777]u8 = undefined;
var response = request.respondStreaming(.{
.send_buffer = &send_buffer,
const connection = try net_server.accept();
defer connection.stream.close();
var stream_reader = connection.stream.reader();
var stream_writer = connection.stream.writer();
var connection_br = stream_reader.interface().buffered(&recv_buffer);
var connection_bw = stream_writer.interface().buffered(&send_buffer);
var server = http.Server.init(&connection_br, &connection_bw);
var request = try server.receiveHead();
var read_buffer: [100]u8 = undefined;
var br = try request.reader().buffered(&read_buffer);
var response = try request.respondStreaming(.{
.respond_options = .{
.transfer_encoding = .none, // Causes keep_alive=false
},
});
const writer = response.writer();
var bw = response.writer().unbuffered();
while (true) {
try response.flush();
var buf: [100]u8 = undefined;
const n = try reader.read(&buf);
if (n == 0) break;
const sub_buf = buf[0..n];
for (sub_buf) |*b| b.* = std.ascii.toUpper(b.*);
try writer.writeAll(sub_buf);
const buf = br.peekGreedy(1) catch |err| switch (err) {
error.EndOfStream => break,
error.ReadFailed => return error.ReadFailed,
};
br.toss(buf.len);
for (buf) |*b| b.* = std.ascii.toUpper(b.*);
try bw.writeAll(buf);
}
try response.end();
}
@ -1161,12 +1185,17 @@ fn createTestServer(S: type) !*TestServer {
test "redirect to different connection" {
const test_server_new = try createTestServer(struct {
fn run(net_server: *std.net.Server) anyerror!void {
var header_buffer: [888]u8 = undefined;
var recv_buffer: [888]u8 = undefined;
var send_buffer: [777]u8 = undefined;
const conn = try net_server.accept();
defer conn.stream.close();
const connection = try net_server.accept();
defer connection.stream.close();
var server = http.Server.init(conn, &header_buffer);
var stream_reader = connection.stream.reader();
var stream_writer = connection.stream.writer();
var connection_br = stream_reader.interface().buffered(&recv_buffer);
var connection_bw = stream_writer.interface().buffered(&send_buffer);
var server = http.Server.init(&connection_br, &connection_bw);
var request = try server.receiveHead();
try expectEqualStrings(request.head.target, "/ok");
try request.respond("good job, you pass", .{});
@ -1181,17 +1210,21 @@ test "redirect to different connection" {
const test_server_orig = try createTestServer(struct {
fn run(net_server: *std.net.Server) anyerror!void {
var header_buffer: [999]u8 = undefined;
var recv_buffer: [999]u8 = undefined;
var send_buffer: [100]u8 = undefined;
const conn = try net_server.accept();
defer conn.stream.close();
const connection = try net_server.accept();
defer connection.stream.close();
const new_loc = try std.fmt.bufPrint(&send_buffer, "http://127.0.0.1:{d}/ok", .{
global.other_port.?,
});
var server = http.Server.init(conn, &header_buffer);
var stream_reader = connection.stream.reader();
var stream_writer = connection.stream.writer();
var connection_br = stream_reader.interface().buffered(&recv_buffer);
var connection_bw = stream_writer.interface().buffered(&send_buffer);
var server = http.Server.init(&connection_br, &connection_bw);
var request = try server.receiveHead();
try expectEqualStrings(request.head.target, "/help");
try request.respond("", .{

View File

@ -227,6 +227,8 @@ pub fn peekGreedy(br: *BufferedReader, n: usize) Reader.Error![]u8 {
///
/// Asserts that the number of bytes buffered is at least as many as `n`.
///
/// The "tossed" memory remains alive until a "peek" operation occurs.
///
/// See also:
/// * `peek`.
/// * `discard`.

View File

@ -78,6 +78,15 @@ pub fn flush(bw: *BufferedWriter) Writer.Error!void {
bw.end = 0;
}
/// Flushes at most `limit` bytes of buffered data to the underlying writer,
/// then moves any remaining buffered bytes to the front of the buffer.
///
/// Unlike `flush`, data beyond `limit` stays buffered for a later flush.
pub fn flushLimit(bw: *BufferedWriter, limit: Writer.Limit) Writer.Error!void {
    const buffer = limit.slice(bw.buffer[0..bw.end]);
    var index: usize = 0;
    while (index < buffer.len) index += try bw.unbuffered_writer.writeVec(&.{buffer[index..]});
    // Retain only the still-buffered bytes, i.e. up to `bw.end`. Slicing to
    // the end of `bw.buffer` here would treat unused capacity as data and
    // corrupt `bw.end` below.
    const remainder = bw.buffer[index..bw.end];
    std.mem.copyForwards(u8, bw.buffer[0..remainder.len], remainder);
    bw.end = remainder.len;
}
/// Returns the slice of the internal buffer that is not yet occupied by
/// buffered data and can therefore accept new writes.
pub fn unusedCapacitySlice(bw: *const BufferedWriter) []u8 {
    const buffered_len = bw.end;
    return bw.buffer[buffered_len..];
}
@ -1852,3 +1861,7 @@ test "fixed output" {
try bw.seekTo((try bw.getEndPos()) + 1);
try testing.expectError(error.WriteStreamEnd, bw.writeAll("H"));
}
test flushLimit {
// TODO: exercise `flushLimit` (partial flush + remainder compaction) once a
// suitable unbuffered-writer test harness is wired up; placeholder for now.
return error.Unimplemented;
}

View File

@ -156,6 +156,23 @@ pub const failing: Writer = .{
},
};
/// Placeholder `writeFile` vtable implementation for writers that do not
/// support a file-sending fast path; always fails with `error.Unimplemented`.
pub fn unimplementedWriteFile(
    context: ?*anyopaque,
    file: std.fs.File,
    offset: std.io.Writer.Offset,
    limit: std.io.Writer.Limit,
    headers_and_trailers: []const []const u8,
    headers_len: usize,
) Error!usize {
    // This stub never inspects its arguments; discard them all at once.
    _ = .{ context, file, offset, limit, headers_and_trailers, headers_len };
    return error.Unimplemented;
}
test {
// Reference the `Null` declaration so its nested tests are included in
// this module's test run.
_ = Null;
}

View File

@ -1357,7 +1357,8 @@ fn linuxLookupNameFromHosts(
defer file.close();
var line_buf: [512]u8 = undefined;
var br = file.reader().buffered(&line_buf);
var file_reader = file.reader();
var br = file_reader.interface().buffered(&line_buf);
while (br.takeSentinel('\n')) |line| {
var split_it = mem.splitScalar(u8, line, '#');
const no_comment_line = split_it.first();
@ -1530,7 +1531,7 @@ const ResolvConf = struct {
/// Returns `error.StreamTooLong` if a line is longer than 512 bytes.
/// TODO: https://github.com/ziglang/zig/issues/2765 and https://github.com/ziglang/zig/issues/2761
fn getResolvConf(allocator: mem.Allocator, rc: *ResolvConf) !void {
rc.* = ResolvConf{
rc.* = .{
.ns = std.ArrayList(LookupAddr).init(allocator),
.search = std.ArrayList(u8).init(allocator),
.ndots = 1,
@ -1549,7 +1550,8 @@ fn getResolvConf(allocator: mem.Allocator, rc: *ResolvConf) !void {
defer file.close();
var line_buf: [512]u8 = undefined;
var br = file.reader().buffered(&line_buf);
var file_reader = file.reader();
var br = file_reader.interface().buffered(&line_buf);
while (br.takeSentinel('\n')) |line_with_comment| {
const line = line: {
var split = mem.splitScalar(u8, line_with_comment, '#');