build_runner: fix compile errors

This commit is contained in:
Jacob Young 2025-04-17 12:29:32 -04:00 committed by Andrew Kelley
parent 3b390e4f13
commit ffb0e283d7
27 changed files with 466 additions and 365 deletions

View File

@ -379,9 +379,11 @@ pub fn main() !void {
validateSystemLibraryOptions(builder);
{
var stdout_bw = std.fs.File.stdout().writer().buffered(&stdio_buffer);
if (help_menu) return usage(builder, &stdout_bw);
if (steps_menu) return steps(builder, &stdout_bw);
var fw = std.fs.File.stdout().writer();
var bw = fw.interface().buffered(&stdio_buffer);
defer bw.flush() catch {};
if (help_menu) return usage(builder, &bw);
if (steps_menu) return steps(builder, &bw);
}
var run: Run = .{
@ -694,16 +696,13 @@ fn runStepNames(
const ttyconf = run.ttyconf;
if (run.summary != .none) {
var bw = std.debug.lockStdErr2(&stdio_buffer);
defer {
bw.flush() catch {};
std.debug.unlockStdErr();
}
const bw = std.debug.lockStderrWriter(&stdio_buffer);
defer std.debug.unlockStderrWriter();
const total_count = success_count + failure_count + pending_count + skipped_count;
ttyconf.setColor(&bw, .cyan) catch {};
ttyconf.setColor(bw, .cyan) catch {};
bw.writeAll("Build Summary:") catch {};
ttyconf.setColor(&bw, .reset) catch {};
ttyconf.setColor(bw, .reset) catch {};
bw.print(" {d}/{d} steps succeeded", .{ success_count, total_count }) catch {};
if (skipped_count > 0) bw.print("; {d} skipped", .{skipped_count}) catch {};
if (failure_count > 0) bw.print("; {d} failed", .{failure_count}) catch {};
@ -713,8 +712,6 @@ fn runStepNames(
if (test_fail_count > 0) bw.print("; {d} failed", .{test_fail_count}) catch {};
if (test_leak_count > 0) bw.print("; {d} leaked", .{test_leak_count}) catch {};
bw.writeByte('\n') catch {};
// Print a fancy tree with build results.
var step_stack_copy = try step_stack.clone(gpa);
defer step_stack_copy.deinit(gpa);
@ -722,7 +719,7 @@ fn runStepNames(
var print_node: PrintNode = .{ .parent = null };
if (step_names.len == 0) {
print_node.last = true;
printTreeStep(b, b.default_step, run, &bw, ttyconf, &print_node, &step_stack_copy) catch {};
printTreeStep(b, b.default_step, run, bw, ttyconf, &print_node, &step_stack_copy) catch {};
} else {
const last_index = if (run.summary == .all) b.top_level_steps.count() else blk: {
var i: usize = step_names.len;
@ -741,9 +738,10 @@ fn runStepNames(
for (step_names, 0..) |step_name, i| {
const tls = b.top_level_steps.get(step_name).?;
print_node.last = i + 1 == last_index;
printTreeStep(b, &tls.step, run, &bw, ttyconf, &print_node, &step_stack_copy) catch {};
printTreeStep(b, &tls.step, run, bw, ttyconf, &print_node, &step_stack_copy) catch {};
}
}
bw.writeByte('\n') catch {};
}
if (failure_count == 0) {
@ -1129,11 +1127,11 @@ fn workerMakeOneStep(
const show_stderr = s.result_stderr.len > 0;
if (show_error_msgs or show_compile_errors or show_stderr) {
var bw = std.debug.lockStdErr2(&stdio_buffer);
defer std.debug.unlockStdErr();
const bw = std.debug.lockStderrWriter(&stdio_buffer);
defer std.debug.unlockStderrWriter();
const gpa = b.allocator;
printErrorMessages(gpa, s, .{ .ttyconf = run.ttyconf }, &bw, run.prominent_compile_errors) catch {};
printErrorMessages(gpa, s, .{ .ttyconf = run.ttyconf }, bw, run.prominent_compile_errors) catch {};
}
handle_result: {

View File

@ -2766,7 +2766,7 @@ fn dumpBadDirnameHelp(
comptime msg: []const u8,
args: anytype,
) anyerror!void {
const w = debug.lockStderrWriter();
const w = debug.lockStderrWriter(&.{});
defer debug.unlockStderrWriter();
const stderr: fs.File = .stderr();
@ -2802,9 +2802,9 @@ pub fn dumpBadGetPathHelp(
src_builder: *Build,
asking_step: ?*Step,
) anyerror!void {
var buffered_writer = stderr.writer().unbuffered();
const w = &buffered_writer;
try w.print(
var fw = stderr.writer();
var bw = fw.interface().unbuffered();
try bw.print(
\\getPath() was called on a GeneratedFile that wasn't built yet.
\\ source package path: {s}
\\ Is there a missing Step dependency on step '{s}'?
@ -2815,21 +2815,21 @@ pub fn dumpBadGetPathHelp(
});
const tty_config = std.io.tty.detectConfig(stderr);
tty_config.setColor(w, .red) catch {};
tty_config.setColor(&bw, .red) catch {};
try stderr.writeAll(" The step was created by this stack trace:\n");
tty_config.setColor(w, .reset) catch {};
tty_config.setColor(&bw, .reset) catch {};
s.dump(stderr);
if (asking_step) |as| {
tty_config.setColor(w, .red) catch {};
try w.print(" The step '{s}' that is missing a dependency on the above step was created by this stack trace:\n", .{as.name});
tty_config.setColor(w, .reset) catch {};
tty_config.setColor(&bw, .red) catch {};
try bw.print(" The step '{s}' that is missing a dependency on the above step was created by this stack trace:\n", .{as.name});
tty_config.setColor(&bw, .reset) catch {};
as.dump(stderr);
}
tty_config.setColor(w, .red) catch {};
tty_config.setColor(&bw, .red) catch {};
try stderr.writeAll(" Hope that helps. Proceeding to panic.\n");
tty_config.setColor(w, .reset) catch {};
tty_config.setColor(&bw, .reset) catch {};
}
pub const InstallDir = union(enum) {

View File

@ -68,7 +68,7 @@ const PrefixedPath = struct {
fn findPrefix(cache: *const Cache, file_path: []const u8) !PrefixedPath {
const gpa = cache.gpa;
const resolved_path = try fs.path.resolve(gpa, &[_][]const u8{file_path});
const resolved_path = try fs.path.resolve(gpa, &.{file_path});
errdefer gpa.free(resolved_path);
return findPrefixResolved(cache, resolved_path);
}
@ -132,7 +132,7 @@ pub const Hasher = crypto.auth.siphash.SipHash128(1, 3);
/// Initial state with random bytes, that can be copied.
/// Refresh this with new random bytes when the manifest
/// format is modified in a non-backwards-compatible way.
pub const hasher_init: Hasher = Hasher.init(&[_]u8{
pub const hasher_init: Hasher = Hasher.init(&.{
0x33, 0x52, 0xa2, 0x84,
0xcf, 0x17, 0x56, 0x57,
0x01, 0xbb, 0xcd, 0xe4,
@ -1143,7 +1143,8 @@ pub const Manifest = struct {
}
try manifest_file.setEndPos(contents.items.len);
try manifest_file.pwriteAll(contents.items, 0);
var pos: usize = 0;
while (pos < contents.items.len) pos += try manifest_file.pwrite(contents.items[pos..], pos);
}
if (self.want_shared_lock) {

View File

@ -124,7 +124,7 @@ fn rebuildTestsWorkerRunFallible(run: *Step.Run, ttyconf: std.io.tty.Config, par
const show_stderr = compile.step.result_stderr.len > 0;
if (show_error_msgs or show_compile_errors or show_stderr) {
const bw = std.debug.lockStderrWriter();
const bw = std.debug.lockStderrWriter(&.{});
defer std.debug.unlockStderrWriter();
build_runner.printErrorMessages(gpa, &compile.step, .{ .ttyconf = ttyconf }, bw, false) catch {};
}
@ -151,7 +151,7 @@ fn fuzzWorkerRun(
run.rerunInFuzzMode(web_server, unit_test_index, prog_node) catch |err| switch (err) {
error.MakeFailed => {
const bw = std.debug.lockStderrWriter();
const bw = std.debug.lockStderrWriter(&.{});
defer std.debug.unlockStderrWriter();
build_runner.printErrorMessages(gpa, &run.step, .{ .ttyconf = ttyconf }, bw, false) catch {};
return;

View File

@ -98,8 +98,16 @@ fn now(s: *const WebServer) i64 {
fn accept(ws: *WebServer, connection: std.net.Server.Connection) void {
defer connection.stream.close();
var read_buffer: [0x4000]u8 = undefined;
var server = std.http.Server.init(connection, &read_buffer);
var sr = connection.stream.reader();
var rb: [0x4000]u8 = undefined;
var br: std.io.BufferedReader = undefined;
br.init(sr.interface(), &rb);
var sw = connection.stream.writer();
var wb: [0x4000]u8 = undefined;
var bw = sw.interface().buffered(&wb);
var server: std.http.Server = .init(&br, &bw);
var web_socket: std.http.WebSocket = undefined;
var send_buffer: [0x4000]u8 = undefined;
var ws_recv_buffer: [0x4000]u8 align(4) = undefined;

View File

@ -287,7 +287,8 @@ pub fn cast(step: *Step, comptime T: type) ?*T {
/// For debugging purposes, prints identifying information about this Step.
pub fn dump(step: *Step, file: std.fs.File) void {
var bw = file.writer().unbuffered();
var fw = file.writer();
var bw = fw.interface().unbuffered();
const tty_config = std.io.tty.detectConfig(file);
const debug_info = std.debug.getSelfDebugInfo() catch |err| {
bw.print("Unable to dump stack trace: Unable to open debug info: {s}\n", .{
@ -469,7 +470,7 @@ pub fn evalZigProcess(
// This is intentionally printed for failure on the first build but not for
// subsequent rebuilds.
if (s.result_error_bundle.errorMessageCount() > 0) {
return s.fail("the following command failed with {d} compilation errors:\n{s}\n", .{
return s.fail("the following command failed with {d} compilation errors:\n{s}", .{
s.result_error_bundle.errorMessageCount(),
try allocPrintCmd(arena, null, argv),
});
@ -689,7 +690,7 @@ pub inline fn handleChildProcUnsupported(
) error{ OutOfMemory, MakeFailed }!void {
if (!std.process.can_spawn) {
return s.fail(
"unable to execute the following command: host cannot spawn child processes\n{s}\n",
"unable to execute the following command: host cannot spawn child processes\n{s}",
.{try allocPrintCmd(s.owner.allocator, opt_cwd, argv)},
);
}
@ -706,14 +707,14 @@ pub fn handleChildProcessTerm(
.Exited => |code| {
if (code != 0) {
return s.fail(
"the following command exited with error code {d}:\n{s}\n",
"the following command exited with error code {d}:\n{s}",
.{ code, try allocPrintCmd(arena, opt_cwd, argv) },
);
}
},
.Signal, .Stopped, .Unknown => {
return s.fail(
"the following command terminated unexpectedly:\n{s}\n",
"the following command terminated unexpectedly:\n{s}",
.{try allocPrintCmd(arena, opt_cwd, argv)},
);
},

View File

@ -1791,7 +1791,7 @@ const ElfDumper = struct {
.p64 => @sizeOf(u64),
};
try br.discard(num * ptr_size);
const strtab = try br.peekAll(0);
const strtab = try br.peekGreedy(0);
assert(ctx.symtab.len == 0);
ctx.symtab = try ctx.gpa.alloc(ArSymtabEntry, num);

View File

@ -73,9 +73,12 @@ skip_foreign_checks: bool,
/// external executor (such as qemu) but not fail if the executor is unavailable.
failing_to_execute_foreign_is_an_error: bool,
/// Deprecated for `stdio_limit`.
max_stdio_size: usize,
/// If stderr or stdout exceeds this amount, the child process is killed and
/// the step fails.
max_stdio_size: usize,
stdio_limit: std.io.Reader.Limit,
captured_stdout: ?*Output,
captured_stderr: ?*Output,
@ -186,6 +189,7 @@ pub fn create(owner: *std.Build, name: []const u8) *Run {
.skip_foreign_checks = false,
.failing_to_execute_foreign_is_an_error = true,
.max_stdio_size = 10 * 1024 * 1024,
.stdio_limit = .unlimited,
.captured_stdout = null,
.captured_stderr = null,
.dep_output_file = null,
@ -1772,6 +1776,7 @@ fn evalGeneric(run: *Run, child: *std.process.Child) !StdIoResult {
var stdout_bytes: ?[]const u8 = null;
var stderr_bytes: ?[]const u8 = null;
run.stdio_limit = .limited(run.stdio_limit.min(run.max_stdio_size));
if (child.stdout) |stdout| {
if (child.stderr) |stderr| {
var poller = std.io.poll(arena, enum { stdout, stderr }, .{
@ -1781,19 +1786,21 @@ fn evalGeneric(run: *Run, child: *std.process.Child) !StdIoResult {
defer poller.deinit();
while (try poller.poll()) {
if (poller.fifo(.stdout).count > run.max_stdio_size)
return error.StdoutStreamTooLong;
if (poller.fifo(.stderr).count > run.max_stdio_size)
return error.StderrStreamTooLong;
if (run.stdio_limit.toInt()) |limit| {
if (poller.fifo(.stdout).count > limit)
return error.StdoutStreamTooLong;
if (poller.fifo(.stderr).count > limit)
return error.StderrStreamTooLong;
}
}
stdout_bytes = try poller.fifo(.stdout).toOwnedSlice();
stderr_bytes = try poller.fifo(.stderr).toOwnedSlice();
} else {
stdout_bytes = try stdout.reader().readAlloc(arena, run.max_stdio_size);
stdout_bytes = try stdout.readToEndAlloc(arena, run.stdio_limit);
}
} else if (child.stderr) |stderr| {
stderr_bytes = try stderr.reader().readAlloc(arena, run.max_stdio_size);
stderr_bytes = try stderr.readToEndAlloc(arena, run.stdio_limit);
}
if (stderr_bytes) |bytes| if (bytes.len > 0) {

View File

@ -239,16 +239,41 @@ fn SipHash(comptime T: type, comptime c_rounds: usize, comptime d_rounds: usize)
return State.hash(msg, key);
}
pub const Error = error{};
pub const Writer = std.io.Writer(*Self, Error, write);
fn write(self: *Self, bytes: []const u8) Error!usize {
self.update(bytes);
return bytes.len;
pub fn writer(self: *Self) std.io.Writer {
return .{
.context = self,
.vtable = &.{
.writeSplat = &writeSplat,
.writeFile = &writeFile,
},
};
}
pub fn writer(self: *Self) Writer {
return .{ .context = self };
fn writeSplat(ctx: ?*anyopaque, data: []const []const u8, splat: usize) anyerror!usize {
const self: *Self = @alignCast(@ptrCast(ctx));
var len: usize = 0;
for (0..splat) |_| for (data) |slice| {
self.update(slice);
len += slice.len;
};
return len;
}
fn writeFile(
ctx: ?*anyopaque,
file: std.fs.File,
offset: std.io.Writer.Offset,
limit: std.io.Writer.Limit,
headers_and_trailers: []const []const u8,
headers_len: usize,
) anyerror!usize {
_ = ctx;
_ = file;
_ = offset;
_ = limit;
_ = headers_and_trailers;
_ = headers_len;
return error.Unimplemented;
}
};
}

View File

@ -2052,7 +2052,7 @@ pub fn readFileIntoArrayList(
try list.ensureUnusedCapacity(gpa, std.math.cast(usize, size) orelse return error.FileTooBig);
} else |err| switch (err) {
// Ignore most errors; size hint is only an optimization.
error.Unseekable, error.Unexpected, error.AccessDenied, error.PermissionDenied => {},
error.Unexpected, error.AccessDenied, error.PermissionDenied => {},
else => |e| return e,
}

View File

@ -115,14 +115,14 @@ pub const ConnectionPool = struct {
///
/// Threadsafe.
pub fn release(pool: *ConnectionPool, allocator: Allocator, connection: *Connection) void {
if (connection.closing) return connection.destroy(allocator);
if (connection.closing) return connection.destroy();
pool.mutex.lock();
defer pool.mutex.unlock();
pool.used.remove(&connection.pool_node);
if (pool.free_size == 0) return connection.destroy(allocator);
if (pool.free_size == 0) return connection.destroy();
if (pool.free_len >= pool.free_size) {
const popped: *Connection = @fieldParentPtr("pool_node", pool.free.popFirst().?);

View File

@ -21,6 +21,9 @@ out: *std.io.BufferedWriter,
state: State,
head_parse_err: Request.Head.ParseError,
/// being deleted...
next_request_start: usize = 0,
pub const State = enum {
/// The connection is available to be used for the first time, or reused.
ready,
@ -45,6 +48,7 @@ pub fn init(in: *std.io.BufferedReader, out: *std.io.BufferedWriter) Server {
.in = in,
.out = out,
.state = .ready,
.head_parse_err = undefined,
};
}
@ -63,7 +67,7 @@ pub const ReceiveHeadError = error{
/// In other words, a keep-alive connection was finally closed.
HttpConnectionClosing,
/// Transitive error occurred reading from `in`.
ReadFailure,
ReadFailed,
};
/// The header bytes reference the internal storage of `in`, which are
@ -73,7 +77,7 @@ pub fn receiveHead(s: *Server) ReceiveHeadError!Request {
s.state = .received_head;
errdefer s.state = .receiving_head;
const in = &s.in;
const in = s.in;
var hp: http.HeadParser = .{};
var head_end: usize = 0;
@ -84,7 +88,7 @@ pub fn receiveHead(s: *Server) ReceiveHeadError!Request {
0 => return error.HttpConnectionClosing,
else => return error.HttpRequestTruncated,
},
error.ReadFailure => return error.ReadFailure,
error.ReadFailed => return error.ReadFailed,
};
head_end += hp.feed(buf[head_end..]);
if (hp.state == .finished) return .{
@ -279,7 +283,7 @@ pub const Request = struct {
};
pub fn iterateHeaders(r: *Request) http.HeaderIterator {
return http.HeaderIterator.init(r.in.bufferContents()[0..r.head_end]);
return http.HeaderIterator.init(r.server.in.bufferContents()[0..r.head_end]);
}
test iterateHeaders {
@ -398,8 +402,7 @@ pub const Request = struct {
h.appendSliceAssumeCapacity("HTTP/1.1 417 Expectation Failed\r\n");
if (!keep_alive) h.appendSliceAssumeCapacity("connection: close\r\n");
h.appendSliceAssumeCapacity("content-length: 0\r\n\r\n");
var w = request.server.connection.stream.writer().unbuffered();
try w.writeAll(h.items);
try request.server.out.writeAll(h.items);
return;
}
h.printAssumeCapacity("{s} {d} {s}\r\n", .{
@ -472,8 +475,7 @@ pub const Request = struct {
}
}
var w = request.server.connection.stream.writer().unbuffered();
try w.writevAll(iovecs[0..iovecs_len]);
try request.server.out.writeVecAll(iovecs[0..iovecs_len]);
}
pub const RespondStreamingOptions = struct {
@ -553,7 +555,7 @@ pub const Request = struct {
};
return .{
.stream = request.server.connection.stream,
.out = request.server.out,
.send_buffer = options.send_buffer,
.send_buffer_start = 0,
.send_buffer_end = h.items.len,
@ -577,7 +579,7 @@ pub const Request = struct {
ctx: ?*anyopaque,
bw: *std.io.BufferedWriter,
limit: std.io.Reader.Limit,
) std.io.Reader.Error!std.io.Reader.Status {
) std.io.Reader.Error!usize {
const request: *Request = @alignCast(@ptrCast(ctx));
_ = request;
_ = bw;
@ -585,13 +587,20 @@ pub const Request = struct {
@panic("TODO");
}
fn contentLengthReader_readv(ctx: ?*anyopaque, data: []const []u8) std.io.Reader.Error!usize {
fn contentLengthReader_readVec(ctx: ?*anyopaque, data: []const []u8) std.io.Reader.Error!usize {
const request: *Request = @alignCast(@ptrCast(ctx));
_ = request;
_ = data;
@panic("TODO");
}
fn contentLengthReader_discard(ctx: ?*anyopaque, limit: std.io.Reader.Limit) std.io.Reader.Error!usize {
const request: *Request = @alignCast(@ptrCast(ctx));
_ = request;
_ = limit;
@panic("TODO");
}
fn chunkedReader_read(
ctx: ?*anyopaque,
bw: *std.io.BufferedWriter,
@ -604,13 +613,20 @@ pub const Request = struct {
@panic("TODO");
}
fn chunkedReader_readv(ctx: ?*anyopaque, data: []const []u8) std.io.Reader.Error!usize {
fn chunkedReader_readVec(ctx: ?*anyopaque, data: []const []u8) std.io.Reader.Error!usize {
const request: *Request = @alignCast(@ptrCast(ctx));
_ = request;
_ = data;
@panic("TODO");
}
fn chunkedReader_discard(ctx: ?*anyopaque, limit: std.io.Reader.Limit) std.io.Reader.Error!usize {
const request: *Request = @alignCast(@ptrCast(ctx));
_ = request;
_ = limit;
@panic("TODO");
}
fn read_cl(context: *const anyopaque, buffer: []u8) ReadError!usize {
const request: *Request = @alignCast(@ptrCast(context));
const s = request.server;
@ -751,8 +767,7 @@ pub const Request = struct {
if (request.head.expect) |expect| {
if (mem.eql(u8, expect, "100-continue")) {
var w = request.server.connection.stream.writer().unbuffered();
try w.writeAll("HTTP/1.1 100 Continue\r\n\r\n");
try request.server.out.writeAll("HTTP/1.1 100 Continue\r\n\r\n");
request.head.expect = null;
} else {
return error.HttpExpectationFailed;
@ -766,7 +781,8 @@ pub const Request = struct {
.context = request,
.vtable = &.{
.read = &chunkedReader_read,
.readv = &chunkedReader_readv,
.readVec = &chunkedReader_readVec,
.discard = &chunkedReader_discard,
},
};
},
@ -778,7 +794,8 @@ pub const Request = struct {
.context = request,
.vtable = &.{
.read = &contentLengthReader_read,
.readv = &contentLengthReader_readv,
.readVec = &contentLengthReader_readVec,
.discard = &contentLengthReader_discard,
},
};
},
@ -801,7 +818,7 @@ pub const Request = struct {
if (keep_alive and request.head.keep_alive) switch (s.state) {
.received_head => {
const r = request.reader() catch return false;
_ = r.discardUntilEnd() catch return false;
_ = r.discardRemaining() catch return false;
assert(s.state == .ready);
return true;
},
@ -819,7 +836,7 @@ pub const Request = struct {
};
pub const Response = struct {
stream: net.Stream,
out: *std.io.BufferedWriter,
send_buffer: []u8,
/// Index of the first byte in `send_buffer`.
/// This is 0 unless a short write happens in `write`.
@ -909,7 +926,7 @@ pub const Response = struct {
_ = limit;
_ = headers_and_trailers;
_ = headers_len;
return error.Unimplemented;
@panic("TODO");
}
fn cl_write(context: ?*anyopaque, bytes: []const u8) std.io.Writer.Error!usize {
@ -932,8 +949,7 @@ pub const Response = struct {
r.send_buffer[r.send_buffer_start..][0..send_buffer_len],
bytes,
};
var w = r.stream.writer().unbuffered();
const n = try w.writev(&iovecs);
const n = try r.out.writeVec(&iovecs);
if (n >= send_buffer_len) {
// It was enough to reset the buffer.
@ -976,7 +992,7 @@ pub const Response = struct {
_ = limit;
_ = headers_and_trailers;
_ = headers_len;
return error.Unimplemented; // TODO lower to a call to writeFile on the output
@panic("TODO"); // TODO lower to a call to writeFile on the output
}
fn chunked_write(context: ?*anyopaque, bytes: []const u8) std.io.Writer.Error!usize {
@ -1001,8 +1017,7 @@ pub const Response = struct {
};
// TODO make this writev instead of writevAll, which involves
// complicating the logic of this function.
var w = r.stream.writer().unbuffered();
try w.writevAll(&iovecs);
try r.out.writeVecAll(&iovecs);
r.send_buffer_start = 0;
r.send_buffer_end = 0;
r.chunk_len = 0;
@ -1036,8 +1051,7 @@ pub const Response = struct {
}
fn flush_cl(r: *Response) std.io.Writer.Error!void {
var w = r.stream.writer().unbuffered();
try w.writeAll(r.send_buffer[r.send_buffer_start..r.send_buffer_end]);
try r.out.writeAll(r.send_buffer[r.send_buffer_start..r.send_buffer_end]);
r.send_buffer_start = 0;
r.send_buffer_end = 0;
}
@ -1050,8 +1064,7 @@ pub const Response = struct {
const http_headers = r.send_buffer[r.send_buffer_start .. r.send_buffer_end - r.chunk_len];
if (r.elide_body) {
var w = r.stream.writer().unbuffered();
try w.writeAll(http_headers);
try r.out.writeAll(http_headers);
r.send_buffer_start = 0;
r.send_buffer_end = 0;
r.chunk_len = 0;
@ -1102,8 +1115,7 @@ pub const Response = struct {
iovecs_len += 1;
}
var w = r.stream.writer().unbuffered();
try w.writevAll(iovecs[0..iovecs_len]);
try r.out.writeVecAll(iovecs[0..iovecs_len]);
r.send_buffer_start = 0;
r.send_buffer_end = 0;
r.chunk_len = 0;

View File

@ -194,16 +194,14 @@ fn recvReadInt(ws: *WebSocket, comptime I: type) !I {
};
}
pub const WriteError = std.http.Server.Response.WriteError;
pub fn writeMessage(ws: *WebSocket, message: []const u8, opcode: Opcode) WriteError!void {
pub fn writeMessage(ws: *WebSocket, message: []const u8, opcode: Opcode) std.io.Writer.Error!void {
const iovecs: [1]std.posix.iovec_const = .{
.{ .base = message.ptr, .len = message.len },
};
return writeMessagev(ws, &iovecs, opcode);
}
pub fn writeMessagev(ws: *WebSocket, message: []const std.posix.iovec_const, opcode: Opcode) WriteError!void {
pub fn writeMessagev(ws: *WebSocket, message: []const std.posix.iovec_const, opcode: Opcode) std.io.Writer.Error!void {
const total_len = l: {
var total_len: u64 = 0;
for (message) |iovec| total_len += iovec.len;

View File

@ -93,9 +93,8 @@ pub fn toArrayList(aw: *AllocatingWriter) std.ArrayListUnmanaged(u8) {
}
pub fn toOwnedSlice(aw: *AllocatingWriter) error{OutOfMemory}![]u8 {
const gpa = aw.allocator;
var list = toArrayList(aw);
return list.toOwnedSlice(gpa);
var list = aw.toArrayList();
return list.toOwnedSlice(aw.allocator);
}
pub fn toOwnedSliceSentinel(aw: *AllocatingWriter, comptime sentinel: u8) error{OutOfMemory}![:sentinel]u8 {

View File

@ -97,19 +97,20 @@ pub fn seekForwardBy(br: *BufferedReader, seek_by: u64) !void {
br.seek = seek;
}
/// Returns the next `n` bytes from `unbuffered_reader`, filling the buffer as
/// Returns the next `len` bytes from `unbuffered_reader`, filling the buffer as
/// necessary.
///
/// Invalidates previously returned values from `peek`.
///
/// Asserts that the `BufferedReader` was initialized with a buffer capacity at
/// least as big as `n`.
/// least as big as `len`.
///
/// If there are fewer than `n` bytes left in the stream, `error.EndOfStream`
/// If there are fewer than `len` bytes left in the stream, `error.EndOfStream`
/// is returned instead.
///
/// See also:
/// * `peekGreedy`
/// * `peek`
/// * `tryPeekArray`
/// * `toss`
pub fn peek(br: *BufferedReader, n: usize) Reader.Error![]u8 {
const storage = &br.storage;
@ -119,18 +120,19 @@ pub fn peek(br: *BufferedReader, n: usize) Reader.Error![]u8 {
}
/// Returns all the next buffered bytes from `unbuffered_reader`, after filling
/// the buffer to ensure it contains at least `n` bytes.
/// the buffer to ensure it contains at least `min_len` bytes.
///
/// Invalidates previously returned values from `peek` and `peekGreedy`.
///
/// Asserts that the `BufferedReader` was initialized with a buffer capacity at
/// least as big as `n`.
/// least as big as `min_len`.
///
/// If there are fewer than `n` bytes left in the stream, `error.EndOfStream`
/// If there are fewer than `min_len` bytes left in the stream, `error.EndOfStream`
/// is returned instead.
///
/// See also:
/// * `peek`
/// * `tryPeekGreedy`
/// * `toss`
pub fn peekGreedy(br: *BufferedReader, n: usize) Reader.Error![]u8 {
const storage = &br.storage;
@ -214,7 +216,7 @@ pub fn discardShort(br: *BufferedReader, n: usize) Reader.ShortError!usize {
storage.end = 0;
br.seek = 0;
while (true) {
const discard_len = br.unbuffered_reader.discard(remaining, .unlimited) catch |err| switch (err) {
const discard_len = br.unbuffered_reader.discard(.limited(remaining)) catch |err| switch (err) {
error.EndOfStream => return n - remaining,
error.ReadFailed => return error.ReadFailed,
};
@ -564,7 +566,7 @@ pub inline fn takeStructEndian(br: *BufferedReader, comptime T: type, endian: st
/// it. Otherwise, returns `error.InvalidEnumTag`.
///
/// Asserts the buffer was initialized with a capacity at least `@sizeOf(Enum)`.
pub fn takeEnum(br: *BufferedReader, comptime Enum: type, endian: std.builtin.Endian) Reader.Error!Enum {
pub fn takeEnum(br: *BufferedReader, comptime Enum: type, endian: std.builtin.Endian) (Reader.Error || std.meta.IntToEnumError)!Enum {
const Tag = @typeInfo(Enum).@"enum".tag_type;
const int = try br.takeInt(Tag, endian);
return std.meta.intToEnum(Enum, int);

View File

@ -83,6 +83,11 @@ pub fn unusedCapacitySlice(bw: *const BufferedWriter) []u8 {
return bw.buffer[bw.end..];
}
/// Asserts the provided buffer has total capacity enough for `len`.
pub fn writableArray(bw: *BufferedWriter, comptime len: usize) anyerror!*[len]u8 {
return (try bw.writableSlice(len))[0..len];
}
/// Asserts the provided buffer has total capacity enough for `minimum_length`.
pub fn writableSlice(bw: *BufferedWriter, minimum_length: usize) Writer.Error![]u8 {
assert(bw.buffer.len >= minimum_length);

View File

@ -6,7 +6,7 @@ pub fn MultiWriter(comptime Writers: type) type {
comptime var ErrSet = error{};
inline for (@typeInfo(Writers).@"struct".fields) |field| {
const StreamType = field.type;
ErrSet = ErrSet || StreamType.Error;
ErrSet = ErrSet || if (@hasDecl(StreamType, "Error")) StreamType.Error else anyerror;
}
return struct {

View File

@ -1817,7 +1817,10 @@ pub const Stream = struct {
/// interchangeable with a file system file descriptor.
handle: Handle,
pub const Handle = if (native_os == .windows) windows.ws2_32.SOCKET else posix.fd_t;
pub const Handle = switch (native_os) {
.windows => windows.ws2_32.SOCKET,
else => posix.fd_t,
};
pub fn close(s: Stream) void {
switch (native_os) {
@ -1826,238 +1829,274 @@ pub const Stream = struct {
}
}
pub const ReadError = posix.ReadError;
pub const WriteError = posix.SendMsgError || error{
ConnectionResetByPeer,
SocketNotBound,
MessageTooBig,
NetworkSubsystemFailed,
SystemResources,
SocketNotConnected,
Unexpected,
pub const Reader = struct {
impl: switch (native_os) {
.windows => Stream,
else => struct {
fr: std.fs.File.Reader,
err: Error!void,
},
},
pub const Error = posix.ReadError;
pub fn interface(r: *Reader) std.io.Reader {
return switch (native_os) {
.windows => .{
.context = r.impl.stream.handle,
.vtable = &.{
.read = windows_read,
.readVec = windows_readVec,
.discard = windows_discard,
},
},
else => r.impl.fr.interface(), // NOTE(review): was `r.interface()`, which recurses infinitely — confirm intended delegate
};
}
fn windows_read(
context: ?*anyopaque,
bw: *std.io.BufferedWriter,
limit: std.io.Reader.Limit,
) std.io.Reader.Error!usize {
const buf = limit.slice(try bw.writableSlice(1));
const status = try windows_readVec(context, &.{buf});
bw.advance(status.len);
return status;
}
fn windows_readVec(context: ?*anyopaque, data: []const []u8) std.io.Reader.Error!usize {
var iovecs: [max_buffers_len]windows.WSABUF = undefined;
var iovecs_i: usize = 0;
for (data) |d| {
// In case Windows checks pointer address before length, we must omit
// length-zero vectors.
if (d.len == 0) continue;
iovecs[iovecs_i] = .{ .buf = d.ptr, .len = d.len };
iovecs_i += 1;
if (iovecs_i >= iovecs.len) break;
}
const bufs = iovecs[0..iovecs_i];
if (bufs.len == 0) return .{}; // Prevent false positive end detection on empty `data`.
var n: u32 = undefined;
var flags: u32 = 0;
const rc = windows.ws2_32.WSARecvFrom(context, bufs.ptr, bufs.len, &n, &flags, null, null, null, null);
if (rc != 0) switch (windows.ws2_32.WSAGetLastError()) {
.WSAECONNRESET => return error.ConnectionResetByPeer,
.WSAEFAULT => unreachable, // a pointer is not completely contained in user address space.
.WSAEINPROGRESS, .WSAEINTR => unreachable, // deprecated and removed in WSA 2.2
.WSAEINVAL => return error.SocketNotBound,
.WSAEMSGSIZE => return error.MessageTooBig,
.WSAENETDOWN => return error.NetworkSubsystemFailed,
.WSAENETRESET => return error.ConnectionResetByPeer,
.WSAENOTCONN => return error.SocketNotConnected,
.WSAEWOULDBLOCK => return error.WouldBlock,
.WSANOTINITIALISED => unreachable, // WSAStartup must be called before this function
.WSA_IO_PENDING => unreachable, // not using overlapped I/O
.WSA_OPERATION_ABORTED => unreachable, // not using overlapped I/O
else => |err| return windows.unexpectedWSAError(err),
};
return .{ .len = n, .end = n == 0 };
}
fn windows_discard(context: ?*anyopaque, limit: std.io.Reader.Limit) std.io.Reader.Error!usize {
_ = context;
_ = limit;
@panic("TODO");
}
};
pub fn reader(stream: Stream) std.io.Reader {
return .{
.context = handleToOpaque(stream.handle),
.vtable = switch (native_os) {
.windows => &.{
.read = windows_read,
.readv = windows_readv,
},
else => &.{
.read = std.fs.File.streamRead,
.readv = std.fs.File.streamReadVec,
},
},
pub const Writer = struct {
impl: switch (native_os) {
.windows => Stream,
else => PosixImpl,
},
const PosixImpl = struct {
fw: std.fs.File.Writer,
err: Error!void,
};
}
pub fn writer(stream: Stream) std.io.Writer {
return .{
.context = handleToOpaque(stream.handle),
.vtable = switch (native_os) {
.windows => &.{
.writeSplat = windows_writeSplat,
.writeFile = windows_writeFile,
},
else => &.{
.writeSplat = posix_writeSplat,
.writeFile = std.fs.File.writeFile,
},
},
pub const Error = posix.SendMsgError || error{
ConnectionResetByPeer,
SocketNotBound,
MessageTooBig,
NetworkSubsystemFailed,
SystemResources,
SocketNotConnected,
Unexpected,
};
}
fn windows_read(
context: ?*anyopaque,
bw: *std.io.BufferedWriter,
limit: std.io.Reader.Limit,
) std.io.Reader.Error!usize {
const buf = limit.slice(try bw.writableSlice(1));
const status = try windows_readv(context, &.{buf});
bw.advance(status.len);
return status;
}
fn windows_readv(context: ?*anyopaque, data: []const []u8) std.io.Reader.Error!usize {
var iovecs: [max_buffers_len]windows.WSABUF = undefined;
var iovecs_i: usize = 0;
for (data) |d| {
// In case Windows checks pointer address before length, we must omit
// length-zero vectors.
if (d.len == 0) continue;
iovecs[iovecs_i] = .{ .buf = d.ptr, .len = d.len };
iovecs_i += 1;
if (iovecs_i >= iovecs.len) break;
pub fn interface(w: *Writer) std.io.Writer {
return switch (native_os) {
.windows => .{
.context = w.impl.stream.handle,
.vtable = &.{
.writeSplat = windows_writeSplat,
.writeFile = windows_writeFile,
},
},
else => .{
.context = &w.impl,
.vtable = &.{
.writeSplat = posix_writeSplat,
.writeFile = std.fs.File.Writer.writeFile,
},
},
};
}
const bufs = iovecs[0..iovecs_i];
if (bufs.len == 0) return .{}; // Prevent false positive end detection on empty `data`.
const handle = opaqueToHandle(context);
var n: u32 = undefined;
var flags: u32 = 0;
const rc = windows.ws2_32.WSARecvFrom(handle, bufs.ptr, bufs.len, &n, &flags, null, null, null, null);
if (rc != 0) switch (windows.ws2_32.WSAGetLastError()) {
.WSAECONNRESET => return error.ConnectionResetByPeer,
.WSAEFAULT => unreachable, // a pointer is not completely contained in user address space.
.WSAEINPROGRESS, .WSAEINTR => unreachable, // deprecated and removed in WSA 2.2
.WSAEINVAL => return error.SocketNotBound,
.WSAEMSGSIZE => return error.MessageTooBig,
.WSAENETDOWN => return error.NetworkSubsystemFailed,
.WSAENETRESET => return error.ConnectionResetByPeer,
.WSAENOTCONN => return error.SocketNotConnected,
.WSAEWOULDBLOCK => return error.WouldBlock,
.WSANOTINITIALISED => unreachable, // WSAStartup must be called before this function
.WSA_IO_PENDING => unreachable, // not using overlapped I/O
.WSA_OPERATION_ABORTED => unreachable, // not using overlapped I/O
else => |err| return windows.unexpectedWSAError(err),
};
return .{ .len = n, .end = n == 0 };
}
fn windows_writeSplat(context: *anyopaque, data: []const []const u8, splat: usize) std.io.Writer.Error!usize {
comptime assert(native_os == .windows);
if (data.len == 1 and splat == 0) return 0;
var splat_buffer: [256]u8 = undefined;
var iovecs: [max_buffers_len]windows.WSABUF = undefined;
var len: u32 = @min(iovecs.len, data.len);
for (iovecs[0..len], data[0..len]) |*v, d| v.* = .{
.buf = if (d.len == 0) "" else d.ptr, // TODO: does Windows allow ptr=undefined len=0 ?
.len = d.len,
};
switch (splat) {
0 => len -= 1,
1 => {},
else => {
const pattern = data[data.len - 1];
if (pattern.len == 1) {
const memset_len = @min(splat_buffer.len, splat);
const buf = splat_buffer[0..memset_len];
@memset(buf, pattern[0]);
iovecs[len - 1] = .{ .base = buf.ptr, .len = buf.len };
var remaining_splat = splat - buf.len;
while (remaining_splat > splat_buffer.len and len < iovecs.len) {
iovecs[len] = .{ .base = &splat_buffer, .len = splat_buffer.len };
remaining_splat -= splat_buffer.len;
len += 1;
fn windows_writeSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) std.io.Writer.Error!usize {
comptime assert(native_os == .windows);
if (data.len == 1 and splat == 0) return 0;
var splat_buffer: [256]u8 = undefined;
var iovecs: [max_buffers_len]windows.WSABUF = undefined;
var len: u32 = @min(iovecs.len, data.len);
for (iovecs[0..len], data[0..len]) |*v, d| v.* = .{
.buf = if (d.len == 0) "" else d.ptr, // TODO: does Windows allow ptr=undefined len=0 ?
.len = d.len,
};
switch (splat) {
0 => len -= 1,
1 => {},
else => {
const pattern = data[data.len - 1];
if (pattern.len == 1) {
const memset_len = @min(splat_buffer.len, splat);
const buf = splat_buffer[0..memset_len];
@memset(buf, pattern[0]);
iovecs[len - 1] = .{ .base = buf.ptr, .len = buf.len };
var remaining_splat = splat - buf.len;
while (remaining_splat > splat_buffer.len and len < iovecs.len) {
iovecs[len] = .{ .base = &splat_buffer, .len = splat_buffer.len };
remaining_splat -= splat_buffer.len;
len += 1;
}
if (remaining_splat > 0 and len < iovecs.len) {
iovecs[len] = .{ .base = &splat_buffer, .len = remaining_splat };
len += 1;
}
}
if (remaining_splat > 0 and len < iovecs.len) {
iovecs[len] = .{ .base = &splat_buffer, .len = remaining_splat };
len += 1;
}
}
},
},
}
var n: u32 = undefined;
const rc = windows.ws2_32.WSASend(context, &iovecs, len, &n, 0, null, null);
if (rc == windows.ws2_32.SOCKET_ERROR) switch (windows.ws2_32.WSAGetLastError()) {
.WSAECONNABORTED => return error.ConnectionResetByPeer,
.WSAECONNRESET => return error.ConnectionResetByPeer,
.WSAEFAULT => unreachable, // a pointer is not completely contained in user address space.
.WSAEINPROGRESS, .WSAEINTR => unreachable, // deprecated and removed in WSA 2.2
.WSAEINVAL => return error.SocketNotBound,
.WSAEMSGSIZE => return error.MessageTooBig,
.WSAENETDOWN => return error.NetworkSubsystemFailed,
.WSAENETRESET => return error.ConnectionResetByPeer,
.WSAENOBUFS => return error.SystemResources,
.WSAENOTCONN => return error.SocketNotConnected,
.WSAENOTSOCK => unreachable, // not a socket
.WSAEOPNOTSUPP => unreachable, // only for message-oriented sockets
.WSAESHUTDOWN => unreachable, // cannot send on a socket after write shutdown
.WSAEWOULDBLOCK => return error.WouldBlock,
.WSANOTINITIALISED => unreachable, // WSAStartup must be called before this function
.WSA_IO_PENDING => unreachable, // not using overlapped I/O
.WSA_OPERATION_ABORTED => unreachable, // not using overlapped I/O
else => |err| return windows.unexpectedWSAError(err),
};
return n;
}
const handle = opaqueToHandle(context);
var n: u32 = undefined;
const rc = windows.ws2_32.WSASend(handle, &iovecs, len, &n, 0, null, null);
if (rc == windows.ws2_32.SOCKET_ERROR) switch (windows.ws2_32.WSAGetLastError()) {
.WSAECONNABORTED => return error.ConnectionResetByPeer,
.WSAECONNRESET => return error.ConnectionResetByPeer,
.WSAEFAULT => unreachable, // a pointer is not completely contained in user address space.
.WSAEINPROGRESS, .WSAEINTR => unreachable, // deprecated and removed in WSA 2.2
.WSAEINVAL => return error.SocketNotBound,
.WSAEMSGSIZE => return error.MessageTooBig,
.WSAENETDOWN => return error.NetworkSubsystemFailed,
.WSAENETRESET => return error.ConnectionResetByPeer,
.WSAENOBUFS => return error.SystemResources,
.WSAENOTCONN => return error.SocketNotConnected,
.WSAENOTSOCK => unreachable, // not a socket
.WSAEOPNOTSUPP => unreachable, // only for message-oriented sockets
.WSAESHUTDOWN => unreachable, // cannot send on a socket after write shutdown
.WSAEWOULDBLOCK => return error.WouldBlock,
.WSANOTINITIALISED => unreachable, // WSAStartup must be called before this function
.WSA_IO_PENDING => unreachable, // not using overlapped I/O
.WSA_OPERATION_ABORTED => unreachable, // not using overlapped I/O
else => |err| return windows.unexpectedWSAError(err),
};
return n;
}
fn posix_writeSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) std.io.Writer.Error!usize {
const sock_fd = opaqueToHandle(context);
comptime assert(native_os != .windows);
var splat_buffer: [256]u8 = undefined;
var iovecs: [max_buffers_len]std.posix.iovec_const = undefined;
var len: usize = @min(iovecs.len, data.len);
for (iovecs[0..len], data[0..len]) |*v, d| v.* = .{
.base = if (d.len == 0) "" else d.ptr, // OS sadly checks ptr addr before length.
.len = d.len,
};
var msg: posix.msghdr_const = .{
.name = null,
.namelen = 0,
.iov = &iovecs,
.iovlen = len,
.control = null,
.controllen = 0,
.flags = 0,
};
switch (splat) {
0 => msg.iovlen = len - 1,
1 => {},
else => {
const pattern = data[data.len - 1];
if (pattern.len == 1) {
const memset_len = @min(splat_buffer.len, splat);
const buf = splat_buffer[0..memset_len];
@memset(buf, pattern[0]);
iovecs[len - 1] = .{ .base = buf.ptr, .len = buf.len };
var remaining_splat = splat - buf.len;
while (remaining_splat > splat_buffer.len and len < iovecs.len) {
iovecs[len] = .{ .base = &splat_buffer, .len = splat_buffer.len };
remaining_splat -= splat_buffer.len;
len += 1;
fn posix_writeSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) std.io.Writer.Error!usize {
const fw: *std.fs.File.Writer = @alignCast(@ptrCast(context));
const impl: *PosixImpl = @fieldParentPtr("fw", fw);
comptime assert(native_os != .windows);
var splat_buffer: [256]u8 = undefined;
var iovecs: [max_buffers_len]std.posix.iovec_const = undefined;
var len: usize = @min(iovecs.len, data.len);
for (iovecs[0..len], data[0..len]) |*v, d| v.* = .{
.base = if (d.len == 0) "" else d.ptr, // OS sadly checks ptr addr before length.
.len = d.len,
};
var msg: posix.msghdr_const = .{
.name = null,
.namelen = 0,
.iov = &iovecs,
.iovlen = len,
.control = null,
.controllen = 0,
.flags = 0,
};
switch (splat) {
0 => msg.iovlen = len - 1,
1 => {},
else => {
const pattern = data[data.len - 1];
if (pattern.len == 1) {
const memset_len = @min(splat_buffer.len, splat);
const buf = splat_buffer[0..memset_len];
@memset(buf, pattern[0]);
iovecs[len - 1] = .{ .base = buf.ptr, .len = buf.len };
var remaining_splat = splat - buf.len;
while (remaining_splat > splat_buffer.len and len < iovecs.len) {
iovecs[len] = .{ .base = &splat_buffer, .len = splat_buffer.len };
remaining_splat -= splat_buffer.len;
len += 1;
}
if (remaining_splat > 0 and len < iovecs.len) {
iovecs[len] = .{ .base = &splat_buffer, .len = remaining_splat };
len += 1;
}
msg.iovlen = len;
}
if (remaining_splat > 0 and len < iovecs.len) {
iovecs[len] = .{ .base = &splat_buffer, .len = remaining_splat };
len += 1;
}
msg.iovlen = len;
}
},
},
}
const flags = posix.MSG.NOSIGNAL;
return std.posix.sendmsg(fw.file.handle, &msg, flags) catch |err| {
impl.err = err;
return error.WriteFailed;
};
}
const flags = posix.MSG.NOSIGNAL;
return std.posix.sendmsg(sock_fd, &msg, flags);
/// Windows fallback for `std.io.Writer`'s file-sending hook. Windows sockets
/// have no sendfile-style fast path here, so the file region is staged through
/// a small stack buffer and pushed out via `windows_writeSplat`. Headers are
/// flushed first; the caller re-invokes us until everything is consumed.
fn windows_writeFile(
    context: *anyopaque,
    in_file: std.fs.File,
    in_offset: u64,
    in_len: std.io.Writer.FileLen,
    headers_and_trailers: []const []const u8,
    headers_len: usize,
) std.io.Writer.FileError!usize {
    // An empty file region degenerates into a plain vectored write of the
    // headers and trailers.
    if (in_len == .zero) return windows_writeSplat(context, headers_and_trailers, 1);
    const byte_limit = if (in_len == .entire_file) std.math.maxInt(usize) else in_len.int();
    // Pending headers must hit the wire before any file contents.
    if (headers_len > 0) return windows_writeSplat(context, headers_and_trailers[0..headers_len], 1);
    var staging: [4096]u8 = undefined;
    const dest = staging[0..@min(staging.len, byte_limit)];
    const n = try windows.ReadFile(in_file.handle, dest, in_offset);
    return windows_writeSplat(context, &.{dest[0..n]}, 1);
}
};
/// Constructs a `Reader` for this stream. The branch is resolved at compile
/// time: Windows keeps the raw `Stream` around (reads go through WSARecvFrom),
/// while every other target wraps the socket fd in a `std.fs.File` reader
/// alongside an error slot.
pub fn reader(stream: Stream) Reader {
    if (native_os == .windows) return .{ .impl = stream };
    return .{ .impl = .{
        .fr = std.fs.File.reader(.{ .handle = stream.handle }),
        .err = {},
    } };
}
fn windows_writeFile(
context: *anyopaque,
in_file: std.fs.File,
in_offset: u64,
in_len: std.io.Writer.FileLen,
headers_and_trailers: []const []const u8,
headers_len: usize,
) std.io.Writer.FileError!usize {
const len_int = switch (in_len) {
.zero => return windows_writeSplat(context, headers_and_trailers, 1),
.entire_file => std.math.maxInt(usize),
else => in_len.int(),
pub fn writer(stream: Stream) Writer {
return switch (native_os) {
.windows => .{ .impl = stream },
else => .{ .impl = .{
.fw = std.fs.File.writer(.{ .handle = stream.handle }),
.err = {},
} },
};
if (headers_len > 0) return windows_writeSplat(context, headers_and_trailers[0..headers_len], 1);
var file_contents_buffer: [4096]u8 = undefined;
const read_buffer = file_contents_buffer[0..@min(file_contents_buffer.len, len_int)];
const n = try windows.ReadFile(in_file.handle, read_buffer, in_offset);
return windows_writeSplat(context, &.{read_buffer[0..n]}, 1);
}
/// Upper bound on the number of iovec/WSABUF entries passed to a single
/// vectored socket call (`WSARecvFrom`, `WSASend`, `sendmsg`).
const max_buffers_len = 8;
/// Erases a socket handle into the `?*anyopaque` context slot used by the
/// I/O vtables. `Handle` is a pointer-like HANDLE on some targets and an
/// integer file descriptor on others; both are smuggled through the pointer.
fn handleToOpaque(handle: Handle) ?*anyopaque {
    switch (@typeInfo(Handle)) {
        .pointer => return @ptrCast(handle),
        .int => {
            const bits: u32 = @bitCast(handle);
            return @ptrFromInt(bits);
        },
        else => @compileError("unhandled"),
    }
}
/// Inverse of `handleToOpaque`: recovers the socket handle previously erased
/// into the vtable's `?*anyopaque` context, dispatching at compile time on
/// whether `Handle` is a pointer or an integer fd.
fn opaqueToHandle(userdata: ?*anyopaque) Handle {
    switch (@typeInfo(Handle)) {
        .pointer => return @ptrCast(userdata),
        .int => {
            const addr = @intFromPtr(userdata);
            return @intCast(addr);
        },
        else => @compileError("unhandled"),
    }
}
};
pub const Server = struct {

View File

@ -1895,7 +1895,7 @@ pub fn createEnvironFromMap(
var i: usize = 0;
if (zig_progress_action == .add) {
envp_buf[i] = try std.fmt.allocPrintZ(arena, "ZIG_PROGRESS={d}", .{options.zig_progress_fd.?});
envp_buf[i] = try std.fmt.allocPrintSentinel(arena, "ZIG_PROGRESS={d}", .{options.zig_progress_fd.?}, 0);
i += 1;
}
@ -1906,16 +1906,16 @@ pub fn createEnvironFromMap(
.add => unreachable,
.delete => continue,
.edit => {
envp_buf[i] = try std.fmt.allocPrintZ(arena, "{s}={d}", .{
envp_buf[i] = try std.fmt.allocPrintSentinel(arena, "{s}={d}", .{
pair.key_ptr.*, options.zig_progress_fd.?,
});
}, 0);
i += 1;
continue;
},
.nothing => {},
};
envp_buf[i] = try std.fmt.allocPrintZ(arena, "{s}={s}", .{ pair.key_ptr.*, pair.value_ptr.* });
envp_buf[i] = try std.fmt.allocPrintSentinel(arena, "{s}={s}", .{ pair.key_ptr.*, pair.value_ptr.* }, 0);
i += 1;
}
}
@ -1965,7 +1965,7 @@ pub fn createEnvironFromExisting(
var existing_index: usize = 0;
if (zig_progress_action == .add) {
envp_buf[i] = try std.fmt.allocPrintZ(arena, "ZIG_PROGRESS={d}", .{options.zig_progress_fd.?});
envp_buf[i] = try std.fmt.allocPrintSentinel(arena, "ZIG_PROGRESS={d}", .{options.zig_progress_fd.?}, 0);
i += 1;
}
@ -1974,7 +1974,7 @@ pub fn createEnvironFromExisting(
.add => unreachable,
.delete => continue,
.edit => {
envp_buf[i] = try std.fmt.allocPrintZ(arena, "ZIG_PROGRESS={d}", .{options.zig_progress_fd.?});
envp_buf[i] = try std.fmt.allocPrintSentinel(arena, "ZIG_PROGRESS={d}", .{options.zig_progress_fd.?}, 0);
i += 1;
continue;
},

View File

@ -1003,18 +1003,18 @@ fn forkChildErrReport(fd: i32, err: ChildProcess.SpawnError) noreturn {
}
fn writeIntFd(fd: i32, value: ErrInt) !void {
const file: File = .{ .handle = fd };
var fw = std.fs.File.writer(.{ .handle = fd });
var buffer: [8]u8 = undefined;
std.mem.writeInt(u64, &buffer, @intCast(value), .little);
file.writeAll(&buffer) catch return error.SystemResorces;
var bw = fw.interface().buffered(&buffer);
bw.writeInt(u64, value, .little) catch return error.SystemResources;
}
fn readIntFd(fd: i32) !ErrInt {
const file: File = .{ .handle = fd };
var fr = std.fs.File.reader(.{ .handle = fd });
var buffer: [8]u8 = undefined;
const n = file.readAll(&buffer) catch return error.SystemResources;
if (n != buffer.len) return error.SystemResources;
return @intCast(std.mem.readInt(u64, &buffer, .little));
var br: std.io.BufferedReader = undefined;
br.init(fr.interface(), &buffer);
return @intCast(br.takeInt(u64, .little) catch return error.SystemResources);
}
const ErrInt = std.meta.Int(.unsigned, @sizeOf(anyerror) * 8);

View File

@ -163,7 +163,7 @@ pub fn renderToStdErr(eb: ErrorBundle, options: RenderOptions) void {
renderToWriter(eb, options, bw) catch return;
}
pub fn renderToWriter(eb: ErrorBundle, options: RenderOptions, bw: *std.io.BufferedWriter) std.io.Writer.Error!void {
pub fn renderToWriter(eb: ErrorBundle, options: RenderOptions, bw: *std.io.BufferedWriter) (std.io.Writer.Error || std.posix.UnexpectedError)!void {
if (eb.extra.len == 0) return;
for (eb.getMessages()) |err_msg| {
try renderErrorMessageToWriter(eb, options, err_msg, bw, "error", .red, 0);
@ -186,7 +186,7 @@ fn renderErrorMessageToWriter(
kind: []const u8,
color: std.io.tty.Color,
indent: usize,
) std.io.Writer.Error!void {
) (std.io.Writer.Error || std.posix.UnexpectedError)!void {
const ttyconf = options.ttyconf;
const err_msg = eb.getErrorMessage(err_msg_index);
const prefix_start = bw.count;

View File

@ -1367,10 +1367,7 @@ fn unpackGitPack(f: *Fetch, out_dir: fs.Dir, resource: *Resource.Git) anyerror!U
const index_prog_node = f.prog_node.start("Index pack", 0);
defer index_prog_node.end();
var buffer: [4096]u8 = undefined;
var index_buffered_writer: std.io.BufferedWriter = .{
.unbuffered_writer = index_file.writer(),
.buffer = &buffer,
};
var index_buffered_writer: std.io.BufferedWriter = index_file.writer().buffered(&buffer);
try git.indexPack(gpa, object_format, pack_file, &index_buffered_writer);
try index_buffered_writer.flush();
try index_file.sync();

View File

@ -78,6 +78,7 @@ pub const Env = enum {
.ast_gen,
.sema,
.legalize,
.c_compiler,
.llvm_backend,
.c_backend,
.wasm_backend,
@ -127,6 +128,7 @@ pub const Env = enum {
.clang_command,
.cc_command,
.translate_c_command,
.c_compiler,
=> true,
else => false,
},
@ -248,6 +250,8 @@ pub const Feature = enum {
sema,
legalize,
c_compiler,
llvm_backend,
c_backend,
wasm_backend,

View File

@ -1388,10 +1388,7 @@ const x86_64 = struct {
.{ .imm = .s(-129) },
}, t) catch return false;
var buf: [std.atomic.cache_line]u8 = undefined;
var bw: std.io.BufferedWriter = .{
.unbuffered_writer = .null,
.buffer = &buf,
};
var bw = std.io.Writer.null.buffered(&buf);
inst.encode(&bw, .{}) catch return false;
return true;
},
@ -1599,7 +1596,7 @@ const aarch64 = struct {
const diags = &elf_file.base.comp.link_diags;
const r_type: elf.R_AARCH64 = @enumFromInt(rel.r_type());
const r_offset = std.math.cast(usize, rel.r_offset) orelse return error.Overflow;
const code = (try bw.writableSlice(4))[0..4];
const code = try bw.writableArray(4);
const file_ptr = atom.file(elf_file).?;
const P, const A, const S, const GOT, const G, const TP, const DTP = args;
@ -1626,7 +1623,7 @@ const aarch64 = struct {
const S_ = th.targetAddress(target_index, elf_file);
break :blk math.cast(i28, S_ + A - P) orelse return error.Overflow;
};
aarch64_util.writeBranchImm(disp, (try bw.writableSlice(4))[0..4]);
aarch64_util.writeBranchImm(disp, code);
},
.PREL32 => {
@ -1897,26 +1894,26 @@ const riscv = struct {
.HI20 => {
const value: u32 = @bitCast(math.cast(i32, S + A) orelse return error.Overflow);
riscv_util.writeInstU((try bw.writableSlice(4))[0..4], value);
riscv_util.writeInstU(try bw.writableArray(4), value);
},
.GOT_HI20 => {
assert(target.flags.has_got);
const disp: u32 = @bitCast(math.cast(i32, G + GOT + A - P) orelse return error.Overflow);
riscv_util.writeInstU((try bw.writableSlice(4))[0..4], disp);
riscv_util.writeInstU(try bw.writableArray(4), disp);
},
.CALL_PLT => {
// TODO: relax
const disp: u32 = @bitCast(math.cast(i32, S + A - P) orelse return error.Overflow);
const code = (try bw.writableSlice(8))[0..8];
const code = try bw.writableArray(8);
riscv_util.writeInstU(code[0..4], disp); // auipc
riscv_util.writeInstI(code[4..8], disp); // jalr
},
.PCREL_HI20 => {
const disp: u32 = @bitCast(math.cast(i32, S + A - P) orelse return error.Overflow);
riscv_util.writeInstU((try bw.writableSlice(4))[0..4], disp);
riscv_util.writeInstU(try bw.writableArray(4), disp);
},
.PCREL_LO12_I,
@ -1954,8 +1951,8 @@ const riscv = struct {
};
relocs_log.debug(" [{x} => {x}]", .{ P_, disp + P_ });
switch (r_type) {
.PCREL_LO12_I => riscv_util.writeInstI((try bw.writableSlice(4))[0..4], @bitCast(disp)),
.PCREL_LO12_S => riscv_util.writeInstS((try bw.writableSlice(4))[0..4], @bitCast(disp)),
.PCREL_LO12_I => riscv_util.writeInstI(try bw.writableArray(4), @bitCast(disp)),
.PCREL_LO12_S => riscv_util.writeInstS(try bw.writableArray(4), @bitCast(disp)),
else => unreachable,
}
},
@ -1965,8 +1962,8 @@ const riscv = struct {
=> {
const disp: u32 = @bitCast(math.cast(i32, S + A) orelse return error.Overflow);
switch (r_type) {
.LO12_I => riscv_util.writeInstI((try bw.writableSlice(4))[0..4], disp),
.LO12_S => riscv_util.writeInstS((try bw.writableSlice(4))[0..4], disp),
.LO12_I => riscv_util.writeInstI(try bw.writableArray(4), disp),
.LO12_S => riscv_util.writeInstS(try bw.writableArray(4), disp),
else => unreachable,
}
},
@ -1974,7 +1971,7 @@ const riscv = struct {
.TPREL_HI20 => {
const target_addr: u32 = @intCast(target.address(.{}, elf_file));
const val: i32 = @intCast(S + A - target_addr);
riscv_util.writeInstU((try bw.writableSlice(4))[0..4], @bitCast(val));
riscv_util.writeInstU(try bw.writableArray(4), @bitCast(val));
},
.TPREL_LO12_I,
@ -1983,8 +1980,8 @@ const riscv = struct {
const target_addr: u32 = @intCast(target.address(.{}, elf_file));
const val: i32 = @intCast(S + A - target_addr);
switch (r_type) {
.TPREL_LO12_I => riscv_util.writeInstI((try bw.writableSlice(4))[0..4], @bitCast(val)),
.TPREL_LO12_S => riscv_util.writeInstS((try bw.writableSlice(4))[0..4], @bitCast(val)),
.TPREL_LO12_I => riscv_util.writeInstI(try bw.writableArray(4), @bitCast(val)),
.TPREL_LO12_S => riscv_util.writeInstS(try bw.writableArray(4), @bitCast(val)),
else => unreachable,
}
},

View File

@ -185,10 +185,7 @@ const FinalizeNodeResult = struct {
/// Updates offset of this node in the output byte stream.
fn finalizeNode(self: *Trie, node_index: Node.Index, offset_in_trie: u32) !FinalizeNodeResult {
var buf: [1024]u8 = undefined;
var bw: std.io.BufferedWriter = .{
.unbuffered_writer = .null,
.buffer = &buf,
};
var bw = std.io.Writer.null.buffered(&buf);
const slice = self.nodes.slice();
var node_size: u32 = 0;

View File

@ -1,7 +1,7 @@
pub fn writeSetSub6(comptime op: enum { set, sub }, addend: anytype, bw: *std.io.BufferedWriter) std.io.Writer.Error!void {
const mask: u8 = 0b11_000000;
const actual: i8 = @truncate(addend);
const old_value = (try bw.writableSlice(1))[0];
const old_value = (try bw.writableArray(1))[0];
const new_value = (old_value & mask) | (@as(u8, switch (op) {
.set => @bitCast(actual),
.sub => @bitCast(@as(i8, @bitCast(old_value)) -| actual),
@ -14,7 +14,7 @@ pub fn writeSetSubUleb(comptime op: enum { set, sub }, addend: i64, bw: *std.io.
.set => try overwriteUleb(@intCast(addend), bw),
.sub => {
var br: std.io.BufferedReader = undefined;
br.initFixed(try bw.writableSlice(1));
br.initFixed(try bw.writableArray(1));
const old_value = try br.takeLeb128(u64);
try overwriteUleb(old_value -% @as(u64, @intCast(addend)), bw);
},
@ -24,7 +24,7 @@ pub fn writeSetSubUleb(comptime op: enum { set, sub }, addend: i64, bw: *std.io.
fn overwriteUleb(new_value: u64, bw: *std.io.BufferedWriter) std.io.Writer.Error!void {
var value: u64 = new_value;
while (true) {
const byte = (try bw.writableSlice(1))[0];
const byte = (try bw.writableArray(1))[0];
try bw.writeByte((byte & 0x80) | @as(u7, @truncate(value)));
if (byte & 0x80 == 0) break;
value >>= 7;

View File

@ -1808,6 +1808,7 @@ fn buildOutputType(
} else manifest_file = arg;
},
.assembly, .assembly_with_cpp, .c, .cpp, .h, .hpp, .hm, .hmm, .ll, .bc, .m, .mm => {
dev.check(.c_compiler);
try create_module.c_source_files.append(arena, .{
// Populated after module creation.
.owner = undefined,
@ -1818,6 +1819,7 @@ fn buildOutputType(
});
},
.rc => {
dev.check(.win32_resource);
try create_module.rc_source_files.append(arena, .{
// Populated after module creation.
.owner = undefined,
@ -3303,6 +3305,7 @@ fn buildOutputType(
defer thread_pool.deinit();
for (create_module.c_source_files.items) |*src| {
dev.check(.c_compiler);
if (!mem.eql(u8, src.src_path, "-")) continue;
const ext = src.ext orelse
@ -5008,7 +5011,11 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
var http_client: if (dev.env.supports(.fetch_command)) std.http.Client else struct {
allocator: Allocator,
fn deinit(_: @This()) void {}
} = .{ .allocator = gpa };
} = .{
.allocator = gpa,
.read_buffer_size = 0x4000,
.write_buffer_size = 0x4000,
};
defer http_client.deinit();
var unlazy_set: Package.Fetch.JobQueue.UnlazySet = .{};
@ -6815,7 +6822,11 @@ fn cmdFetch(
try thread_pool.init(.{ .allocator = gpa });
defer thread_pool.deinit();
var http_client: std.http.Client = .{ .allocator = gpa };
var http_client: std.http.Client = .{
.allocator = gpa,
.read_buffer_size = 0x4000,
.write_buffer_size = 0x4000,
};
defer http_client.deinit();
try http_client.initDefaultProxies(arena);