std.io: redo Reader and Writer yet again

explicit error sets ahoy matey

delete some sus APIs from File that need to be reworked
This commit is contained in:
Andrew Kelley 2025-04-16 23:01:33 -07:00
parent 20a784f713
commit a4fdda6ae0
53 changed files with 1383 additions and 1975 deletions

View File

@ -2,7 +2,6 @@
const builtin = @import("builtin");
const std = @import("std");
const io = std.io;
const testing = std.testing;
const assert = std.debug.assert;
@ -65,15 +64,21 @@ pub fn main() void {
}
}
var stdin_buffer: [std.heap.page_size_min]u8 align(std.heap.page_size_min) = undefined;
var stdout_buffer: [std.heap.page_size_min]u8 align(std.heap.page_size_min) = undefined;
fn mainServer() !void {
@disableInstrumentation();
var stdin_reader = std.fs.File.stdin().reader();
var stdout_writer = std.fs.File.stdout().writer();
var stdin_buffered_reader: std.io.BufferedReader = undefined;
stdin_buffered_reader.init(stdin_reader.interface(), &stdin_buffer);
var stdout_buffered_writer = stdout_writer.interface().buffered(&stdout_buffer);
var server = try std.zig.Server.init(.{
.gpa = fba.allocator(),
.in = .stdin(),
.out = .stdout(),
.in = &stdin_buffered_reader,
.out = &stdout_buffered_writer,
.zig_version = builtin.zig_version_string,
});
defer server.deinit();
if (builtin.fuzz) {
const coverage_id = fuzzer_coverage_id();

View File

@ -2766,9 +2766,8 @@ fn dumpBadDirnameHelp(
comptime msg: []const u8,
args: anytype,
) anyerror!void {
var buffered_writer = debug.lockStdErr2(&.{});
defer debug.unlockStdErr();
const w = &buffered_writer;
const w = debug.lockStderrWriter();
defer debug.unlockStderrWriter();
const stderr: fs.File = .stderr();
try w.print(msg, args);

View File

@ -333,7 +333,7 @@ pub const Manifest = struct {
pub const Diagnostic = union(enum) {
none,
manifest_create: fs.File.OpenError,
manifest_read: anyerror,
manifest_read: fs.File.ReadError,
manifest_lock: fs.File.LockError,
manifest_seek: fs.File.SeekError,
file_open: FileOp,

View File

@ -124,9 +124,9 @@ fn rebuildTestsWorkerRunFallible(run: *Step.Run, ttyconf: std.io.tty.Config, par
const show_stderr = compile.step.result_stderr.len > 0;
if (show_error_msgs or show_compile_errors or show_stderr) {
var bw = std.debug.lockStdErr2(&.{});
defer std.debug.unlockStdErr();
build_runner.printErrorMessages(gpa, &compile.step, .{ .ttyconf = ttyconf }, &bw, false) catch {};
const bw = std.debug.lockStderrWriter();
defer std.debug.unlockStderrWriter();
build_runner.printErrorMessages(gpa, &compile.step, .{ .ttyconf = ttyconf }, bw, false) catch {};
}
const rebuilt_bin_path = result catch |err| switch (err) {
@ -151,9 +151,9 @@ fn fuzzWorkerRun(
run.rerunInFuzzMode(web_server, unit_test_index, prog_node) catch |err| switch (err) {
error.MakeFailed => {
var bw = std.debug.lockStdErr2(&.{});
defer std.debug.unlockStdErr();
build_runner.printErrorMessages(gpa, &run.step, .{ .ttyconf = ttyconf }, &bw, false) catch {};
const bw = std.debug.lockStderrWriter();
defer std.debug.unlockStderrWriter();
build_runner.printErrorMessages(gpa, &run.step, .{ .ttyconf = ttyconf }, bw, false) catch {};
return;
},
else => {

View File

@ -233,7 +233,7 @@ const ComputeCompareExpected = struct {
value: ComputeCompareExpected,
bw: *std.io.BufferedWriter,
comptime fmt: []const u8,
) anyerror!void {
) !void {
if (fmt.len != 0) std.fmt.invalidFmtError(fmt, value);
try bw.print("{s} ", .{@tagName(value.op)});
switch (value.value) {

View File

@ -606,6 +606,38 @@ pub fn unlockStdErr() void {
stderr_mutex.unlock();
}
/// Protected by `stderr_mutex`.
var stderr_buffered_writer: std.io.BufferedWriter = .{
.unbuffered_writer = stderr_file_writer.interface(),
.buffer = &.{},
};
/// Protected by `stderr_mutex`.
var stderr_file_writer: std.fs.File.Writer = .{
.file = if (is_windows) undefined else .stderr(),
.mode = .streaming,
};
/// Allows the caller to freely write to the returned `std.io.BufferedWriter`,
/// initialized with `buffer`, until `unlockStderrWriter` is called.
///
/// During the lock, any `std.Progress` information is cleared from the terminal.
///
/// The lock is recursive; the same thread may hold the lock multiple times.
pub fn lockStderrWriter(buffer: []u8) *std.io.BufferedWriter {
stderr_mutex.lock();
// Best effort: failure to clear progress output must not prevent taking the lock.
clearWrittenWithEscapeCodes() catch {};
// On Windows the stderr handle is re-fetched under the lock; on other
// targets `stderr_file_writer.file` was initialized statically.
if (is_windows) stderr_file_writer.file = .stderr();
// Flush anything written under the previous buffer before swapping in the
// caller-provided one; flush errors cannot propagate from here.
stderr_buffered_writer.flush() catch {};
stderr_buffered_writer.buffer = buffer;
return &stderr_buffered_writer;
}
/// Releases the lock taken by `lockStderrWriter`, flushing any bytes still
/// in the caller-provided buffer and detaching that buffer.
pub fn unlockStderrWriter() void {
// Flush errors are swallowed; deallocation/unlock paths must not fail.
stderr_buffered_writer.flush() catch {};
// Detach the caller's buffer, leaving the writer unbuffered again.
stderr_buffered_writer.buffer = &.{};
stderr_mutex.unlock();
}
fn ipcThreadRun(fd: posix.fd_t) anyerror!void {
// Store this data in the thread so that it does not need to be part of the
// linker data of the main executable.

View File

@ -301,7 +301,7 @@ pub const Os = struct {
/// This function is defined to serialize a Zig source code representation of this
/// type, that, when parsed, will deserialize into the same data.
pub fn format(ver: WindowsVersion, bw: *std.io.BufferedWriter, comptime fmt_str: []const u8) anyerror!void {
pub fn format(ver: WindowsVersion, bw: *std.io.BufferedWriter, comptime fmt_str: []const u8) std.io.Writer.Error!void {
const maybe_name = std.enums.tagName(WindowsVersion, ver);
if (comptime std.mem.eql(u8, fmt_str, "s")) {
if (maybe_name) |name|

View File

@ -40,7 +40,7 @@ pub const Component = union(enum) {
};
}
pub fn format(component: Component, bw: *std.io.BufferedWriter, comptime fmt: []const u8) anyerror!void {
pub fn format(component: Component, bw: *std.io.BufferedWriter, comptime fmt: []const u8) std.io.Writer.Error!void {
if (fmt.len == 0) {
try bw.print("std.Uri.Component{{ .{s} = \"{}\" }}", .{
@tagName(component),
@ -95,7 +95,7 @@ pub const Component = union(enum) {
bw: *std.io.BufferedWriter,
raw: []const u8,
comptime isValidChar: fn (u8) bool,
) anyerror!void {
) std.io.Writer.Error!void {
var start: usize = 0;
for (raw, 0..) |char, index| {
if (isValidChar(char)) continue;
@ -236,7 +236,7 @@ pub const WriteToStreamOptions = struct {
port: bool = true,
};
pub fn writeToStream(uri: Uri, options: WriteToStreamOptions, bw: *std.io.BufferedWriter) anyerror!void {
pub fn writeToStream(uri: Uri, options: WriteToStreamOptions, bw: *std.io.BufferedWriter) std.io.Writer.Error!void {
if (options.scheme) {
try bw.print("{s}:", .{uri.scheme});
if (options.authority and uri.host != null) {
@ -273,7 +273,7 @@ pub fn writeToStream(uri: Uri, options: WriteToStreamOptions, bw: *std.io.Buffer
}
}
pub fn format(uri: Uri, bw: *std.io.BufferedWriter, comptime fmt: []const u8) anyerror!void {
pub fn format(uri: Uri, bw: *std.io.BufferedWriter, comptime fmt: []const u8) std.io.Writer.Error!void {
const scheme = comptime std.mem.indexOfScalar(u8, fmt, ';') != null or fmt.len == 0;
const authentication = comptime std.mem.indexOfScalar(u8, fmt, '@') != null or fmt.len == 0;
const authority = comptime std.mem.indexOfScalar(u8, fmt, '+') != null or fmt.len == 0;

View File

@ -908,7 +908,9 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?mem.Alig
var aw: std.io.AllocatingWriter = undefined;
const bw = aw.fromArrayList(gpa, self);
defer self.* = aw.toArrayList();
return @errorCast(bw.print(fmt, args));
return bw.print(fmt, args) catch |err| switch (err) {
error.WriteFailed => return error.OutOfMemory,
};
}
pub fn printAssumeCapacity(self: *Self, comptime fmt: []const u8, args: anytype) void {

View File

@ -34,7 +34,7 @@ pub const StackTrace = struct {
index: usize,
instruction_addresses: []usize,
pub fn format(st: StackTrace, bw: *std.io.BufferedWriter, comptime fmt: []const u8) anyerror!void {
pub fn format(st: StackTrace, bw: *std.io.BufferedWriter, comptime fmt: []const u8) !void {
comptime if (fmt.len != 0) unreachable;
// TODO: re-evaluate whether to use format() methods at all.

View File

@ -9,7 +9,7 @@ pub const deflate = @import("flate/deflate.zig");
pub const inflate = @import("flate/inflate.zig");
/// Decompress compressed data from reader and write plain data to the writer.
pub fn decompress(reader: *std.io.BufferedReader, writer: *std.io.BufferedWriter) anyerror!void {
pub fn decompress(reader: *std.io.BufferedReader, writer: *std.io.BufferedWriter) std.io.Writer.Error!void {
try inflate.decompress(.raw, reader, writer);
}
@ -19,7 +19,7 @@ pub const Decompressor = inflate.Decompressor(.raw);
pub const Options = deflate.Options;
/// Compress plain data from reader and write compressed data to the writer.
pub fn compress(reader: *std.io.BufferedReader, writer: *std.io.BufferedWriter, options: Options) anyerror!void {
pub fn compress(reader: *std.io.BufferedReader, writer: *std.io.BufferedWriter, options: Options) std.io.Writer.Error!void {
try deflate.compress(.raw, reader, writer, options);
}
@ -28,7 +28,7 @@ pub const Compressor = deflate.Compressor(.raw);
/// Huffman-only compression, without Lempel-Ziv match searching. Faster
/// compression with lower memory requirements, but bigger compressed sizes.
pub const huffman = struct {
pub fn compress(reader: *std.io.BufferedReader, writer: *std.io.BufferedWriter) anyerror!void {
pub fn compress(reader: *std.io.BufferedReader, writer: *std.io.BufferedWriter) std.io.Writer.Error!void {
try deflate.huffman.compress(.raw, reader, writer);
}
@ -37,7 +37,7 @@ pub const huffman = struct {
// No compression, store only. Compressed size is slightly bigger than plain.
pub const store = struct {
pub fn compress(reader: *std.io.BufferedReader, writer: *std.io.BufferedWriter) anyerror!void {
pub fn compress(reader: *std.io.BufferedReader, writer: *std.io.BufferedWriter) std.io.Writer.Error!void {
try deflate.store.compress(.raw, reader, writer);
}

View File

@ -39,7 +39,7 @@ pub fn setWriter(self: *Self, new_writer: *std.io.BufferedWriter) void {
self.inner_writer = new_writer;
}
pub fn flush(self: *Self) anyerror!void {
pub fn flush(self: *Self) std.io.Writer.Error!void {
var n = self.nbytes;
while (self.nbits != 0) {
self.bytes[n] = @as(u8, @truncate(self.bits));
@ -56,7 +56,7 @@ pub fn flush(self: *Self) anyerror!void {
self.nbytes = 0;
}
pub fn writeBits(self: *Self, b: u32, nb: u32) anyerror!void {
pub fn writeBits(self: *Self, b: u32, nb: u32) std.io.Writer.Error!void {
self.bits |= @as(u64, @intCast(b)) << @as(u6, @intCast(self.nbits));
self.nbits += nb;
if (self.nbits < 48)
@ -74,7 +74,7 @@ pub fn writeBits(self: *Self, b: u32, nb: u32) anyerror!void {
self.nbits -= 48;
}
pub fn writeBytes(self: *Self, bytes: []const u8) anyerror!void {
pub fn writeBytes(self: *Self, bytes: []const u8) std.io.Writer.Error!void {
var n = self.nbytes;
if (self.nbits & 7 != 0) {
return error.UnfinishedBits;

View File

@ -42,7 +42,7 @@ pub fn init(writer: *std.io.BufferedWriter) Self {
/// That is: after the final block, when the last byte could be incomplete, or
/// after a stored block, which is aligned to the byte boundary (it has some
/// padding bits after the first 3 bits).
pub fn flush(self: *Self) anyerror!void {
pub fn flush(self: *Self) std.io.Writer.Error!void {
try self.bit_writer.flush();
}
@ -50,7 +50,7 @@ pub fn setWriter(self: *Self, new_writer: *std.io.BufferedWriter) void {
self.bit_writer.setWriter(new_writer);
}
fn writeCode(self: *Self, c: hc.HuffCode) anyerror!void {
fn writeCode(self: *Self, c: hc.HuffCode) std.io.Writer.Error!void {
try self.bit_writer.writeBits(c.code, c.len);
}
@ -232,7 +232,7 @@ fn dynamicHeader(
num_distances: u32,
num_codegens: u32,
eof: bool,
) anyerror!void {
) std.io.Writer.Error!void {
const first_bits: u32 = if (eof) 5 else 4;
try self.bit_writer.writeBits(first_bits, 3);
try self.bit_writer.writeBits(num_literals - 257, 5);
@ -272,7 +272,7 @@ fn dynamicHeader(
}
}
fn storedHeader(self: *Self, length: usize, eof: bool) anyerror!void {
fn storedHeader(self: *Self, length: usize, eof: bool) std.io.Writer.Error!void {
assert(length <= 65535);
const flag: u32 = if (eof) 1 else 0;
try self.bit_writer.writeBits(flag, 3);
@ -282,7 +282,7 @@ fn storedHeader(self: *Self, length: usize, eof: bool) anyerror!void {
try self.bit_writer.writeBits(~l, 16);
}
fn fixedHeader(self: *Self, eof: bool) anyerror!void {
fn fixedHeader(self: *Self, eof: bool) std.io.Writer.Error!void {
// Indicate that we are a fixed Huffman block
var value: u32 = 2;
if (eof) {
@ -296,7 +296,7 @@ fn fixedHeader(self: *Self, eof: bool) anyerror!void {
// is larger than the original bytes, the data will be written as a
// stored block.
// If the input is null, the tokens will always be Huffman encoded.
pub fn write(self: *Self, tokens: []const Token, eof: bool, input: ?[]const u8) anyerror!void {
pub fn write(self: *Self, tokens: []const Token, eof: bool, input: ?[]const u8) std.io.Writer.Error!void {
const lit_and_dist = self.indexTokens(tokens);
const num_literals = lit_and_dist.num_literals;
const num_distances = lit_and_dist.num_distances;
@ -374,7 +374,7 @@ pub fn write(self: *Self, tokens: []const Token, eof: bool, input: ?[]const u8)
try self.writeTokens(tokens, &literal_encoding.codes, &distance_encoding.codes);
}
pub fn storedBlock(self: *Self, input: []const u8, eof: bool) anyerror!void {
pub fn storedBlock(self: *Self, input: []const u8, eof: bool) std.io.Writer.Error!void {
try self.storedHeader(input.len, eof);
try self.bit_writer.writeBytes(input);
}
@ -389,7 +389,7 @@ fn dynamicBlock(
tokens: []const Token,
eof: bool,
input: ?[]const u8,
) anyerror!void {
) std.io.Writer.Error!void {
const total_tokens = self.indexTokens(tokens);
const num_literals = total_tokens.num_literals;
const num_distances = total_tokens.num_distances;
@ -486,7 +486,7 @@ fn writeTokens(
tokens: []const Token,
le_codes: []hc.HuffCode,
oe_codes: []hc.HuffCode,
) anyerror!void {
) std.io.Writer.Error!void {
for (tokens) |t| {
if (t.kind == Token.Kind.literal) {
try self.writeCode(le_codes[t.literal()]);
@ -513,7 +513,7 @@ fn writeTokens(
// Encodes a block of bytes as either Huffman encoded literals or uncompressed bytes
// if the result gains only very little from compression.
pub fn huffmanBlock(self: *Self, input: []const u8, eof: bool) anyerror!void {
pub fn huffmanBlock(self: *Self, input: []const u8, eof: bool) std.io.Writer.Error!void {
// Add everything as literals
histogram(input, &self.literal_freq);

View File

@ -66,6 +66,8 @@ pub fn Inflate(comptime container: Container, comptime Lookahead: type) type {
block_type: u2 = 0b11,
state: ReadState = .protocol_header,
read_err: Error!void = {},
const ReadState = enum {
protocol_header,
block_header,
@ -76,19 +78,21 @@ pub fn Inflate(comptime container: Container, comptime Lookahead: type) type {
const Self = @This();
pub const Error = anyerror || Container.Error || hfd.Error || error{
pub const Error = Container.Error || hfd.Error || error{
InvalidCode,
InvalidMatch,
InvalidBlockType,
WrongStoredBlockNlen,
InvalidDynamicBlockHeader,
EndOfStream,
ReadFailed,
};
pub fn init(bw: *std.io.BufferedReader) Self {
return .{ .bits = LookaheadBitReader.init(bw) };
}
fn blockHeader(self: *Self) anyerror!void {
/// Reads the 3-bit deflate block header: the 1-bit "final block" flag
/// followed by the 2-bit block type.
fn blockHeader(self: *Self) Error!void {
self.bfinal = try self.bits.read(u1);
self.block_type = try self.bits.read(u2);
}
@ -326,7 +330,7 @@ pub fn Inflate(comptime container: Container, comptime Lookahead: type) type {
/// returned bytes means end of stream reached. With limit=0 returns as
/// much data as it can. It never will be more than 65536 bytes, which is
/// the size of the internal buffer.
/// TODO merge this logic into reader_streamRead and reader_streamReadVec
/// TODO merge this logic into readerRead and readerReadVec
pub fn get(self: *Self, limit: usize) Error![]const u8 {
while (true) {
const out = self.hist.readAtMost(limit);
@ -339,42 +343,63 @@ pub fn Inflate(comptime container: Container, comptime Lookahead: type) type {
}
}
fn reader_streamRead(
ctx: ?*anyopaque,
fn readerRead(
context: ?*anyopaque,
bw: *std.io.BufferedWriter,
limit: std.io.Reader.Limit,
) anyerror!std.io.Reader.Status {
const self: *Self = @alignCast(@ptrCast(ctx));
) std.io.Reader.RwError!usize {
const self: *Self = @alignCast(@ptrCast(context));
const out = try bw.writableSlice(1);
const in = try self.get(limit.min(out.len));
const in = self.get(limit.min(out.len)) catch |err| switch (err) {
error.EndOfStream => return error.EndOfStream,
error.ReadFailed => return error.ReadFailed,
else => |e| {
self.read_err = e;
return error.ReadFailed;
},
};
if (in.len == 0) return error.EndOfStream;
@memcpy(out[0..in.len], in);
bw.advance(in.len);
return .{ .len = in.len, .end = in.len == 0 };
return in.len;
}
fn reader_streamReadVec(ctx: ?*anyopaque, data: []const []u8) anyerror!std.io.Reader.Status {
const self: *Self = @alignCast(@ptrCast(ctx));
fn readerReadVec(context: ?*anyopaque, data: []const []u8) std.io.Reader.Error!usize {
const self: *Self = @alignCast(@ptrCast(context));
return readVec(self, data) catch |err| switch (err) {
error.EndOfStream => return error.EndOfStream,
error.ReadFailed => return error.ReadFailed,
else => |e| {
self.read_err = e;
return error.ReadFailed;
},
};
}
/// `std.io.Reader` vtable implementation for `discard`.
/// Not yet implemented; panics if reached.
fn readerDiscard(context: ?*anyopaque, limit: std.io.Reader.Limit) std.io.Reader.Error!usize {
_ = context;
_ = limit;
@panic("TODO");
}
pub fn readVec(self: *Self, data: []const []u8) Error!usize {
for (data) |out| {
if (out.len == 0) continue;
const in = try self.get(out.len);
@memcpy(out[0..in.len], in);
return .{ .len = @intCast(in.len), .end = in.len == 0 };
if (in.len == 0) return error.EndOfStream;
return in.len;
}
return .{};
}
pub fn streamReadVec(self: *Self, data: []const []u8) anyerror!std.io.Reader.Status {
return reader_streamReadVec(self, data);
return 0;
}
pub fn reader(self: *Self) std.io.Reader {
return .{
.context = self,
.vtable = &.{
.posRead = null,
.posReadVec = null,
.streamRead = reader_streamRead,
.streamReadVec = reader_streamReadVec,
.read = readerRead,
.readVec = readerReadVec,
.discard = readerDiscard,
},
};
}
@ -656,7 +681,7 @@ pub fn BitReader(comptime T: type) type {
(self.nbits >> 3); // 0 for 0-7, 1 for 8-16, ... same as / 8
var buf: [t_bytes]u8 = [_]u8{0} ** t_bytes;
const bytes_read = self.forward_reader.partialRead(buf[0..empty_bytes]) catch 0;
const bytes_read = self.forward_reader.readShort(buf[0..empty_bytes]) catch 0;
if (bytes_read > 0) {
const u: T = std.mem.readInt(T, buf[0..t_bytes], .little);
self.bits |= u << @as(Tshift, @intCast(self.nbits));
@ -669,7 +694,7 @@ pub fn BitReader(comptime T: type) type {
}
/// Read exactly buf.len bytes into buf.
pub fn readAll(self: *Self, buf: []u8) anyerror!void {
pub fn readAll(self: *Self, buf: []u8) std.io.Reader.Error!void {
assert(self.alignBits() == 0); // internal bits must be at byte boundary
// First read from internal bits buffer.

View File

@ -11,7 +11,7 @@ pub const RangeDecoder = struct {
range: u32,
code: u32,
pub fn init(rd: *RangeDecoder, br: *std.io.BufferedReader) anyerror!usize {
pub fn init(rd: *RangeDecoder, br: *std.io.BufferedReader) std.io.Reader.Error!usize {
const reserved = try br.takeByte();
if (reserved != 0) return error.CorruptInput;
rd.* = .{
@ -222,7 +222,7 @@ pub const Decode = struct {
dict_size: u32,
unpacked_size: ?u64,
pub fn readHeader(br: *std.io.BufferedReader, options: Options) anyerror!Params {
pub fn readHeader(br: *std.io.BufferedReader, options: Options) std.io.Reader.Error!Params {
var props = try br.readByte();
if (props >= 225) {
return error.CorruptInput;
@ -537,7 +537,7 @@ pub const Decode = struct {
pub const Decompress = struct {
pub const Error =
anyerror ||
std.io.Reader.Error ||
Allocator.Error ||
error{ CorruptInput, EndOfStream, Overflow };
@ -668,7 +668,7 @@ const LzCircularBuffer = struct {
allocator: Allocator,
lit: u8,
bw: *std.io.BufferedWriter,
) anyerror!void {
) std.io.Writer.Error!void {
try self.set(allocator, self.cursor, lit);
self.cursor += 1;
self.len += 1;
@ -687,7 +687,7 @@ const LzCircularBuffer = struct {
len: usize,
dist: usize,
bw: *std.io.BufferedWriter,
) anyerror!void {
) std.io.Writer.Error!void {
if (dist > self.dict_size or dist > self.len) {
return error.CorruptInput;
}
@ -704,7 +704,7 @@ const LzCircularBuffer = struct {
}
}
pub fn finish(self: *Self, bw: *std.io.BufferedWriter) anyerror!void {
pub fn finish(self: *Self, bw: *std.io.BufferedWriter) std.io.Writer.Error!void {
if (self.cursor > 0) {
try bw.writeAll(self.buf.items[0..self.cursor]);
self.cursor = 0;

View File

@ -2,7 +2,7 @@ const std = @import("../std.zig");
const Allocator = std.mem.Allocator;
const lzma = std.compress.lzma;
pub fn decompress(gpa: Allocator, reader: *std.io.BufferedReader, writer: *std.io.BufferedWriter) anyerror!void {
pub fn decompress(gpa: Allocator, reader: *std.io.BufferedReader, writer: *std.io.BufferedWriter) std.io.Reader.RwError!void {
var decoder = try Decode.init(gpa);
defer decoder.deinit(gpa);
return decoder.decompress(gpa, reader, writer);

View File

@ -39,7 +39,7 @@ pub const Decompressor = struct {
write_index: usize = 0,
};
pub const Error = anyerror || error{
pub const Error = std.io.Reader.Error || error{
ChecksumFailure,
DictionaryIdFlagUnsupported,
MalformedBlock,

View File

@ -69,28 +69,15 @@ application_cipher: tls.ApplicationCipher,
/// this connection.
ssl_key_log: ?*SslKeyLog,
pub const Diagnostics = union {
/// Populated on `error.WriteFailure` and `error.ReadFailure`.
err: anyerror,
pub const Diagnostics = union(enum) {
/// Any `ReadFailure` and `WriteFailure` was due to `input` or `output`
/// returning the error, respectively.
transitive,
/// Populated on `error.TlsAlert`.
///
/// If this isn't an error alert, then it's a closure alert, which makes
/// no sense in a handshake.
alert: tls.AlertDescription,
fn wrapWrite(d: *Diagnostics, returned: anyerror!void) error{WriteFailure}!void {
returned catch |err| {
d.* = .{ .err = err };
return error.WriteFailure;
};
}
fn wrapRead(d: *Diagnostics, returned: anyerror!void) error{ReadFailure}!void {
returned catch |err| {
d.* = .{ .err = err };
return error.ReadFailure;
};
}
};
pub const SslKeyLog = struct {
@ -205,7 +192,7 @@ pub fn init(
) InitError!void {
assert(input.storage.buffer.len >= min_buffer_len);
assert(output.buffer.len >= min_buffer_len);
const diags = &client.diagnostics;
client.diagnostics = .transient;
const host = switch (options.host) {
.no_verification => "",
.explicit => |host| host,
@ -298,7 +285,7 @@ pub fn init(
{
var iovecs: [2][]const u8 = .{ cleartext_header, host };
try diags.wrapWrite(output.writevAll(iovecs[0..if (host.len == 0) 1 else 2]));
try output.writevAll(iovecs[0..if (host.len == 0) 1 else 2]);
}
var tls_version: tls.ProtocolVersion = undefined;
@ -350,12 +337,12 @@ pub fn init(
var handshake_buffer: [tls.max_ciphertext_record_len]u8 = undefined;
var d: tls.Decoder = .{ .buf = &handshake_buffer };
fragment: while (true) {
try diags.wrapRead(d.readAtLeastOurAmt(input, tls.record_header_len));
try d.readAtLeastOurAmt(input, tls.record_header_len);
const record_header = d.buf[d.idx..][0..tls.record_header_len];
const record_ct = d.decode(tls.ContentType);
d.skip(2); // legacy_version
const record_len = d.decode(u16);
try diags.wrapRead(d.readAtLeast(input, record_len));
try d.readAtLeast(input, record_len);
var record_decoder = try d.sub(record_len);
var ctd, const ct = content: switch (cipher_state) {
.cleartext => .{ record_decoder, record_ct },
@ -433,7 +420,7 @@ pub fn init(
const level = ctd.decode(tls.AlertLevel);
const desc = ctd.decode(tls.AlertDescription);
_ = level;
diags.* = .{ .alert = desc };
client.diagnostics = .{ .alert = desc };
return error.TlsAlert;
},
.change_cipher_spec => {
@ -775,7 +762,7 @@ pub fn init(
&client_change_cipher_spec_msg,
&client_verify_msg,
};
try diags.wrapWrite(output.writevAll(&all_msgs_vec));
try output.writevAll(&all_msgs_vec);
},
}
write_seq += 1;
@ -840,7 +827,7 @@ pub fn init(
&client_change_cipher_spec_msg,
&finished_msg,
};
try diags.wrapWrite(output.writevAll(&all_msgs_vec));
try output.writevAll(&all_msgs_vec);
const client_secret = hkdfExpandLabel(P.Hkdf, pv.master_secret, "c ap traffic", &handshake_hash, P.Hash.digest_length);
const server_secret = hkdfExpandLabel(P.Hkdf, pv.master_secret, "s ap traffic", &handshake_hash, P.Hash.digest_length);
@ -905,8 +892,9 @@ pub fn init(
client.reader.init(.{
.context = client,
.vtable = &.{
.read = reader_read,
.readv = reader_readv,
.read = read,
.readVec = readVec,
.discard = discard,
},
}, input.storage.buffer[0..0]);
return;
@ -933,7 +921,7 @@ pub fn writer(c: *Client) std.io.Writer {
};
}
fn writeSplat(context: *anyopaque, data: []const []const u8, splat: usize) anyerror!usize {
fn writeSplat(context: *anyopaque, data: []const []const u8, splat: usize) std.io.Writer.Error!usize {
const c: *Client = @alignCast(@ptrCast(context));
const sliced_data = if (splat == 0) data[0..data.len -| 1] else data;
const output = &c.output;
@ -953,7 +941,7 @@ fn writeSplat(context: *anyopaque, data: []const []const u8, splat: usize) anyer
/// Sends a `close_notify` alert, which is necessary for the server to
/// distinguish between a properly finished TLS session, or a truncation
/// attack.
pub fn end(c: *Client) anyerror!void {
pub fn end(c: *Client) std.io.Writer.Error!void {
const output = &c.output;
const ciphertext_buf = try output.writableSlice(min_buffer_len);
const prepared = prepareCiphertextRecord(c, ciphertext_buf, &tls.close_notify_alert, .alert);
@ -1070,18 +1058,18 @@ pub fn eof(c: Client) bool {
c.partial_ciphertext_idx >= c.partial_ciphertext_end;
}
fn reader_read(
fn read(
context: ?*anyopaque,
bw: *std.io.BufferedWriter,
limit: std.io.Reader.Limit,
) anyerror!std.io.Reader.Status {
) std.io.Reader.RwError!std.io.Reader.Status {
const buf = limit.slice(try bw.writableSlice(1));
const status = try reader_readv(context, &.{buf});
const status = try readVec(context, &.{buf});
bw.advance(status.len);
return status;
}
fn reader_readv(context: ?*anyopaque, data: []const []u8) anyerror!std.io.Reader.Status {
fn readVec(context: ?*anyopaque, data: []const []u8) std.io.Reader.Error!usize {
const c: *Client = @ptrCast(@alignCast(context));
if (c.eof()) return .{ .end = true };
@ -1429,6 +1417,12 @@ fn reader_readv(context: ?*anyopaque, data: []const []u8) anyerror!std.io.Reader
}
}
/// `std.io.Reader` vtable implementation for `discard`.
/// Not yet implemented; panics if reached.
fn discard(context: ?*anyopaque, limit: std.io.Reader.Limit) std.io.Reader.Error!usize {
_ = context;
_ = limit;
@panic("TODO");
}
fn logSecrets(key_log_file: std.fs.File, context: anytype, secrets: anytype) void {
const locked = if (key_log_file.lock(.exclusive)) |_| true else |_| false;
defer if (locked) key_log_file.unlock();

View File

@ -210,16 +210,19 @@ pub fn unlockStdErr() void {
///
/// Returns a `std.io.BufferedWriter` with empty buffer, meaning that it is
/// in fact unbuffered and does not need to be flushed.
pub fn lockStdErr2(buffer: []u8) std.io.BufferedWriter {
std.Progress.lockStdErr();
return std.fs.File.stderr().writer().buffered(buffer);
/// Thin forwarder to `std.Progress.lockStderrWriter`; see that function for
/// the locking semantics. Must be paired with `unlockStderrWriter`.
pub fn lockStderrWriter(buffer: []u8) *std.io.BufferedWriter {
return std.Progress.lockStderrWriter(buffer);
}
/// Releases the stderr lock taken by `lockStderrWriter`.
pub fn unlockStderrWriter() void {
std.Progress.unlockStderrWriter();
}
/// Print to stderr, unbuffered, and silently returning on failure. Intended
/// for use in "printf debugging." Use `std.log` functions for proper logging.
/// for use in "printf debugging". Use `std.log` functions for proper logging.
pub fn print(comptime fmt: []const u8, args: anytype) void {
var bw = lockStdErr2(&.{});
defer unlockStdErr();
const bw = lockStderrWriter(&.{});
defer unlockStderrWriter();
nosuspend bw.print(fmt, args) catch return;
}
@ -242,10 +245,10 @@ pub fn getSelfDebugInfo() !*SelfInfo {
/// Tries to print a hexadecimal view of the bytes, unbuffered, and ignores any error returned.
/// Obtains the stderr mutex while dumping.
pub fn dumpHex(bytes: []const u8) void {
var bw = lockStdErr2(&.{});
defer unlockStdErr();
const bw = lockStderrWriter(&.{});
defer unlockStderrWriter();
const ttyconf = std.io.tty.detectConfig(.stderr());
dumpHexFallible(&bw, ttyconf, bytes) catch {};
dumpHexFallible(bw, ttyconf, bytes) catch {};
}
/// Prints a hexadecimal view of the bytes, returning any error that occurs.
@ -320,9 +323,9 @@ test dumpHexFallible {
/// Tries to print the current stack trace to stderr, unbuffered, and ignores any error returned.
pub fn dumpCurrentStackTrace(start_addr: ?usize) void {
var stderr = lockStdErr2(&.{});
defer unlockStdErr();
nosuspend dumpCurrentStackTraceToWriter(start_addr, &stderr) catch return;
const stderr = lockStderrWriter(&.{});
defer unlockStderrWriter();
nosuspend dumpCurrentStackTraceToWriter(start_addr, stderr) catch return;
}
/// Prints the current stack trace to the provided writer.
@ -516,14 +519,14 @@ pub fn dumpStackTrace(stack_trace: std.builtin.StackTrace) void {
nosuspend {
if (builtin.target.cpu.arch.isWasm()) {
if (native_os == .wasi) {
var stderr = lockStdErr2(&.{});
defer unlockStdErr();
const stderr = lockStderrWriter(&.{});
defer unlockStderrWriter();
stderr.writeAll("Unable to dump stack trace: not implemented for Wasm\n") catch return;
}
return;
}
var stderr = lockStdErr2(&.{});
defer unlockStdErr();
const stderr = lockStderrWriter(&.{});
defer unlockStderrWriter();
if (builtin.strip_debug_info) {
stderr.writeAll("Unable to dump stack trace: debug info stripped\n") catch return;
return;
@ -532,7 +535,7 @@ pub fn dumpStackTrace(stack_trace: std.builtin.StackTrace) void {
stderr.print("Unable to dump stack trace: Unable to open debug info: {s}\n", .{@errorName(err)}) catch return;
return;
};
writeStackTrace(stack_trace, &stderr, debug_info, io.tty.detectConfig(.stderr())) catch |err| {
writeStackTrace(stack_trace, stderr, debug_info, io.tty.detectConfig(.stderr())) catch |err| {
stderr.print("Unable to dump stack trace: {s}\n", .{@errorName(err)}) catch return;
return;
};
@ -683,8 +686,8 @@ pub fn defaultPanic(
_ = panicking.fetchAdd(1, .seq_cst);
{
var stderr = lockStdErr2(&.{});
defer unlockStdErr();
const stderr = lockStderrWriter(&.{});
defer unlockStderrWriter();
if (builtin.single_threaded) {
stderr.print("panic: ", .{}) catch posix.abort();
@ -695,7 +698,7 @@ pub fn defaultPanic(
stderr.print("{s}\n", .{msg}) catch posix.abort();
if (@errorReturnTrace()) |t| dumpStackTrace(t.*);
dumpCurrentStackTraceToWriter(first_trace_addr orelse @returnAddress(), &stderr) catch {};
dumpCurrentStackTraceToWriter(first_trace_addr orelse @returnAddress(), stderr) catch {};
}
waitForOtherThreadToFinishPanicking();
@ -1468,8 +1471,8 @@ fn handleSegfaultPosix(sig: i32, info: *const posix.siginfo_t, ctx_ptr: ?*anyopa
}
fn dumpSegfaultInfoPosix(sig: i32, code: i32, addr: usize, ctx_ptr: ?*anyopaque) void {
var stderr = lockStdErr2(&.{});
defer unlockStdErr();
const stderr = lockStderrWriter(&.{});
defer unlockStderrWriter();
_ = switch (sig) {
posix.SIG.SEGV => if (native_arch == .x86_64 and native_os == .linux and code == 128) // SI_KERNEL
// x86_64 doesn't have a full 64-bit virtual address space.
@ -1517,7 +1520,7 @@ fn dumpSegfaultInfoPosix(sig: i32, code: i32, addr: usize, ctx_ptr: ?*anyopaque)
}, @ptrCast(ctx)).__mcontext_data;
}
relocateContext(&new_ctx);
dumpStackTraceFromBase(&new_ctx, &stderr);
dumpStackTraceFromBase(&new_ctx, stderr);
},
else => {},
}
@ -1547,10 +1550,10 @@ fn handleSegfaultWindowsExtra(info: *windows.EXCEPTION_POINTERS, msg: u8, label:
_ = panicking.fetchAdd(1, .seq_cst);
{
var stderr = lockStdErr2(&.{});
defer unlockStdErr();
const stderr = lockStderrWriter(&.{});
defer unlockStderrWriter();
dumpSegfaultInfoWindows(info, msg, label, &stderr);
dumpSegfaultInfoWindows(info, msg, label, stderr);
}
waitForOtherThreadToFinishPanicking();
@ -1665,8 +1668,8 @@ pub fn ConfigurableTrace(comptime size: usize, comptime stack_frame_count: usize
if (!enabled) return;
const tty_config = io.tty.detectConfig(.stderr());
var stderr = lockStdErr2(&.{});
defer unlockStdErr();
const stderr = lockStderrWriter(&.{});
defer unlockStderrWriter();
const end = @min(t.index, size);
const debug_info = getSelfDebugInfo() catch |err| {
stderr.print(
@ -1683,7 +1686,7 @@ pub fn ConfigurableTrace(comptime size: usize, comptime stack_frame_count: usize
.index = frames.len,
.instruction_addresses = frames,
};
writeStackTrace(stack_trace, &stderr, debug_info, tty_config) catch continue;
writeStackTrace(stack_trace, stderr, debug_info, tty_config) catch continue;
}
if (t.index > end) {
stderr.print("{d} more traces not shown; consider increasing trace size\n", .{

View File

@ -2212,7 +2212,7 @@ pub const ElfModule = struct {
var separate_debug_filename: ?[]const u8 = null;
var separate_debug_crc: ?u32 = null;
shdrs: for (shdrs) |*shdr| {
for (shdrs) |*shdr| {
if (shdr.sh_type == elf.SHT_NULL or shdr.sh_type == elf.SHT_NOBITS) continue;
const name = mem.sliceTo(header_strings[shdr.sh_name..], 0);
@ -2243,24 +2243,12 @@ pub const ElfModule = struct {
var zlib_stream: std.compress.zlib.Decompressor = .init(&section_reader);
const decompressed_section = try gpa.alloc(u8, ch_size);
errdefer gpa.free(decompressed_section);
{
var i: usize = 0;
while (true) {
const status = zlib_stream.streamReadVec(&.{decompressed_section[i..]}) catch {
gpa.free(decompressed_section);
continue :shdrs;
};
i += status.len;
if (i == decompressed_section.len) break;
if (status.end) {
gpa.free(decompressed_section);
continue :shdrs;
}
}
const decompressed_section = zlib_stream.reader().readAlloc(gpa, ch_size) catch continue;
if (decompressed_section.len != ch_size) {
gpa.free(decompressed_section);
continue;
}
errdefer gpa.free(decompressed_section);
break :blk .{
.data = decompressed_section,

View File

@ -62,7 +62,7 @@ pub const Error = error{
InvalidTypeLength,
TruncatedIntegralType,
} || abi.RegBytesError || error{ EndOfStream, Overflow, OutOfMemory, DivisionByZero };
} || abi.RegBytesError || error{ EndOfStream, Overflow, OutOfMemory, DivisionByZero, ReadFailed };
/// A stack machine that can decode and run DWARF expressions.
/// Expressions can be decoded for non-native address size and endianness,
@ -259,7 +259,7 @@ pub fn StackMachine(comptime options: Options) type {
allocator: std.mem.Allocator,
context: Context,
initial_value: ?usize,
) anyerror!?Value {
) Error!?Value {
if (initial_value) |i| try self.stack.append(allocator, .{ .generic = i });
var reader: std.io.BufferedReader = undefined;
reader.initFixed(expression);
@ -274,13 +274,13 @@ pub fn StackMachine(comptime options: Options) type {
reader: *std.io.BufferedReader,
allocator: std.mem.Allocator,
context: Context,
) anyerror!bool {
) Error!bool {
if (@sizeOf(usize) != @sizeOf(Address) or options.endian != native_endian)
@compileError("Execution of non-native address sizes / endianness is not supported");
const opcode = reader.takeByte() catch |err| switch (err) {
error.EndOfStream => return false,
else => |e| return @errorCast(e),
error.ReadFailed => return error.ReadFailed,
};
if (options.call_frame_context and !isOpcodeValidInCFA(opcode)) return error.InvalidCFAOpcode;
const operand = try readOperand(reader, opcode, context);

View File

@ -238,23 +238,30 @@ pub fn LinearFifo(
return .{
.context = self,
.vtable = &.{
.read = &reader_read,
.readv = &reader_readv,
.read = &readerRead,
.readVec = &readerReadVec,
.discard = &readerDiscard,
},
};
}
fn reader_read(
fn readerRead(
ctx: ?*anyopaque,
bw: *std.io.BufferedWriter,
limit: std.io.Reader.Limit,
) anyerror!std.io.Reader.Status {
) std.io.Reader.RwError!usize {
const fifo: *Self = @alignCast(@ptrCast(ctx));
_ = fifo;
_ = bw;
_ = limit;
@panic("TODO");
}
fn reader_readv(ctx: ?*anyopaque, data: []const []u8) anyerror!std.io.Reader.Status {
fn readerReadVec(ctx: ?*anyopaque, data: []const []u8) std.io.Reader.Error!usize {
const fifo: *Self = @alignCast(@ptrCast(ctx));
_ = fifo;
_ = data;
@panic("TODO");
}
fn readerDiscard(ctx: ?*anyopaque, data: []const []u8) std.io.Reader.Error!usize {
const fifo: *Self = @alignCast(@ptrCast(ctx));
_ = fifo;
_ = data;
@ -351,26 +358,26 @@ pub fn LinearFifo(
return .{
.context = fifo,
.vtable = &.{
.writeSplat = writer_writeSplat,
.writeFile = writer_writeFile,
.writeSplat = writerWriteSplat,
.writeFile = writerWriteFile,
},
};
}
fn writer_writeSplat(ctx: ?*anyopaque, data: []const []const u8, splat: usize) anyerror!usize {
fn writerWriteSplat(ctx: ?*anyopaque, data: []const []const u8, splat: usize) std.io.Writer.Error!usize {
const fifo: *Self = @alignCast(@ptrCast(ctx));
_ = fifo;
_ = data;
_ = splat;
@panic("TODO");
}
fn writer_writeFile(
fn writerWriteFile(
ctx: ?*anyopaque,
file: std.fs.File,
offset: std.io.Writer.Offset,
limit: std.io.Writer.Limit,
headers_and_trailers: []const []const u8,
headers_len: usize,
) anyerror!usize {
) std.io.Writer.Error!usize {
const fifo: *Self = @alignCast(@ptrCast(ctx));
_ = fifo;
_ = file;

View File

@ -1,8 +1,8 @@
//! String formatting and parsing.
const std = @import("std.zig");
const builtin = @import("builtin");
const std = @import("std.zig");
const io = std.io;
const math = std.math;
const assert = std.debug.assert;
@ -12,6 +12,7 @@ const meta = std.meta;
const lossyCast = math.lossyCast;
const expectFmt = std.testing.expectFmt;
const testing = std.testing;
const Allocator = std.mem.Allocator;
pub const float = @import("fmt/float.zig");
@ -91,7 +92,7 @@ pub const Options = struct {
/// A user type may be a `struct`, `vector`, `union` or `enum` type.
///
/// To print literal curly braces, escape them by writing them twice, e.g. `{{` or `}}`.
pub fn format(bw: *std.io.BufferedWriter, comptime fmt: []const u8, args: anytype) anyerror!void {
pub fn format(bw: *std.io.BufferedWriter, comptime fmt: []const u8, args: anytype) std.io.Writer.Error!void {
const ArgsType = @TypeOf(args);
const args_type_info = @typeInfo(ArgsType);
if (args_type_info != .@"struct") {
@ -531,7 +532,7 @@ pub fn Formatter(comptime formatFn: anytype) type {
const Data = @typeInfo(@TypeOf(formatFn)).@"fn".params[0].type.?;
return struct {
data: Data,
pub fn format(self: @This(), writer: *std.io.BufferedWriter, comptime fmt: []const u8) anyerror!void {
pub fn format(self: @This(), writer: *std.io.BufferedWriter, comptime fmt: []const u8) std.io.Writer.Error!void {
try formatFn(self.data, writer, fmt);
}
};
@ -833,8 +834,7 @@ pub fn bufPrint(buf: []u8, comptime fmt: []const u8, args: anytype) BufPrintErro
var bw: std.io.BufferedWriter = undefined;
bw.initFixed(buf);
bw.print(fmt, args) catch |err| switch (err) {
error.NoSpaceLeft => return error.NoSpaceLeft,
else => unreachable,
error.WriteFailed => return error.NoSpaceLeft,
};
return bw.getWritten();
}
@ -846,25 +846,34 @@ pub fn bufPrintZ(buf: []u8, comptime fmt: []const u8, args: anytype) BufPrintErr
/// Count the characters needed for format.
pub fn count(comptime fmt: []const u8, args: anytype) usize {
var buffer: [std.atomic.cache_line]u8 = undefined;
var bw = std.io.Writer.null.buffered(&buffer);
var trash_buffer: [std.atomic.cache_line]u8 = undefined;
var null_writer: std.io.Writer.Null = undefined;
var bw = null_writer.writer().buffered(&trash_buffer);
bw.print(fmt, args) catch unreachable;
return bw.count;
}
pub const AllocPrintError = error{OutOfMemory};
pub fn allocPrint(allocator: mem.Allocator, comptime fmt: []const u8, args: anytype) AllocPrintError![]u8 {
const size = math.cast(usize, count(fmt, args)) orelse return error.OutOfMemory;
const buf = try allocator.alloc(u8, size);
return bufPrint(buf, fmt, args) catch |err| switch (err) {
error.NoSpaceLeft => unreachable, // we just counted the size above
pub fn allocPrint(gpa: Allocator, comptime fmt: []const u8, args: anytype) Allocator.Error![]u8 {
var aw: std.io.AllocatingWriter = undefined;
try aw.initCapacity(gpa, fmt.len);
aw.buffered_writer.print(fmt, args) catch |err| switch (err) {
error.WriteFailed => return error.OutOfMemory,
};
return aw.toOwnedSlice();
}
pub fn allocPrintZ(allocator: mem.Allocator, comptime fmt: []const u8, args: anytype) AllocPrintError![:0]u8 {
const result = try allocPrint(allocator, fmt ++ "\x00", args);
return result[0 .. result.len - 1 :0];
pub fn allocPrintSentinel(
gpa: Allocator,
comptime fmt: []const u8,
args: anytype,
comptime sentinel: u8,
) Allocator.Error![:sentinel]u8 {
var aw: std.io.AllocatingWriter = undefined;
try aw.initCapacity(gpa, fmt.len);
aw.buffered_writer.print(fmt, args) catch |err| switch (err) {
error.WriteFailed => return error.OutOfMemory,
};
return aw.toOwnedSliceSentinel(sentinel);
}
pub inline fn comptimePrint(comptime fmt: []const u8, args: anytype) *const [count(fmt, args):0]u8 {

View File

@ -2619,10 +2619,13 @@ pub fn updateFile(
var atomic_file = try dest_dir.atomicFile(dest_path, .{ .mode = actual_mode });
defer atomic_file.deinit();
try atomic_file.file.writeFileAll(src_file, .{ .in_len = src_stat.size });
try atomic_file.file.writeFileAll(src_file, .{
.offset = .zero,
.limit = .limited(src_stat.size),
});
try atomic_file.file.updateTimes(src_stat.atime, src_stat.mtime);
try atomic_file.finish();
return PrevStatus.stale;
return .stale;
}
pub const CopyFileError = File.OpenError || File.StatError ||
@ -2833,15 +2836,6 @@ pub fn setPermissions(self: Dir, permissions: Permissions) SetPermissionsError!v
try file.setPermissions(permissions);
}
const Metadata = File.Metadata;
pub const MetadataError = File.MetadataError;
/// Returns a `Metadata` struct, representing the permissions on the directory
pub fn metadata(self: Dir) MetadataError!Metadata {
const file: File = .{ .handle = self.fd };
return try file.metadata();
}
const Dir = @This();
const builtin = @import("builtin");
const std = @import("../std.zig");

File diff suppressed because it is too large Load Diff

View File

@ -1953,113 +1953,6 @@ test "chown" {
try dir.chown(null, null);
}
test "File.Metadata" {
var tmp = tmpDir(.{});
defer tmp.cleanup();
const file = try tmp.dir.createFile("test_file", .{ .read = true });
defer file.close();
const metadata = try file.metadata();
try testing.expectEqual(File.Kind.file, metadata.kind());
try testing.expectEqual(@as(u64, 0), metadata.size());
_ = metadata.accessed();
_ = metadata.modified();
_ = metadata.created();
}
test "File.Permissions" {
if (native_os == .wasi)
return error.SkipZigTest;
var tmp = tmpDir(.{});
defer tmp.cleanup();
const file = try tmp.dir.createFile("test_file", .{ .read = true });
defer file.close();
const metadata = try file.metadata();
var permissions = metadata.permissions();
try testing.expect(!permissions.readOnly());
permissions.setReadOnly(true);
try testing.expect(permissions.readOnly());
try file.setPermissions(permissions);
const new_permissions = (try file.metadata()).permissions();
try testing.expect(new_permissions.readOnly());
// Must be set to non-read-only to delete
permissions.setReadOnly(false);
try file.setPermissions(permissions);
}
test "File.PermissionsUnix" {
if (native_os == .windows or native_os == .wasi)
return error.SkipZigTest;
var tmp = tmpDir(.{});
defer tmp.cleanup();
const file = try tmp.dir.createFile("test_file", .{ .mode = 0o666, .read = true });
defer file.close();
const metadata = try file.metadata();
var permissions = metadata.permissions();
permissions.setReadOnly(true);
try testing.expect(permissions.readOnly());
try testing.expect(!permissions.inner.unixHas(.user, .write));
permissions.inner.unixSet(.user, .{ .write = true });
try testing.expect(!permissions.readOnly());
try testing.expect(permissions.inner.unixHas(.user, .write));
try testing.expect(permissions.inner.mode & 0o400 != 0);
permissions.setReadOnly(true);
try file.setPermissions(permissions);
permissions = (try file.metadata()).permissions();
try testing.expect(permissions.readOnly());
// Must be set to non-read-only to delete
permissions.setReadOnly(false);
try file.setPermissions(permissions);
const permissions_unix = File.PermissionsUnix.unixNew(0o754);
try testing.expect(permissions_unix.unixHas(.user, .execute));
try testing.expect(!permissions_unix.unixHas(.other, .execute));
}
test "delete a read-only file on windows" {
if (native_os != .windows)
return error.SkipZigTest;
var tmp = testing.tmpDir(.{});
defer tmp.cleanup();
const file = try tmp.dir.createFile("test_file", .{ .read = true });
defer file.close();
// Create a file and make it read-only
const metadata = try file.metadata();
var permissions = metadata.permissions();
permissions.setReadOnly(true);
try file.setPermissions(permissions);
// If the OS and filesystem support it, POSIX_SEMANTICS and IGNORE_READONLY_ATTRIBUTE
// is used meaning that the deletion of a read-only file will succeed.
// Otherwise, this delete will fail and the read-only flag must be unset before it's
// able to be deleted.
const delete_result = tmp.dir.deleteFile("test_file");
if (delete_result) {
try testing.expectError(error.FileNotFound, tmp.dir.deleteFile("test_file"));
} else |err| {
try testing.expectEqual(@as(anyerror, error.AccessDenied), err);
// Now make the file not read-only
permissions.setReadOnly(false);
try file.setPermissions(permissions);
try tmp.dir.deleteFile("test_file");
}
}
test "delete a setAsCwd directory on Windows" {
if (native_os != .windows) return error.SkipZigTest;

View File

@ -386,7 +386,7 @@ pub const Connection = struct {
}
}
pub fn flush(c: *Connection) anyerror!void {
pub fn flush(c: *Connection) std.io.Writer.Error!void {
try c.writer.flush();
if (c.protocol == .tls) {
if (disable_tls) unreachable;
@ -398,7 +398,7 @@ pub const Connection = struct {
/// If the connection is a TLS connection, sends the close_notify alert.
///
/// Flushes all buffers.
pub fn end(c: *Connection) anyerror!void {
pub fn end(c: *Connection) std.io.Writer.Error!void {
try c.writer.flush();
if (c.protocol == .tls) {
if (disable_tls) unreachable;
@ -826,7 +826,7 @@ pub const Request = struct {
}
/// Send the HTTP request headers to the server.
pub fn send(req: *Request) anyerror!void {
pub fn send(req: *Request) std.io.Writer.Error!void {
assert(req.transfer_encoding == .none or req.method.requestHasBody());
const connection = req.connection.?;
@ -959,7 +959,7 @@ pub const Request = struct {
/// TODO collapse each error set into its own meta error code, and store
/// the underlying error code as a field on Request
pub const WaitError = RequestError || anyerror || TransferReadError ||
pub const WaitError = RequestError || std.io.Writer.Error || TransferReadError ||
proto.HeadersParser.CheckCompleteHeadError || Response.ParseError ||
error{
TooManyHttpRedirects,
@ -1156,7 +1156,7 @@ pub const Request = struct {
};
}
fn chunked_writeSplat(context: *anyopaque, data: []const []const u8, splat: usize) anyerror!usize {
fn chunked_writeSplat(context: *anyopaque, data: []const []const u8, splat: usize) std.io.Writer.Error!usize {
const req: *Request = @ptrCast(@alignCast(context));
var total: usize = 0;
for (data) |bytes| total += bytes.len;
@ -1187,7 +1187,7 @@ pub const Request = struct {
len: std.io.Writer.FileLen,
headers_and_trailers: []const []const u8,
headers_len: usize,
) anyerror!usize {
) std.io.Writer.Error!usize {
if (len == .entire_file) return error.Unimplemented;
const req: *Request = @ptrCast(@alignCast(context));
var total: usize = len.int();
@ -1213,7 +1213,7 @@ pub const Request = struct {
return total;
}
fn cl_writeSplat(context: *anyopaque, data: []const []const u8, splat: usize) anyerror!usize {
fn cl_writeSplat(context: *anyopaque, data: []const []const u8, splat: usize) std.io.Writer.Error!usize {
const req: *Request = @ptrCast(@alignCast(context));
const n = try req.connection.?.writer.writeSplat(data, splat);
req.transfer_encoding.content_length -= n;
@ -1227,7 +1227,7 @@ pub const Request = struct {
len: std.io.Writer.FileLen,
headers_and_trailers: []const []const u8,
headers_len: usize,
) anyerror!usize {
) std.io.Writer.Error!usize {
const req: *Request = @ptrCast(@alignCast(context));
const n = try req.connection.?.writer.writeFile(file, offset, len, headers_and_trailers, headers_len);
req.transfer_encoding.content_length -= n;
@ -1236,7 +1236,7 @@ pub const Request = struct {
/// Finish the body of a request. This notifies the server that you have no more data to send.
/// Must be called after `send`.
pub fn finish(req: *Request) anyerror!void {
pub fn finish(req: *Request) std.io.Writer.Error!void {
switch (req.transfer_encoding) {
.chunked => try req.connection.?.writer.writeAll("0\r\n\r\n"),
.content_length => |len| assert(len == 0),
@ -1353,7 +1353,7 @@ pub const basic_authorization = struct {
return bw.getWritten();
}
pub fn write(uri: Uri, out: *std.io.BufferedWriter) anyerror!void {
pub fn write(uri: Uri, out: *std.io.BufferedWriter) std.io.Writer.Error!void {
var buf: [max_user_len + ":".len + max_password_len]u8 = undefined;
var bw: std.io.BufferedWriter = undefined;
bw.initFixed(&buf);
@ -1574,7 +1574,7 @@ pub fn connect(
/// TODO collapse each error set into its own meta error code, and store
/// the underlying error code as a field on Request
pub const RequestError = ConnectTcpError || ConnectErrorPartial || anyerror ||
pub const RequestError = ConnectTcpError || ConnectErrorPartial || std.io.Writer.Error ||
std.fmt.ParseIntError || Connection.WriteError ||
error{
UnsupportedUriScheme,

View File

@ -19,7 +19,7 @@ out: *std.io.BufferedWriter,
/// same connection, and makes invalid API usage cause assertion failures
/// rather than HTTP protocol violations.
state: State,
in_err: anyerror,
head_parse_err: Request.Head.ParseError,
pub const State = enum {
/// The connection is available to be used for the first time, or reused.
@ -53,8 +53,8 @@ pub const ReceiveHeadError = error{
/// The HTTP specification suggests to respond with a 431 status code
/// before closing the connection.
HttpHeadersOversize,
/// Client sent headers that did not conform to the HTTP protocol.
/// `in_err` is populated with a `Request.Head.ParseError`.
/// Client sent headers that did not conform to the HTTP protocol;
/// `head_parse_err` is populated.
HttpHeadersInvalid,
/// Partial HTTP request was received but the connection was closed before
/// fully receiving the headers.
@ -62,7 +62,7 @@ pub const ReceiveHeadError = error{
/// The client sent 0 bytes of headers before closing the stream.
/// In other words, a keep-alive connection was finally closed.
HttpConnectionClosing,
/// Error occurred reading from `in`; `in_err` is populated.
/// Transitive error occurred reading from `in`.
ReadFailure,
};
@ -79,23 +79,22 @@ pub fn receiveHead(s: *Server) ReceiveHeadError!Request {
while (true) {
if (head_end >= in.bufferContents().len) return error.HttpHeadersOversize;
const buf = (in.peekGreedy(head_end + 1) catch |err| {
s.in_err = err;
return error.ReadFailure;
}) orelse switch (head_end) {
0 => return error.HttpConnectionClosing,
else => return error.HttpRequestTruncated,
const buf = in.peekGreedy(head_end + 1) catch |err| switch (err) {
error.EndOfStream => switch (head_end) {
0 => return error.HttpConnectionClosing,
else => return error.HttpRequestTruncated,
},
error.ReadFailure => return error.ReadFailure,
};
head_end += hp.feed(buf[head_end..]);
if (hp.state == .finished) return .{
.server = s,
.head_end = head_end,
.head = Request.Head.parse(buf[0..head_end]) catch |err| {
s.in_err = err;
s.head_parse_err = err;
return error.HttpHeadersInvalid;
},
.reader_state = undefined,
.write_error = undefined,
};
}
}
@ -109,8 +108,6 @@ pub const Request = struct {
remaining_content_length: u64,
chunk_parser: http.ChunkParser,
},
/// Populated when `error.HttpContinueWriteFailed` is received.
write_error: anyerror,
pub const Compression = union(enum) {
deflate: std.compress.zlib.Decompressor,
@ -310,7 +307,6 @@ pub const Request = struct {
.head_end = request_bytes.len,
.head = undefined,
.reader_state = undefined,
.write_error = undefined,
};
var it = request.iterateHeaders();
@ -375,7 +371,7 @@ pub const Request = struct {
request: *Request,
content: []const u8,
options: RespondOptions,
) anyerror!void {
) std.io.Writer.Error!void {
const max_extra_headers = 25;
assert(options.status != .@"continue");
assert(options.extra_headers.len <= max_extra_headers);
@ -581,7 +577,7 @@ pub const Request = struct {
ctx: ?*anyopaque,
bw: *std.io.BufferedWriter,
limit: std.io.Reader.Limit,
) anyerror!std.io.Reader.Status {
) std.io.Reader.Error!std.io.Reader.Status {
const request: *Request = @alignCast(@ptrCast(ctx));
_ = request;
_ = bw;
@ -589,7 +585,7 @@ pub const Request = struct {
@panic("TODO");
}
fn contentLengthReader_readv(ctx: ?*anyopaque, data: []const []u8) anyerror!std.io.Reader.Status {
fn contentLengthReader_readv(ctx: ?*anyopaque, data: []const []u8) std.io.Reader.Error!usize {
const request: *Request = @alignCast(@ptrCast(ctx));
_ = request;
_ = data;
@ -600,7 +596,7 @@ pub const Request = struct {
ctx: ?*anyopaque,
bw: *std.io.BufferedWriter,
limit: std.io.Reader.Limit,
) anyerror!std.io.Reader.Status {
) std.io.Reader.Error!usize {
const request: *Request = @alignCast(@ptrCast(ctx));
_ = request;
_ = bw;
@ -608,7 +604,7 @@ pub const Request = struct {
@panic("TODO");
}
fn chunkedReader_readv(ctx: ?*anyopaque, data: []const []u8) anyerror!std.io.Reader.Status {
fn chunkedReader_readv(ctx: ?*anyopaque, data: []const []u8) std.io.Reader.Error!usize {
const request: *Request = @alignCast(@ptrCast(ctx));
_ = request;
_ = data;
@ -732,9 +728,10 @@ pub const Request = struct {
}
pub const ReaderError = error{
/// Failed to write "100-continue" to the stream. Error value is
/// stored in `Request.write_error`.
HttpContinueWriteFailed,
/// Failed to write "100-continue" to the stream.
WriteFailed,
/// Failed to write "100-continue" to the stream because it ended.
EndOfStream,
/// The client sent an expect HTTP header value other than
/// "100-continue".
HttpExpectationFailed,
@ -755,10 +752,7 @@ pub const Request = struct {
if (request.head.expect) |expect| {
if (mem.eql(u8, expect, "100-continue")) {
var w = request.server.connection.stream.writer().unbuffered();
w.writeAll("HTTP/1.1 100 Continue\r\n\r\n") catch |err| {
request.write_error = err;
return error.HttpContinueWriteFailed;
};
try w.writeAll("HTTP/1.1 100 Continue\r\n\r\n");
request.head.expect = null;
} else {
return error.HttpExpectationFailed;
@ -854,7 +848,7 @@ pub const Response = struct {
/// Otherwise, transfer-encoding: chunked is being used, and it writes the
/// end-of-stream message, then flushes the stream to the system.
/// Respects the value of `elide_body` to omit all data after the headers.
pub fn end(r: *Response) anyerror!void {
pub fn end(r: *Response) std.io.Writer.Error!void {
switch (r.transfer_encoding) {
.content_length => |len| {
assert(len == 0); // Trips when end() called before all bytes written.
@ -879,7 +873,7 @@ pub const Response = struct {
/// flushes the stream to the system.
/// Respects the value of `elide_body` to omit all data after the headers.
/// Asserts there are at most 25 trailers.
pub fn endChunked(r: *Response, options: EndChunkedOptions) anyerror!void {
pub fn endChunked(r: *Response, options: EndChunkedOptions) std.io.Writer.Error!void {
assert(r.transfer_encoding == .chunked);
try flush_chunked(r, options.trailers);
r.* = undefined;
@ -889,14 +883,14 @@ pub const Response = struct {
/// would not exceed the content-length value sent in the HTTP header.
/// May return 0, which does not indicate end of stream. The caller decides
/// when the end of stream occurs by calling `end`.
pub fn write(r: *Response, bytes: []const u8) anyerror!usize {
pub fn write(r: *Response, bytes: []const u8) std.io.Writer.Error!usize {
switch (r.transfer_encoding) {
.content_length, .none => return @errorCast(cl_writeSplat(r, &.{bytes}, 1)),
.chunked => return @errorCast(chunked_writeSplat(r, &.{bytes}, 1)),
}
}
fn cl_writeSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) anyerror!usize {
fn cl_writeSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) std.io.Writer.Error!usize {
_ = splat;
return cl_write(context, data[0]); // TODO: try to send all the data
}
@ -908,7 +902,7 @@ pub const Response = struct {
limit: std.io.Writer.Limit,
headers_and_trailers: []const []const u8,
headers_len: usize,
) anyerror!usize {
) std.io.Writer.Error!usize {
_ = context;
_ = file;
_ = offset;
@ -918,7 +912,7 @@ pub const Response = struct {
return error.Unimplemented;
}
fn cl_write(context: ?*anyopaque, bytes: []const u8) anyerror!usize {
fn cl_write(context: ?*anyopaque, bytes: []const u8) std.io.Writer.Error!usize {
const r: *Response = @alignCast(@ptrCast(context));
var trash: u64 = std.math.maxInt(u64);
@ -963,7 +957,7 @@ pub const Response = struct {
return bytes.len;
}
fn chunked_writeSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) anyerror!usize {
fn chunked_writeSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) std.io.Writer.Error!usize {
_ = splat;
return chunked_write(context, data[0]); // TODO: try to send all the data
}
@ -975,7 +969,7 @@ pub const Response = struct {
limit: std.io.Writer.Limit,
headers_and_trailers: []const []const u8,
headers_len: usize,
) anyerror!usize {
) std.io.Writer.Error!usize {
_ = context;
_ = file;
_ = offset;
@ -985,7 +979,7 @@ pub const Response = struct {
return error.Unimplemented; // TODO lower to a call to writeFile on the output
}
fn chunked_write(context: ?*anyopaque, bytes: []const u8) anyerror!usize {
fn chunked_write(context: ?*anyopaque, bytes: []const u8) std.io.Writer.Error!usize {
const r: *Response = @alignCast(@ptrCast(context));
assert(r.transfer_encoding == .chunked);
@ -1024,7 +1018,7 @@ pub const Response = struct {
/// If using content-length, asserts that writing these bytes to the client
/// would not exceed the content-length value sent in the HTTP header.
pub fn writeAll(r: *Response, bytes: []const u8) anyerror!void {
pub fn writeAll(r: *Response, bytes: []const u8) std.io.Writer.Error!void {
var index: usize = 0;
while (index < bytes.len) {
index += try write(r, bytes[index..]);
@ -1034,21 +1028,21 @@ pub const Response = struct {
/// Sends all buffered data to the client.
/// This is redundant after calling `end`.
/// Respects the value of `elide_body` to omit all data after the headers.
pub fn flush(r: *Response) anyerror!void {
pub fn flush(r: *Response) std.io.Writer.Error!void {
switch (r.transfer_encoding) {
.none, .content_length => return flush_cl(r),
.chunked => return flush_chunked(r, null),
}
}
fn flush_cl(r: *Response) anyerror!void {
fn flush_cl(r: *Response) std.io.Writer.Error!void {
var w = r.stream.writer().unbuffered();
try w.writeAll(r.send_buffer[r.send_buffer_start..r.send_buffer_end]);
r.send_buffer_start = 0;
r.send_buffer_end = 0;
}
fn flush_chunked(r: *Response, end_trailers: ?[]const http.Header) anyerror!void {
fn flush_chunked(r: *Response, end_trailers: ?[]const http.Header) std.io.Writer.Error!void {
const max_trailers = 25;
if (end_trailers) |trailers| assert(trailers.len <= max_trailers);
assert(r.transfer_encoding == .chunked);

View File

@ -194,14 +194,16 @@ fn recvReadInt(ws: *WebSocket, comptime I: type) !I {
};
}
pub fn writeMessage(ws: *WebSocket, message: []const u8, opcode: Opcode) anyerror!void {
pub const WriteError = std.http.Server.Response.WriteError;
pub fn writeMessage(ws: *WebSocket, message: []const u8, opcode: Opcode) WriteError!void {
const iovecs: [1]std.posix.iovec_const = .{
.{ .base = message.ptr, .len = message.len },
};
return writeMessagev(ws, &iovecs, opcode);
}
pub fn writeMessagev(ws: *WebSocket, message: []const std.posix.iovec_const, opcode: Opcode) anyerror!void {
pub fn writeMessagev(ws: *WebSocket, message: []const std.posix.iovec_const, opcode: Opcode) WriteError!void {
const total_len = l: {
var total_len: u64 = 0;
for (message) |iovec| total_len += iovec.len;

View File

@ -17,8 +17,6 @@ const Alignment = std.mem.Alignment;
pub const Reader = @import("io/Reader.zig");
pub const Writer = @import("io/Writer.zig");
pub const PositionalReader = @import("io/PositionalReader.zig");
pub const BufferedReader = @import("io/BufferedReader.zig");
pub const BufferedWriter = @import("io/BufferedWriter.zig");
pub const AllocatingWriter = @import("io/AllocatingWriter.zig");
@ -453,7 +451,6 @@ test {
_ = BufferedReader;
_ = Reader;
_ = Writer;
_ = PositionalReader;
_ = AllocatingWriter;
_ = @import("io/bit_reader.zig");
_ = @import("io/bit_writer.zig");

View File

@ -130,7 +130,7 @@ pub fn clearRetainingCapacity(aw: *AllocatingWriter) void {
aw.shrinkRetainingCapacity(0);
}
fn writeSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) anyerror!usize {
fn writeSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) std.io.Writer.Error!usize {
const aw: *AllocatingWriter = @alignCast(@ptrCast(context));
const start_len = aw.written.len;
const bw = &aw.buffered_writer;
@ -145,7 +145,7 @@ fn writeSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) anye
const pattern = data[data.len - 1];
var new_capacity: usize = list.capacity + pattern.len * splat;
for (rest) |bytes| new_capacity += bytes.len;
try list.ensureTotalCapacity(aw.allocator, new_capacity + 1);
list.ensureTotalCapacity(aw.allocator, new_capacity + 1) catch return error.WriteFailed;
for (rest) |bytes| list.appendSliceAssumeCapacity(bytes);
appendPatternAssumeCapacity(&list, pattern, splat);
aw.written = list.items;
@ -168,7 +168,7 @@ fn writeFile(
limit: std.io.Writer.Limit,
headers_and_trailers_full: []const []const u8,
headers_len_full: usize,
) anyerror!usize {
) std.io.Writer.FileError!usize {
const aw: *AllocatingWriter = @alignCast(@ptrCast(context));
const gpa = aw.allocator;
var list = aw.toArrayList();
@ -184,14 +184,14 @@ fn writeFile(
const limit_int = limit.toInt() orelse {
var new_capacity: usize = list.capacity + std.atomic.cache_line;
for (headers_and_trailers) |bytes| new_capacity += bytes.len;
try list.ensureTotalCapacity(gpa, new_capacity);
list.ensureTotalCapacity(gpa, new_capacity) catch return error.WriteFailed;
for (headers_and_trailers[0..headers_len]) |bytes| list.appendSliceAssumeCapacity(bytes);
const dest = list.items.ptr[list.items.len..list.capacity];
const n = try file.pread(dest, pos);
if (n == 0) {
new_capacity = list.capacity;
for (trailers) |bytes| new_capacity += bytes.len;
try list.ensureTotalCapacity(gpa, new_capacity);
list.ensureTotalCapacity(gpa, new_capacity) catch return error.WriteFailed;
for (trailers) |bytes| list.appendSliceAssumeCapacity(bytes);
return list.items.len - start_len;
}
@ -200,7 +200,7 @@ fn writeFile(
};
var new_capacity: usize = list.capacity + limit_int;
for (headers_and_trailers) |bytes| new_capacity += bytes.len;
try list.ensureTotalCapacity(gpa, new_capacity);
list.ensureTotalCapacity(gpa, new_capacity) catch return error.WriteFailed;
for (headers_and_trailers[0..headers_len]) |bytes| list.appendSliceAssumeCapacity(bytes);
const dest = list.items.ptr[list.items.len..][0..limit_int];
const n = try file.pread(dest, pos);

View File

@ -23,74 +23,23 @@ pub fn init(br: *BufferedReader, r: Reader, buffer: []u8) void {
br.storage.initFixed(buffer);
}
const eof_writer: std.io.Writer.VTable = .{
.writeSplat = eof_writeSplat,
.writeFile = eof_writeFile,
};
const eof_reader: std.io.Reader.VTable = .{
.read = eof_read,
.readv = eof_readv,
};
fn eof_writeSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) anyerror!usize {
_ = context;
_ = data;
_ = splat;
return error.NoSpaceLeft;
}
fn eof_writeFile(
context: ?*anyopaque,
file: std.fs.File,
offset: std.io.Writer.Offset,
limit: std.io.Writer.Limit,
headers_and_trailers: []const []const u8,
headers_len: usize,
) anyerror!usize {
_ = context;
_ = file;
_ = offset;
_ = limit;
_ = headers_and_trailers;
_ = headers_len;
return error.NoSpaceLeft;
}
fn eof_read(ctx: ?*anyopaque, bw: *std.io.BufferedWriter, limit: Reader.Limit) anyerror!Reader.Status {
_ = ctx;
_ = bw;
_ = limit;
return error.EndOfStream;
}
fn eof_readv(ctx: ?*anyopaque, data: []const []u8) anyerror!Reader.Status {
_ = ctx;
_ = data;
return error.EndOfStream;
}
/// Constructs `br` such that it will read from `buffer` and then end.
/// TODO either remove the const cast here or make methods of this file return a const slice
pub fn initFixed(br: *BufferedReader, buffer: []const u8) void {
br.* = .{
.seek = 0,
.storage = .{
.buffer = @constCast(buffer),
.unbuffered_writer = .{
.context = undefined,
.vtable = &eof_writer,
},
},
.unbuffered_reader = .{
.context = undefined,
.vtable = &eof_reader,
.unbuffered_writer = .failing,
},
.unbuffered_reader = .ending,
};
}
pub fn storageBuffer(br: *BufferedReader) []u8 {
const storage = &br.storage;
assert(storage.unbuffered_writer.vtable == &eof_writer);
assert(br.unbuffered_reader.vtable == &eof_reader);
assert(storage.unbuffered_writer.vtable == std.io.Writer.failing.vtable);
assert(br.unbuffered_reader.vtable == Reader.ending.vtable);
return storage.buffer;
}
@ -106,47 +55,43 @@ pub fn reader(br: *BufferedReader) Reader {
return .{
.context = br,
.vtable = &.{
.read = passthru_read,
.readv = passthru_readv,
.read = passthruRead,
.readVec = passthruReadVec,
},
};
}
fn passthru_read(ctx: ?*anyopaque, bw: *BufferedWriter, limit: Reader.Limit) anyerror!Reader.RwResult {
fn passthruRead(ctx: ?*anyopaque, bw: *BufferedWriter, limit: Reader.Limit) Reader.RwError!usize {
const br: *BufferedReader = @alignCast(@ptrCast(ctx));
const storage = &br.storage;
const buffer = storage.buffer[0..storage.end];
const buffered = buffer[br.seek..];
const limited = buffered[0..limit.min(buffered.len)];
if (limited.len > 0) {
const result = bw.writeSplat(limited, 1);
br.seek += result.len;
return .{
.len = result.len,
.write_err = result.err,
.write_end = result.end,
};
const n = try bw.writeSplat(limited, 1);
br.seek += n;
return n;
}
return br.unbuffered_reader.read(bw, limit);
}
fn passthru_readv(ctx: ?*anyopaque, data: []const []u8) anyerror!Reader.Status {
fn passthruReadVec(ctx: ?*anyopaque, data: []const []u8) Reader.Error!usize {
const br: *BufferedReader = @alignCast(@ptrCast(ctx));
_ = br;
_ = data;
@panic("TODO");
}
pub fn seekBy(br: *BufferedReader, seek_by: i64) anyerror!void {
pub fn seekBy(br: *BufferedReader, seek_by: i64) !void {
if (seek_by < 0) try br.seekBackwardBy(@abs(seek_by)) else try br.seekForwardBy(@abs(seek_by));
}
pub fn seekBackwardBy(br: *BufferedReader, seek_by: u64) anyerror!void {
pub fn seekBackwardBy(br: *BufferedReader, seek_by: u64) !void {
if (seek_by > br.storage.end - br.seek) return error.Unseekable; // TODO
br.seek += @abs(seek_by);
}
pub fn seekForwardBy(br: *BufferedReader, seek_by: u64) anyerror!void {
pub fn seekForwardBy(br: *BufferedReader, seek_by: u64) !void {
const seek, const need_unbuffered_seek = @subWithOverflow(br.seek, @abs(seek_by));
if (need_unbuffered_seek > 0) return error.Unseekable; // TODO
br.seek = seek;
@ -166,27 +111,11 @@ pub fn seekForwardBy(br: *BufferedReader, seek_by: u64) anyerror!void {
/// See also:
/// * `peekGreedy`
/// * `toss`
pub fn peek(br: *BufferedReader, n: usize) anyerror![]u8 {
return (try br.peekGreedy(n))[0..n];
}
/// Returns the next `n` bytes from `unbuffered_reader`, filling the buffer as
/// necessary.
///
/// Invalidates previously returned values from `peek`.
///
/// Asserts that the `BufferedReader` was initialized with a buffer capacity at
/// least as big as `n`.
///
/// If there are fewer than `n` bytes left in the stream, `null` is returned
/// instead.
///
/// See also:
/// * `peekGreedy`
/// * `toss`
pub fn peek2(br: *BufferedReader, n: usize) anyerror!?[]u8 {
if (try br.peekGreedy(n)) |buf| return buf[0..n];
return null;
pub fn peek(br: *BufferedReader, n: usize) Reader.Error![]u8 {
const storage = &br.storage;
assert(n <= storage.buffer.len);
try br.fill(n);
return storage.buffer[br.seek..][0..n];
}
/// Returns all the next buffered bytes from `unbuffered_reader`, after filling
@ -203,30 +132,11 @@ pub fn peek2(br: *BufferedReader, n: usize) anyerror!?[]u8 {
/// See also:
/// * `peek`
/// * `toss`
pub fn peekGreedy(br: *BufferedReader, n: usize) anyerror![]u8 {
assert(n <= br.storage.buffer.len);
if (try br.fill(n)) return br.bufferContents();
return error.EndOfStream;
}
/// Returns all the next buffered bytes from `unbuffered_reader`, after filling
/// the buffer to ensure it contains at least `n` bytes.
///
/// Invalidates previously returned values from `peek` and `peekGreedy`.
///
/// Asserts that the `BufferedReader` was initialized with a buffer capacity at
/// least as big as `n`.
///
/// If there are fewer than `n` bytes left in the stream, `null` is returned
/// instead.
///
/// See also:
/// * `peek`
/// * `toss`
pub fn peekGreedy2(br: *BufferedReader, n: usize) anyerror!?[]u8 {
assert(n <= br.storage.buffer.len);
if (try br.fill(n)) return br.bufferContents();
return null;
pub fn peekGreedy(br: *BufferedReader, n: usize) Reader.Error![]u8 {
const storage = &br.storage;
assert(n <= storage.buffer.len);
try br.fill(n);
return storage.buffer[br.seek..storage.end];
}
/// Skips the next `n` bytes from the stream, advancing the seek position. This
@ -242,8 +152,11 @@ pub fn toss(br: *BufferedReader, n: usize) void {
assert(br.seek <= br.storage.end);
}
/// Equivalent to `peek` + `toss`.
pub fn take(br: *BufferedReader, n: usize) anyerror![]u8 {
/// Equivalent to `peek` followed by `toss`.
///
/// The data returned is invalidated by the next call to `take`, `peek`,
/// `fill`, and functions with those prefixes.
pub fn take(br: *BufferedReader, n: usize) Reader.Error![]u8 {
const result = try br.peek(n);
br.toss(n);
return result;
@ -260,7 +173,7 @@ pub fn take(br: *BufferedReader, n: usize) anyerror![]u8 {
///
/// See also:
/// * `take`
pub fn takeArray(br: *BufferedReader, comptime n: usize) anyerror!*[n]u8 {
pub fn takeArray(br: *BufferedReader, comptime n: usize) Reader.Error!*[n]u8 {
return (try br.take(n))[0..n];
}
@ -272,10 +185,10 @@ pub fn takeArray(br: *BufferedReader, comptime n: usize) anyerror!*[n]u8 {
///
/// See also:
/// * `toss`
/// * `discardUntilEnd`
/// * `discardUpTo`
pub fn discard(br: *BufferedReader, n: usize) anyerror!void {
if ((try br.discardUpTo(n)) != n) return error.EndOfStream;
/// * `discardRemaining`
/// * `discardShort`
pub fn discard(br: *BufferedReader, n: usize) Reader.Error!void {
if ((try br.discardShort(n)) != n) return error.EndOfStream;
}
/// Skips the next `n` bytes from the stream, advancing the seek position.
@ -288,35 +201,35 @@ pub fn discard(br: *BufferedReader, n: usize) anyerror!void {
/// See also:
/// * `discard`
/// * `toss`
/// * `discardUntilEnd`
pub fn discardUpTo(br: *BufferedReader, n: usize) anyerror!usize {
/// * `discardRemaining`
pub fn discardShort(br: *BufferedReader, n: usize) Reader.ShortError!usize {
const storage = &br.storage;
var remaining = n;
while (remaining > 0) {
const proposed_seek = br.seek + remaining;
if (proposed_seek <= storage.end) {
br.seek = proposed_seek;
return n;
}
remaining -= (storage.end - br.seek);
storage.end = 0;
br.seek = 0;
const result = try br.unbuffered_reader.read(storage, .unlimited);
assert(result.len == storage.end);
if (remaining <= storage.end) continue;
if (result.end) return n - remaining;
const proposed_seek = br.seek + n;
if (proposed_seek <= storage.end) {
@branchHint(.likely);
br.seek = proposed_seek;
return n;
}
var remaining = n - (storage.end - br.seek);
storage.end = 0;
br.seek = 0;
while (true) {
const discard_len = br.unbuffered_reader.discard(remaining, .unlimited) catch |err| switch (err) {
error.EndOfStream => return n - remaining,
error.ReadFailed => return error.ReadFailed,
};
remaining -= discard_len;
if (remaining == 0) return n;
}
return n;
}
/// Reads the stream until the end, ignoring all the data.
/// Returns the number of bytes discarded.
pub fn discardUntilEnd(br: *BufferedReader) anyerror!usize {
pub fn discardRemaining(br: *BufferedReader) Reader.ShortError!usize {
const storage = &br.storage;
var total: usize = storage.end;
const buffered_len = storage.end;
storage.end = 0;
total += try br.unbuffered_reader.discardUntilEnd();
return total;
return buffered_len + try br.unbuffered_reader.discardRemaining();
}
/// Fill `buffer` with the next `buffer.len` bytes from the stream, advancing
@ -329,7 +242,7 @@ pub fn discardUntilEnd(br: *BufferedReader) anyerror!usize {
///
/// See also:
/// * `peek`
pub fn read(br: *BufferedReader, buffer: []u8) anyerror!void {
pub fn read(br: *BufferedReader, buffer: []u8) Reader.Error!void {
const storage = &br.storage;
const in_buffer = storage.buffer[0..storage.end];
const seek = br.seek;
@ -344,7 +257,12 @@ pub fn read(br: *BufferedReader, buffer: []u8) anyerror!void {
br.seek = 0;
var i: usize = in_buffer.len;
while (true) {
const status = try br.unbuffered_reader.read(storage, .unlimited);
// TODO if remaining buffer len is greater than storage len, read directly into buffer
const read_len = br.unbuffered_reader.read(storage, .unlimited) catch |err| switch (err) {
error.WriteFailed => storage.end,
else => |e| return e,
};
assert(read_len == storage.end);
const next_i = i + storage.end;
if (next_i >= buffer.len) {
const remaining = buffer[i..];
@ -352,46 +270,48 @@ pub fn read(br: *BufferedReader, buffer: []u8) anyerror!void {
br.seek = remaining.len;
return;
}
if (status.end) return error.EndOfStream;
@memcpy(buffer[i..next_i], storage.buffer[0..storage.end]);
storage.end = 0;
i = next_i;
}
}
/// Returns the number of bytes read. If the number read is smaller than `buffer.len`, it
/// means the stream reached the end. Reaching the end of a stream is not an error
/// condition.
pub fn partialRead(br: *BufferedReader, buffer: []u8) anyerror!usize {
/// Returns the number of bytes read, which is less than `buffer.len` if and
/// only if the stream reached the end.
pub fn readShort(br: *BufferedReader, buffer: []u8) Reader.ShortError!usize {
_ = br;
_ = buffer;
@panic("TODO");
}
pub const DelimiterInclusiveError = error{
/// See the `Reader` implementation for detailed diagnostics.
ReadFailed,
/// Stream ended before the delimiter was found.
EndOfStream,
/// The delimiter was not found within a number of bytes matching the
/// capacity of the `BufferedReader`.
StreamTooLong,
};
/// Returns a slice of the next bytes of buffered data from the stream until
/// `sentinel` is found, advancing the seek position.
///
/// Returned slice has a sentinel.
///
/// If the stream ends before the sentinel is found, `error.EndOfStream` is
/// returned.
///
/// If the sentinel is not found within a number of bytes matching the
/// capacity of the `BufferedReader`, `error.StreamTooLong` is returned.
///
/// Invalidates previously returned values from `peek`.
///
/// See also:
/// * `peekSentinel`
/// * `takeDelimiterExclusive`
/// * `takeDelimiterInclusive`
pub fn takeSentinel(br: *BufferedReader, comptime sentinel: u8) anyerror![:sentinel]u8 {
pub fn takeSentinel(br: *BufferedReader, comptime sentinel: u8) DelimiterInclusiveError![:sentinel]u8 {
const result = try br.peekSentinel(sentinel);
br.toss(result.len + 1);
return result;
}
pub fn peekSentinel(br: *BufferedReader, comptime sentinel: u8) anyerror![:sentinel]u8 {
pub fn peekSentinel(br: *BufferedReader, comptime sentinel: u8) DelimiterInclusiveError![:sentinel]u8 {
const result = try br.takeDelimiterInclusive(sentinel);
return result[0 .. result.len - 1 :sentinel];
}
@ -401,28 +321,30 @@ pub fn peekSentinel(br: *BufferedReader, comptime sentinel: u8) anyerror![:senti
///
/// Returned slice includes the delimiter as the last byte.
///
/// If the stream ends before the delimiter is found, `error.EndOfStream` is
/// returned.
///
/// If the delimiter is not found within a number of bytes matching the
/// capacity of the `BufferedReader`, `error.StreamTooLong` is returned.
///
/// Invalidates previously returned values from `peek`.
///
/// See also:
/// * `takeSentinel`
/// * `takeDelimiterExclusive`
/// * `peekDelimiterInclusive`
pub fn takeDelimiterInclusive(br: *BufferedReader, delimiter: u8) anyerror![]u8 {
pub fn takeDelimiterInclusive(br: *BufferedReader, delimiter: u8) DelimiterInclusiveError![]u8 {
const result = try br.peekDelimiterInclusive(delimiter);
br.toss(result.len);
return result;
}
pub fn peekDelimiterInclusive(br: *BufferedReader, delimiter: u8) anyerror![]u8 {
pub fn peekDelimiterInclusive(br: *BufferedReader, delimiter: u8) DelimiterInclusiveError![]u8 {
return (try br.peekDelimiterInclusiveUnlessEnd(delimiter)) orelse error.EndOfStream;
}
pub const DelimiterExclusiveError = error{
/// See the `Reader` implementation for detailed diagnostics.
ReadFailed,
/// The delimiter was not found within a number of bytes matching the
/// capacity of the `BufferedReader`.
StreamTooLong,
};
/// Returns a slice of the next bytes of buffered data from the stream until
/// `delimiter` is found, advancing the seek position.
///
@ -430,32 +352,33 @@ pub fn peekDelimiterInclusive(br: *BufferedReader, delimiter: u8) anyerror![]u8
///
/// End-of-stream is treated equivalent to a delimiter.
///
/// If the delimiter is not found within a number of bytes matching the
/// capacity of the `BufferedReader`, `error.StreamTooLong` is returned.
///
/// Invalidates previously returned values from `peek`.
///
/// See also:
/// * `takeSentinel`
/// * `takeDelimiterInclusive`
/// * `peekDelimiterExclusive`
pub fn takeDelimiterExclusive(br: *BufferedReader, delimiter: u8) anyerror![]u8 {
const result_unless_end = try br.peekDelimiterInclusiveUnlessEnd(delimiter);
const result = result_unless_end orelse {
br.toss(br.storage.end);
return br.storage.buffer[0..br.storage.end];
pub fn takeDelimiterExclusive(br: *BufferedReader, delimiter: u8) DelimiterExclusiveError![]u8 {
const result = br.peekDelimiterInclusiveUnlessEnd(delimiter) catch |err| switch (err) {
error.EndOfStream => {
br.toss(br.storage.end);
return br.storage.buffer[0..br.storage.end];
},
else => |e| return e,
};
br.toss(result.len);
return result[0 .. result.len - 1];
}
pub fn peekDelimiterExclusive(br: *BufferedReader, delimiter: u8) anyerror![]u8 {
const result_unless_end = try br.peekDelimiterInclusiveUnlessEnd(delimiter);
const result = result_unless_end orelse return br.storage.buffer[0..br.storage.end];
pub fn peekDelimiterExclusive(br: *BufferedReader, delimiter: u8) DelimiterExclusiveError![]u8 {
const result = br.peekDelimiterInclusiveUnlessEnd(delimiter) catch |err| switch (err) {
error.EndOfStream => return br.storage.buffer[0..br.storage.end],
else => |e| return e,
};
return result[0 .. result.len - 1];
}
fn peekDelimiterInclusiveUnlessEnd(br: *BufferedReader, delimiter: u8) anyerror!?[]u8 {
fn peekDelimiterInclusiveUnlessEnd(br: *BufferedReader, delimiter: u8) DelimiterInclusiveError!?[]u8 {
const storage = &br.storage;
const buffer = storage.buffer[0..storage.end];
const seek = br.seek;
@ -469,21 +392,29 @@ fn peekDelimiterInclusiveUnlessEnd(br: *BufferedReader, delimiter: u8) anyerror!
storage.end = i;
br.seek = 0;
while (i < storage.buffer.len) {
const status = try br.unbuffered_reader.read(storage, .unlimited);
if (std.mem.indexOfScalarPos(u8, storage.buffer[0..storage.end], i, delimiter)) |end| return storage.buffer[0 .. end + 1];
if (status.end) return null;
const eos = eos: {
const read_len = br.unbuffered_reader.read(storage, .unlimited) catch |err| switch (err) {
error.WriteFailed => storage.end - i,
error.ReadFailed => return error.ReadFailed,
error.EndOfStream => break :eos true,
};
assert(read_len == storage.end - i);
break :eos false;
};
if (std.mem.indexOfScalarPos(u8, storage.buffer[0..storage.end], i, delimiter)) |end| {
return storage.buffer[0 .. end + 1];
}
if (eos) return error.EndOfStream;
i = storage.end;
}
return error.StreamTooLong;
}
/// Appends to `bw` contents by reading from the stream until `delimiter` is found.
/// Does not write the delimiter itself.
///
/// If stream ends before delimiter found, returns `error.EndOfStream`.
/// Appends to `bw` contents by reading from the stream until `delimiter` is
/// found. Does not write the delimiter itself.
///
/// Returns number of bytes streamed.
pub fn streamReadDelimiter(br: *BufferedReader, bw: *std.io.BufferedWriter, delimiter: u8) anyerror!usize {
pub fn streamReadDelimiter(br: *BufferedReader, bw: *BufferedWriter, delimiter: u8) Reader.Error!usize {
_ = br;
_ = bw;
_ = delimiter;
@ -495,29 +426,35 @@ pub fn streamReadDelimiter(br: *BufferedReader, bw: *std.io.BufferedWriter, deli
///
/// Succeeds if stream ends before delimiter found.
///
/// Returns number of bytes streamed as well as whether the input reached the end.
/// The end is not signaled to the writer.
/// Returns number of bytes streamed. The end is not signaled to the writer.
pub fn streamReadDelimiterExclusive(
br: *BufferedReader,
bw: *std.io.BufferedWriter,
bw: *BufferedWriter,
delimiter: u8,
) anyerror!Reader.Status {
) Reader.ShortError!usize {
_ = br;
_ = bw;
_ = delimiter;
@panic("TODO");
}
pub const StreamDelimiterLimitedError = Reader.ShortError || error{
/// Stream ended before the delimiter was found.
EndOfStream,
/// The delimiter was not found within the limit.
StreamTooLong,
};
/// Appends to `bw` contents by reading from the stream until `delimiter` is found.
/// Does not write the delimiter itself.
///
/// If `limit` is exceeded, returns `error.StreamTooLong`.
//
/// Returns number of bytes streamed.
pub fn streamReadDelimiterLimited(
br: *BufferedReader,
bw: *BufferedWriter,
delimiter: u8,
limit: usize,
) anyerror!void {
limit: Reader.Limit,
) StreamDelimiterLimitedError!usize {
_ = br;
_ = bw;
_ = delimiter;
@ -529,7 +466,7 @@ pub fn streamReadDelimiterLimited(
/// including the delimiter.
///
/// If end of stream is found, this function succeeds.
pub fn discardDelimiterExclusive(br: *BufferedReader, delimiter: u8) anyerror!void {
pub fn discardDelimiterInclusive(br: *BufferedReader, delimiter: u8) Reader.Error!void {
_ = br;
_ = delimiter;
@panic("TODO");
@ -538,8 +475,8 @@ pub fn discardDelimiterExclusive(br: *BufferedReader, delimiter: u8) anyerror!vo
/// Reads from the stream until specified byte is found, discarding all data,
/// excluding the delimiter.
///
/// If end of stream is found, `error.EndOfStream` is returned.
pub fn discardDelimiterInclusive(br: *BufferedReader, delimiter: u8) anyerror!void {
/// Succeeds if stream ends before delimiter found.
pub fn discardDelimiterExclusive(br: *BufferedReader, delimiter: u8) Reader.ShortError!void {
_ = br;
_ = delimiter;
@panic("TODO");
@ -548,69 +485,75 @@ pub fn discardDelimiterInclusive(br: *BufferedReader, delimiter: u8) anyerror!vo
/// Fills the buffer such that it contains at least `n` bytes, without
/// advancing the seek position.
///
/// Returns `false` if and only if there are fewer than `n` bytes remaining.
/// Returns `error.EndOfStream` if and only if there are fewer than `n` bytes
/// remaining.
///
/// Asserts buffer capacity is at least `n`.
pub fn fill(br: *BufferedReader, n: usize) anyerror!bool {
pub fn fill(br: *BufferedReader, n: usize) Reader.Error!void {
const storage = &br.storage;
assert(n <= storage.buffer.len);
const buffer = storage.buffer[0..storage.end];
const seek = br.seek;
if (seek + n <= buffer.len) {
@branchHint(.likely);
return true;
return;
}
const remainder = buffer[seek..];
std.mem.copyForwards(u8, buffer[0..remainder.len], remainder);
storage.end = remainder.len;
br.seek = 0;
while (true) {
const status = try br.unbuffered_reader.read(storage, .unlimited);
if (n <= storage.end) return true;
if (status.end) return false;
const read_len = br.unbuffered_reader.read(storage, .unlimited) catch |err| switch (err) {
error.WriteFailed => storage.end - remainder.len,
else => |e| return e,
};
assert(storage.end == remainder.len + read_len);
if (n <= storage.end) return;
}
}
/// Reads 1 byte from the stream or returns `error.EndOfStream`.
pub fn takeByte(br: *BufferedReader) anyerror!u8 {
pub fn takeByte(br: *BufferedReader) Reader.Error!u8 {
const storage = &br.storage;
const buffer = storage.buffer[0..storage.end];
const seek = br.seek;
if (seek >= buffer.len) {
@branchHint(.unlikely);
const filled = try fill(br, 1);
if (!filled) return error.EndOfStream;
try fill(br, 1);
}
br.seek = seek + 1;
return buffer[seek];
}
/// Same as `takeByte` except the returned byte is signed.
pub fn takeByteSigned(br: *BufferedReader) anyerror!i8 {
pub fn takeByteSigned(br: *BufferedReader) Reader.Error!i8 {
return @bitCast(try br.takeByte());
}
/// Asserts the buffer was initialized with a capacity at least `@sizeOf(T)`.
pub inline fn takeInt(br: *BufferedReader, comptime T: type, endian: std.builtin.Endian) anyerror!T {
pub inline fn takeInt(br: *BufferedReader, comptime T: type, endian: std.builtin.Endian) Reader.Error!T {
const n = @divExact(@typeInfo(T).int.bits, 8);
return std.mem.readInt(T, try br.takeArray(n), endian);
}
/// Asserts the buffer was initialized with a capacity at least `n`.
pub fn takeVarInt(br: *BufferedReader, comptime Int: type, endian: std.builtin.Endian, n: usize) anyerror!Int {
pub fn takeVarInt(br: *BufferedReader, comptime Int: type, endian: std.builtin.Endian, n: usize) Reader.Error!Int {
assert(n <= @sizeOf(Int));
return std.mem.readVarInt(Int, try br.take(n), endian);
}
/// Asserts the buffer was initialized with a capacity at least `@sizeOf(T)`.
pub fn takeStruct(br: *BufferedReader, comptime T: type) anyerror!*align(1) T {
pub fn takeStruct(br: *BufferedReader, comptime T: type) Reader.Error!*align(1) T {
// Only extern and packed structs have defined in-memory layout.
comptime assert(@typeInfo(T).@"struct".layout != .auto);
return @ptrCast(try br.takeArray(@sizeOf(T)));
}
/// Asserts the buffer was initialized with a capacity at least `@sizeOf(T)`.
pub fn takeStructEndian(br: *BufferedReader, comptime T: type, endian: std.builtin.Endian) anyerror!T {
///
/// This function is inline to avoid referencing `std.mem.byteSwapAllFields`
/// when `endian` is comptime-known and matches the host endianness.
pub inline fn takeStructEndian(br: *BufferedReader, comptime T: type, endian: std.builtin.Endian) Reader.Error!T {
var res = (try br.takeStruct(T)).*;
if (native_endian != endian) std.mem.byteSwapAllFields(T, &res);
return res;
@ -621,14 +564,16 @@ pub fn takeStructEndian(br: *BufferedReader, comptime T: type, endian: std.built
/// it. Otherwise, returns `error.InvalidEnumTag`.
///
/// Asserts the buffer was initialized with a capacity at least `@sizeOf(Enum)`.
pub fn takeEnum(br: *BufferedReader, comptime Enum: type, endian: std.builtin.Endian) anyerror!Enum {
pub fn takeEnum(br: *BufferedReader, comptime Enum: type, endian: std.builtin.Endian) Reader.Error!Enum {
const Tag = @typeInfo(Enum).@"enum".tag_type;
const int = try br.takeInt(Tag, endian);
return std.meta.intToEnum(Enum, int);
}
pub const TakeLeb128Error = Reader.Error || error{Overflow};
/// Read a single LEB128 value as type T, or `error.Overflow` if the value cannot fit.
pub fn takeLeb128(br: *BufferedReader, comptime Result: type) anyerror!Result {
pub fn takeLeb128(br: *BufferedReader, comptime Result: type) TakeLeb128Error!Result {
const result_info = @typeInfo(Result).int;
return std.math.cast(Result, try br.takeMultipleOf7Leb128(@Type(.{ .int = .{
.signedness = result_info.signedness,
@ -636,7 +581,7 @@ pub fn takeLeb128(br: *BufferedReader, comptime Result: type) anyerror!Result {
} }))) orelse error.Overflow;
}
fn takeMultipleOf7Leb128(br: *BufferedReader, comptime Result: type) anyerror!Result {
fn takeMultipleOf7Leb128(br: *BufferedReader, comptime Result: type) TakeLeb128Error!Result {
const result_info = @typeInfo(Result).int;
comptime assert(result_info.bits % 7 == 0);
var remaining_bits: std.math.Log2IntCeil(Result) = result_info.bits;
@ -708,7 +653,7 @@ test discard {
try testing.expectError(error.EndOfStream, br.discard(1));
}
test discardUntilEnd {
test discardRemaining {
return error.Unimplemented;
}
@ -795,3 +740,7 @@ test takeEnum {
test takeLeb128 {
return error.Unimplemented;
}
test readShort {
return error.Unimplemented;
}

View File

@ -35,19 +35,19 @@ pub fn writer(bw: *BufferedWriter) Writer {
return .{
.context = bw,
.vtable = &.{
.writeSplat = passthru_writeSplat,
.writeFile = passthru_writeFile,
.writeSplat = passthruWriteSplat,
.writeFile = passthruWriteFile,
},
};
}
const fixed_vtable: Writer.VTable = .{
.writeSplat = fixed_writeSplat,
.writeFile = Writer.unimplemented_writeFile,
.writeSplat = fixedWriteSplat,
.writeFile = Writer.failingWriteFile,
};
/// Replaces the `BufferedWriter` with one that writes to `buffer` and returns
/// `error.NoSpaceLeft` when it is full. `end` and `count` will always be
/// `error.WriteFailed` when it is full. `end` and `count` will always be
/// equal.
pub fn initFixed(bw: *BufferedWriter, buffer: []u8) void {
bw.* = .{
@ -72,10 +72,10 @@ pub fn reset(bw: *BufferedWriter) void {
bw.count = 0;
}
pub fn flush(bw: *BufferedWriter) anyerror!void {
pub fn flush(bw: *BufferedWriter) Writer.Error!void {
const send_buffer = bw.buffer[0..bw.end];
var index: usize = 0;
while (index < send_buffer.len) index += try bw.unbuffered_writer.writev(&.{send_buffer[index..]});
while (index < send_buffer.len) index += try bw.unbuffered_writer.writeVec(&.{send_buffer[index..]});
bw.end = 0;
}
@ -84,7 +84,7 @@ pub fn unusedCapacitySlice(bw: *const BufferedWriter) []u8 {
}
/// Asserts the provided buffer has total capacity enough for `minimum_length`.
pub fn writableSlice(bw: *BufferedWriter, minimum_length: usize) anyerror![]u8 {
pub fn writableSlice(bw: *BufferedWriter, minimum_length: usize) Writer.Error![]u8 {
assert(bw.buffer.len >= minimum_length);
const cap_slice = bw.buffer[bw.end..];
if (cap_slice.len >= minimum_length) {
@ -92,7 +92,7 @@ pub fn writableSlice(bw: *BufferedWriter, minimum_length: usize) anyerror![]u8 {
return cap_slice;
}
const buffer = bw.buffer[0..bw.end];
const n = try bw.unbuffered_writer.writev(&.{buffer});
const n = try bw.unbuffered_writer.writeVec(&.{buffer});
if (n == buffer.len) {
@branchHint(.likely);
bw.end = 0;
@ -115,11 +115,11 @@ pub fn advance(bw: *BufferedWriter, n: usize) void {
}
/// The `data` parameter is mutable because this function needs to mutate the
/// fields in order to handle partial writes from `Writer.VTable.writev`.
pub fn writevAll(bw: *BufferedWriter, data: [][]const u8) anyerror!void {
/// fields in order to handle partial writes from `Writer.VTable.writeVec`.
pub fn writeVecAll(bw: *BufferedWriter, data: [][]const u8) Writer.Error!void {
var i: usize = 0;
while (true) {
var n = try passthru_writeSplat(bw, data[i..], 1);
var n = try passthruWriteSplat(bw, data[i..], 1);
const len = data[i].len;
while (n >= len) {
n -= len;
@ -130,15 +130,15 @@ pub fn writevAll(bw: *BufferedWriter, data: [][]const u8) anyerror!void {
}
}
pub fn writeSplat(bw: *BufferedWriter, data: []const []const u8, splat: usize) anyerror!usize {
return passthru_writeSplat(bw, data, splat);
pub fn writeSplat(bw: *BufferedWriter, data: []const []const u8, splat: usize) Writer.Error!usize {
return passthruWriteSplat(bw, data, splat);
}
pub fn writev(bw: *BufferedWriter, data: []const []const u8) anyerror!usize {
return passthru_writeSplat(bw, data, 1);
pub fn writeVec(bw: *BufferedWriter, data: []const []const u8) Writer.Error!usize {
return passthruWriteSplat(bw, data, 1);
}
fn passthru_writeSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) anyerror!usize {
fn passthruWriteSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) Writer.Error!usize {
const bw: *BufferedWriter = @alignCast(@ptrCast(context));
const buffer = bw.buffer;
const start_end = bw.end;
@ -258,11 +258,11 @@ fn track(count: *usize, n: usize) usize {
/// When this function is called it means the buffer got full, so it's time
/// to return an error. However, we still need to make sure all of the
/// available buffer has been filled.
fn fixed_writeSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) anyerror!usize {
fn fixedWriteSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) Writer.Error!usize {
const bw: *BufferedWriter = @alignCast(@ptrCast(context));
for (data) |bytes| {
const dest = bw.buffer[bw.end..];
if (dest.len == 0) return error.NoSpaceLeft;
if (dest.len == 0) return error.WriteFailed;
const len = @min(bytes.len, dest.len);
@memcpy(dest[0..len], bytes[0..len]);
bw.end += len;
@ -277,16 +277,16 @@ fn fixed_writeSplat(context: ?*anyopaque, data: []const []const u8, splat: usize
}
bw.end = bw.buffer.len;
bw.count = bw.end;
return error.NoSpaceLeft;
return error.WriteFailed;
}
pub fn write(bw: *BufferedWriter, bytes: []const u8) anyerror!usize {
pub fn write(bw: *BufferedWriter, bytes: []const u8) Writer.Error!usize {
const buffer = bw.buffer;
const end = bw.end;
const new_end = end + bytes.len;
if (new_end > buffer.len) {
var data: [2][]const u8 = .{ buffer[0..end], bytes };
const n = try bw.unbuffered_writer.writev(&data);
const n = try bw.unbuffered_writer.writeVec(&data);
if (n < end) {
@branchHint(.unlikely);
const remainder = buffer[n..end];
@ -304,16 +304,16 @@ pub fn write(bw: *BufferedWriter, bytes: []const u8) anyerror!usize {
/// Calls `write` as many times as necessary such that all of `bytes` are
/// transferred.
pub fn writeAll(bw: *BufferedWriter, bytes: []const u8) anyerror!void {
pub fn writeAll(bw: *BufferedWriter, bytes: []const u8) Writer.Error!void {
var index: usize = 0;
while (index < bytes.len) index += try bw.write(bytes[index..]);
}
pub fn print(bw: *BufferedWriter, comptime format: []const u8, args: anytype) anyerror!void {
pub fn print(bw: *BufferedWriter, comptime format: []const u8, args: anytype) Writer.Error!void {
try std.fmt.format(bw, format, args);
}
pub fn writeByte(bw: *BufferedWriter, byte: u8) anyerror!void {
pub fn writeByte(bw: *BufferedWriter, byte: u8) Writer.Error!void {
const buffer = bw.buffer[0..bw.end];
if (buffer.len < bw.buffer.len) {
@branchHint(.likely);
@ -324,7 +324,7 @@ pub fn writeByte(bw: *BufferedWriter, byte: u8) anyerror!void {
}
var buffers: [2][]const u8 = .{ buffer, &.{byte} };
while (true) {
const n = try bw.unbuffered_writer.writev(&buffers);
const n = try bw.unbuffered_writer.writeVec(&buffers);
if (n == 0) {
@branchHint(.unlikely);
continue;
@ -352,7 +352,7 @@ pub fn writeByte(bw: *BufferedWriter, byte: u8) anyerror!void {
/// Writes the same byte many times, performing the underlying write call as
/// many times as necessary.
pub fn splatByteAll(bw: *BufferedWriter, byte: u8, n: usize) anyerror!void {
pub fn splatByteAll(bw: *BufferedWriter, byte: u8, n: usize) Writer.Error!void {
var remaining: usize = n;
while (remaining > 0) remaining -= try bw.splatByte(byte, remaining);
}
@ -360,13 +360,13 @@ pub fn splatByteAll(bw: *BufferedWriter, byte: u8, n: usize) anyerror!void {
/// Writes the same byte many times, allowing short writes.
///
/// Does maximum of one underlying `Writer.VTable.writeSplat`.
pub fn splatByte(bw: *BufferedWriter, byte: u8, n: usize) anyerror!usize {
return passthru_writeSplat(bw, &.{&.{byte}}, n);
pub fn splatByte(bw: *BufferedWriter, byte: u8, n: usize) Writer.Error!usize {
return passthruWriteSplat(bw, &.{&.{byte}}, n);
}
/// Writes the same slice many times, performing the underlying write call as
/// many times as necessary.
pub fn splatBytesAll(bw: *BufferedWriter, bytes: []const u8, splat: usize) anyerror!void {
pub fn splatBytesAll(bw: *BufferedWriter, bytes: []const u8, splat: usize) Writer.Error!void {
var remaining_bytes: usize = bytes.len * splat;
remaining_bytes -= try bw.splatBytes(bytes, splat);
while (remaining_bytes > 0) {
@ -378,26 +378,28 @@ pub fn splatBytesAll(bw: *BufferedWriter, bytes: []const u8, splat: usize) anyer
/// Writes the same slice many times, allowing short writes.
///
/// Does maximum of one underlying `Writer.VTable.writev`.
pub fn splatBytes(bw: *BufferedWriter, bytes: []const u8, n: usize) anyerror!usize {
return passthru_writeSplat(bw, &.{bytes}, n);
/// Does maximum of one underlying `Writer.VTable.writeVec`.
pub fn splatBytes(bw: *BufferedWriter, bytes: []const u8, n: usize) Writer.Error!usize {
return passthruWriteSplat(bw, &.{bytes}, n);
}
/// Asserts the `buffer` was initialized with a capacity of at least `@sizeOf(T)` bytes.
pub inline fn writeInt(bw: *BufferedWriter, comptime T: type, value: T, endian: std.builtin.Endian) anyerror!void {
pub inline fn writeInt(bw: *BufferedWriter, comptime T: type, value: T, endian: std.builtin.Endian) Writer.Error!void {
var bytes: [@divExact(@typeInfo(T).int.bits, 8)]u8 = undefined;
std.mem.writeInt(std.math.ByteAlignedInt(@TypeOf(value)), &bytes, value, endian);
return bw.writeAll(&bytes);
}
pub fn writeStruct(bw: *BufferedWriter, value: anytype) anyerror!void {
pub fn writeStruct(bw: *BufferedWriter, value: anytype) Writer.Error!void {
// Only extern and packed structs have defined in-memory layout.
comptime assert(@typeInfo(@TypeOf(value)).@"struct".layout != .auto);
return bw.writeAll(std.mem.asBytes(&value));
}
pub fn writeStructEndian(bw: *BufferedWriter, value: anytype, endian: std.builtin.Endian) anyerror!void {
// TODO: make sure this value is not a reference type
/// The function is inline to avoid the dead code in case `endian` is
/// comptime-known and matches host endianness.
/// TODO: make sure this value is not a reference type
pub inline fn writeStructEndian(bw: *BufferedWriter, value: anytype, endian: std.builtin.Endian) Writer.Error!void {
if (native_endian == endian) {
return bw.writeStruct(value);
} else {
@ -407,6 +409,27 @@ pub fn writeStructEndian(bw: *BufferedWriter, value: anytype, endian: std.builti
}
}
pub inline fn writeArrayEndian(
bw: *BufferedWriter,
Elem: type,
array: []const Elem,
endian: std.builtin.Endian,
) Writer.Error!void {
if (native_endian == endian) {
return writeAll(bw, @ptrCast(array));
} else {
return bw.writeArraySwap(bw, Elem, array);
}
}
/// Asserts that the buffer storage capacity is at least enough to store `@sizeOf(Elem)`
pub fn writeArraySwap(bw: *BufferedWriter, Elem: type, array: []const Elem) Writer.Error!void {
// copy to storage first, then swap in place
_ = bw;
_ = array;
@panic("TODO");
}
pub fn writeFile(
bw: *BufferedWriter,
file: std.fs.File,
@ -414,18 +437,18 @@ pub fn writeFile(
limit: Writer.Limit,
headers_and_trailers: []const []const u8,
headers_len: usize,
) anyerror!usize {
return passthru_writeFile(bw, file, offset, limit, headers_and_trailers, headers_len);
) Writer.FileError!usize {
return passthruWriteFile(bw, file, offset, limit, headers_and_trailers, headers_len);
}
fn passthru_writeFile(
fn passthruWriteFile(
context: ?*anyopaque,
file: std.fs.File,
offset: Writer.Offset,
limit: Writer.Limit,
headers_and_trailers: []const []const u8,
headers_len: usize,
) anyerror!usize {
) Writer.FileError!usize {
const bw: *BufferedWriter = @alignCast(@ptrCast(context));
const buffer = bw.buffer;
if (buffer.len == 0) return track(
@ -468,8 +491,8 @@ fn passthru_writeFile(
bw.end = 0;
return track(&bw.count, n - start_end);
}
// Have not made it past the headers yet; must call `writev`.
const n = try bw.unbuffered_writer.writev(buffers[0 .. buffers_len + 1]);
// Have not made it past the headers yet; must call `writeVec`.
const n = try bw.unbuffered_writer.writeVec(buffers[0 .. buffers_len + 1]);
if (n < end) {
@branchHint(.unlikely);
const remainder = buffer[n..end];
@ -505,7 +528,7 @@ pub const WriteFileOptions = struct {
/// size here will save one syscall.
limit: Writer.Limit = .unlimited,
/// Headers and trailers must be passed together so that in case `len` is
/// zero, they can be forwarded directly to `Writer.VTable.writev`.
/// zero, they can be forwarded directly to `Writer.VTable.writeVec`.
///
/// The parameter is mutable because this function needs to mutate the
/// fields in order to handle partial writes from `Writer.VTable.writeFile`.
@ -515,11 +538,11 @@ pub const WriteFileOptions = struct {
headers_len: usize = 0,
};
pub fn writeFileAll(bw: *BufferedWriter, file: std.fs.File, options: WriteFileOptions) anyerror!void {
pub fn writeFileAll(bw: *BufferedWriter, file: std.fs.File, options: WriteFileOptions) Writer.FileError!void {
const headers_and_trailers = options.headers_and_trailers;
const headers = headers_and_trailers[0..options.headers_len];
switch (options.limit) {
.nothing => return bw.writevAll(headers_and_trailers),
.nothing => return bw.writeVecAll(headers_and_trailers),
.unlimited => {
// When reading the whole file, we cannot include the trailers in the
// call that reads from the file handle, because we have no way to
@ -564,7 +587,7 @@ pub fn writeFileAll(bw: *BufferedWriter, file: std.fs.File, options: WriteFileOp
if (i >= headers_and_trailers.len) return;
}
headers_and_trailers[i] = headers_and_trailers[i][n..];
return bw.writevAll(headers_and_trailers[i..]);
return bw.writeVecAll(headers_and_trailers[i..]);
}
offset = offset.advance(n);
len -= n;
@ -579,7 +602,7 @@ pub fn alignBuffer(
width: usize,
alignment: std.fmt.Alignment,
fill: u8,
) anyerror!void {
) Writer.Error!void {
const padding = if (buffer.len < width) width - buffer.len else 0;
if (padding == 0) {
@branchHint(.likely);
@ -604,11 +627,11 @@ pub fn alignBuffer(
}
}
pub fn alignBufferOptions(bw: *BufferedWriter, buffer: []const u8, options: std.fmt.Options) anyerror!void {
pub fn alignBufferOptions(bw: *BufferedWriter, buffer: []const u8, options: std.fmt.Options) Writer.Error!void {
return bw.alignBuffer(buffer, options.width orelse buffer.len, options.alignment, options.fill);
}
pub fn printAddress(bw: *BufferedWriter, value: anytype) anyerror!void {
pub fn printAddress(bw: *BufferedWriter, value: anytype) Writer.Error!void {
const T = @TypeOf(value);
switch (@typeInfo(T)) {
.pointer => |info| {
@ -638,7 +661,7 @@ pub fn printValue(
options: std.fmt.Options,
value: anytype,
max_depth: usize,
) anyerror!void {
) Writer.Error!void {
const T = @TypeOf(value);
const actual_fmt = comptime if (std.mem.eql(u8, fmt, ANY))
defaultFormatString(T)
@ -791,7 +814,7 @@ pub fn printValue(
},
else => {
var buffers: [2][]const u8 = .{ @typeName(ptr_info.child), "@" };
try bw.writevAll(&buffers);
try bw.writeVecAll(&buffers);
try bw.printIntOptions(@intFromPtr(value), 16, .lower, options);
return;
},
@ -896,7 +919,7 @@ pub fn printInt(
comptime fmt: []const u8,
options: std.fmt.Options,
value: anytype,
) anyerror!void {
) Writer.Error!void {
const int_value = if (@TypeOf(value) == comptime_int) blk: {
const Int = std.math.IntFittingRange(value, value);
break :blk @as(Int, value);
@ -940,15 +963,15 @@ pub fn printInt(
comptime unreachable;
}
pub fn printAsciiChar(bw: *BufferedWriter, c: u8, options: std.fmt.Options) anyerror!void {
pub fn printAsciiChar(bw: *BufferedWriter, c: u8, options: std.fmt.Options) Writer.Error!void {
return bw.alignBufferOptions(@as(*const [1]u8, &c), options);
}
pub fn printAscii(bw: *BufferedWriter, bytes: []const u8, options: std.fmt.Options) anyerror!void {
pub fn printAscii(bw: *BufferedWriter, bytes: []const u8, options: std.fmt.Options) Writer.Error!void {
return bw.alignBufferOptions(bytes, options);
}
pub fn printUnicodeCodepoint(bw: *BufferedWriter, c: u21, options: std.fmt.Options) anyerror!void {
pub fn printUnicodeCodepoint(bw: *BufferedWriter, c: u21, options: std.fmt.Options) Writer.Error!void {
var buf: [4]u8 = undefined;
const len = try std.unicode.utf8Encode(c, &buf);
return bw.alignBufferOptions(buf[0..len], options);
@ -960,7 +983,7 @@ pub fn printIntOptions(
base: u8,
case: std.fmt.Case,
options: std.fmt.Options,
) anyerror!void {
) Writer.Error!void {
assert(base >= 2);
const int_value = if (@TypeOf(value) == comptime_int) blk: {
@ -1027,7 +1050,7 @@ pub fn printFloat(
comptime fmt: []const u8,
options: std.fmt.Options,
value: anytype,
) anyerror!void {
) Writer.Error!void {
var buf: [std.fmt.float.bufferSize(.decimal, f64)]u8 = undefined;
if (fmt.len > 1) invalidFmtError(fmt, value);
@ -1054,7 +1077,7 @@ pub fn printFloat(
}
}
pub fn printFloatHexadecimal(bw: *BufferedWriter, value: anytype, opt_precision: ?usize) anyerror!void {
pub fn printFloatHexadecimal(bw: *BufferedWriter, value: anytype, opt_precision: ?usize) Writer.Error!void {
if (std.math.signbit(value)) try bw.writeByte('-');
if (std.math.isNan(value)) return bw.writeAll("nan");
if (std.math.isInf(value)) return bw.writeAll("inf");
@ -1168,7 +1191,7 @@ pub fn printByteSize(
value: u64,
comptime units: ByteSizeUnits,
options: std.fmt.Options,
) anyerror!void {
) Writer.Error!void {
if (value == 0) return bw.alignBufferOptions("0B", options);
// The worst case in terms of space needed is 32 bytes + 3 for the suffix.
var buf: [std.fmt.float.min_buffer_size + 3]u8 = undefined;
@ -1248,12 +1271,12 @@ pub fn invalidFmtError(comptime fmt: []const u8, value: anytype) noreturn {
@compileError("invalid format string '" ++ fmt ++ "' for type '" ++ @typeName(@TypeOf(value)) ++ "'");
}
pub fn printDurationSigned(bw: *BufferedWriter, ns: i64) anyerror!void {
pub fn printDurationSigned(bw: *BufferedWriter, ns: i64) Writer.Error!void {
if (ns < 0) try bw.writeByte('-');
return bw.printDurationUnsigned(@abs(ns));
}
pub fn printDurationUnsigned(bw: *BufferedWriter, ns: u64) anyerror!void {
pub fn printDurationUnsigned(bw: *BufferedWriter, ns: u64) Writer.Error!void {
var ns_remaining = ns;
inline for (.{
.{ .ns = 365 * std.time.ns_per_day, .sep = 'y' },
@ -1303,7 +1326,7 @@ pub fn printDurationUnsigned(bw: *BufferedWriter, ns: u64) anyerror!void {
/// Writes number of nanoseconds according to its signed magnitude:
/// `[#y][#w][#d][#h][#m]#[.###][n|u|m]s`
/// `nanoseconds` must be an integer that coerces into `u64` or `i64`.
pub fn printDuration(bw: *BufferedWriter, nanoseconds: anytype, options: std.fmt.Options) anyerror!void {
pub fn printDuration(bw: *BufferedWriter, nanoseconds: anytype, options: std.fmt.Options) Writer.Error!void {
// worst case: "-XXXyXXwXXdXXhXXmXX.XXXs".len = 24
var buf: [24]u8 = undefined;
var sub_bw: BufferedWriter = undefined;
@ -1315,7 +1338,7 @@ pub fn printDuration(bw: *BufferedWriter, nanoseconds: anytype, options: std.fmt
return bw.alignBufferOptions(sub_bw.getWritten(), options);
}
pub fn printHex(bw: *BufferedWriter, bytes: []const u8, case: std.fmt.Case) anyerror!void {
pub fn printHex(bw: *BufferedWriter, bytes: []const u8, case: std.fmt.Case) Writer.Error!void {
const charset = switch (case) {
.upper => "0123456789ABCDEF",
.lower => "0123456789abcdef",
@ -1326,7 +1349,7 @@ pub fn printHex(bw: *BufferedWriter, bytes: []const u8, case: std.fmt.Case) anye
}
}
pub fn printBase64(bw: *BufferedWriter, bytes: []const u8) anyerror!void {
pub fn printBase64(bw: *BufferedWriter, bytes: []const u8) Writer.Error!void {
var chunker = std.mem.window(u8, bytes, 3, 3);
var temp: [5]u8 = undefined;
while (chunker.next()) |chunk| {
@ -1335,7 +1358,7 @@ pub fn printBase64(bw: *BufferedWriter, bytes: []const u8) anyerror!void {
}
/// Write a single unsigned integer as LEB128 to the given writer.
pub fn writeUleb128(bw: *BufferedWriter, value: anytype) anyerror!void {
pub fn writeUleb128(bw: *BufferedWriter, value: anytype) Writer.Error!void {
try bw.writeLeb128(switch (@typeInfo(@TypeOf(value))) {
.comptime_int => @as(std.math.IntFittingRange(0, @abs(value)), value),
.int => |value_info| switch (value_info.signedness) {
@ -1347,7 +1370,7 @@ pub fn writeUleb128(bw: *BufferedWriter, value: anytype) anyerror!void {
}
/// Write a single signed integer as LEB128 to the given writer.
pub fn writeSleb128(bw: *BufferedWriter, value: anytype) anyerror!void {
pub fn writeSleb128(bw: *BufferedWriter, value: anytype) Writer.Error!void {
try bw.writeLeb128(switch (@typeInfo(@TypeOf(value))) {
.comptime_int => @as(std.math.IntFittingRange(@min(value, -1), @max(0, value)), value),
.int => |value_info| switch (value_info.signedness) {
@ -1359,7 +1382,7 @@ pub fn writeSleb128(bw: *BufferedWriter, value: anytype) anyerror!void {
}
/// Write a single integer as LEB128 to the given writer.
pub fn writeLeb128(bw: *BufferedWriter, value: anytype) anyerror!void {
pub fn writeLeb128(bw: *BufferedWriter, value: anytype) Writer.Error!void {
const value_info = @typeInfo(@TypeOf(value)).int;
try bw.writeMultipleOf7Leb128(@as(@Type(.{ .int = .{
.signedness = value_info.signedness,
@ -1367,7 +1390,7 @@ pub fn writeLeb128(bw: *BufferedWriter, value: anytype) anyerror!void {
} }), value));
}
fn writeMultipleOf7Leb128(bw: *BufferedWriter, value: anytype) anyerror!void {
fn writeMultipleOf7Leb128(bw: *BufferedWriter, value: anytype) Writer.Error!void {
const value_info = @typeInfo(@TypeOf(value)).int;
comptime assert(value_info.bits % 7 == 0);
var remaining = value;
@ -1409,7 +1432,7 @@ test "formatValue max_depth" {
comptime fmt: []const u8,
options: std.fmt.Options,
bw: *BufferedWriter,
) anyerror!void {
) Writer.Error!void {
_ = options;
if (fmt.len == 0) {
return bw.print("({d:.3},{d:.3})", .{ self.x, self.y });

View File

@ -1,64 +0,0 @@
const std = @import("../std.zig");
const PositionalReader = @This();
const assert = std.debug.assert;
context: ?*anyopaque,
vtable: *const VTable,
pub const VTable = struct {
/// Writes bytes starting from `offset` to `bw`.
///
/// Returns the number of bytes written, which will be at minimum `0` and
/// at most `limit`. The number of bytes written, including zero, does not
/// indicate end of stream.
///
/// If the resource represented by the reader has an internal seek
/// position, it is not mutated.
///
/// The implementation should do a maximum of one underlying read call.
///
/// If `error.Unseekable` is returned, the resource cannot be used via a
/// positional reading interface.
read: *const fn (ctx: ?*anyopaque, bw: *std.io.BufferedWriter, limit: Limit, offset: u64) anyerror!Status,
/// Writes bytes starting from `offset` to `data`.
///
/// Returns the number of bytes written, which will be at minimum `0` and
/// at most `limit`. The number of bytes written, including zero, does not
/// indicate end of stream.
///
/// If the resource represented by the reader has an internal seek
/// position, it is not mutated.
///
/// The implementation should do a maximum of one underlying read call.
///
/// If `error.Unseekable` is returned, the resource cannot be used via a
/// positional reading interface.
readv: *const fn (ctx: ?*anyopaque, data: []const []u8, offset: u64) anyerror!Status,
};
pub const Len = std.io.Reader.Len;
pub const Status = std.io.Reader.Status;
pub const Limit = std.io.Reader.Limit;
pub fn read(pr: PositionalReader, bw: *std.io.BufferedWriter, limit: Limit, offset: u64) anyerror!Status {
return pr.vtable.read(pr.context, bw, limit, offset);
}
pub fn readv(pr: PositionalReader, data: []const []u8, offset: u64) anyerror!Status {
return pr.vtable.read(pr.context, data, offset);
}
/// Returns total number of bytes written to `w`.
///
/// May return `error.Unseekable`, indicating this function cannot be used to
/// read from the reader.
pub fn readAll(pr: PositionalReader, w: *std.io.BufferedWriter, start_offset: u64) anyerror!usize {
const readFn = pr.vtable.read;
var offset: u64 = start_offset;
while (true) {
const status = try readFn(pr.context, w, .none, offset);
offset += status.len;
if (status.end) return @intCast(offset - start_offset);
}
}

View File

@ -1,6 +1,7 @@
const std = @import("../std.zig");
const Reader = @This();
const assert = std.debug.assert;
const BufferedWriter = std.io.BufferedWriter;
context: ?*anyopaque,
vtable: *const VTable,
@ -16,35 +17,54 @@ pub const VTable = struct {
/// accordance with the number of bytes return from this function.
///
/// The implementation should do a maximum of one underlying read call.
///
/// If `error.Unstreamable` is returned, the resource cannot be used via a
/// streaming reading interface.
read: *const fn (ctx: ?*anyopaque, bw: *std.io.BufferedWriter, limit: Limit) anyerror!Status,
read: *const fn (context: ?*anyopaque, bw: *BufferedWriter, limit: Limit) RwError!usize,
/// Writes bytes from the internally tracked stream position to `data`.
///
/// Returns the number of bytes written, which will be at minimum `0` and at
/// most `limit`. The number of bytes read, including zero, does not
/// Returns the number of bytes written, which will be at minimum `0` and
/// at most the sum of each data slice length. The number of bytes read,
/// including zero, does not indicate end of stream.
///
/// If the reader has an internal seek position, it moves forward in
/// accordance with the number of bytes return from this function.
///
/// The implementation should do a maximum of one underlying read call.
readVec: *const fn (context: ?*anyopaque, data: []const []u8) Error!usize,
/// Consumes bytes from the internally tracked stream position without
/// providing access to them.
///
/// Returns the number of bytes discarded, which will be at minimum `0` and
/// at most `limit`. The number of bytes returned, including zero, does not
/// indicate end of stream.
///
/// If the reader has an internal seek position, it moves forward in
/// accordance with the number of bytes return from this function.
///
/// The implementation should do a maximum of one underlying read call.
///
/// If `error.Unstreamable` is returned, the resource cannot be used via a
/// streaming reading interface.
readv: *const fn (ctx: ?*anyopaque, data: []const []u8) anyerror!Status,
discard: *const fn (context: ?*anyopaque, limit: Limit) Error!usize,
};
pub const Len = @Type(.{ .int = .{ .signedness = .unsigned, .bits = @bitSizeOf(usize) - 1 } });
pub const RwError = RwAllError || error{
/// End of stream indicated from the `Reader`. This error cannot originate
/// from the `Writer`.
EndOfStream,
};
pub const Status = packed struct(usize) {
/// Number of bytes that were transferred. Zero does not mean end of
/// stream.
len: Len = 0,
/// Indicates end of stream.
end: bool = false,
pub const Error = ShortError || error{
EndOfStream,
};
/// For functions that handle end of stream as a success case.
pub const RwAllError = ShortError || error{
/// See the `Writer` implementation for detailed diagnostics.
WriteFailed,
};
/// For functions that cannot fail with `error.EndOfStream`.
pub const ShortError = error{
/// See the `Reader` implementation for detailed diagnostics.
ReadFailed,
};
pub const Limit = enum(usize) {
@ -93,50 +113,122 @@ pub const Limit = enum(usize) {
}
};
pub fn read(r: Reader, w: *std.io.BufferedWriter, limit: Limit) anyerror!Status {
return r.vtable.read(r.context, w, limit);
pub fn read(r: Reader, bw: *BufferedWriter, limit: Limit) RwError!usize {
return r.vtable.read(r.context, bw, limit);
}
pub fn readv(r: Reader, data: []const []u8) anyerror!Status {
return r.vtable.readv(r.context, data);
pub fn readVec(r: Reader, data: []const []u8) Error!usize {
return r.vtable.readVec(r.context, data);
}
/// Returns total number of bytes written to `w`.
pub fn readAll(r: Reader, w: *std.io.BufferedWriter) anyerror!usize {
pub fn discard(r: Reader, limit: Limit) Error!usize {
return r.vtable.discard(r.context, limit);
}
/// Returns total number of bytes written to `bw`.
pub fn readAll(r: Reader, bw: *BufferedWriter) RwAllError!usize {
const readFn = r.vtable.read;
var offset: usize = 0;
while (true) {
const status = try readFn(r.context, w, .unlimited);
offset += status.len;
if (status.end) return offset;
offset += readFn(r.context, bw, .unlimited) catch |err| switch (err) {
error.EndOfStream => return offset,
else => |e| return e,
};
}
}
/// Consumes the stream until the end, ignoring all the data, returning the
/// number of bytes discarded.
pub fn discardRemaining(r: Reader) ShortError!usize {
const discardFn = r.vtable.discard;
var offset: usize = 0;
while (true) {
offset += discardFn(r.context, .unlimited) catch |err| switch (err) {
error.EndOfStream => return offset,
else => |e| return e,
};
}
}
pub const ReadAllocError = std.mem.Allocator.Error || ShortError;
/// Allocates enough memory to hold all the contents of the stream. If the allocated
/// memory would be greater than `max_size`, returns `error.StreamTooLong`.
///
/// Caller owns returned memory.
///
/// If this function returns an error, the contents from the stream read so far are lost.
pub fn readAlloc(r: Reader, gpa: std.mem.Allocator, max_size: usize) anyerror![]u8 {
pub fn readAlloc(r: Reader, gpa: std.mem.Allocator, max_size: usize) ReadAllocError![]u8 {
const readFn = r.vtable.read;
var aw: std.io.AllocatingWriter = undefined;
errdefer aw.deinit();
aw.init(gpa);
var remaining = max_size;
while (remaining > 0) {
const status = try readFn(r.context, &aw.buffered_writer, .limited(remaining));
if (status.end) break;
remaining -= status.len;
const n = readFn(r.context, &aw.buffered_writer, .limited(remaining)) catch |err| switch (err) {
error.WriteFailed => return error.OutOfMemory,
error.EndOfStream => break,
error.ReadFailed => return error.ReadFailed,
};
remaining -= n;
}
return aw.toOwnedSlice();
}
/// Reads the stream until the end, ignoring all the data.
/// Returns the number of bytes discarded.
pub fn discardUntilEnd(r: Reader) anyerror!usize {
var bw = std.io.Writer.null.unbuffered();
return r.readAll(&bw);
pub const failing: Reader = .{
.context = undefined,
.vtable = &.{
.read = failingRead,
.readVec = failingReadVec,
.discard = failingDiscard,
},
};
pub const ending: Reader = .{
.context = undefined,
.vtable = &.{
.read = endingRead,
.readVec = endingReadVec,
.discard = endingDiscard,
},
};
fn endingRead(context: ?*anyopaque, bw: *BufferedWriter, limit: Limit) RwError!usize {
_ = context;
_ = bw;
_ = limit;
return error.EndOfStream;
}
fn endingReadVec(context: ?*anyopaque, data: []const []u8) Error!usize {
_ = context;
_ = data;
return error.EndOfStream;
}
fn endingDiscard(context: ?*anyopaque, limit: Limit) Error!usize {
_ = context;
_ = limit;
return error.EndOfStream;
}
fn failingRead(context: ?*anyopaque, bw: *BufferedWriter, limit: Limit) RwError!usize {
_ = context;
_ = bw;
_ = limit;
return error.ReadFailed;
}
fn failingReadVec(context: ?*anyopaque, data: []const []u8) Error!usize {
_ = context;
_ = data;
return error.ReadFailed;
}
fn failingDiscard(context: ?*anyopaque, limit: Limit) Error!usize {
_ = context;
_ = limit;
return error.ReadFailed;
}
test "readAlloc when the backing reader provides one byte at a time" {
@ -144,7 +236,7 @@ test "readAlloc when the backing reader provides one byte at a time" {
str: []const u8,
curr: usize,
fn read(self: *@This(), dest: []u8) anyerror!usize {
fn read(self: *@This(), dest: []u8) usize {
if (self.str.len <= self.curr or dest.len == 0)
return 0;

View File

@ -2,6 +2,8 @@ const std = @import("../std.zig");
const assert = std.debug.assert;
const Writer = @This();
pub const Null = @import("Writer/Null.zig");
context: ?*anyopaque,
vtable: *const VTable,
@ -16,8 +18,8 @@ pub const VTable = struct {
///
/// Number of bytes returned may be zero, which does not mean
/// end-of-stream. A subsequent call may return nonzero, or may signal end
/// of stream via an error.
writeSplat: *const fn (ctx: ?*anyopaque, data: []const []const u8, splat: usize) anyerror!usize,
/// of stream via `error.WriteFailed`.
writeSplat: *const fn (ctx: ?*anyopaque, data: []const []const u8, splat: usize) Error!usize,
/// Writes contents from an open file. `headers` are written first, then `len`
/// bytes of `file` starting from `offset`, then `trailers`.
@ -27,7 +29,7 @@ pub const VTable = struct {
///
/// Number of bytes returned may be zero, which does not mean
/// end-of-stream. A subsequent call may return nonzero, or may signal end
/// of stream via an error.
/// of stream via `error.WriteFailed`.
writeFile: *const fn (
ctx: ?*anyopaque,
file: std.fs.File,
@ -37,12 +39,19 @@ pub const VTable = struct {
/// Maximum amount of bytes to read from the file.
limit: Limit,
/// Headers and trailers must be passed together so that in case `len` is
/// zero, they can be forwarded directly to `VTable.writev`.
/// zero, they can be forwarded directly to `VTable.writeVec`.
headers_and_trailers: []const []const u8,
headers_len: usize,
) anyerror!usize,
) FileError!usize,
};
pub const Error = error{
/// See the `Writer` implementation for detailed diagnostics.
WriteFailed,
};
pub const FileError = Error || std.fs.File.PReadError;
pub const Limit = std.io.Reader.Limit;
pub const Offset = enum(u64) {
@ -69,11 +78,11 @@ pub const Offset = enum(u64) {
}
};
pub fn writev(w: Writer, data: []const []const u8) anyerror!usize {
pub fn writeVec(w: Writer, data: []const []const u8) Error!usize {
return w.vtable.writeSplat(w.context, data, 1);
}
pub fn writeSplat(w: Writer, data: []const []const u8, splat: usize) anyerror!usize {
pub fn writeSplat(w: Writer, data: []const []const u8, splat: usize) Error!usize {
return w.vtable.writeSplat(w.context, data, splat);
}
@ -84,27 +93,10 @@ pub fn writeFile(
limit: Limit,
headers_and_trailers: []const []const u8,
headers_len: usize,
) anyerror!usize {
) FileError!usize {
return w.vtable.writeFile(w.context, file, offset, limit, headers_and_trailers, headers_len);
}
pub fn unimplemented_writeFile(
context: ?*anyopaque,
file: std.fs.File,
offset: Offset,
limit: Limit,
headers_and_trailers: []const []const u8,
headers_len: usize,
) anyerror!usize {
_ = context;
_ = file;
_ = offset;
_ = limit;
_ = headers_and_trailers;
_ = headers_len;
return error.Unimplemented;
}
pub fn buffered(w: Writer, buffer: []u8) std.io.BufferedWriter {
return .{
.buffer = buffer,
@ -116,52 +108,38 @@ pub fn unbuffered(w: Writer) std.io.BufferedWriter {
return w.buffered(&.{});
}
/// A `Writer` that discards all data.
pub const @"null": Writer = .{
pub fn failingWriteSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) Error!usize {
_ = context;
_ = data;
_ = splat;
return error.WriteFailed;
}
pub fn failingWriteFile(
context: ?*anyopaque,
file: std.fs.File,
offset: std.io.Writer.Offset,
limit: std.io.Writer.Limit,
headers_and_trailers: []const []const u8,
headers_len: usize,
) Error!usize {
_ = context;
_ = file;
_ = offset;
_ = limit;
_ = headers_and_trailers;
_ = headers_len;
return error.WriteFailed;
}
pub const failing: Writer = .{
.context = undefined,
.vtable = &.{
.writeSplat = null_writeSplat,
.writeFile = null_writeFile,
.writeSplat = failingWriteSplat,
.writeFile = failingWriteFile,
},
};
fn null_writeSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) anyerror!usize {
_ = context;
const headers = data[0 .. data.len - 1];
const pattern = data[headers.len..];
var written: usize = pattern.len * splat;
for (headers) |bytes| written += bytes.len;
return written;
}
fn null_writeFile(
context: ?*anyopaque,
file: std.fs.File,
offset: Offset,
limit: Limit,
headers_and_trailers: []const []const u8,
headers_len: usize,
) anyerror!usize {
_ = context;
var n: usize = 0;
if (offset == .none) {
@panic("TODO seek the file forwards");
}
const limit_int = limit.toInt() orelse {
const headers = headers_and_trailers[0..headers_len];
for (headers) |bytes| n += bytes.len;
if (offset.toInt()) |off| {
const stat = try file.stat();
n += stat.size - off;
for (headers_and_trailers[headers_len..]) |bytes| n += bytes.len;
return n;
}
@panic("TODO stream from file until eof, counting");
};
for (headers_and_trailers) |bytes| n += bytes.len;
return limit_int + n;
}
test @"null" {
try @"null".writeAll("yay");
test {
_ = Null;
}

View File

@ -0,0 +1,66 @@
//! A `Writer` that discards all data.
const std = @import("../../std.zig");
const Writer = std.io.Writer;
const NullWriter = @This();
err: Error,
pub const Error = std.fs.File.StatError;
pub fn writer(nw: *NullWriter) Writer {
return .{
.context = nw,
.vtable = &.{
.writeSplat = writeSplat,
.writeFile = writeFile,
},
};
}
fn writeSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) Writer.Error!usize {
_ = context;
const headers = data[0 .. data.len - 1];
const pattern = data[headers.len..];
var written: usize = pattern.len * splat;
for (headers) |bytes| written += bytes.len;
return written;
}
fn writeFile(
context: ?*anyopaque,
file: std.fs.File,
offset: Writer.Offset,
limit: Writer.Limit,
headers_and_trailers: []const []const u8,
headers_len: usize,
) Writer.Error!usize {
const nw: *NullWriter = @alignCast(@ptrCast(context));
var n: usize = 0;
if (offset == .none) {
@panic("TODO seek the file forwards");
}
const limit_int = limit.toInt() orelse {
const headers = headers_and_trailers[0..headers_len];
for (headers) |bytes| n += bytes.len;
if (offset.toInt()) |off| {
const stat = file.stat() catch |err| {
nw.err = err;
return error.WriteFailed;
};
n += stat.size - off;
for (headers_and_trailers[headers_len..]) |bytes| n += bytes.len;
return n;
}
@panic("TODO stream from file until eof, counting");
};
for (headers_and_trailers) |bytes| n += bytes.len;
return limit_int + n;
}
test "writing a small string" {
var nw: NullWriter = undefined;
var bw = nw.writer().unbuffered();
try bw.writeAll("yay");
}

View File

@ -71,7 +71,9 @@ pub const Config = union(enum) {
reset_attributes: u16,
};
pub fn setColor(conf: Config, bw: *std.io.BufferedWriter, color: Color) anyerror!void {
pub const SetColorError = std.os.windows.SetConsoleTextAttributeError || std.io.Writer.Error;
pub fn setColor(conf: Config, bw: *std.io.BufferedWriter, color: Color) SetColorError!void {
nosuspend switch (conf) {
.no_color => return,
.escape_codes => {

View File

@ -77,7 +77,9 @@ const safety_checks: @TypeOf(safety_checks_hint) = if (build_mode_has_safety)
else
.assumed_correct;
pub fn beginArray(self: *Stringify) anyerror!void {
pub const Error = std.io.Writer.Error;
pub fn beginArray(self: *Stringify) Error!void {
if (build_mode_has_safety) assert(self.raw_streaming_mode == .none);
try self.valueStart();
try self.writer.writeByte('[');
@ -85,7 +87,7 @@ pub fn beginArray(self: *Stringify) anyerror!void {
self.next_punctuation = .none;
}
pub fn beginObject(self: *Stringify) anyerror!void {
pub fn beginObject(self: *Stringify) Error!void {
if (build_mode_has_safety) assert(self.raw_streaming_mode == .none);
try self.valueStart();
try self.writer.writeByte('{');
@ -93,7 +95,7 @@ pub fn beginObject(self: *Stringify) anyerror!void {
self.next_punctuation = .none;
}
pub fn endArray(self: *Stringify) anyerror!void {
pub fn endArray(self: *Stringify) Error!void {
if (build_mode_has_safety) assert(self.raw_streaming_mode == .none);
self.popIndentation(.array);
switch (self.next_punctuation) {
@ -107,7 +109,7 @@ pub fn endArray(self: *Stringify) anyerror!void {
self.valueDone();
}
pub fn endObject(self: *Stringify) anyerror!void {
pub fn endObject(self: *Stringify) Error!void {
if (build_mode_has_safety) assert(self.raw_streaming_mode == .none);
self.popIndentation(.object);
switch (self.next_punctuation) {
@ -213,7 +215,7 @@ fn isComplete(self: *const Stringify) bool {
/// assuming the resulting formatted string represents a single complete value;
/// e.g. `"1"`, `"[]"`, `"[1,2]"`, not `"1,2"`.
/// This function may be useful for doing your own number formatting.
pub fn print(self: *Stringify, comptime fmt: []const u8, args: anytype) anyerror!void {
pub fn print(self: *Stringify, comptime fmt: []const u8, args: anytype) Error!void {
if (build_mode_has_safety) assert(self.raw_streaming_mode == .none);
try self.valueStart();
try self.writer.print(fmt, args);
@ -274,7 +276,7 @@ pub fn endWriteRaw(self: *Stringify) void {
/// `key` is the string content of the property name.
/// Surrounding quotes will be added and any special characters will be escaped.
/// See also `objectFieldRaw`.
pub fn objectField(self: *Stringify, key: []const u8) anyerror!void {
pub fn objectField(self: *Stringify, key: []const u8) Error!void {
if (build_mode_has_safety) assert(self.raw_streaming_mode == .none);
try self.objectFieldStart();
try encodeJsonString(key, self.options, self.writer);
@ -284,7 +286,7 @@ pub fn objectField(self: *Stringify, key: []const u8) anyerror!void {
/// `quoted_key` is the complete bytes of the key including quotes and any necessary escape sequences.
/// A few assertions are performed on the given value to ensure that the caller of this function understands the API contract.
/// See also `objectField`.
pub fn objectFieldRaw(self: *Stringify, quoted_key: []const u8) anyerror!void {
pub fn objectFieldRaw(self: *Stringify, quoted_key: []const u8) Error!void {
if (build_mode_has_safety) assert(self.raw_streaming_mode == .none);
assert(quoted_key.len >= 2 and quoted_key[0] == '"' and quoted_key[quoted_key.len - 1] == '"'); // quoted_key should be "quoted".
try self.objectFieldStart();
@ -343,7 +345,7 @@ pub fn endObjectFieldRaw(self: *Stringify) void {
///
/// See also alternative functions `print` and `beginWriteRaw`.
/// For writing object field names, use `objectField` instead.
pub fn write(self: *Stringify, v: anytype) anyerror!void {
pub fn write(self: *Stringify, v: anytype) Error!void {
if (build_mode_has_safety) assert(self.raw_streaming_mode == .none);
const T = @TypeOf(v);
switch (@typeInfo(T)) {
@ -568,7 +570,7 @@ pub const Options = struct {
/// Writes the given value to the `std.io.Writer` writer.
/// See `Stringify` for how the given value is serialized into JSON.
/// The maximum nesting depth of the output JSON document is 256.
pub fn value(v: anytype, options: Options, writer: *std.io.BufferedWriter) anyerror!void {
pub fn value(v: anytype, options: Options, writer: *std.io.BufferedWriter) Error!void {
var s: Stringify = .{ .writer = writer, .options = options };
try s.write(v);
}
@ -632,7 +634,7 @@ test valueAlloc {
try std.testing.expectEqualStrings(expected, actual);
}
fn outputUnicodeEscape(codepoint: u21, bw: *std.io.BufferedWriter) anyerror!void {
fn outputUnicodeEscape(codepoint: u21, bw: *std.io.BufferedWriter) Error!void {
if (codepoint <= 0xFFFF) {
// If the character is in the Basic Multilingual Plane (U+0000 through U+FFFF),
// then it may be represented as a six-character sequence: a reverse solidus, followed
@ -652,7 +654,7 @@ fn outputUnicodeEscape(codepoint: u21, bw: *std.io.BufferedWriter) anyerror!void
}
}
fn outputSpecialEscape(c: u8, writer: *std.io.BufferedWriter) anyerror!void {
fn outputSpecialEscape(c: u8, writer: *std.io.BufferedWriter) Error!void {
switch (c) {
'\\' => try writer.writeAll("\\\\"),
'\"' => try writer.writeAll("\\\""),
@ -666,14 +668,14 @@ fn outputSpecialEscape(c: u8, writer: *std.io.BufferedWriter) anyerror!void {
}
/// Write `string` to `writer` as a JSON encoded string.
pub fn encodeJsonString(string: []const u8, options: Options, writer: *std.io.BufferedWriter) anyerror!void {
pub fn encodeJsonString(string: []const u8, options: Options, writer: *std.io.BufferedWriter) Error!void {
try writer.writeByte('\"');
try encodeJsonStringChars(string, options, writer);
try writer.writeByte('\"');
}
/// Write `chars` to `writer` as JSON encoded string characters.
pub fn encodeJsonStringChars(chars: []const u8, options: Options, writer: *std.io.BufferedWriter) anyerror!void {
pub fn encodeJsonStringChars(chars: []const u8, options: Options, writer: *std.io.BufferedWriter) Error!void {
var write_cursor: usize = 0;
var i: usize = 0;
if (options.escape_unicode) {
@ -722,7 +724,7 @@ test "json write stream" {
try testBasicWriteStream(&w);
}
fn testBasicWriteStream(w: *Stringify) anyerror!void {
fn testBasicWriteStream(w: *Stringify) Error!void {
w.writer.reset();
try w.beginObject();

View File

@ -51,10 +51,10 @@ pub const Value = union(enum) {
}
pub fn dump(v: Value) void {
var bw = std.debug.lockStdErr2(&.{});
defer std.debug.unlockStdErr();
const bw = std.debug.lockStderrWriter(&.{});
defer std.debug.unlockStderrWriter();
json.Stringify.value(v, .{}, &bw) catch return;
json.Stringify.value(v, .{}, bw) catch return;
}
pub fn jsonStringify(value: @This(), jws: anytype) !void {

View File

@ -149,12 +149,9 @@ pub fn defaultLog(
const level_txt = comptime message_level.asText();
const prefix2 = if (scope == .default) ": " else "(" ++ @tagName(scope) ++ "): ";
var buffer: [1024]u8 = undefined;
var bw: std.io.BufferedWriter = std.debug.lockStdErr2(&buffer);
defer std.debug.unlockStdErr();
nosuspend {
bw.print(level_txt ++ prefix2 ++ format ++ "\n", args) catch return;
bw.flush() catch return;
}
const bw = std.debug.lockStderrWriter(&buffer);
defer std.debug.unlockStderrWriter();
bw.print(level_txt ++ prefix2 ++ format ++ "\n", args) catch return;
}
/// Returns a scoped logging namespace that logs all messages using the scope

View File

@ -2322,7 +2322,7 @@ pub const Const = struct {
/// this function will fail to print the string, printing "(BigInt)" instead of a number.
/// This is because the rendering algorithm requires reversing a string, which requires O(N) memory.
/// See `toString` and `toStringAlloc` for a way to print big integers without failure.
pub fn format(self: Const, bw: *std.io.BufferedWriter, comptime fmt: []const u8) anyerror!void {
pub fn format(self: Const, bw: *std.io.BufferedWriter, comptime fmt: []const u8) !void {
comptime var base = 10;
comptime var case: std.fmt.Case = .lower;

View File

@ -850,8 +850,8 @@ pub fn tcpConnectToAddress(address: Address) TcpConnectToAddressError!Stream {
}
// TODO: Instead of having a massive error set, make the error set have categories, and then
// store the sub-error as a diagnostic anyerror value.
const GetAddressListError = std.mem.Allocator.Error || std.fs.File.OpenError || anyerror || posix.SocketError || posix.BindError || posix.SetSockOptError || error{
// store the sub-error as a diagnostic value.
const GetAddressListError = std.mem.Allocator.Error || std.fs.File.OpenError || posix.SocketError || posix.BindError || posix.SetSockOptError || error{
TemporaryNameServerFailure,
NameServerFailure,
AddressFamilyNotSupported,
@ -1873,14 +1873,14 @@ pub const Stream = struct {
context: ?*anyopaque,
bw: *std.io.BufferedWriter,
limit: std.io.Reader.Limit,
) anyerror!std.io.Reader.Status {
) std.io.Reader.Error!usize {
const buf = limit.slice(try bw.writableSlice(1));
const status = try windows_readv(context, &.{buf});
bw.advance(status.len);
return status;
}
fn windows_readv(context: ?*anyopaque, data: []const []u8) anyerror!std.io.Reader.Status {
fn windows_readv(context: ?*anyopaque, data: []const []u8) std.io.Reader.Error!usize {
var iovecs: [max_buffers_len]windows.WSABUF = undefined;
var iovecs_i: usize = 0;
for (data) |d| {
@ -1915,7 +1915,7 @@ pub const Stream = struct {
return .{ .len = n, .end = n == 0 };
}
fn windows_writeSplat(context: *anyopaque, data: []const []const u8, splat: usize) anyerror!usize {
fn windows_writeSplat(context: *anyopaque, data: []const []const u8, splat: usize) std.io.Writer.Error!usize {
comptime assert(native_os == .windows);
if (data.len == 1 and splat == 0) return 0;
var splat_buffer: [256]u8 = undefined;
@ -1974,7 +1974,7 @@ pub const Stream = struct {
return n;
}
fn posix_writeSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) anyerror!usize {
fn posix_writeSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) std.io.Writer.Error!usize {
const sock_fd = opaqueToHandle(context);
comptime assert(native_os != .windows);
var splat_buffer: [256]u8 = undefined;
@ -2028,7 +2028,7 @@ pub const Stream = struct {
in_len: std.io.Writer.FileLen,
headers_and_trailers: []const []const u8,
headers_len: usize,
) anyerror!usize {
) std.io.Writer.FileError!usize {
const len_int = switch (in_len) {
.zero => return windows_writeSplat(context, headers_and_trailers, 1),
.entire_file => std.math.maxInt(usize),

View File

@ -603,7 +603,7 @@ fn PaxIterator(comptime ReaderType: type) type {
return null;
}
fn readUntil(self: *Self, delimiter: u8) anyerror![]const u8 {
fn readUntil(self: *Self, delimiter: u8) ![]const u8 {
var fbs: std.io.BufferedWriter = undefined;
fbs.initFixed(&self.scratch);
try self.reader.streamUntilDelimiter(&fbs, delimiter, null);

View File

@ -390,8 +390,8 @@ pub fn expectEqualSlices(comptime T: type, expected: []const T, actual: []const
const actual_window = actual[window_start..@min(actual.len, window_start + max_window_size)];
const actual_truncated = window_start + actual_window.len < actual.len;
var bw = std.debug.lockStdErr2(&.{});
defer std.debug.unlockStdErr();
const bw = std.debug.lockStderrWriter(&.{});
defer std.debug.unlockStderrWriter();
const ttyconf = std.io.tty.detectConfig(.stderr());
var differ = if (T == u8) BytesDiffer{
.expected = expected_window,
@ -416,7 +416,7 @@ pub fn expectEqualSlices(comptime T: type, expected: []const T, actual: []const
print("... truncated ...\n", .{});
}
}
differ.write(&bw) catch {};
differ.write(bw) catch {};
if (expected_truncated) {
const end_offset = window_start + expected_window.len;
const num_missing_items = expected.len - (window_start + expected_window.len);
@ -438,7 +438,7 @@ pub fn expectEqualSlices(comptime T: type, expected: []const T, actual: []const
print("... truncated ...\n", .{});
}
}
differ.write(&bw) catch {};
differ.write(bw) catch {};
if (actual_truncated) {
const end_offset = window_start + actual_window.len;
const num_missing_items = actual.len - (window_start + actual_window.len);

View File

@ -207,7 +207,7 @@ pub fn renderAlloc(tree: Ast, gpa: Allocator) RenderError![]u8 {
return aw.toOwnedSlice();
}
pub fn render(tree: Ast, gpa: Allocator, bw: *std.io.BufferedWriter, fixups: Fixups) anyerror!void {
pub fn render(tree: Ast, gpa: Allocator, bw: *std.io.BufferedWriter, fixups: Fixups) RenderError!void {
return @import("./render.zig").renderTree(gpa, bw, tree, fixups);
}
@ -315,7 +315,7 @@ pub fn rootDecls(tree: Ast) []const Node.Index {
}
}
pub fn renderError(tree: Ast, parse_error: Error, bw: *std.io.BufferedWriter) anyerror!void {
pub fn renderError(tree: Ast, parse_error: Error, bw: *std.io.BufferedWriter) std.io.Writer.Error!void {
switch (parse_error.tag) {
.asterisk_after_ptr_deref => {
// Note that the token will point at the `.*` but ideally the source

View File

@ -158,13 +158,12 @@ pub const RenderOptions = struct {
pub fn renderToStdErr(eb: ErrorBundle, options: RenderOptions) void {
var buffer: [256]u8 = undefined;
var bw = std.debug.lockStdErr2(&buffer);
defer std.debug.unlockStdErr();
renderToWriter(eb, options, &bw) catch return;
bw.flush() catch return;
const bw = std.debug.lockStderrWriter(&buffer);
defer std.debug.unlockStderrWriter();
renderToWriter(eb, options, bw) catch return;
}
pub fn renderToWriter(eb: ErrorBundle, options: RenderOptions, bw: *std.io.BufferedWriter) anyerror!void {
pub fn renderToWriter(eb: ErrorBundle, options: RenderOptions, bw: *std.io.BufferedWriter) std.io.Writer.Error!void {
if (eb.extra.len == 0) return;
for (eb.getMessages()) |err_msg| {
try renderErrorMessageToWriter(eb, options, err_msg, bw, "error", .red, 0);
@ -187,7 +186,7 @@ fn renderErrorMessageToWriter(
kind: []const u8,
color: std.io.tty.Color,
indent: usize,
) anyerror!void {
) std.io.Writer.Error!void {
const ttyconf = options.ttyconf;
const err_msg = eb.getErrorMessage(err_msg_index);
const prefix_start = bw.count;

View File

@ -1,6 +1,5 @@
in: std.fs.File,
out: std.fs.File,
receive_fifo: std.fifo.LinearFifo(u8, .Dynamic),
in: *std.io.BufferedReader,
out: *std.io.BufferedWriter,
pub const Message = struct {
pub const Header = extern struct {
@ -94,9 +93,8 @@ pub const Message = struct {
};
pub const Options = struct {
gpa: Allocator,
in: std.fs.File,
out: std.fs.File,
in: *std.io.BufferedReader,
out: *std.io.BufferedWriter,
zig_version: []const u8,
};
@ -104,96 +102,40 @@ pub fn init(options: Options) !Server {
var s: Server = .{
.in = options.in,
.out = options.out,
.receive_fifo = std.fifo.LinearFifo(u8, .Dynamic).init(options.gpa),
};
try s.serveStringMessage(.zig_version, options.zig_version);
return s;
}
pub fn deinit(s: *Server) void {
s.receive_fifo.deinit();
s.* = undefined;
}
pub fn receiveMessage(s: *Server) !InMessage.Header {
const Header = InMessage.Header;
const fifo = &s.receive_fifo;
var last_amt_zero = false;
while (true) {
const buf = fifo.readableSlice(0);
assert(fifo.readableLength() == buf.len);
if (buf.len >= @sizeOf(Header)) {
const header: *align(1) const Header = @ptrCast(buf[0..@sizeOf(Header)]);
const bytes_len = bswap(header.bytes_len);
const tag = bswap(header.tag);
if (buf.len - @sizeOf(Header) >= bytes_len) {
fifo.discard(@sizeOf(Header));
return .{
.tag = tag,
.bytes_len = bytes_len,
};
} else {
const needed = bytes_len - (buf.len - @sizeOf(Header));
const write_buffer = try fifo.writableWithSize(needed);
const amt = try s.in.read(write_buffer);
fifo.update(amt);
continue;
}
}
const write_buffer = try fifo.writableWithSize(256);
const amt = try s.in.read(write_buffer);
fifo.update(amt);
if (amt == 0) {
if (last_amt_zero) return error.BrokenPipe;
last_amt_zero = true;
}
}
return try s.in.takeStructEndian(InMessage.Header, .little);
}
pub fn receiveBody_u32(s: *Server) !u32 {
const fifo = &s.receive_fifo;
const buf = fifo.readableSlice(0);
const result = @as(*align(1) const u32, @ptrCast(buf[0..4])).*;
fifo.discard(4);
return bswap(result);
return s.in.takeInt(u32, .little);
}
pub fn serveStringMessage(s: *Server, tag: OutMessage.Tag, msg: []const u8) !void {
return s.serveMessage(.{
try s.serveMessageHeader(.{
.tag = tag,
.bytes_len = @as(u32, @intCast(msg.len)),
}, &.{msg});
.bytes_len = @intCast(msg.len),
});
try s.out.writeAll(msg);
try s.out.flush();
}
pub fn serveMessage(
s: *const Server,
header: OutMessage.Header,
bufs: []const []const u8,
) !void {
var iovecs: [10]std.posix.iovec_const = undefined;
const header_le = bswap(header);
iovecs[0] = .{
.base = @as([*]const u8, @ptrCast(&header_le)),
.len = @sizeOf(OutMessage.Header),
};
for (bufs, iovecs[1 .. bufs.len + 1]) |buf, *iovec| {
iovec.* = .{
.base = buf.ptr,
.len = buf.len,
};
}
try s.out.writevAll(iovecs[0 .. bufs.len + 1]);
/// Don't forget to flush!
pub fn serveMessageHeader(s: *const Server, header: OutMessage.Header) !void {
try s.out.writeStructEndian(header, .little);
}
pub fn serveU64Message(s: *Server, tag: OutMessage.Tag, int: u64) !void {
const msg_le = bswap(int);
return s.serveMessage(.{
pub fn serveU64Message(s: *const Server, tag: OutMessage.Tag, int: u64) !void {
try serveMessageHeader(s, .{
.tag = tag,
.bytes_len = @sizeOf(u64),
}, &.{std.mem.asBytes(&msg_le)});
});
try s.out.writeInt(u64, int, .little);
try s.out.flush();
}
pub fn serveEmitDigest(
@ -201,26 +143,22 @@ pub fn serveEmitDigest(
digest: *const [Cache.bin_digest_len]u8,
header: OutMessage.EmitDigest,
) !void {
try s.serveMessage(.{
try s.serveMessageHeader(.{
.tag = .emit_digest,
.bytes_len = @intCast(digest.len + @sizeOf(OutMessage.EmitDigest)),
}, &.{
std.mem.asBytes(&header),
digest,
});
try s.out.writeStructEndian(header, .little);
try s.out.writeAll(digest);
try s.out.flush();
}
pub fn serveTestResults(
s: *Server,
msg: OutMessage.TestResults,
) !void {
const msg_le = bswap(msg);
try s.serveMessage(.{
pub fn serveTestResults(s: *Server, msg: OutMessage.TestResults) !void {
try s.serveMessageHeader(.{
.tag = .test_results,
.bytes_len = @intCast(@sizeOf(OutMessage.TestResults)),
}, &.{
std.mem.asBytes(&msg_le),
});
try s.out.writeStructEndian(msg, .little);
try s.out.flush();
}
pub fn serveErrorBundle(s: *Server, error_bundle: std.zig.ErrorBundle) !void {
@ -230,81 +168,40 @@ pub fn serveErrorBundle(s: *Server, error_bundle: std.zig.ErrorBundle) !void {
};
const bytes_len = @sizeOf(OutMessage.ErrorBundle) +
4 * error_bundle.extra.len + error_bundle.string_bytes.len;
try s.serveMessage(.{
try s.serveMessageHeader(.{
.tag = .error_bundle,
.bytes_len = @intCast(bytes_len),
}, &.{
std.mem.asBytes(&eb_hdr),
// TODO: implement @ptrCast between slices changing the length
std.mem.sliceAsBytes(error_bundle.extra),
error_bundle.string_bytes,
});
try s.out.writeStructEndian(eb_hdr, .little);
try s.out.writeArrayEndian(u32, error_bundle.extra, .little);
try s.out.writeAll(error_bundle.string_bytes);
try s.out.flush();
}
pub const TestMetadata = struct {
names: []u32,
expected_panic_msgs: []u32,
names: []const u32,
expected_panic_msgs: []const u32,
string_bytes: []const u8,
};
pub fn serveTestMetadata(s: *Server, test_metadata: TestMetadata) !void {
const header: OutMessage.TestMetadata = .{
.tests_len = bswap(@as(u32, @intCast(test_metadata.names.len))),
.string_bytes_len = bswap(@as(u32, @intCast(test_metadata.string_bytes.len))),
.tests_len = @as(u32, @intCast(test_metadata.names.len)),
.string_bytes_len = @as(u32, @intCast(test_metadata.string_bytes.len)),
};
const trailing = 2;
const bytes_len = @sizeOf(OutMessage.TestMetadata) +
trailing * @sizeOf(u32) * test_metadata.names.len + test_metadata.string_bytes.len;
if (need_bswap) {
bswap_u32_array(test_metadata.names);
bswap_u32_array(test_metadata.expected_panic_msgs);
}
defer if (need_bswap) {
bswap_u32_array(test_metadata.names);
bswap_u32_array(test_metadata.expected_panic_msgs);
};
return s.serveMessage(.{
try s.serveMessageHeader(.{
.tag = .test_metadata,
.bytes_len = @intCast(bytes_len),
}, &.{
std.mem.asBytes(&header),
// TODO: implement @ptrCast between slices changing the length
std.mem.sliceAsBytes(test_metadata.names),
std.mem.sliceAsBytes(test_metadata.expected_panic_msgs),
test_metadata.string_bytes,
});
}
fn bswap(x: anytype) @TypeOf(x) {
if (!need_bswap) return x;
const T = @TypeOf(x);
switch (@typeInfo(T)) {
.@"enum" => return @as(T, @enumFromInt(@byteSwap(@intFromEnum(x)))),
.int => return @byteSwap(x),
.@"struct" => |info| switch (info.layout) {
.@"extern" => {
var result: T = undefined;
inline for (info.fields) |field| {
@field(result, field.name) = bswap(@field(x, field.name));
}
return result;
},
.@"packed" => {
const I = info.backing_integer.?;
return @as(T, @bitCast(@byteSwap(@as(I, @bitCast(x)))));
},
.auto => @compileError("auto layout struct"),
},
else => @compileError("bswap on type " ++ @typeName(T)),
}
}
fn bswap_u32_array(slice: []u32) void {
comptime assert(need_bswap);
for (slice) |*elem| elem.* = @byteSwap(elem.*);
try s.out.writeStructEndian(header, .little);
try s.out.writeArrayEndian(u32, test_metadata.names, .little);
try s.out.writeArrayEndian(u32, test_metadata.expected_panic_msgs, .little);
try s.out.writeAll(test_metadata.string_bytes);
try s.out.flush();
}
const OutMessage = std.zig.Server.Message;

View File

@ -520,7 +520,7 @@ pub fn parseStrLit(
tree: Ast,
node: Ast.Node.Index,
writer: *std.io.BufferedWriter,
) anyerror!std.zig.string_literal.Result {
) error{OutOfMemory}!std.zig.string_literal.Result {
switch (tree.nodeTag(node)) {
.string_literal => {
const token = tree.nodeMainToken(node);

View File

@ -170,7 +170,7 @@ pub fn next(bc: *BitcodeReader) !?Item {
}
}
pub fn skipBlock(bc: *BitcodeReader, block: Block) anyerror!void {
pub fn skipBlock(bc: *BitcodeReader, block: Block) !void {
assert(bc.bit_offset == 0);
try bc.br.discard(4 * @as(u34, block.len));
try bc.endBlock();
@ -369,12 +369,12 @@ fn align32Bits(bc: *BitcodeReader) void {
bc.bit_offset = 0;
}
fn read32Bits(bc: *BitcodeReader) anyerror!u32 {
fn read32Bits(bc: *BitcodeReader) !u32 {
assert(bc.bit_offset == 0);
return bc.br.takeInt(u32, .little);
}
fn readBytes(bc: *BitcodeReader, bytes: []u8) anyerror!void {
fn readBytes(bc: *BitcodeReader, bytes: []u8) !void {
assert(bc.bit_offset == 0);
try bc.br.read(bytes);

View File

@ -91,7 +91,7 @@ pub const String = enum(u32) {
string: String,
builder: *const Builder,
};
fn format(data: FormatData, bw: *std.io.BufferedWriter, comptime fmt_str: []const u8) anyerror!void {
fn format(data: FormatData, bw: *std.io.BufferedWriter, comptime fmt_str: []const u8) std.io.Writer.Error!void {
if (comptime std.mem.indexOfNone(u8, fmt_str, "\"r")) |_|
@compileError("invalid format string: '" ++ fmt_str ++ "'");
assert(data.string != .none);
@ -649,7 +649,7 @@ pub const Type = enum(u32) {
type: Type,
builder: *const Builder,
};
fn format(data: FormatData, bw: *std.io.BufferedWriter, comptime fmt_str: []const u8) anyerror!void {
fn format(data: FormatData, bw: *std.io.BufferedWriter, comptime fmt_str: []const u8) std.io.Writer.Error!void {
assert(data.type != .none);
if (comptime std.mem.eql(u8, fmt_str, "m")) {
const item = data.builder.type_items.items[@intFromEnum(data.type)];
@ -1129,7 +1129,7 @@ pub const Attribute = union(Kind) {
attribute_index: Index,
builder: *const Builder,
};
fn format(data: FormatData, bw: *std.io.BufferedWriter, comptime fmt_str: []const u8) anyerror!void {
fn format(data: FormatData, bw: *std.io.BufferedWriter, comptime fmt_str: []const u8) std.io.Writer.Error!void {
if (comptime std.mem.indexOfNone(u8, fmt_str, "\"#")) |_|
@compileError("invalid format string: '" ++ fmt_str ++ "'");
const attribute = data.attribute_index.toAttribute(data.builder);
@ -1568,7 +1568,7 @@ pub const Attributes = enum(u32) {
attributes: Attributes,
builder: *const Builder,
};
fn format(data: FormatData, bw: *std.io.BufferedWriter, comptime fmt_str: []const u8) anyerror!void {
fn format(data: FormatData, bw: *std.io.BufferedWriter, comptime fmt_str: []const u8) std.io.Writer.Error!void {
for (data.attributes.slice(data.builder)) |attribute_index| try Attribute.Index.format(.{
.attribute_index = attribute_index,
.builder = data.builder,
@ -1761,11 +1761,11 @@ pub const Linkage = enum(u4) {
extern_weak = 7,
external = 0,
pub fn format(self: Linkage, bw: *std.io.BufferedWriter, comptime _: []const u8) anyerror!void {
pub fn format(self: Linkage, bw: *std.io.BufferedWriter, comptime _: []const u8) std.io.Writer.Error!void {
if (self != .external) try bw.print(" {s}", .{@tagName(self)});
}
fn formatOptional(data: ?Linkage, bw: *std.io.BufferedWriter, comptime _: []const u8) anyerror!void {
fn formatOptional(data: ?Linkage, bw: *std.io.BufferedWriter, comptime _: []const u8) std.io.Writer.Error!void {
if (data) |linkage| try bw.print(" {s}", .{@tagName(linkage)});
}
pub fn fmtOptional(self: ?Linkage) std.fmt.Formatter(formatOptional) {
@ -1778,7 +1778,7 @@ pub const Preemption = enum {
dso_local,
implicit_dso_local,
pub fn format(self: Preemption, bw: *std.io.BufferedWriter, comptime _: []const u8) anyerror!void {
pub fn format(self: Preemption, bw: *std.io.BufferedWriter, comptime _: []const u8) std.io.Writer.Error!void {
if (self == .dso_local) try bw.print(" {s}", .{@tagName(self)});
}
};
@ -1799,8 +1799,8 @@ pub const Visibility = enum(u2) {
pub fn format(
self: Visibility,
comptime format_string: []const u8,
writer: anytype,
) @TypeOf(writer).Error!void {
writer: *std.io.BufferedWriter,
) std.io.Writer.Error!void {
comptime assert(format_string.len == 0);
if (self != .default) try writer.print(" {s}", .{@tagName(self)});
}
@ -1811,7 +1811,7 @@ pub const DllStorageClass = enum(u2) {
dllimport = 1,
dllexport = 2,
pub fn format(self: DllStorageClass, bw: *std.io.BufferedWriter, comptime _: []const u8) anyerror!void {
pub fn format(self: DllStorageClass, bw: *std.io.BufferedWriter, comptime _: []const u8) std.io.Writer.Error!void {
if (self != .default) try bw.print(" {s}", .{@tagName(self)});
}
};
@ -1823,7 +1823,7 @@ pub const ThreadLocal = enum(u3) {
initialexec = 3,
localexec = 4,
pub fn format(self: ThreadLocal, bw: *std.io.BufferedWriter, comptime prefix: []const u8) anyerror!void {
pub fn format(self: ThreadLocal, bw: *std.io.BufferedWriter, comptime prefix: []const u8) std.io.Writer.Error!void {
if (self == .default) return;
try bw.print("{s}thread_local", .{prefix});
if (self != .generaldynamic) try bw.print("({s})", .{@tagName(self)});
@ -1837,7 +1837,7 @@ pub const UnnamedAddr = enum(u2) {
unnamed_addr = 1,
local_unnamed_addr = 2,
pub fn format(self: UnnamedAddr, bw: *std.io.BufferedWriter, comptime _: []const u8) anyerror!void {
pub fn format(self: UnnamedAddr, bw: *std.io.BufferedWriter, comptime _: []const u8) std.io.Writer.Error!void {
if (self != .default) try bw.print(" {s}", .{@tagName(self)});
}
};
@ -1931,7 +1931,7 @@ pub const AddrSpace = enum(u24) {
pub const funcref: AddrSpace = @enumFromInt(20);
};
pub fn format(self: AddrSpace, bw: *std.io.BufferedWriter, comptime prefix: []const u8) anyerror!void {
pub fn format(self: AddrSpace, bw: *std.io.BufferedWriter, comptime prefix: []const u8) std.io.Writer.Error!void {
if (self != .default) try bw.print("{s}addrspace({d})", .{ prefix, @intFromEnum(self) });
}
};
@ -1940,7 +1940,7 @@ pub const ExternallyInitialized = enum {
default,
externally_initialized,
pub fn format(self: ExternallyInitialized, bw: *std.io.BufferedWriter, comptime _: []const u8) anyerror!void {
pub fn format(self: ExternallyInitialized, bw: *std.io.BufferedWriter, comptime _: []const u8) std.io.Writer.Error!void {
if (self != .default) try bw.print(" {s}", .{@tagName(self)});
}
};
@ -1964,7 +1964,7 @@ pub const Alignment = enum(u6) {
return if (self == .default) 0 else (@intFromEnum(self) + 1);
}
pub fn format(self: Alignment, bw: *std.io.BufferedWriter, comptime prefix: []const u8) anyerror!void {
pub fn format(self: Alignment, bw: *std.io.BufferedWriter, comptime prefix: []const u8) std.io.Writer.Error!void {
try bw.print("{s}align {d}", .{ prefix, self.toByteUnits() orelse return });
}
};
@ -2038,7 +2038,7 @@ pub const CallConv = enum(u10) {
pub const default = CallConv.ccc;
pub fn format(self: CallConv, bw: *std.io.BufferedWriter, comptime _: []const u8) anyerror!void {
pub fn format(self: CallConv, bw: *std.io.BufferedWriter, comptime _: []const u8) std.io.Writer.Error!void {
switch (self) {
default => {},
.fastcc,
@ -2119,7 +2119,7 @@ pub const StrtabString = enum(u32) {
string: StrtabString,
builder: *const Builder,
};
fn format(data: FormatData, bw: *std.io.BufferedWriter, comptime fmt_str: []const u8) anyerror!void {
fn format(data: FormatData, bw: *std.io.BufferedWriter, comptime fmt_str: []const u8) std.io.Writer.Error!void {
if (comptime std.mem.indexOfNone(u8, fmt_str, "\"r")) |_|
@compileError("invalid format string: '" ++ fmt_str ++ "'");
assert(data.string != .none);
@ -2306,7 +2306,7 @@ pub const Global = struct {
global: Index,
builder: *const Builder,
};
fn format(data: FormatData, bw: *std.io.BufferedWriter, comptime _: []const u8) anyerror!void {
fn format(data: FormatData, bw: *std.io.BufferedWriter, comptime _: []const u8) std.io.Writer.Error!void {
try bw.print("@{f}", .{
data.global.unwrap(data.builder).name(data.builder).fmt(data.builder),
});
@ -4752,7 +4752,7 @@ pub const Function = struct {
function: Function.Index,
builder: *Builder,
};
fn format(data: FormatData, bw: *std.io.BufferedWriter, comptime fmt_str: []const u8) anyerror!void {
fn format(data: FormatData, bw: *std.io.BufferedWriter, comptime fmt_str: []const u8) std.io.Writer.Error!void {
if (comptime std.mem.indexOfNone(u8, fmt_str, ", %")) |_|
@compileError("invalid format string: '" ++ fmt_str ++ "'");
if (comptime std.mem.indexOfScalar(u8, fmt_str, ',') != null) {
@ -6944,7 +6944,7 @@ pub const MemoryAccessKind = enum(u1) {
normal,
@"volatile",
pub fn format(self: MemoryAccessKind, bw: *std.io.BufferedWriter, comptime prefix: []const u8) anyerror!void {
pub fn format(self: MemoryAccessKind, bw: *std.io.BufferedWriter, comptime prefix: []const u8) std.io.Writer.Error!void {
if (self != .normal) try bw.print("{s}{s}", .{ prefix, @tagName(self) });
}
};
@ -6953,7 +6953,7 @@ pub const SyncScope = enum(u1) {
singlethread,
system,
pub fn format(self: SyncScope, bw: *std.io.BufferedWriter, comptime prefix: []const u8) anyerror!void {
pub fn format(self: SyncScope, bw: *std.io.BufferedWriter, comptime prefix: []const u8) std.io.Writer.Error!void {
if (self != .system) try bw.print(
\\{s}syncscope("{s}")
, .{ prefix, @tagName(self) });
@ -6969,7 +6969,7 @@ pub const AtomicOrdering = enum(u3) {
acq_rel = 5,
seq_cst = 6,
pub fn format(self: AtomicOrdering, bw: *std.io.BufferedWriter, comptime prefix: []const u8) anyerror!void {
pub fn format(self: AtomicOrdering, bw: *std.io.BufferedWriter, comptime prefix: []const u8) std.io.Writer.Error!void {
if (self != .none) try bw.print("{s}{s}", .{ prefix, @tagName(self) });
}
};
@ -7385,7 +7385,7 @@ pub const Constant = enum(u32) {
constant: Constant,
builder: *Builder,
};
fn format(data: FormatData, bw: *std.io.BufferedWriter, comptime fmt_str: []const u8) anyerror!void {
fn format(data: FormatData, bw: *std.io.BufferedWriter, comptime fmt_str: []const u8) std.io.Writer.Error!void {
if (comptime std.mem.indexOfNone(u8, fmt_str, ", %")) |_|
@compileError("invalid format string: '" ++ fmt_str ++ "'");
if (comptime std.mem.indexOfScalar(u8, fmt_str, ',') != null) {
@ -7712,7 +7712,7 @@ pub const Value = enum(u32) {
function: Function.Index,
builder: *Builder,
};
fn format(data: FormatData, bw: *std.io.BufferedWriter, comptime fmt_str: []const u8) anyerror!void {
fn format(data: FormatData, bw: *std.io.BufferedWriter, comptime fmt_str: []const u8) std.io.Writer.Error!void {
switch (data.value.unwrap()) {
.instruction => |instruction| try Function.Instruction.Index.format(.{
.instruction = instruction,
@ -7757,7 +7757,7 @@ pub const MetadataString = enum(u32) {
metadata_string: MetadataString,
builder: *const Builder,
};
fn format(data: FormatData, bw: *std.io.BufferedWriter, comptime _: []const u8) anyerror!void {
fn format(data: FormatData, bw: *std.io.BufferedWriter, comptime _: []const u8) std.io.Writer.Error!void {
try printEscapedString(data.metadata_string.slice(data.builder), .always_quote, bw);
}
fn fmt(self: MetadataString, builder: *const Builder) std.fmt.Formatter(format) {
@ -7922,7 +7922,7 @@ pub const Metadata = enum(u32) {
AllCallsDescribed: bool = false,
Unused: u2 = 0,
pub fn format(self: DIFlags, bw: *std.io.BufferedWriter, comptime _: []const u8) anyerror!void {
pub fn format(self: DIFlags, bw: *std.io.BufferedWriter, comptime _: []const u8) std.io.Writer.Error!void {
var need_pipe = false;
inline for (@typeInfo(DIFlags).@"struct".fields) |field| {
switch (@typeInfo(field.type)) {
@ -7979,7 +7979,7 @@ pub const Metadata = enum(u32) {
ObjCDirect: bool = false,
Unused: u20 = 0,
pub fn format(self: DISPFlags, bw: *std.io.BufferedWriter, comptime _: []const u8) anyerror!void {
pub fn format(self: DISPFlags, bw: *std.io.BufferedWriter, comptime _: []const u8) std.io.Writer.Error!void {
var need_pipe = false;
inline for (@typeInfo(DISPFlags).@"struct".fields) |field| {
switch (@typeInfo(field.type)) {
@ -8196,7 +8196,7 @@ pub const Metadata = enum(u32) {
};
};
};
fn format(data: FormatData, bw: *std.io.BufferedWriter, comptime fmt_str: []const u8) anyerror!void {
fn format(data: FormatData, bw: *std.io.BufferedWriter, comptime fmt_str: []const u8) std.io.Writer.Error!void {
if (data.node == .none) return;
const is_specialized = fmt_str.len > 0 and fmt_str[0] == 'S';
@ -8371,7 +8371,7 @@ pub const Metadata = enum(u32) {
},
nodes: anytype,
bw: *std.io.BufferedWriter,
) anyerror!void {
) !void {
comptime var fmt_str: []const u8 = "";
const names = comptime std.meta.fieldNames(@TypeOf(nodes));
comptime var fields: [2 + names.len]std.builtin.Type.StructField = undefined;
@ -9379,14 +9379,14 @@ pub fn printToFile(self: *Builder, path: []const u8) bool {
return true;
}
pub fn printBuffered(self: *Builder, writer: std.io.Writer) anyerror!void {
pub fn printBuffered(self: *Builder, writer: std.io.Writer) std.io.Writer.Error!void {
var buffer: [4096]u8 = undefined;
var bw = writer.buffered(&buffer);
try self.print(&bw);
try bw.flush();
}
pub fn print(self: *Builder, bw: *std.io.BufferedWriter) anyerror!void {
pub fn print(self: *Builder, bw: *std.io.BufferedWriter) std.io.Writer.Error!void {
var need_newline = false;
var metadata_formatter: Metadata.Formatter = .{ .builder = self, .need_comma = undefined };
defer metadata_formatter.map.deinit(self.gpa);
@ -10458,7 +10458,7 @@ fn isValidIdentifier(id: []const u8) bool {
}
const QuoteBehavior = enum { always_quote, quote_unless_valid_identifier };
fn printEscapedString(slice: []const u8, quotes: QuoteBehavior, bw: *std.io.BufferedWriter) anyerror!void {
fn printEscapedString(slice: []const u8, quotes: QuoteBehavior, bw: *std.io.BufferedWriter) std.io.Writer.Error!void {
const need_quotes = switch (quotes) {
.always_quote => true,
.quote_unless_valid_identifier => !isValidIdentifier(slice),

View File

@ -10,6 +10,8 @@ const primitives = std.zig.primitives;
const indent_delta = 4;
const asm_indent_delta = 2;
pub const Error = Ast.RenderError;
pub const Fixups = struct {
/// The key is the mut token (`var`/`const`) of the variable declaration
/// that should have a `_ = foo;` inserted afterwards.
@ -75,7 +77,7 @@ const Render = struct {
fixups: Fixups,
};
pub fn renderTree(gpa: Allocator, bw: *std.io.BufferedWriter, tree: Ast, fixups: Fixups) anyerror!void {
pub fn renderTree(gpa: Allocator, bw: *std.io.BufferedWriter, tree: Ast, fixups: Fixups) Error!void {
assert(tree.errors.len == 0); // Cannot render an invalid tree.
var auto_indenting_stream: AutoIndentingStream = .init(gpa, bw, indent_delta);
defer auto_indenting_stream.deinit();
@ -111,7 +113,7 @@ pub fn renderTree(gpa: Allocator, bw: *std.io.BufferedWriter, tree: Ast, fixups:
}
/// Render all members in the given slice, keeping empty lines where appropriate
fn renderMembers(r: *Render, members: []const Ast.Node.Index) anyerror!void {
fn renderMembers(r: *Render, members: []const Ast.Node.Index) Error!void {
const tree = r.tree;
if (members.len == 0) return;
const container: Container = for (members) |member| {
@ -135,7 +137,7 @@ fn renderMember(
container: Container,
decl: Ast.Node.Index,
space: Space,
) anyerror!void {
) Error!void {
const tree = r.tree;
const ais = r.ais;
if (r.fixups.omit_nodes.contains(decl)) return;
@ -305,7 +307,7 @@ fn renderMember(
}
/// Render all expressions in the slice, keeping empty lines where appropriate
fn renderExpressions(r: *Render, expressions: []const Ast.Node.Index, space: Space) anyerror!void {
fn renderExpressions(r: *Render, expressions: []const Ast.Node.Index, space: Space) Error!void {
if (expressions.len == 0) return;
try renderExpression(r, expressions[0], space);
for (expressions[1..]) |expression| {
@ -314,7 +316,7 @@ fn renderExpressions(r: *Render, expressions: []const Ast.Node.Index, space: Spa
}
}
fn renderExpression(r: *Render, node: Ast.Node.Index, space: Space) anyerror!void {
fn renderExpression(r: *Render, node: Ast.Node.Index, space: Space) Error!void {
const tree = r.tree;
const ais = r.ais;
if (r.fixups.replace_nodes_with_string.get(node)) |replacement| {
@ -886,7 +888,7 @@ fn renderExpression(r: *Render, node: Ast.Node.Index, space: Space) anyerror!voi
/// Same as `renderExpression`, but afterwards looks for any
/// append_string_after_node fixups to apply
fn renderExpressionFixup(r: *Render, node: Ast.Node.Index, space: Space) anyerror!void {
fn renderExpressionFixup(r: *Render, node: Ast.Node.Index, space: Space) Error!void {
const ais = r.ais;
try renderExpression(r, node, space);
if (r.fixups.append_string_after_node.get(node)) |bytes| {
@ -898,7 +900,7 @@ fn renderArrayType(
r: *Render,
array_type: Ast.full.ArrayType,
space: Space,
) anyerror!void {
) Error!void {
const tree = r.tree;
const ais = r.ais;
const rbracket = tree.firstToken(array_type.ast.elem_type) - 1;
@ -916,7 +918,7 @@ fn renderArrayType(
return renderExpression(r, array_type.ast.elem_type, space);
}
fn renderPtrType(r: *Render, ptr_type: Ast.full.PtrType, space: Space) anyerror!void {
fn renderPtrType(r: *Render, ptr_type: Ast.full.PtrType, space: Space) Error!void {
const tree = r.tree;
const main_token = ptr_type.ast.main_token;
switch (ptr_type.size) {
@ -1010,7 +1012,7 @@ fn renderSlice(
slice_node: Ast.Node.Index,
slice: Ast.full.Slice,
space: Space,
) anyerror!void {
) Error!void {
const tree = r.tree;
const after_start_space_bool = nodeCausesSliceOpSpace(tree.nodeTag(slice.ast.start)) or
if (slice.ast.end.unwrap()) |end| nodeCausesSliceOpSpace(tree.nodeTag(end)) else false;
@ -1043,7 +1045,7 @@ fn renderAsmOutput(
r: *Render,
asm_output: Ast.Node.Index,
space: Space,
) anyerror!void {
) Error!void {
const tree = r.tree;
assert(tree.nodeTag(asm_output) == .asm_output);
const symbolic_name = tree.nodeMainToken(asm_output);
@ -1069,7 +1071,7 @@ fn renderAsmInput(
r: *Render,
asm_input: Ast.Node.Index,
space: Space,
) anyerror!void {
) Error!void {
const tree = r.tree;
assert(tree.nodeTag(asm_input) == .asm_input);
const symbolic_name = tree.nodeMainToken(asm_input);
@ -1091,7 +1093,7 @@ fn renderVarDecl(
ignore_comptime_token: bool,
/// `comma_space` and `space` are used for destructure LHS decls.
space: Space,
) anyerror!void {
) Error!void {
try renderVarDeclWithoutFixups(r, var_decl, ignore_comptime_token, space);
if (r.fixups.unused_var_decls.contains(var_decl.ast.mut_token + 1)) {
// Discard the variable like this: `_ = foo;`
@ -1109,7 +1111,7 @@ fn renderVarDeclWithoutFixups(
ignore_comptime_token: bool,
/// `comma_space` and `space` are used for destructure LHS decls.
space: Space,
) anyerror!void {
) Error!void {
const tree = r.tree;
const ais = r.ais;
@ -1221,7 +1223,7 @@ fn renderVarDeclWithoutFixups(
ais.popIndent();
}
fn renderIf(r: *Render, if_node: Ast.full.If, space: Space) anyerror!void {
fn renderIf(r: *Render, if_node: Ast.full.If, space: Space) Error!void {
return renderWhile(r, .{
.ast = .{
.while_token = if_node.ast.if_token,
@ -1240,7 +1242,7 @@ fn renderIf(r: *Render, if_node: Ast.full.If, space: Space) anyerror!void {
/// Note that this function is additionally used to render if expressions, with
/// respective values set to null.
fn renderWhile(r: *Render, while_node: Ast.full.While, space: Space) anyerror!void {
fn renderWhile(r: *Render, while_node: Ast.full.While, space: Space) Error!void {
const tree = r.tree;
if (while_node.label_token) |label| {
@ -1310,7 +1312,7 @@ fn renderThenElse(
maybe_error_token: ?Ast.TokenIndex,
opt_else_expr: Ast.Node.OptionalIndex,
space: Space,
) anyerror!void {
) Error!void {
const tree = r.tree;
const ais = r.ais;
const then_expr_is_block = nodeIsBlock(tree.nodeTag(then_expr));
@ -1365,7 +1367,7 @@ fn renderThenElse(
}
}
fn renderFor(r: *Render, for_node: Ast.full.For, space: Space) anyerror!void {
fn renderFor(r: *Render, for_node: Ast.full.For, space: Space) Error!void {
const tree = r.tree;
const ais = r.ais;
const token_tags = tree.tokens.items(.tag);
@ -1440,7 +1442,7 @@ fn renderContainerField(
container: Container,
field_param: Ast.full.ContainerField,
space: Space,
) anyerror!void {
) Error!void {
const tree = r.tree;
const ais = r.ais;
var field = field_param;
@ -1549,7 +1551,7 @@ fn renderBuiltinCall(
builtin_token: Ast.TokenIndex,
params: []const Ast.Node.Index,
space: Space,
) anyerror!void {
) Error!void {
const tree = r.tree;
const ais = r.ais;
@ -1622,7 +1624,7 @@ fn renderBuiltinCall(
}
}
fn renderFnProto(r: *Render, fn_proto: Ast.full.FnProto, space: Space) anyerror!void {
fn renderFnProto(r: *Render, fn_proto: Ast.full.FnProto, space: Space) Error!void {
const tree = r.tree;
const ais = r.ais;
@ -1847,7 +1849,7 @@ fn renderSwitchCase(
r: *Render,
switch_case: Ast.full.SwitchCase,
space: Space,
) anyerror!void {
) Error!void {
const ais = r.ais;
const tree = r.tree;
const trailing_comma = tree.tokenTag(switch_case.ast.arrow_token - 1) == .comma;
@ -1909,7 +1911,7 @@ fn renderBlock(
block_node: Ast.Node.Index,
statements: []const Ast.Node.Index,
space: Space,
) anyerror!void {
) Error!void {
const tree = r.tree;
const ais = r.ais;
const lbrace = tree.nodeMainToken(block_node);
@ -1934,7 +1936,7 @@ fn finishRenderBlock(
block_node: Ast.Node.Index,
statements: []const Ast.Node.Index,
space: Space,
) anyerror!void {
) Error!void {
const tree = r.tree;
const ais = r.ais;
for (statements, 0..) |stmt, i| {
@ -1962,7 +1964,7 @@ fn renderStructInit(
struct_node: Ast.Node.Index,
struct_init: Ast.full.StructInit,
space: Space,
) anyerror!void {
) Error!void {
const tree = r.tree;
const ais = r.ais;
@ -2033,7 +2035,7 @@ fn renderArrayInit(
r: *Render,
array_init: Ast.full.ArrayInit,
space: Space,
) anyerror!void {
) Error!void {
const tree = r.tree;
const ais = r.ais;
const gpa = r.gpa;
@ -2263,7 +2265,7 @@ fn renderContainerDecl(
container_decl_node: Ast.Node.Index,
container_decl: Ast.full.ContainerDecl,
space: Space,
) anyerror!void {
) Error!void {
const tree = r.tree;
const ais = r.ais;
@ -2382,7 +2384,7 @@ fn renderAsm(
r: *Render,
asm_node: Ast.full.Asm,
space: Space,
) anyerror!void {
) Error!void {
const tree = r.tree;
const ais = r.ais;
@ -2548,7 +2550,7 @@ fn renderCall(
r: *Render,
call: Ast.full.Call,
space: Space,
) anyerror!void {
) Error!void {
if (call.async_token) |async_token| {
try renderToken(r, async_token, .space);
}
@ -2561,7 +2563,7 @@ fn renderParamList(
lparen: Ast.TokenIndex,
params: []const Ast.Node.Index,
space: Space,
) anyerror!void {
) Error!void {
const tree = r.tree;
const ais = r.ais;
@ -2614,7 +2616,7 @@ fn renderParamList(
/// Render an expression, and the comma that follows it, if it is present in the source.
/// If a comma is present, and `space` is `Space.comma`, render only a single comma.
fn renderExpressionComma(r: *Render, node: Ast.Node.Index, space: Space) anyerror!void {
fn renderExpressionComma(r: *Render, node: Ast.Node.Index, space: Space) Error!void {
const tree = r.tree;
const maybe_comma = tree.lastToken(node) + 1;
if (tree.tokenTag(maybe_comma) == .comma and space != .comma) {
@ -2627,7 +2629,7 @@ fn renderExpressionComma(r: *Render, node: Ast.Node.Index, space: Space) anyerro
/// Render a token, and the comma that follows it, if it is present in the source.
/// If a comma is present, and `space` is `Space.comma`, render only a single comma.
fn renderTokenComma(r: *Render, token: Ast.TokenIndex, space: Space) anyerror!void {
fn renderTokenComma(r: *Render, token: Ast.TokenIndex, space: Space) Error!void {
const tree = r.tree;
const maybe_comma = token + 1;
if (tree.tokenTag(maybe_comma) == .comma and space != .comma) {
@ -2640,7 +2642,7 @@ fn renderTokenComma(r: *Render, token: Ast.TokenIndex, space: Space) anyerror!vo
/// Render an identifier, and the comma that follows it, if it is present in the source.
/// If a comma is present, and `space` is `Space.comma`, render only a single comma.
fn renderIdentifierComma(r: *Render, token: Ast.TokenIndex, space: Space, quote: QuoteBehavior) anyerror!void {
fn renderIdentifierComma(r: *Render, token: Ast.TokenIndex, space: Space, quote: QuoteBehavior) Error!void {
const tree = r.tree;
const maybe_comma = token + 1;
if (tree.tokenTag(maybe_comma) == .comma and space != .comma) {
@ -2672,7 +2674,7 @@ const Space = enum {
skip,
};
fn renderToken(r: *Render, token_index: Ast.TokenIndex, space: Space) anyerror!void {
fn renderToken(r: *Render, token_index: Ast.TokenIndex, space: Space) Error!void {
const tree = r.tree;
const ais = r.ais;
const lexeme = tokenSliceForRender(tree, token_index);
@ -2680,7 +2682,7 @@ fn renderToken(r: *Render, token_index: Ast.TokenIndex, space: Space) anyerror!v
try renderSpace(r, token_index, lexeme.len, space);
}
fn renderTokenOverrideSpaceMode(r: *Render, token_index: Ast.TokenIndex, space: Space, override_space: Space) anyerror!void {
fn renderTokenOverrideSpaceMode(r: *Render, token_index: Ast.TokenIndex, space: Space, override_space: Space) Error!void {
const tree = r.tree;
const ais = r.ais;
const lexeme = tokenSliceForRender(tree, token_index);
@ -2690,7 +2692,7 @@ fn renderTokenOverrideSpaceMode(r: *Render, token_index: Ast.TokenIndex, space:
try renderSpace(r, token_index, lexeme.len, space);
}
fn renderSpace(r: *Render, token_index: Ast.TokenIndex, lexeme_len: usize, space: Space) anyerror!void {
fn renderSpace(r: *Render, token_index: Ast.TokenIndex, lexeme_len: usize, space: Space) Error!void {
const tree = r.tree;
const ais = r.ais;
@ -2735,7 +2737,7 @@ fn renderSpace(r: *Render, token_index: Ast.TokenIndex, lexeme_len: usize, space
}
}
fn renderOnlySpace(r: *Render, space: Space) anyerror!void {
fn renderOnlySpace(r: *Render, space: Space) Error!void {
const ais = r.ais;
switch (space) {
.none => {},
@ -2754,7 +2756,7 @@ const QuoteBehavior = enum {
eagerly_unquote_except_underscore,
};
fn renderIdentifier(r: *Render, token_index: Ast.TokenIndex, space: Space, quote: QuoteBehavior) anyerror!void {
fn renderIdentifier(r: *Render, token_index: Ast.TokenIndex, space: Space, quote: QuoteBehavior) Error!void {
const tree = r.tree;
assert(tree.tokenTag(token_index) == .identifier);
const lexeme = tokenSliceForRender(tree, token_index);
@ -2940,7 +2942,7 @@ fn hasMultilineString(tree: Ast, start_token: Ast.TokenIndex, end_token: Ast.Tok
/// Assumes that start is the first byte past the previous token and
/// that end is the last byte before the next token.
fn renderComments(r: *Render, start: usize, end: usize) anyerror!bool {
fn renderComments(r: *Render, start: usize, end: usize) Error!bool {
const tree = r.tree;
const ais = r.ais;
@ -3003,12 +3005,12 @@ fn renderComments(r: *Render, start: usize, end: usize) anyerror!bool {
return index != start;
}
fn renderExtraNewline(r: *Render, node: Ast.Node.Index) anyerror!void {
fn renderExtraNewline(r: *Render, node: Ast.Node.Index) Error!void {
return renderExtraNewlineToken(r, r.tree.firstToken(node));
}
/// Check if there is an empty line immediately before the given token. If so, render it.
fn renderExtraNewlineToken(r: *Render, token_index: Ast.TokenIndex) anyerror!void {
fn renderExtraNewlineToken(r: *Render, token_index: Ast.TokenIndex) Error!void {
const tree = r.tree;
const ais = r.ais;
const token_start = tree.tokenStart(token_index);
@ -3036,7 +3038,7 @@ fn renderExtraNewlineToken(r: *Render, token_index: Ast.TokenIndex) anyerror!voi
/// end_token is the token one past the last doc comment token. This function
/// searches backwards from there.
fn renderDocComments(r: *Render, end_token: Ast.TokenIndex) anyerror!void {
fn renderDocComments(r: *Render, end_token: Ast.TokenIndex) Error!void {
const tree = r.tree;
// Search backwards for the first doc comment.
if (end_token == 0) return;
@ -3067,7 +3069,7 @@ fn renderDocComments(r: *Render, end_token: Ast.TokenIndex) anyerror!void {
}
/// start_token is first container doc comment token.
fn renderContainerDocComments(r: *Render, start_token: Ast.TokenIndex) anyerror!void {
fn renderContainerDocComments(r: *Render, start_token: Ast.TokenIndex) Error!void {
const tree = r.tree;
var tok = start_token;
while (tree.tokenTag(tok) == .container_doc_comment) : (tok += 1) {
@ -3081,7 +3083,7 @@ fn renderContainerDocComments(r: *Render, start_token: Ast.TokenIndex) anyerror!
}
}
fn discardAllParams(r: *Render, fn_proto_node: Ast.Node.Index) anyerror!void {
fn discardAllParams(r: *Render, fn_proto_node: Ast.Node.Index) Error!void {
const tree = &r.tree;
const ais = r.ais;
var buf: [1]Ast.Node.Index = undefined;
@ -3129,7 +3131,7 @@ fn anythingBetween(tree: Ast, start_token: Ast.TokenIndex, end_token: Ast.TokenI
return false;
}
fn writeFixingWhitespace(bw: *std.io.BufferedWriter, slice: []const u8) anyerror!void {
fn writeFixingWhitespace(bw: *std.io.BufferedWriter, slice: []const u8) Error!void {
for (slice) |byte| switch (byte) {
'\t' => try bw.splatByteAll(' ', indent_delta),
'\r' => {},
@ -3308,7 +3310,7 @@ const AutoIndentingStream = struct {
self.space_stack.deinit();
}
pub fn writeAll(ais: *AutoIndentingStream, bytes: []const u8) anyerror!void {
pub fn writeAll(ais: *AutoIndentingStream, bytes: []const u8) Error!void {
if (bytes.len == 0) return;
try ais.applyIndent();
if (ais.disabled_offset == null) try ais.underlying_writer.writeAll(bytes);
@ -3317,19 +3319,19 @@ const AutoIndentingStream = struct {
/// Assumes that if the printed data ends with a newline, it is directly
/// contained in the format string.
pub fn print(ais: *AutoIndentingStream, comptime format: []const u8, args: anytype) anyerror!void {
pub fn print(ais: *AutoIndentingStream, comptime format: []const u8, args: anytype) Error!void {
try ais.applyIndent();
if (ais.disabled_offset == null) try ais.underlying_writer.print(format, args);
if (format[format.len - 1] == '\n') ais.resetLine();
}
pub fn writeByte(ais: *AutoIndentingStream, byte: u8) anyerror!void {
pub fn writeByte(ais: *AutoIndentingStream, byte: u8) Error!void {
try ais.applyIndent();
if (ais.disabled_offset == null) try ais.underlying_writer.writeByte(byte);
assert(byte != '\n');
}
pub fn splatByteAll(ais: *AutoIndentingStream, byte: u8, n: usize) anyerror!void {
pub fn splatByteAll(ais: *AutoIndentingStream, byte: u8, n: usize) Error!void {
assert(byte != '\n');
try ais.applyIndent();
if (ais.disabled_offset == null) try ais.underlying_writer.splatByteAll(byte, n);
@ -3350,13 +3352,13 @@ const AutoIndentingStream = struct {
ais.indent_delta = new_indent_delta;
}
pub fn insertNewline(ais: *AutoIndentingStream) anyerror!void {
pub fn insertNewline(ais: *AutoIndentingStream) Error!void {
if (ais.disabled_offset == null) try ais.underlying_writer.writeByte('\n');
ais.resetLine();
}
/// Insert a newline unless the current line is blank
pub fn maybeInsertNewline(ais: *AutoIndentingStream) anyerror!void {
pub fn maybeInsertNewline(ais: *AutoIndentingStream) Error!void {
if (!ais.current_line_empty)
try ais.insertNewline();
}
@ -3483,7 +3485,7 @@ const AutoIndentingStream = struct {
}
/// Writes ' ' bytes if the current line is empty
fn applyIndent(ais: *AutoIndentingStream) anyerror!void {
fn applyIndent(ais: *AutoIndentingStream) Error!void {
const current_indent = ais.currentIndent();
if (ais.current_line_empty and current_indent > 0) {
if (ais.disabled_offset == null) {

View File

@ -119,7 +119,7 @@ const Value = extern struct {
}
}
pub fn format(value: Value, bw: *std.io.BufferedWriter, comptime fmt: []const u8) anyerror!void {
pub fn format(value: Value, bw: *std.io.BufferedWriter, comptime fmt: []const u8) !void {
comptime assert(fmt.len == 0);
// Work around x86_64 backend limitation.