std: start reworking std.io

hello world is compiling
Andrew Kelley 2025-02-12 19:55:09 -08:00
parent ee6d19480d
commit 00c6c836a6
31 changed files with 2218 additions and 1737 deletions

View File

@ -436,7 +436,6 @@ set(ZIG_STAGE2_SOURCES
lib/std/elf.zig
lib/std/fifo.zig
lib/std/fmt.zig
lib/std/fmt/format_float.zig
lib/std/fmt/parse_float.zig
lib/std/fs.zig
lib/std/fs/AtomicFile.zig
@ -454,12 +453,9 @@ set(ZIG_STAGE2_SOURCES
lib/std/io/Reader.zig
lib/std/io/Writer.zig
lib/std/io/buffered_atomic_file.zig
lib/std/io/buffered_writer.zig
lib/std/io/change_detection_stream.zig
lib/std/io/counting_reader.zig
lib/std/io/counting_writer.zig
lib/std/io/find_byte_writer.zig
lib/std/io/fixed_buffer_stream.zig
lib/std/io/limited_reader.zig
lib/std/io/seekable_stream.zig
lib/std/json.zig

View File

@ -286,11 +286,9 @@ pub const HashHelper = struct {
pub fn binToHex(bin_digest: BinDigest) HexDigest {
var out_digest: HexDigest = undefined;
_ = fmt.bufPrint(
&out_digest,
"{s}",
.{fmt.fmtSliceHexLower(&bin_digest)},
) catch unreachable;
var bw: std.io.BufferedWriter = undefined;
bw.initFixed(&out_digest);
bw.printHex(&bin_digest, .lower) catch unreachable;
return out_digest;
}
@ -1133,11 +1131,11 @@ pub const Manifest = struct {
const writer = contents.writer();
try writer.writeAll(manifest_header ++ "\n");
for (self.files.keys()) |file| {
try writer.print("{d} {d} {d} {} {d} {s}\n", .{
try writer.print("{d} {d} {d} {x} {d} {s}\n", .{
file.stat.size,
file.stat.inode,
file.stat.mtime,
fmt.fmtSliceHexLower(&file.bin_digest),
&file.bin_digest,
file.prefixed_path.prefix,
file.prefixed_path.sub_path,
});
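For context on the hunks above and below: the reworked std.fmt is expected to hex-encode byte slices and arrays passed to `{x}`/`{X}`, which is what lets these call sites drop `fmt.fmtSliceHexLower`/`fmtSliceHexUpper`. A minimal sketch under that assumption (the digest value is illustrative):

const std = @import("std");

test "hex format sketch" {
    // Assumes the reworked {x} specifier renders a byte array as lowercase hex.
    const digest = [4]u8{ 0xde, 0xad, 0xbe, 0xef };
    var buf: [8]u8 = undefined;
    const s = try std.fmt.bufPrint(&buf, "{x}", .{&digest});
    try std.testing.expectEqualStrings("deadbeef", s);
}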

View File

@ -963,7 +963,7 @@ const MachODumper = struct {
.UUID => {
const uuid = lc.cast(macho.uuid_command).?;
try writer.writeByte('\n');
try writer.print("uuid {x}", .{std.fmt.fmtSliceHexLower(&uuid.uuid)});
try writer.print("uuid {x}", .{&uuid.uuid});
},
.DATA_IN_CODE,

View File

@ -1696,9 +1696,7 @@ fn getZigArgs(compile: *Compile, fuzz: bool) ![][]const u8 {
if (compile.build_id orelse b.build_id) |build_id| {
try zig_args.append(switch (build_id) {
.hexstring => |hs| b.fmt("--build-id=0x{s}", .{
std.fmt.fmtSliceHexLower(hs.toSlice()),
}),
.hexstring => |hs| b.fmt("--build-id=0x{x}", .{hs.toSlice()}),
.none, .fast, .uuid, .sha1, .md5 => b.fmt("--build-id={s}", .{@tagName(build_id)}),
});
}
@ -1793,11 +1791,7 @@ fn getZigArgs(compile: *Compile, fuzz: bool) ![][]const u8 {
var args_hash: [Sha256.digest_length]u8 = undefined;
Sha256.hash(args, &args_hash, .{});
var args_hex_hash: [Sha256.digest_length * 2]u8 = undefined;
_ = try std.fmt.bufPrint(
&args_hex_hash,
"{s}",
.{std.fmt.fmtSliceHexLower(&args_hash)},
);
_ = try std.fmt.bufPrint(&args_hex_hash, "{x}", .{&args_hash});
const args_file = "args" ++ fs.path.sep_str ++ args_hex_hash;
try b.cache_root.handle.writeFile(.{ .sub_path = args_file, .data = args });

View File

@ -338,23 +338,66 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?mem.Alignment) ty
@memcpy(self.items[old_len..][0..items.len], items);
}
pub const Writer = if (T != u8)
@compileError("The Writer interface is only defined for ArrayList(u8) " ++
"but the given type is ArrayList(" ++ @typeName(T) ++ ")")
else
std.io.Writer(*Self, Allocator.Error, appendWrite);
/// Initializes a Writer which will append to the list.
pub fn writer(self: *Self) Writer {
return .{ .context = self };
/// Initializes a `std.io.Writer` which will append to the list.
pub fn writer(self: *Self) std.io.Writer {
comptime assert(T == u8);
return .{
.context = self,
.vtable = &.{
.writev = expanding_writev,
.writeFile = expanding_writeFile,
},
};
}
/// Same as `append` except it returns the number of bytes written, which is always the same
/// as `m.len`. The purpose of this function existing is to match `std.io.Writer` API.
/// Invalidates element pointers if additional memory is needed.
fn appendWrite(self: *Self, m: []const u8) Allocator.Error!usize {
try self.appendSlice(m);
return m.len;
fn expanding_writev(context: *anyopaque, data: []const []const u8) anyerror!usize {
const self: *Self = @alignCast(@ptrCast(context));
const original_len = self.items.len;
var new_capacity: usize = self.capacity;
for (data) |bytes| new_capacity += bytes.len;
try self.ensureTotalCapacity(new_capacity);
for (data) |bytes| self.appendSliceAssumeCapacity(bytes);
return self.items.len - original_len;
}
fn expanding_writeFile(
context: *anyopaque,
file: std.fs.File,
offset: u64,
len: std.io.Writer.VTable.FileLen,
headers_and_trailers: []const []const u8,
headers_len: usize,
) anyerror!usize {
const self: *Self = @alignCast(@ptrCast(context));
const trailers = headers_and_trailers[headers_len..];
const original_len = self.items.len;
if (len == .entire_file) {
var new_capacity: usize = self.capacity + std.atomic.cache_line;
for (headers_and_trailers) |bytes| new_capacity += bytes.len;
try self.ensureTotalCapacity(new_capacity);
for (headers_and_trailers[0..headers_len]) |bytes| self.appendSliceAssumeCapacity(bytes);
const dest = self.items.ptr[self.items.len..self.capacity];
const n = try file.pread(dest, offset);
if (n == 0) {
new_capacity = self.capacity;
for (trailers) |bytes| new_capacity += bytes.len;
try self.ensureTotalCapacity(new_capacity);
for (trailers) |bytes| self.appendSliceAssumeCapacity(bytes);
return self.items.len - original_len;
}
self.items.len += n;
return self.items.len - original_len;
}
var new_capacity: usize = self.capacity + len.int();
for (headers_and_trailers) |bytes| new_capacity += bytes.len;
try self.ensureTotalCapacity(new_capacity);
for (headers_and_trailers[0..headers_len]) |bytes| self.appendSliceAssumeCapacity(bytes);
const dest = self.items.ptr[self.items.len..][0..len.int()];
const n = try file.pread(dest, offset);
self.items.len += n;
if (n < dest.len) return self.items.len - original_len;
for (trailers) |bytes| self.appendSliceAssumeCapacity(bytes);
return self.items.len - original_len;
}
pub const FixedWriter = std.io.Writer(*Self, Allocator.Error, appendWriteFixed);
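A caller-side sketch of the new `writer()` above, assuming the vtable-based `std.io.Writer` keeps the `print` helper used elsewhere in this commit (the formatted value is illustrative):

const std = @import("std");

test "ArrayList(u8) writer sketch" {
    var list = std.ArrayList(u8).init(std.testing.allocator);
    defer list.deinit();
    // Growth happens through expanding_writev via the Writer vtable.
    try list.writer().print("{d} items\n", .{3});
    try std.testing.expectEqualStrings("3 items\n", list.items);
}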

View File

@ -124,9 +124,9 @@ test "curve25519" {
const p = try Curve25519.basePoint.clampedMul(s);
try p.rejectIdentity();
var buf: [128]u8 = undefined;
try std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&p.toBytes())}), "E6F2A4D1C28EE5C7AD0329268255A468AD407D2672824C0C0EB30EA6EF450145");
try std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{&p.toBytes()}), "E6F2A4D1C28EE5C7AD0329268255A468AD407D2672824C0C0EB30EA6EF450145");
const q = try p.clampedMul(s);
try std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&q.toBytes())}), "3614E119FFE55EC55B87D6B19971A9F4CBC78EFE80BEC55B96392BABCC712537");
try std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{&q.toBytes()}), "3614E119FFE55EC55B87D6B19971A9F4CBC78EFE80BEC55B96392BABCC712537");
try Curve25519.rejectNonCanonical(s);
s[31] |= 0x80;

View File

@ -509,8 +509,8 @@ test "key pair creation" {
_ = try fmt.hexToBytes(seed[0..], "8052030376d47112be7f73ed7a019293dd12ad910b654455798b4667d73de166");
const key_pair = try Ed25519.KeyPair.generateDeterministic(seed);
var buf: [256]u8 = undefined;
try std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&key_pair.secret_key.toBytes())}), "8052030376D47112BE7F73ED7A019293DD12AD910B654455798B4667D73DE1662D6F7455D97B4A3A10D7293909D1A4F2058CB9A370E43FA8154BB280DB839083");
try std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&key_pair.public_key.toBytes())}), "2D6F7455D97B4A3A10D7293909D1A4F2058CB9A370E43FA8154BB280DB839083");
try std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{&key_pair.secret_key.toBytes()}), "8052030376D47112BE7F73ED7A019293DD12AD910B654455798B4667D73DE1662D6F7455D97B4A3A10D7293909D1A4F2058CB9A370E43FA8154BB280DB839083");
try std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{&key_pair.public_key.toBytes()}), "2D6F7455D97B4A3A10D7293909D1A4F2058CB9A370E43FA8154BB280DB839083");
}
test "signature" {
@ -520,7 +520,7 @@ test "signature" {
const sig = try key_pair.sign("test", null);
var buf: [128]u8 = undefined;
try std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&sig.toBytes())}), "10A442B4A80CC4225B154F43BEF28D2472CA80221951262EB8E0DF9091575E2687CC486E77263C3418C757522D54F84B0359236ABBBD4ACD20DC297FDCA66808");
try std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{&sig.toBytes()}), "10A442B4A80CC4225B154F43BEF28D2472CA80221951262EB8E0DF9091575E2687CC486E77263C3418C757522D54F84B0359236ABBBD4ACD20DC297FDCA66808");
try sig.verify("test", key_pair.public_key);
try std.testing.expectError(error.SignatureVerificationFailed, sig.verify("TEST", key_pair.public_key));
}

View File

@ -546,7 +546,7 @@ test "packing/unpacking" {
var b = Edwards25519.basePoint;
const pk = try b.mul(s);
var buf: [128]u8 = undefined;
try std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&pk.toBytes())}), "074BC7E0FCBD587FDBC0969444245FADC562809C8F6E97E949AF62484B5B81A6");
try std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{&pk.toBytes()}), "074BC7E0FCBD587FDBC0969444245FADC562809C8F6E97E949AF62484B5B81A6");
const small_order_ss: [7][32]u8 = .{
.{

View File

@ -175,21 +175,21 @@ pub const Ristretto255 = struct {
test "ristretto255" {
const p = Ristretto255.basePoint;
var buf: [256]u8 = undefined;
try std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&p.toBytes())}), "E2F2AE0A6ABC4E71A884A961C500515F58E30B6AA582DD8DB6A65945E08D2D76");
try std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{&p.toBytes()}), "E2F2AE0A6ABC4E71A884A961C500515F58E30B6AA582DD8DB6A65945E08D2D76");
var r: [Ristretto255.encoded_length]u8 = undefined;
_ = try fmt.hexToBytes(r[0..], "6a493210f7499cd17fecb510ae0cea23a110e8d5b901f8acadd3095c73a3b919");
var q = try Ristretto255.fromBytes(r);
q = q.dbl().add(p);
try std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&q.toBytes())}), "E882B131016B52C1D3337080187CF768423EFCCBB517BB495AB812C4160FF44E");
try std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{&q.toBytes()}), "E882B131016B52C1D3337080187CF768423EFCCBB517BB495AB812C4160FF44E");
const s = [_]u8{15} ++ [_]u8{0} ** 31;
const w = try p.mul(s);
try std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&w.toBytes())}), "E0C418F7C8D9C4CDD7395B93EA124F3AD99021BB681DFC3302A9D99A2E53E64E");
try std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{&w.toBytes()}), "E0C418F7C8D9C4CDD7395B93EA124F3AD99021BB681DFC3302A9D99A2E53E64E");
try std.testing.expect(p.dbl().dbl().dbl().dbl().equivalent(w.add(p)));
const h = [_]u8{69} ** 32 ++ [_]u8{42} ** 32;
const ph = Ristretto255.fromUniform(h);
try std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&ph.toBytes())}), "DCCA54E037A4311EFBEEF413ACD21D35276518970B7A61DC88F8587B493D5E19");
try std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{&ph.toBytes()}), "DCCA54E037A4311EFBEEF413ACD21D35276518970B7A61DC88F8587B493D5E19");
}

View File

@ -850,10 +850,10 @@ test "scalar25519" {
var y = x.toBytes();
try rejectNonCanonical(y);
var buf: [128]u8 = undefined;
try std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&y)}), "1E979B917937F3DE71D18077F961F6CEFF01030405060708010203040506070F");
try std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{&y}), "1E979B917937F3DE71D18077F961F6CEFF01030405060708010203040506070F");
const reduced = reduce(field_order_s);
try std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&reduced)}), "0000000000000000000000000000000000000000000000000000000000000000");
try std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{&reduced}), "0000000000000000000000000000000000000000000000000000000000000000");
}
test "non-canonical scalar25519" {
@ -867,7 +867,7 @@ test "mulAdd overflow check" {
const c: [32]u8 = [_]u8{0xff} ** 32;
const x = mulAdd(a, b, c);
var buf: [128]u8 = undefined;
try std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&x)}), "D14DF91389432C25AD60FF9791B9FD1D67BEF517D273ECCE3D9A307C1B419903");
try std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{&x}), "D14DF91389432C25AD60FF9791B9FD1D67BEF517D273ECCE3D9A307C1B419903");
}
test "scalar field inversion" {

View File

@ -1145,7 +1145,7 @@ test "xchacha20" {
var c: [m.len]u8 = undefined;
XChaCha20IETF.xor(c[0..], m[0..], 0, key, nonce);
var buf: [2 * c.len]u8 = undefined;
try testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&c)}), "E0A1BCF939654AFDBDC1746EC49832647C19D891F0D1A81FC0C1703B4514BDEA584B512F6908C2C5E9DD18D5CBC1805DE5803FE3B9CA5F193FB8359E91FAB0C3BB40309A292EB1CF49685C65C4A3ADF4F11DB0CD2B6B67FBC174BC2E860E8F769FD3565BBFAD1C845E05A0FED9BE167C240D");
try testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{&c}), "E0A1BCF939654AFDBDC1746EC49832647C19D891F0D1A81FC0C1703B4514BDEA584B512F6908C2C5E9DD18D5CBC1805DE5803FE3B9CA5F193FB8359E91FAB0C3BB40309A292EB1CF49685C65C4A3ADF4F11DB0CD2B6B67FBC174BC2E860E8F769FD3565BBFAD1C845E05A0FED9BE167C240D");
}
{
const ad = "Additional data";
@ -1154,7 +1154,7 @@ test "xchacha20" {
var out: [m.len]u8 = undefined;
try XChaCha20Poly1305.decrypt(out[0..], c[0..m.len], c[m.len..].*, ad, nonce, key);
var buf: [2 * c.len]u8 = undefined;
try testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&c)}), "994D2DD32333F48E53650C02C7A2ABB8E018B0836D7175AEC779F52E961780768F815C58F1AA52D211498DB89B9216763F569C9433A6BBFCEFB4D4A49387A4C5207FBB3B5A92B5941294DF30588C6740D39DC16FA1F0E634F7246CF7CDCB978E44347D89381B7A74EB7084F754B90BDE9AAF5A94B8F2A85EFD0B50692AE2D425E234");
try testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{&c}), "994D2DD32333F48E53650C02C7A2ABB8E018B0836D7175AEC779F52E961780768F815C58F1AA52D211498DB89B9216763F569C9433A6BBFCEFB4D4A49387A4C5207FBB3B5A92B5941294DF30588C6740D39DC16FA1F0E634F7246CF7CDCB978E44347D89381B7A74EB7084F754B90BDE9AAF5A94B8F2A85EFD0B50692AE2D425E234");
try testing.expectEqualSlices(u8, out[0..], m);
c[0] +%= 1;
try testing.expectError(error.AuthenticationFailed, XChaCha20Poly1305.decrypt(out[0..], c[0..m.len], c[m.len..].*, ad, nonce, key));

View File

@ -1741,7 +1741,7 @@ test "NIST KAT test" {
for (0..100) |i| {
g.fill(&seed);
try std.fmt.format(fw, "count = {}\n", .{i});
try std.fmt.format(fw, "seed = {s}\n", .{std.fmt.fmtSliceHexUpper(&seed)});
try std.fmt.format(fw, "seed = {X}\n", .{&seed});
var g2 = NistDRBG.init(seed);
// This is not equivalent to g2.fill(kseed[:]). As the reference
@ -1756,16 +1756,16 @@ test "NIST KAT test" {
const e = kp.public_key.encaps(eseed);
const ss2 = try kp.secret_key.decaps(&e.ciphertext);
try testing.expectEqual(ss2, e.shared_secret);
try std.fmt.format(fw, "pk = {s}\n", .{std.fmt.fmtSliceHexUpper(&kp.public_key.toBytes())});
try std.fmt.format(fw, "sk = {s}\n", .{std.fmt.fmtSliceHexUpper(&kp.secret_key.toBytes())});
try std.fmt.format(fw, "ct = {s}\n", .{std.fmt.fmtSliceHexUpper(&e.ciphertext)});
try std.fmt.format(fw, "ss = {s}\n\n", .{std.fmt.fmtSliceHexUpper(&e.shared_secret)});
try std.fmt.format(fw, "pk = {X}\n", .{&kp.public_key.toBytes()});
try std.fmt.format(fw, "sk = {X}\n", .{&kp.secret_key.toBytes()});
try std.fmt.format(fw, "ct = {X}\n", .{&e.ciphertext});
try std.fmt.format(fw, "ss = {X}\n\n", .{&e.shared_secret});
}
var out: [32]u8 = undefined;
f.final(&out);
var outHex: [64]u8 = undefined;
_ = try std.fmt.bufPrint(&outHex, "{s}", .{std.fmt.fmtSliceHexLower(&out)});
_ = try std.fmt.bufPrint(&outHex, "{x}", .{&out});
try testing.expectEqual(outHex, modeHash[1].*);
}
}

View File

@ -1513,10 +1513,10 @@ fn logSecrets(key_log_file: std.fs.File, context: anytype, secrets: anytype) voi
defer if (locked) key_log_file.unlock();
key_log_file.seekFromEnd(0) catch {};
inline for (@typeInfo(@TypeOf(secrets)).@"struct".fields) |field| key_log_file.writer().print("{s}" ++
(if (@hasField(@TypeOf(context), "counter")) "_{d}" else "") ++ " {} {}\n", .{field.name} ++
(if (@hasField(@TypeOf(context), "counter")) "_{d}" else "") ++ " {x} {x}\n", .{field.name} ++
(if (@hasField(@TypeOf(context), "counter")) .{context.counter} else .{}) ++ .{
std.fmt.fmtSliceHexLower(context.client_random),
std.fmt.fmtSliceHexLower(@field(secrets, field.name)),
context.client_random,
@field(secrets, field.name),
}) catch {};
}

View File

@ -204,13 +204,23 @@ pub fn unlockStdErr() void {
std.Progress.unlockStdErr();
}
/// Allows the caller to freely write to stderr until `unlockStdErr` is called.
///
/// During the lock, any `std.Progress` information is cleared from the terminal.
///
/// Returns a `std.io.BufferedWriter` with empty buffer, meaning that it is
/// in fact unbuffered and does not need to be flushed.
pub fn lockStdErr2() std.io.BufferedWriter {
std.Progress.lockStdErr();
return io.getStdErr().unbufferedWriter();
}
/// Print to stderr, unbuffered, and silently returning on failure. Intended
/// for use in "printf debugging." Use `std.log` functions for proper logging.
pub fn print(comptime fmt: []const u8, args: anytype) void {
lockStdErr();
var bw = lockStdErr2();
defer unlockStdErr();
const stderr = io.getStdErr().writer();
nosuspend stderr.print(fmt, args) catch return;
nosuspend bw.print(fmt, args) catch return;
}
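Caller usage is unchanged; a typical printf-debugging call (the value is illustrative):

const x: u32 = 42;
std.debug.print("x = {d}\n", .{x}); // unbuffered; errors are silently ignored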
pub fn getStderrMutex() *std.Thread.Mutex {
@ -265,7 +275,7 @@ fn dumpHexInternal(bytes: []const u8, ttyconf: std.io.tty.Config, writer: anytyp
if (window.len < 16) {
var missing_columns = (16 - window.len) * 3;
if (window.len < 8) missing_columns += 1;
try writer.writeByteNTimes(' ', missing_columns);
try writer.splatByteAll(' ', missing_columns);
}
// 3. Print the characters.
@ -313,30 +323,32 @@ test dumpHexInternal {
}
/// Tries to print the current stack trace to stderr, unbuffered, and ignores any error returned.
/// TODO multithreaded awareness
pub fn dumpCurrentStackTrace(start_addr: ?usize) void {
nosuspend {
if (builtin.target.cpu.arch.isWasm()) {
if (native_os == .wasi) {
const stderr = io.getStdErr().writer();
stderr.print("Unable to dump stack trace: not implemented for Wasm\n", .{}) catch return;
}
return;
var stderr = lockStdErr2();
defer unlockStdErr();
nosuspend dumpCurrentStackTraceToWriter(start_addr, &stderr) catch return;
}
/// Prints the current stack trace to the provided writer.
pub fn dumpCurrentStackTraceToWriter(start_addr: ?usize, writer: *std.io.BufferedWriter) !void {
if (builtin.target.cpu.arch.isWasm()) {
if (native_os == .wasi) {
try writer.writeAll("Unable to dump stack trace: not implemented for Wasm\n");
}
const stderr = io.getStdErr().writer();
if (builtin.strip_debug_info) {
stderr.print("Unable to dump stack trace: debug info stripped\n", .{}) catch return;
return;
}
const debug_info = getSelfDebugInfo() catch |err| {
stderr.print("Unable to dump stack trace: Unable to open debug info: {s}\n", .{@errorName(err)}) catch return;
return;
};
writeCurrentStackTrace(stderr, debug_info, io.tty.detectConfig(io.getStdErr()), start_addr) catch |err| {
stderr.print("Unable to dump stack trace: {s}\n", .{@errorName(err)}) catch return;
return;
};
return;
}
if (builtin.strip_debug_info) {
try writer.writeAll("Unable to dump stack trace: debug info stripped\n");
return;
}
const debug_info = getSelfDebugInfo() catch |err| {
try writer.print("Unable to dump stack trace: Unable to open debug info: {s}\n", .{@errorName(err)});
return;
};
writeCurrentStackTrace(writer, debug_info, io.tty.detectConfig(io.getStdErr()), start_addr) catch |err| {
try writer.print("Unable to dump stack trace: {s}\n", .{@errorName(err)});
return;
};
}
pub const have_ucontext = posix.ucontext_t != void;
@ -402,16 +414,14 @@ pub inline fn getContext(context: *ThreadContext) bool {
/// Tries to print the stack trace starting from the supplied base pointer to stderr,
/// unbuffered, and ignores any error returned.
/// TODO multithreaded awareness
pub fn dumpStackTraceFromBase(context: *ThreadContext) void {
pub fn dumpStackTraceFromBase(context: *ThreadContext, stderr: *std.io.BufferedWriter) void {
nosuspend {
if (builtin.target.cpu.arch.isWasm()) {
if (native_os == .wasi) {
const stderr = io.getStdErr().writer();
stderr.print("Unable to dump stack trace: not implemented for Wasm\n", .{}) catch return;
}
return;
}
const stderr = io.getStdErr().writer();
if (builtin.strip_debug_info) {
stderr.print("Unable to dump stack trace: debug info stripped\n", .{}) catch return;
return;
@ -510,21 +520,23 @@ pub fn dumpStackTrace(stack_trace: std.builtin.StackTrace) void {
nosuspend {
if (builtin.target.cpu.arch.isWasm()) {
if (native_os == .wasi) {
const stderr = io.getStdErr().writer();
stderr.print("Unable to dump stack trace: not implemented for Wasm\n", .{}) catch return;
var stderr = lockStdErr2();
defer unlockStdErr();
stderr.writeAll("Unable to dump stack trace: not implemented for Wasm\n") catch return;
}
return;
}
const stderr = io.getStdErr().writer();
var stderr = lockStdErr2();
defer unlockStdErr();
if (builtin.strip_debug_info) {
stderr.print("Unable to dump stack trace: debug info stripped\n", .{}) catch return;
stderr.writeAll("Unable to dump stack trace: debug info stripped\n") catch return;
return;
}
const debug_info = getSelfDebugInfo() catch |err| {
stderr.print("Unable to dump stack trace: Unable to open debug info: {s}\n", .{@errorName(err)}) catch return;
return;
};
writeStackTrace(stack_trace, stderr, debug_info, io.tty.detectConfig(io.getStdErr())) catch |err| {
writeStackTrace(stack_trace, &stderr, debug_info, io.tty.detectConfig(io.getStdErr())) catch |err| {
stderr.print("Unable to dump stack trace: {s}\n", .{@errorName(err)}) catch return;
return;
};
@ -573,14 +585,14 @@ pub fn panicExtra(
const size = 0x1000;
const trunc_msg = "(msg truncated)";
var buf: [size + trunc_msg.len]u8 = undefined;
var bw: std.io.BufferedWriter = undefined;
bw.initFixed(buf[0..size]);
// a minor annoyance with this is that it will result in the NoSpaceLeft
// error being part of the @panic stack trace (but that error should
// only happen rarely)
const msg = std.fmt.bufPrint(buf[0..size], format, args) catch |err| switch (err) {
error.NoSpaceLeft => blk: {
@memcpy(buf[size..], trunc_msg);
break :blk &buf;
},
const msg = if (bw.print(format, args)) |_| bw.getWritten() else |_| blk: {
@memcpy(buf[size..], trunc_msg);
break :blk &buf;
};
std.builtin.panic.call(msg, ret_addr);
}
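The same fixed-buffer pattern replaces `std.fmt.bufPrint` in several places in this commit; a minimal sketch, assuming `initFixed`, `print`, and `getWritten` behave as the call sites here and in Cache.zig suggest:

// Sketch: render formatted text into a stack buffer via BufferedWriter,
// then retrieve the written bytes.
var buf: [64]u8 = undefined;
var bw: std.io.BufferedWriter = undefined;
bw.initFixed(&buf);
try bw.print("{d} + {d} = {d}", .{ 2, 2, 4 });
const msg = bw.getWritten();
std.debug.print("{s}\n", .{msg}); // "2 + 2 = 4"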
@ -675,10 +687,9 @@ pub fn defaultPanic(
_ = panicking.fetchAdd(1, .seq_cst);
{
lockStdErr();
var stderr = lockStdErr2();
defer unlockStdErr();
const stderr = io.getStdErr().writer();
if (builtin.single_threaded) {
stderr.print("panic: ", .{}) catch posix.abort();
} else {
@ -688,7 +699,7 @@ pub fn defaultPanic(
stderr.print("{s}\n", .{msg}) catch posix.abort();
if (@errorReturnTrace()) |t| dumpStackTrace(t.*);
dumpCurrentStackTrace(first_trace_addr orelse @returnAddress());
dumpCurrentStackTraceToWriter(first_trace_addr orelse @returnAddress(), &stderr) catch {};
}
waitForOtherThreadToFinishPanicking();
@ -723,7 +734,7 @@ fn waitForOtherThreadToFinishPanicking() void {
pub fn writeStackTrace(
stack_trace: std.builtin.StackTrace,
out_stream: anytype,
writer: *std.io.BufferedWriter,
debug_info: *SelfInfo,
tty_config: io.tty.Config,
) !void {
@ -736,15 +747,15 @@ pub fn writeStackTrace(
frame_index = (frame_index + 1) % stack_trace.instruction_addresses.len;
}) {
const return_address = stack_trace.instruction_addresses[frame_index];
try printSourceAtAddress(debug_info, out_stream, return_address - 1, tty_config);
try printSourceAtAddress(debug_info, writer, return_address - 1, tty_config);
}
if (stack_trace.index > stack_trace.instruction_addresses.len) {
const dropped_frames = stack_trace.index - stack_trace.instruction_addresses.len;
tty_config.setColor(out_stream, .bold) catch {};
try out_stream.print("({d} additional stack frames skipped...)\n", .{dropped_frames});
tty_config.setColor(out_stream, .reset) catch {};
tty_config.setColor(writer, .bold) catch {};
try writer.print("({d} additional stack frames skipped...)\n", .{dropped_frames});
tty_config.setColor(writer, .reset) catch {};
}
}
@ -954,7 +965,7 @@ pub const StackIterator = struct {
};
pub fn writeCurrentStackTrace(
out_stream: anytype,
writer: *std.io.BufferedWriter,
debug_info: *SelfInfo,
tty_config: io.tty.Config,
start_addr: ?usize,
@ -962,7 +973,7 @@ pub fn writeCurrentStackTrace(
if (native_os == .windows) {
var context: ThreadContext = undefined;
assert(getContext(&context));
return writeStackTraceWindows(out_stream, debug_info, tty_config, &context, start_addr);
return writeStackTraceWindows(writer, debug_info, tty_config, &context, start_addr);
}
var context: ThreadContext = undefined;
const has_context = getContext(&context);
@ -973,7 +984,7 @@ pub fn writeCurrentStackTrace(
defer it.deinit();
while (it.next()) |return_address| {
printLastUnwindError(&it, debug_info, out_stream, tty_config);
printLastUnwindError(&it, debug_info, writer, tty_config);
// On arm64 macOS, the address of the last frame is 0x0 rather than 0x1 as on x86_64 macOS,
// therefore, we do a check for `return_address == 0` before subtracting 1 from it to avoid
@ -981,8 +992,8 @@ pub fn writeCurrentStackTrace(
// condition on the subsequent iteration and return `null` thus terminating the loop.
// same behaviour for x86-windows-msvc
const address = return_address -| 1;
try printSourceAtAddress(debug_info, out_stream, address, tty_config);
} else printLastUnwindError(&it, debug_info, out_stream, tty_config);
try printSourceAtAddress(debug_info, writer, address, tty_config);
} else printLastUnwindError(&it, debug_info, writer, tty_config);
}
pub noinline fn walkStackWindows(addresses: []usize, existing_context: ?*const windows.CONTEXT) usize {
@ -1042,7 +1053,7 @@ pub noinline fn walkStackWindows(addresses: []usize, existing_context: ?*const w
}
pub fn writeStackTraceWindows(
out_stream: anytype,
writer: *std.io.BufferedWriter,
debug_info: *SelfInfo,
tty_config: io.tty.Config,
context: *const windows.CONTEXT,
@ -1058,14 +1069,14 @@ pub fn writeStackTraceWindows(
return;
} else 0;
for (addrs[start_i..]) |addr| {
try printSourceAtAddress(debug_info, out_stream, addr - 1, tty_config);
try printSourceAtAddress(debug_info, writer, addr - 1, tty_config);
}
}
fn printUnknownSource(debug_info: *SelfInfo, out_stream: anytype, address: usize, tty_config: io.tty.Config) !void {
fn printUnknownSource(debug_info: *SelfInfo, writer: *std.io.BufferedWriter, address: usize, tty_config: io.tty.Config) !void {
const module_name = debug_info.getModuleNameForAddress(address);
return printLineInfo(
out_stream,
writer,
null,
address,
"???",
@ -1075,38 +1086,38 @@ fn printUnknownSource(debug_info: *SelfInfo, out_stream: anytype, address: usize
);
}
fn printLastUnwindError(it: *StackIterator, debug_info: *SelfInfo, out_stream: anytype, tty_config: io.tty.Config) void {
fn printLastUnwindError(it: *StackIterator, debug_info: *SelfInfo, writer: *std.io.BufferedWriter, tty_config: io.tty.Config) void {
if (!have_ucontext) return;
if (it.getLastError()) |unwind_error| {
printUnwindError(debug_info, out_stream, unwind_error.address, unwind_error.err, tty_config) catch {};
printUnwindError(debug_info, writer, unwind_error.address, unwind_error.err, tty_config) catch {};
}
}
fn printUnwindError(debug_info: *SelfInfo, out_stream: anytype, address: usize, err: UnwindError, tty_config: io.tty.Config) !void {
fn printUnwindError(debug_info: *SelfInfo, writer: *std.io.BufferedWriter, address: usize, err: UnwindError, tty_config: io.tty.Config) !void {
const module_name = debug_info.getModuleNameForAddress(address) orelse "???";
try tty_config.setColor(out_stream, .dim);
try tty_config.setColor(writer, .dim);
if (err == error.MissingDebugInfo) {
try out_stream.print("Unwind information for `{s}:0x{x}` was not available, trace may be incomplete\n\n", .{ module_name, address });
try writer.print("Unwind information for `{s}:0x{x}` was not available, trace may be incomplete\n\n", .{ module_name, address });
} else {
try out_stream.print("Unwind error at address `{s}:0x{x}` ({}), trace may be incomplete\n\n", .{ module_name, address, err });
try writer.print("Unwind error at address `{s}:0x{x}` ({}), trace may be incomplete\n\n", .{ module_name, address, err });
}
try tty_config.setColor(out_stream, .reset);
try tty_config.setColor(writer, .reset);
}
pub fn printSourceAtAddress(debug_info: *SelfInfo, out_stream: anytype, address: usize, tty_config: io.tty.Config) !void {
pub fn printSourceAtAddress(debug_info: *SelfInfo, writer: *std.io.BufferedWriter, address: usize, tty_config: io.tty.Config) !void {
const module = debug_info.getModuleForAddress(address) catch |err| switch (err) {
error.MissingDebugInfo, error.InvalidDebugInfo => return printUnknownSource(debug_info, out_stream, address, tty_config),
error.MissingDebugInfo, error.InvalidDebugInfo => return printUnknownSource(debug_info, writer, address, tty_config),
else => return err,
};
const symbol_info = module.getSymbolAtAddress(debug_info.allocator, address) catch |err| switch (err) {
error.MissingDebugInfo, error.InvalidDebugInfo => return printUnknownSource(debug_info, out_stream, address, tty_config),
error.MissingDebugInfo, error.InvalidDebugInfo => return printUnknownSource(debug_info, writer, address, tty_config),
else => return err,
};
defer if (symbol_info.source_location) |sl| debug_info.allocator.free(sl.file_name);
return printLineInfo(
out_stream,
writer,
symbol_info.source_location,
address,
symbol_info.name,
@ -1117,7 +1128,7 @@ pub fn printSourceAtAddress(debug_info: *SelfInfo, out_stream: anytype, address:
}
fn printLineInfo(
out_stream: anytype,
writer: *std.io.BufferedWriter,
source_location: ?SourceLocation,
address: usize,
symbol_name: []const u8,
@ -1126,34 +1137,34 @@ fn printLineInfo(
comptime printLineFromFile: anytype,
) !void {
nosuspend {
try tty_config.setColor(out_stream, .bold);
try tty_config.setColor(writer, .bold);
if (source_location) |*sl| {
try out_stream.print("{s}:{d}:{d}", .{ sl.file_name, sl.line, sl.column });
try writer.print("{s}:{d}:{d}", .{ sl.file_name, sl.line, sl.column });
} else {
try out_stream.writeAll("???:?:?");
try writer.writeAll("???:?:?");
}
try tty_config.setColor(out_stream, .reset);
try out_stream.writeAll(": ");
try tty_config.setColor(out_stream, .dim);
try out_stream.print("0x{x} in {s} ({s})", .{ address, symbol_name, compile_unit_name });
try tty_config.setColor(out_stream, .reset);
try out_stream.writeAll("\n");
try tty_config.setColor(writer, .reset);
try writer.writeAll(": ");
try tty_config.setColor(writer, .dim);
try writer.print("0x{x} in {s} ({s})", .{ address, symbol_name, compile_unit_name });
try tty_config.setColor(writer, .reset);
try writer.writeAll("\n");
// Show the matching source code line if possible
if (source_location) |sl| {
if (printLineFromFile(out_stream, sl)) {
if (printLineFromFile(writer, sl)) {
if (sl.column > 0) {
// The caret already takes one char
const space_needed = @as(usize, @intCast(sl.column - 1));
try out_stream.writeByteNTimes(' ', space_needed);
try tty_config.setColor(out_stream, .green);
try out_stream.writeAll("^");
try tty_config.setColor(out_stream, .reset);
try writer.splatByteAll(' ', space_needed);
try tty_config.setColor(writer, .green);
try writer.writeAll("^");
try tty_config.setColor(writer, .reset);
}
try out_stream.writeAll("\n");
try writer.writeAll("\n");
} else |err| switch (err) {
error.EndOfFile, error.FileNotFound => {},
error.BadPathName => {},
@ -1164,7 +1175,7 @@ fn printLineInfo(
}
}
fn printLineFromFileAnyOs(out_stream: anytype, source_location: SourceLocation) !void {
fn printLineFromFileAnyOs(writer: *std.io.BufferedWriter, source_location: SourceLocation) !void {
// Need this to always block even in async I/O mode, because this could potentially
// be called from e.g. the event loop code crashing.
var f = try fs.cwd().openFile(source_location.file_name, .{});
@ -1197,24 +1208,24 @@ fn printLineFromFileAnyOs(out_stream: anytype, source_location: SourceLocation)
if (mem.indexOfScalar(u8, slice, '\n')) |pos| {
const line = slice[0 .. pos + 1];
mem.replaceScalar(u8, line, '\t', ' ');
return out_stream.writeAll(line);
return writer.writeAll(line);
} else { // Line is the last inside the buffer, and requires another read to find delimiter. Alternatively the file ends.
mem.replaceScalar(u8, slice, '\t', ' ');
try out_stream.writeAll(slice);
try writer.writeAll(slice);
while (amt_read == buf.len) {
amt_read = try f.read(buf[0..]);
if (mem.indexOfScalar(u8, buf[0..amt_read], '\n')) |pos| {
const line = buf[0 .. pos + 1];
mem.replaceScalar(u8, line, '\t', ' ');
return out_stream.writeAll(line);
return writer.writeAll(line);
} else {
const line = buf[0..amt_read];
mem.replaceScalar(u8, line, '\t', ' ');
try out_stream.writeAll(line);
try writer.writeAll(line);
}
}
// Make sure printing last line of file inserts extra newline
try out_stream.writeByte('\n');
try writer.writeByte('\n');
}
}
@ -1274,9 +1285,9 @@ test printLineFromFileAnyOs {
const overlap = 10;
var writer = file.writer();
try writer.writeByteNTimes('a', std.heap.page_size_min - overlap);
try writer.splatByteAll('a', std.heap.page_size_min - overlap);
try writer.writeByte('\n');
try writer.writeByteNTimes('a', overlap);
try writer.splatByteAll('a', overlap);
try printLineFromFileAnyOs(output_stream, .{ .file_name = path, .line = 2, .column = 0 });
try expectEqualStrings(("a" ** overlap) ++ "\n", output.items);
@ -1289,7 +1300,7 @@ test printLineFromFileAnyOs {
defer allocator.free(path);
var writer = file.writer();
try writer.writeByteNTimes('a', std.heap.page_size_max);
try writer.splatByteAll('a', std.heap.page_size_max);
try printLineFromFileAnyOs(output_stream, .{ .file_name = path, .line = 1, .column = 0 });
try expectEqualStrings(("a" ** std.heap.page_size_max) ++ "\n", output.items);
@ -1302,7 +1313,7 @@ test printLineFromFileAnyOs {
defer allocator.free(path);
var writer = file.writer();
try writer.writeByteNTimes('a', 3 * std.heap.page_size_max);
try writer.splatByteAll('a', 3 * std.heap.page_size_max);
try expectError(error.EndOfFile, printLineFromFileAnyOs(output_stream, .{ .file_name = path, .line = 2, .column = 0 }));
@ -1328,7 +1339,7 @@ test printLineFromFileAnyOs {
var writer = file.writer();
const real_file_start = 3 * std.heap.page_size_min;
try writer.writeByteNTimes('\n', real_file_start);
try writer.splatByteAll('\n', real_file_start);
try writer.writeAll("abc\ndef");
try printLineFromFileAnyOs(output_stream, .{ .file_name = path, .line = real_file_start + 1, .column = 0 });
@ -1461,7 +1472,7 @@ fn handleSegfaultPosix(sig: i32, info: *const posix.siginfo_t, ctx_ptr: ?*anyopa
}
fn dumpSegfaultInfoPosix(sig: i32, code: i32, addr: usize, ctx_ptr: ?*anyopaque) void {
const stderr = io.getStdErr().writer();
var stderr = io.getStdErr().unbufferedWriter();
_ = switch (sig) {
posix.SIG.SEGV => if (native_arch == .x86_64 and native_os == .linux and code == 128) // SI_KERNEL
// x86_64 doesn't have a full 64-bit virtual address space.
@ -1471,7 +1482,7 @@ fn dumpSegfaultInfoPosix(sig: i32, code: i32, addr: usize, ctx_ptr: ?*anyopaque)
// but can also happen when no addressable memory is involved;
// for example when reading/writing model-specific registers
// by executing `rdmsr` or `wrmsr` in user-space (unprivileged mode).
stderr.print("General protection exception (no address available)\n", .{})
stderr.writeAll("General protection exception (no address available)\n")
else
stderr.print("Segmentation fault at address 0x{x}\n", .{addr}),
posix.SIG.ILL => stderr.print("Illegal instruction at address 0x{x}\n", .{addr}),
@ -1509,7 +1520,7 @@ fn dumpSegfaultInfoPosix(sig: i32, code: i32, addr: usize, ctx_ptr: ?*anyopaque)
}, @ptrCast(ctx)).__mcontext_data;
}
relocateContext(&new_ctx);
dumpStackTraceFromBase(&new_ctx);
dumpStackTraceFromBase(&new_ctx, &stderr);
},
else => {},
}
@ -1557,7 +1568,7 @@ fn handleSegfaultWindowsExtra(info: *windows.EXCEPTION_POINTERS, msg: u8, label:
}
fn dumpSegfaultInfoWindows(info: *windows.EXCEPTION_POINTERS, msg: u8, label: ?[]const u8) void {
const stderr = io.getStdErr().writer();
var stderr = io.getStdErr().unbufferedWriter();
_ = switch (msg) {
0 => stderr.print("{s}\n", .{label.?}),
1 => stderr.print("Segmentation fault at address 0x{x}\n", .{info.ExceptionRecord.ExceptionInformation[1]}),
@ -1565,7 +1576,7 @@ fn dumpSegfaultInfoWindows(info: *windows.EXCEPTION_POINTERS, msg: u8, label: ?[
else => unreachable,
} catch posix.abort();
dumpStackTraceFromBase(info.ContextRecord);
dumpStackTraceFromBase(info.ContextRecord, &stderr);
}
pub fn dumpStackPointerAddr(prefix: []const u8) void {
@ -1688,7 +1699,7 @@ pub fn ConfigurableTrace(comptime size: usize, comptime stack_frame_count: usize
t: @This(),
comptime fmt: []const u8,
options: std.fmt.FormatOptions,
writer: anytype,
writer: *std.io.BufferedWriter,
) !void {
if (fmt.len != 0) std.fmt.invalidFmtError(fmt, t);
_ = options;

View File

@ -2235,7 +2235,7 @@ pub const ElfModule = struct {
const section_bytes = try chopSlice(mapped_mem, shdr.sh_offset, shdr.sh_size);
sections[section_index.?] = if ((shdr.sh_flags & elf.SHF_COMPRESSED) > 0) blk: {
var section_stream = std.io.fixedBufferStream(section_bytes);
var section_stream: std.io.FixedBufferStream = .{ .buffer = section_bytes };
const section_reader = section_stream.reader();
const chdr = section_reader.readStruct(elf.Chdr) catch continue;
if (chdr.ch_type != .ZLIB) continue;
@ -2302,11 +2302,7 @@ pub const ElfModule = struct {
};
defer debuginfod_dir.close();
const filename = std.fmt.allocPrint(
gpa,
"{s}/debuginfo",
.{std.fmt.fmtSliceHexLower(id)},
) catch break :blk;
const filename = std.fmt.allocPrint(gpa, "{x}/debuginfo", .{id}) catch break :blk;
defer gpa.free(filename);
const path: Path = .{
@ -2330,12 +2326,8 @@ pub const ElfModule = struct {
var id_prefix_buf: [2]u8 = undefined;
var filename_buf: [38 + extension.len]u8 = undefined;
_ = std.fmt.bufPrint(&id_prefix_buf, "{s}", .{std.fmt.fmtSliceHexLower(id[0..1])}) catch unreachable;
const filename = std.fmt.bufPrint(
&filename_buf,
"{s}" ++ extension,
.{std.fmt.fmtSliceHexLower(id[1..])},
) catch break :blk;
_ = std.fmt.bufPrint(&id_prefix_buf, "{x}", .{id[0..1]}) catch unreachable;
const filename = std.fmt.bufPrint(&filename_buf, "{x}" ++ extension, .{id[1..]}) catch break :blk;
for (global_debug_directories) |global_directory| {
const path: Path = .{

View File

@ -51,7 +51,7 @@ const Opcode = enum(u8) {
pub const hi_user = 0x3f;
};
fn readBlock(stream: *std.io.FixedBufferStream([]const u8)) ![]const u8 {
fn readBlock(stream: *std.io.FixedBufferStream) ![]const u8 {
const reader = stream.reader();
const block_len = try leb.readUleb128(usize, reader);
if (stream.pos + block_len > stream.buffer.len) return error.InvalidOperand;
@ -147,7 +147,7 @@ pub const Instruction = union(Opcode) {
},
pub fn read(
stream: *std.io.FixedBufferStream([]const u8),
stream: *std.io.FixedBufferStream,
addr_size_bytes: u8,
endian: std.builtin.Endian,
) !Instruction {

View File

@ -178,7 +178,7 @@ pub fn StackMachine(comptime options: Options) type {
}
}
pub fn readOperand(stream: *std.io.FixedBufferStream([]const u8), opcode: u8, context: Context) !?Operand {
pub fn readOperand(stream: *std.io.FixedBufferStream, opcode: u8, context: Context) !?Operand {
const reader = stream.reader();
return switch (opcode) {
OP.addr => generic(try reader.readInt(addr_type, options.endian)),
@ -293,7 +293,7 @@ pub fn StackMachine(comptime options: Options) type {
initial_value: ?usize,
) Error!?Value {
if (initial_value) |i| try self.stack.append(allocator, .{ .generic = i });
var stream = std.io.fixedBufferStream(expression);
var stream: std.io.FixedBufferStream = .{ .buffer = expression };
while (try self.step(&stream, allocator, context)) {}
if (self.stack.items.len == 0) return null;
return self.stack.items[self.stack.items.len - 1];
@ -302,7 +302,7 @@ pub fn StackMachine(comptime options: Options) type {
/// Reads an opcode and its operands from `stream`, then executes it
pub fn step(
self: *Self,
stream: *std.io.FixedBufferStream([]const u8),
stream: *std.io.FixedBufferStream,
allocator: std.mem.Allocator,
context: Context,
) Error!bool {
@ -756,7 +756,7 @@ pub fn StackMachine(comptime options: Options) type {
if (isOpcodeRegisterLocation(block[0])) {
if (context.thread_context == null) return error.IncompleteExpressionContext;
var block_stream = std.io.fixedBufferStream(block);
var block_stream: std.io.FixedBufferStream = .{ .buffer = block };
const register = (try readOperand(&block_stream, block[0], context)).?.register;
const value = mem.readInt(usize, (try abi.regBytes(context.thread_context.?, register, context.reg_context))[0..@sizeOf(usize)], native_endian);
try self.stack.append(allocator, .{ .generic = value });

View File

@ -2027,12 +2027,9 @@ pub const VirtualMachine = struct {
var prev_row: Row = self.current_row;
var cie_stream = std.io.fixedBufferStream(cie.initial_instructions);
var fde_stream = std.io.fixedBufferStream(fde.instructions);
var streams = [_]*std.io.FixedBufferStream([]const u8){
&cie_stream,
&fde_stream,
};
var cie_stream: std.io.FixedBufferStream = .{ .buffer = cie.initial_instructions };
var fde_stream: std.io.FixedBufferStream = .{ .buffer = fde.instructions };
const streams: [2]*std.io.FixedBufferStream = .{ &cie_stream, &fde_stream };
for (&streams, 0..) |stream, i| {
while (stream.pos < stream.buffer.len) {

File diff suppressed because it is too large

View File

@ -11,7 +11,7 @@ const special_exponent = 0x7fffffff;
pub const min_buffer_size = 53;
/// Returns the minimum buffer size needed to print every float of a specific type and format.
pub fn bufferSize(comptime mode: Format, comptime T: type) comptime_int {
pub fn bufferSize(comptime mode: Mode, comptime T: type) comptime_int {
comptime std.debug.assert(@typeInfo(T) == .float);
return switch (mode) {
.scientific => 53,
@ -27,17 +27,17 @@ pub fn bufferSize(comptime mode: Format, comptime T: type) comptime_int {
};
}
pub const FormatError = error{
pub const Error = error{
BufferTooSmall,
};
pub const Format = enum {
pub const Mode = enum {
scientific,
decimal,
};
pub const FormatOptions = struct {
mode: Format = .scientific,
pub const Options = struct {
mode: Mode = .scientific,
precision: ?usize = null,
};
@ -52,11 +52,11 @@ pub const FormatOptions = struct {
///
/// When printing full precision decimals, use `bufferSize` to get the required space. It is
/// recommended to bound decimal output with a fixed precision to reduce the required buffer size.
pub fn formatFloat(buf: []u8, v_: anytype, options: FormatOptions) FormatError![]const u8 {
const v = switch (@TypeOf(v_)) {
pub fn render(buf: []u8, value: anytype, options: Options) Error![]const u8 {
const v = switch (@TypeOf(value)) {
// comptime_float internally is a f128; this preserves precision.
comptime_float => @as(f128, v_),
else => v_,
comptime_float => @as(f128, value),
else => value,
};
const T = @TypeOf(v);
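A usage sketch of the renamed API in this file (the value and precision are illustrative; the expected string assumes ordinary decimal rounding):

test "render sketch" {
    var buf: [min_buffer_size]u8 = undefined;
    const s = try render(&buf, 3.14159, .{ .mode = .decimal, .precision = 2 });
    try std.testing.expectEqualStrings("3.14", s);
}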
@ -192,7 +192,7 @@ fn round(comptime T: type, f: FloatDecimal(T), mode: RoundMode, precision: usize
/// will not fit.
///
/// It is recommended to bound decimal formatting with an exact precision.
pub fn formatScientific(comptime T: type, buf: []u8, f_: FloatDecimal(T), precision: ?usize) FormatError![]const u8 {
pub fn formatScientific(comptime T: type, buf: []u8, f_: FloatDecimal(T), precision: ?usize) Error![]const u8 {
std.debug.assert(buf.len >= min_buffer_size);
var f = f_;
@ -263,7 +263,7 @@ pub fn formatScientific(comptime T: type, buf: []u8, f_: FloatDecimal(T), precis
/// The buffer provided must be greater than `min_buffer_size` bytes in length. If no precision is
/// specified, this may still return an error. If precision is specified, `2 + precision` bytes will
/// always be written.
pub fn formatDecimal(comptime T: type, buf: []u8, f_: FloatDecimal(T), precision: ?usize) FormatError![]const u8 {
pub fn formatDecimal(comptime T: type, buf: []u8, f_: FloatDecimal(T), precision: ?usize) Error![]const u8 {
std.debug.assert(buf.len >= min_buffer_size);
var f = f_;
@ -1520,7 +1520,7 @@ fn check(comptime T: type, value: T, comptime expected: []const u8) !void {
var buf: [6000]u8 = undefined;
const value_bits: I = @bitCast(value);
const s = try formatFloat(&buf, value, .{});
const s = try render(&buf, value, .{});
try std.testing.expectEqualStrings(expected, s);
if (T == f80 and builtin.target.os.tag == .windows and builtin.target.cpu.arch == .x86_64) return;

View File

@ -1587,12 +1587,112 @@ pub fn reader(file: File) Reader {
return .{ .context = file };
}
pub const Writer = io.Writer(File, WriteError, write);
pub fn writer(file: File) Writer {
return .{ .context = file };
pub fn writer(file: File) std.io.Writer {
return .{
.context = interface.handleToOpaque(file.handle),
.vtable = &.{
.writev = interface.writev,
.writeFile = interface.writeFile,
},
};
}
pub fn unbufferedWriter(file: File) std.io.BufferedWriter {
return .{
.buffer = &.{},
.unbuffered_writer = writer(file),
};
}
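A caller-side sketch of the two functions above, assuming `std.io.BufferedWriter` exposes the `print` helper used elsewhere in this commit:

// Sketch: unbuffered writes to stdout through the new vtable-based Writer.
var out = std.io.getStdOut().unbufferedWriter();
try out.print("hello world\n", .{});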
const interface = struct {
/// Number of slices to store on the stack, when trying to send as many byte
/// vectors through the underlying write calls as possible.
const max_buffers_len = 16;
fn writev(context: *anyopaque, data: []const []const u8) anyerror!usize {
const file = opaqueToHandle(context);
if (is_windows) {
// TODO improve this to use WriteFileScatter
if (data.len == 0) return 0;
const first = data[0];
return windows.WriteFile(file, first, null);
}
var iovecs_buffer: [max_buffers_len]std.posix.iovec_const = undefined;
const iovecs = iovecs_buffer[0..@min(iovecs_buffer.len, data.len)];
for (iovecs, data[0..iovecs.len]) |*v, d| v.* = .{ .base = d.ptr, .len = d.len };
return std.posix.writev(file, iovecs);
}
fn writeFile(
context: *anyopaque,
in_file: std.fs.File,
in_offset: u64,
in_len: std.io.Writer.VTable.FileLen,
headers_and_trailers: []const []const u8,
headers_len: usize,
) anyerror!usize {
const out_fd = opaqueToHandle(context);
const in_fd = in_file.handle;
const len_int = switch (in_len) {
.zero => return interface.writev(context, headers_and_trailers),
.entire_file => 0,
else => in_len.int(),
};
var iovecs_buffer: [max_buffers_len]std.posix.iovec_const = undefined;
const iovecs = iovecs_buffer[0..@min(iovecs_buffer.len, headers_and_trailers.len)];
for (iovecs, headers_and_trailers[0..iovecs.len]) |*v, d| v.* = .{ .base = d.ptr, .len = d.len };
const headers = iovecs[0..@min(headers_len, iovecs.len)];
const trailers = iovecs[headers.len..];
const flags = 0;
return posix.sendfile(out_fd, in_fd, in_offset, len_int, headers, trailers, flags) catch |err| switch (err) {
error.Unseekable,
error.FastOpenAlreadyInProgress,
error.MessageTooBig,
error.FileDescriptorNotASocket,
error.NetworkUnreachable,
error.NetworkSubsystemFailed,
=> return writeFileUnseekable(out_fd, in_fd, in_offset, in_len, headers_and_trailers, headers_len),
else => |e| return e,
};
}
fn writeFileUnseekable(
out_fd: Handle,
in_fd: Handle,
in_offset: u64,
in_len: std.io.Writer.VTable.FileLen,
headers_and_trailers: []const []const u8,
headers_len: usize,
) anyerror!usize {
_ = out_fd;
_ = in_fd;
_ = in_offset;
_ = in_len;
_ = headers_and_trailers;
_ = headers_len;
@panic("TODO writeFileUnseekable");
}
fn handleToOpaque(handle: File.Handle) *anyopaque {
return switch (@typeInfo(Handle)) {
.pointer => @ptrCast(handle),
.int => @ptrFromInt(@as(u32, @bitCast(handle))),
else => @compileError("unhandled"),
};
}
fn opaqueToHandle(userdata: *anyopaque) Handle {
return switch (@typeInfo(Handle)) {
.pointer => @ptrCast(userdata),
.int => @intCast(@intFromPtr(userdata)),
else => @compileError("unhandled"),
};
}
};
pub const SeekableStream = io.SeekableStream(
File,
SeekError,

View File

@ -336,7 +336,7 @@ pub fn GenericWriter(
return @errorCast(self.any().writeStructEndian(value, endian));
}
pub inline fn any(self: *const Self) AnyWriter {
pub inline fn any(self: *const Self) Writer {
return .{
.context = @ptrCast(&self.context),
.writeFn = typeErasedWriteFn,
@ -351,26 +351,23 @@ pub fn GenericWriter(
}
/// Deprecated; consider switching to `AnyReader` or use `GenericReader`
/// to use previous API.
/// to use previous API. To be removed after 0.14.0 is tagged.
pub const Reader = GenericReader;
/// Deprecated; consider switching to `AnyWriter` or use `GenericWriter`
/// to use previous API.
pub const Writer = GenericWriter;
pub const Writer = @import("io/Writer.zig");
pub const AnyReader = @import("io/Reader.zig");
pub const AnyWriter = @import("io/Writer.zig");
/// Deprecated; to be removed after 0.14.0 is tagged.
pub const AnyWriter = Writer;
pub const SeekableStream = @import("io/seekable_stream.zig").SeekableStream;
pub const BufferedWriter = @import("io/buffered_writer.zig").BufferedWriter;
pub const bufferedWriter = @import("io/buffered_writer.zig").bufferedWriter;
pub const BufferedWriter = @import("io/BufferedWriter.zig");
pub const BufferedReader = @import("io/buffered_reader.zig").BufferedReader;
pub const bufferedReader = @import("io/buffered_reader.zig").bufferedReader;
pub const bufferedReaderSize = @import("io/buffered_reader.zig").bufferedReaderSize;
pub const FixedBufferStream = @import("io/fixed_buffer_stream.zig").FixedBufferStream;
pub const fixedBufferStream = @import("io/fixed_buffer_stream.zig").fixedBufferStream;
pub const FixedBufferStream = @import("io/FixedBufferStream.zig");
pub const CWriter = @import("io/c_writer.zig").CWriter;
pub const cWriter = @import("io/c_writer.zig").cWriter;
@ -378,8 +375,7 @@ pub const cWriter = @import("io/c_writer.zig").cWriter;
pub const LimitedReader = @import("io/limited_reader.zig").LimitedReader;
pub const limitedReader = @import("io/limited_reader.zig").limitedReader;
pub const CountingWriter = @import("io/counting_writer.zig").CountingWriter;
pub const countingWriter = @import("io/counting_writer.zig").countingWriter;
pub const CountingWriter = @import("io/CountingWriter.zig");
pub const CountingReader = @import("io/counting_reader.zig").CountingReader;
pub const countingReader = @import("io/counting_reader.zig").countingReader;
@ -404,17 +400,42 @@ pub const StreamSource = @import("io/stream_source.zig").StreamSource;
pub const tty = @import("io/tty.zig");
/// A Writer that doesn't write to anything.
pub const null_writer: NullWriter = .{ .context = {} };
/// A `Writer` that discards all data.
pub const null_writer: Writer = .{
.context = undefined,
.vtable = &.{
.writev = null_writev,
.writeFile = null_writeFile,
},
};
pub const NullWriter = Writer(void, error{}, dummyWrite);
fn dummyWrite(context: void, data: []const u8) error{}!usize {
fn null_writev(context: *anyopaque, data: []const []const u8) anyerror!usize {
_ = context;
return data.len;
var n: usize = 0;
for (data) |bytes| n += bytes.len;
return n;
}
fn null_writeFile(
context: *anyopaque,
file: std.fs.File,
offset: u64,
len: Writer.VTable.FileLen,
headers_and_trailers: []const []const u8,
headers_len: usize,
) anyerror!usize {
_ = context;
_ = offset;
_ = headers_len;
_ = file;
if (len == .entire_file) return error.Unimplemented;
var n: usize = 0;
for (headers_and_trailers) |bytes| n += bytes.len;
return len.int() + n;
}
test null_writer {
null_writer.writeAll("yay" ** 10) catch |err| switch (err) {};
try null_writer.writeAll("yay");
}
pub fn poll(
@ -820,16 +841,15 @@ pub fn PollFiles(comptime StreamEnum: type) type {
test {
_ = AnyReader;
_ = AnyWriter;
_ = Writer;
_ = CountingWriter;
_ = FixedBufferStream;
_ = @import("io/bit_reader.zig");
_ = @import("io/bit_writer.zig");
_ = @import("io/buffered_atomic_file.zig");
_ = @import("io/buffered_reader.zig");
_ = @import("io/buffered_writer.zig");
_ = @import("io/c_writer.zig");
_ = @import("io/counting_writer.zig");
_ = @import("io/counting_reader.zig");
_ = @import("io/fixed_buffer_stream.zig");
_ = @import("io/seekable_stream.zig");
_ = @import("io/stream_source.zig");
_ = @import("io/test.zig");

File diff suppressed because it is too large

View File

@ -0,0 +1,56 @@
const std = @import("../std.zig");
const CountingWriter = @This();
const assert = std.debug.assert;
const native_endian = @import("builtin").target.cpu.arch.endian();
const Writer = std.io.Writer;
const testing = std.testing;
/// Underlying stream to passthrough bytes to.
child_writer: Writer,
bytes_written: u64 = 0,
pub fn writer(cw: *CountingWriter) Writer {
return .{
.context = cw,
.vtable = &.{
.writev = passthru_writev,
.writeFile = passthru_writeFile,
},
};
}
pub fn unbufferedWriter(cw: *CountingWriter) std.io.BufferedWriter {
return .{
.buffer = &.{},
.unbuffered_writer = writer(cw),
};
}
fn passthru_writev(context: *anyopaque, data: []const []const u8) anyerror!usize {
const cw: *CountingWriter = @alignCast(@ptrCast(context));
const n = try cw.child_writer.writev(data);
cw.bytes_written += n;
return n;
}
fn passthru_writeFile(
context: *anyopaque,
file: std.fs.File,
offset: u64,
len: Writer.VTable.FileLen,
headers_and_trailers: []const []const u8,
headers_len: usize,
) anyerror!usize {
const cw: *CountingWriter = @alignCast(@ptrCast(context));
const n = try cw.child_writer.writeFile(file, offset, len, headers_and_trailers, headers_len);
cw.bytes_written += n;
return n;
}
test CountingWriter {
var cw: CountingWriter = .{ .child_writer = std.io.null_writer };
var bw = cw.unbufferedWriter();
const bytes = "yay";
try bw.writeAll(bytes);
try testing.expect(cw.bytes_written == bytes.len);
}

View File

@ -0,0 +1,148 @@
//! This turns a const byte buffer into an `io.Reader`, or `io.SeekableStream`.
const std = @import("../std.zig");
const io = std.io;
const testing = std.testing;
const mem = std.mem;
const assert = std.debug.assert;
const FixedBufferStream = @This();
buffer: []const u8,
pos: usize = 0,
pub const ReadError = error{};
pub const SeekError = error{};
pub const GetSeekPosError = error{};
pub const Reader = io.Reader(*Self, ReadError, read);
pub const SeekableStream = io.SeekableStream(
*Self,
SeekError,
GetSeekPosError,
seekTo,
seekBy,
getPos,
getEndPos,
);
const Self = @This();
pub fn reader(self: *Self) Reader {
return .{ .context = self };
}
pub fn seekableStream(self: *Self) SeekableStream {
return .{ .context = self };
}
pub fn read(self: *Self, dest: []u8) ReadError!usize {
const size = @min(dest.len, self.buffer.len - self.pos);
const end = self.pos + size;
@memcpy(dest[0..size], self.buffer[self.pos..end]);
self.pos = end;
return size;
}
pub fn seekTo(self: *Self, pos: u64) SeekError!void {
self.pos = @min(std.math.lossyCast(usize, pos), self.buffer.len);
}
pub fn seekBy(self: *Self, amt: i64) SeekError!void {
if (amt < 0) {
const abs_amt = @abs(amt);
const abs_amt_usize = std.math.cast(usize, abs_amt) orelse std.math.maxInt(usize);
if (abs_amt_usize > self.pos) {
self.pos = 0;
} else {
self.pos -= abs_amt_usize;
}
} else {
const amt_usize = std.math.cast(usize, amt) orelse std.math.maxInt(usize);
const new_pos = std.math.add(usize, self.pos, amt_usize) catch std.math.maxInt(usize);
self.pos = @min(self.buffer.len, new_pos);
}
}
pub fn getEndPos(self: *Self) GetSeekPosError!u64 {
return self.buffer.len;
}
pub fn getPos(self: *Self) GetSeekPosError!u64 {
return self.pos;
}
pub fn getWritten(self: Self) []const u8 {
return self.buffer[0..self.pos];
}
pub fn reset(self: *Self) void {
self.pos = 0;
}
test "output" {
var buf: [255]u8 = undefined;
var fbs: FixedBufferStream = .{ .buffer = &buf };
const stream = fbs.writer();
try stream.print("{s}{s}!", .{ "Hello", "World" });
try testing.expectEqualSlices(u8, "HelloWorld!", fbs.getWritten());
}
test "output at comptime" {
comptime {
var buf: [255]u8 = undefined;
var fbs: FixedBufferStream = .{ .buffer = &buf };
const stream = fbs.writer();
try stream.print("{s}{s}!", .{ "Hello", "World" });
try testing.expectEqualSlices(u8, "HelloWorld!", fbs.getWritten());
}
}
test "output 2" {
var buffer: [10]u8 = undefined;
var fbs: FixedBufferStream = .{ .buffer = &buffer };
try fbs.writer().writeAll("Hello");
try testing.expect(mem.eql(u8, fbs.getWritten(), "Hello"));
try fbs.writer().writeAll("world");
try testing.expect(mem.eql(u8, fbs.getWritten(), "Helloworld"));
try testing.expectError(error.NoSpaceLeft, fbs.writer().writeAll("!"));
try testing.expect(mem.eql(u8, fbs.getWritten(), "Helloworld"));
fbs.reset();
try testing.expect(fbs.getWritten().len == 0);
try testing.expectError(error.NoSpaceLeft, fbs.writer().writeAll("Hello world!"));
try testing.expect(mem.eql(u8, fbs.getWritten(), "Hello worl"));
try fbs.seekTo((try fbs.getEndPos()) + 1);
try testing.expectError(error.NoSpaceLeft, fbs.writer().writeAll("H"));
}
test "input" {
const bytes = [_]u8{ 1, 2, 3, 4, 5, 6, 7 };
var fbs: FixedBufferStream = .{ .buffer = &bytes };
var dest: [4]u8 = undefined;
var amt_read = try fbs.reader().read(&dest);
try testing.expect(amt_read == 4);
try testing.expect(mem.eql(u8, dest[0..4], bytes[0..4]));
amt_read = try fbs.reader().read(&dest);
try testing.expect(amt_read == 3);
try testing.expect(mem.eql(u8, dest[0..3], bytes[4..7]));
amt_read = try fbs.reader().read(&dest);
try testing.expect(amt_read == 0);
try fbs.seekTo((try fbs.getEndPos()) + 1);
amt_read = try fbs.reader().read(&dest);
try testing.expect(amt_read == 0);
}
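As a brief illustration of the reworked, non-generic FixedBufferStream, the following sketch (not part of this commit, written as if appended to this file so that `std`, `testing`, and `FixedBufferStream` are in scope) combines the seek methods with the reader:
test "seek then read" {
    const bytes = "abcdef";
    var fbs: FixedBufferStream = .{ .buffer = bytes };
    // Skip the first two bytes, then read the next two.
    try fbs.seekTo(2);
    var dest: [2]u8 = undefined;
    const n = try fbs.reader().read(&dest);
    try testing.expectEqual(@as(usize, 2), n);
    try testing.expectEqualSlices(u8, "cd", &dest);
    try testing.expectEqual(@as(u64, 4), try fbs.getPos());
}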

View File

@@ -1,83 +1,100 @@
const std = @import("../std.zig");
const assert = std.debug.assert;
const mem = std.mem;
const native_endian = @import("builtin").target.cpu.arch.endian();
const Writer = @This();
context: *const anyopaque,
writeFn: *const fn (context: *const anyopaque, bytes: []const u8) anyerror!usize,
context: *anyopaque,
vtable: *const VTable,
const Self = @This();
pub const Error = anyerror;
pub const VTable = struct {
/// Each slice in `data` is written in order.
///
/// Number of bytes actually written is returned.
///
/// Number of bytes returned may be zero, which does not mean
/// end-of-stream. A subsequent call may return nonzero, or may signal end
/// of stream via an error.
writev: *const fn (context: *anyopaque, data: []const []const u8) anyerror!usize,
pub fn write(self: Self, bytes: []const u8) anyerror!usize {
return self.writeFn(self.context, bytes);
/// Writes contents from an open file. `headers` are written first, then `len`
/// bytes of `file` starting from `offset`, then `trailers`.
///
/// Number of bytes actually written is returned, which may lie within
/// headers, the file, trailers, or anywhere in between.
///
/// Number of bytes returned may be zero, which does not mean
/// end-of-stream. A subsequent call may return nonzero, or may signal end
/// of stream via an error.
writeFile: *const fn (
context: *anyopaque,
file: std.fs.File,
offset: u64,
/// `.zero` means no file contents are written; `.entire_file` means copy until the end of the file is reached.
len: FileLen,
/// Headers and trailers must be passed together so that in case `len` is
/// zero, they can be forwarded directly to `VTable.writev`.
headers_and_trailers: []const []const u8,
headers_len: usize,
) anyerror!usize,
pub const FileLen = enum(u64) {
zero = 0,
entire_file = std.math.maxInt(u64),
_,
pub fn init(integer: u64) FileLen {
const result: FileLen = @enumFromInt(integer);
// The `entire_file` sentinel must be requested explicitly rather than passed as an integer.
assert(result != .entire_file);
return result;
}
pub fn int(len: FileLen) u64 {
return @intFromEnum(len);
}
};
};
pub fn writev(w: Writer, data: []const []const u8) anyerror!usize {
return w.vtable.writev(w.context, data);
}
pub fn writeAll(self: Self, bytes: []const u8) anyerror!void {
pub fn writeFile(
w: Writer,
file: std.fs.File,
offset: u64,
len: VTable.FileLen,
headers_and_trailers: []const []const u8,
headers_len: usize,
) anyerror!usize {
return w.vtable.writeFile(w.context, file, offset, len, headers_and_trailers, headers_len);
}
pub fn write(w: Writer, bytes: []const u8) anyerror!usize {
const single: [1][]const u8 = .{bytes};
return w.vtable.writev(w.context, &single);
}
pub fn writeAll(w: Writer, bytes: []const u8) anyerror!void {
var index: usize = 0;
while (index != bytes.len) {
index += try self.write(bytes[index..]);
}
while (index < bytes.len) index += try write(w, bytes[index..]);
}
pub fn print(self: Self, comptime format: []const u8, args: anytype) anyerror!void {
return std.fmt.format(self, format, args);
}
///// Directly calls `writeAll` many times to render the formatted text. To
///// enable buffering, call `std.io.BufferedWriter.print` instead.
//pub fn unbufferedPrint(w: Writer, comptime format: []const u8, args: anytype) anyerror!void {
// return std.fmt.format(w, format, args);
//}
pub fn writeByte(self: Self, byte: u8) anyerror!void {
const array = [1]u8{byte};
return self.writeAll(&array);
}
pub fn writeByteNTimes(self: Self, byte: u8, n: usize) anyerror!void {
var bytes: [256]u8 = undefined;
@memset(bytes[0..], byte);
var remaining: usize = n;
while (remaining > 0) {
const to_write = @min(remaining, bytes.len);
try self.writeAll(bytes[0..to_write]);
remaining -= to_write;
}
}
pub fn writeBytesNTimes(self: Self, bytes: []const u8, n: usize) anyerror!void {
/// The `data` parameter is mutable because this function needs to mutate the
/// fields in order to handle partial writes from `VTable.writev`.
pub fn writevAll(w: Writer, data: [][]const u8) anyerror!void {
var i: usize = 0;
while (i < n) : (i += 1) {
try self.writeAll(bytes);
}
}
pub inline fn writeInt(self: Self, comptime T: type, value: T, endian: std.builtin.Endian) anyerror!void {
var bytes: [@divExact(@typeInfo(T).int.bits, 8)]u8 = undefined;
mem.writeInt(std.math.ByteAlignedInt(@TypeOf(value)), &bytes, value, endian);
return self.writeAll(&bytes);
}
pub fn writeStruct(self: Self, value: anytype) anyerror!void {
// Only extern and packed structs have defined in-memory layout.
comptime assert(@typeInfo(@TypeOf(value)).@"struct".layout != .auto);
return self.writeAll(mem.asBytes(&value));
}
pub fn writeStructEndian(self: Self, value: anytype, endian: std.builtin.Endian) anyerror!void {
// TODO: make sure this value is not a reference type
if (native_endian == endian) {
return self.writeStruct(value);
} else {
var copy = value;
mem.byteSwapAllFields(@TypeOf(value), &copy);
return self.writeStruct(copy);
}
}
pub fn writeFile(self: Self, file: std.fs.File) anyerror!void {
// TODO: figure out how to adjust std lib abstractions so that this ends up
// doing sendfile or maybe even copy_file_range under the right conditions.
var buf: [4000]u8 = undefined;
while (true) {
const n = try file.readAll(&buf);
try self.writeAll(buf[0..n]);
if (n < buf.len) return;
var n = try w.vtable.writev(w.context, data[i..]);
while (n >= data[i].len) {
n -= data[i].len;
i += 1;
if (i >= data.len) return;
}
data[i] = data[i][n..];
}
}
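To make the vtable-based interface above concrete, here is a minimal sketch, not part of this commit, of a hypothetical DiscardingWriter that counts and throws away everything written to it. It assumes the interface is reachable as `std.io.Writer` and that `FileLen` stays nested under `VTable` exactly as declared above; the discarding `writeFile` behavior is purely illustrative.
const std = @import("std");

const DiscardingWriter = struct {
    bytes_discarded: u64 = 0,

    fn writev(context: *anyopaque, data: []const []const u8) anyerror!usize {
        const self: *DiscardingWriter = @ptrCast(@alignCast(context));
        var total: usize = 0;
        for (data) |bytes| total += bytes.len;
        self.bytes_discarded += total;
        return total;
    }

    fn writeFile(
        context: *anyopaque,
        file: std.fs.File,
        offset: u64,
        len: std.io.Writer.VTable.FileLen,
        headers_and_trailers: []const []const u8,
        headers_len: usize,
    ) anyerror!usize {
        _ = file;
        _ = offset;
        _ = len;
        _ = headers_len;
        // Count only the header/trailer bytes; a real implementation would
        // also report how many file bytes were consumed.
        return writev(context, headers_and_trailers);
    }

    pub fn writer(self: *DiscardingWriter) std.io.Writer {
        return .{
            .context = self,
            .vtable = &.{ .writev = writev, .writeFile = writeFile },
        };
    }
};

test DiscardingWriter {
    var dw: DiscardingWriter = .{};
    const w = dw.writer();
    // `writevAll` rewrites `data` in place to handle short writes from
    // `VTable.writev`, so the caller passes a mutable slice of slices.
    var slices = [_][]const u8{ "hello, ", "world" };
    try w.writevAll(&slices);
    try std.testing.expectEqual(@as(u64, 12), dw.bytes_discarded);
}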

View File

@@ -1,43 +0,0 @@
const std = @import("../std.zig");
const io = std.io;
const mem = std.mem;
pub fn BufferedWriter(comptime buffer_size: usize, comptime WriterType: type) type {
return struct {
unbuffered_writer: WriterType,
buf: [buffer_size]u8 = undefined,
end: usize = 0,
pub const Error = WriterType.Error;
pub const Writer = io.Writer(*Self, Error, write);
const Self = @This();
pub fn flush(self: *Self) !void {
try self.unbuffered_writer.writeAll(self.buf[0..self.end]);
self.end = 0;
}
pub fn writer(self: *Self) Writer {
return .{ .context = self };
}
pub fn write(self: *Self, bytes: []const u8) Error!usize {
if (self.end + bytes.len > self.buf.len) {
try self.flush();
if (bytes.len > self.buf.len)
return self.unbuffered_writer.write(bytes);
}
const new_end = self.end + bytes.len;
@memcpy(self.buf[self.end..new_end], bytes);
self.end = new_end;
return bytes.len;
}
};
}
pub fn bufferedWriter(underlying_stream: anytype) BufferedWriter(4096, @TypeOf(underlying_stream)) {
return .{ .unbuffered_writer = underlying_stream };
}

View File

@@ -1,39 +0,0 @@
const std = @import("../std.zig");
const io = std.io;
const testing = std.testing;
/// A Writer that counts how many bytes have been written to it.
pub fn CountingWriter(comptime WriterType: type) type {
return struct {
bytes_written: u64,
child_stream: WriterType,
pub const Error = WriterType.Error;
pub const Writer = io.Writer(*Self, Error, write);
const Self = @This();
pub fn write(self: *Self, bytes: []const u8) Error!usize {
const amt = try self.child_stream.write(bytes);
self.bytes_written += amt;
return amt;
}
pub fn writer(self: *Self) Writer {
return .{ .context = self };
}
};
}
pub fn countingWriter(child_stream: anytype) CountingWriter(@TypeOf(child_stream)) {
return .{ .bytes_written = 0, .child_stream = child_stream };
}
test CountingWriter {
var counting_stream = countingWriter(std.io.null_writer);
const stream = counting_stream.writer();
const bytes = "yay" ** 100;
stream.writeAll(bytes) catch unreachable;
try testing.expect(counting_stream.bytes_written == bytes.len);
}

View File

@@ -1,198 +0,0 @@
const std = @import("../std.zig");
const io = std.io;
const testing = std.testing;
const mem = std.mem;
const assert = std.debug.assert;
/// This turns a byte buffer into an `io.Writer`, `io.Reader`, or `io.SeekableStream`.
/// If the supplied byte buffer is const, then `io.Writer` is not available.
pub fn FixedBufferStream(comptime Buffer: type) type {
return struct {
/// `Buffer` is either a `[]u8` or `[]const u8`.
buffer: Buffer,
pos: usize,
pub const ReadError = error{};
pub const WriteError = error{NoSpaceLeft};
pub const SeekError = error{};
pub const GetSeekPosError = error{};
pub const Reader = io.Reader(*Self, ReadError, read);
pub const Writer = io.Writer(*Self, WriteError, write);
pub const SeekableStream = io.SeekableStream(
*Self,
SeekError,
GetSeekPosError,
seekTo,
seekBy,
getPos,
getEndPos,
);
const Self = @This();
pub fn reader(self: *Self) Reader {
return .{ .context = self };
}
pub fn writer(self: *Self) Writer {
return .{ .context = self };
}
pub fn seekableStream(self: *Self) SeekableStream {
return .{ .context = self };
}
pub fn read(self: *Self, dest: []u8) ReadError!usize {
const size = @min(dest.len, self.buffer.len - self.pos);
const end = self.pos + size;
@memcpy(dest[0..size], self.buffer[self.pos..end]);
self.pos = end;
return size;
}
/// If the returned number of bytes written is less than requested, the
/// buffer is full. Returns `error.NoSpaceLeft` when no bytes would be written.
/// Note: `error.NoSpaceLeft` matches the corresponding error from
/// `std.fs.File.WriteError`.
pub fn write(self: *Self, bytes: []const u8) WriteError!usize {
if (bytes.len == 0) return 0;
if (self.pos >= self.buffer.len) return error.NoSpaceLeft;
const n = @min(self.buffer.len - self.pos, bytes.len);
@memcpy(self.buffer[self.pos..][0..n], bytes[0..n]);
self.pos += n;
if (n == 0) return error.NoSpaceLeft;
return n;
}
pub fn seekTo(self: *Self, pos: u64) SeekError!void {
self.pos = @min(std.math.lossyCast(usize, pos), self.buffer.len);
}
pub fn seekBy(self: *Self, amt: i64) SeekError!void {
if (amt < 0) {
const abs_amt = @abs(amt);
const abs_amt_usize = std.math.cast(usize, abs_amt) orelse std.math.maxInt(usize);
if (abs_amt_usize > self.pos) {
self.pos = 0;
} else {
self.pos -= abs_amt_usize;
}
} else {
const amt_usize = std.math.cast(usize, amt) orelse std.math.maxInt(usize);
const new_pos = std.math.add(usize, self.pos, amt_usize) catch std.math.maxInt(usize);
self.pos = @min(self.buffer.len, new_pos);
}
}
pub fn getEndPos(self: *Self) GetSeekPosError!u64 {
return self.buffer.len;
}
pub fn getPos(self: *Self) GetSeekPosError!u64 {
return self.pos;
}
pub fn getWritten(self: Self) Buffer {
return self.buffer[0..self.pos];
}
pub fn reset(self: *Self) void {
self.pos = 0;
}
};
}
pub fn fixedBufferStream(buffer: anytype) FixedBufferStream(Slice(@TypeOf(buffer))) {
return .{ .buffer = buffer, .pos = 0 };
}
fn Slice(comptime T: type) type {
switch (@typeInfo(T)) {
.pointer => |ptr_info| {
var new_ptr_info = ptr_info;
switch (ptr_info.size) {
.slice => {},
.one => switch (@typeInfo(ptr_info.child)) {
.array => |info| new_ptr_info.child = info.child,
else => @compileError("invalid type given to fixedBufferStream"),
},
else => @compileError("invalid type given to fixedBufferStream"),
}
new_ptr_info.size = .slice;
return @Type(.{ .pointer = new_ptr_info });
},
else => @compileError("invalid type given to fixedBufferStream"),
}
}
test "output" {
var buf: [255]u8 = undefined;
var fbs = fixedBufferStream(&buf);
const stream = fbs.writer();
try stream.print("{s}{s}!", .{ "Hello", "World" });
try testing.expectEqualSlices(u8, "HelloWorld!", fbs.getWritten());
}
test "output at comptime" {
comptime {
var buf: [255]u8 = undefined;
var fbs = fixedBufferStream(&buf);
const stream = fbs.writer();
try stream.print("{s}{s}!", .{ "Hello", "World" });
try testing.expectEqualSlices(u8, "HelloWorld!", fbs.getWritten());
}
}
test "output 2" {
var buffer: [10]u8 = undefined;
var fbs = fixedBufferStream(&buffer);
try fbs.writer().writeAll("Hello");
try testing.expect(mem.eql(u8, fbs.getWritten(), "Hello"));
try fbs.writer().writeAll("world");
try testing.expect(mem.eql(u8, fbs.getWritten(), "Helloworld"));
try testing.expectError(error.NoSpaceLeft, fbs.writer().writeAll("!"));
try testing.expect(mem.eql(u8, fbs.getWritten(), "Helloworld"));
fbs.reset();
try testing.expect(fbs.getWritten().len == 0);
try testing.expectError(error.NoSpaceLeft, fbs.writer().writeAll("Hello world!"));
try testing.expect(mem.eql(u8, fbs.getWritten(), "Hello worl"));
try fbs.seekTo((try fbs.getEndPos()) + 1);
try testing.expectError(error.NoSpaceLeft, fbs.writer().writeAll("H"));
}
test "input" {
const bytes = [_]u8{ 1, 2, 3, 4, 5, 6, 7 };
var fbs = fixedBufferStream(&bytes);
var dest: [4]u8 = undefined;
var read = try fbs.reader().read(&dest);
try testing.expect(read == 4);
try testing.expect(mem.eql(u8, dest[0..4], bytes[0..4]));
read = try fbs.reader().read(&dest);
try testing.expect(read == 3);
try testing.expect(mem.eql(u8, dest[0..3], bytes[4..7]));
read = try fbs.reader().read(&dest);
try testing.expect(read == 0);
try fbs.seekTo((try fbs.getEndPos()) + 1);
read = try fbs.reader().read(&dest);
try testing.expect(read == 0);
}

View File

@@ -148,14 +148,15 @@ pub fn defaultLog(
) void {
const level_txt = comptime message_level.asText();
const prefix2 = if (scope == .default) ": " else "(" ++ @tagName(scope) ++ "): ";
const stderr = std.io.getStdErr().writer();
var bw = std.io.bufferedWriter(stderr);
const writer = bw.writer();
var buffer: [1024]u8 = undefined;
var bw: std.io.BufferedWriter = .{
.unbuffered_writer = std.io.getStdErr().writer(),
.buffer = &buffer,
};
std.debug.lockStdErr();
defer std.debug.unlockStdErr();
nosuspend {
writer.print(level_txt ++ prefix2 ++ format ++ "\n", args) catch return;
bw.print(level_txt ++ prefix2 ++ format ++ "\n", args) catch return;
bw.flush() catch return;
}
}
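The same explicit-buffer pattern applies outside of logging. A small sketch, not from this commit, assuming `BufferedWriter` exposes the `unbuffered_writer` and `buffer` fields and the `print`/`flush` methods used by `defaultLog` above:
const std = @import("std");

pub fn main() !void {
    var buffer: [4096]u8 = undefined;
    var bw: std.io.BufferedWriter = .{
        .unbuffered_writer = std.io.getStdOut().writer(),
        .buffer = &buffer,
    };
    try bw.print("hello, {s}\n", .{"world"});
    // Nothing reaches stdout until the buffer fills or `flush` is called.
    try bw.flush();
}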

View File

@@ -67,19 +67,17 @@ pub const Guid = extern struct {
) !void {
_ = options;
if (f.len == 0) {
const fmt = std.fmt.fmtSliceHexLower;
const time_low = @byteSwap(self.time_low);
const time_mid = @byteSwap(self.time_mid);
const time_high_and_version = @byteSwap(self.time_high_and_version);
return std.fmt.format(writer, "{:0>8}-{:0>4}-{:0>4}-{:0>2}{:0>2}-{:0>12}", .{
fmt(std.mem.asBytes(&time_low)),
fmt(std.mem.asBytes(&time_mid)),
fmt(std.mem.asBytes(&time_high_and_version)),
fmt(std.mem.asBytes(&self.clock_seq_high_and_reserved)),
fmt(std.mem.asBytes(&self.clock_seq_low)),
fmt(std.mem.asBytes(&self.node)),
return std.fmt.format(writer, "{x:0>8}-{x:0>4}-{x:0>4}-{x:0>2}{x:0>2}-{x:0>12}", .{
std.mem.asBytes(&time_low),
std.mem.asBytes(&time_mid),
std.mem.asBytes(&time_high_and_version),
std.mem.asBytes(&self.clock_seq_high_and_reserved),
std.mem.asBytes(&self.clock_seq_low),
std.mem.asBytes(&self.node),
});
} else {
std.fmt.invalidFmtError(f, self);