maybe it's better to track bytes written in BufferedWriter

This commit is contained in:
Andrew Kelley 2025-04-10 18:20:57 -07:00
parent faab6d5cbf
commit 646454beb5
13 changed files with 123 additions and 177 deletions
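The pattern applied throughout the diffs below, as a minimal sketch (Point is a hypothetical example type; the *std.io.BufferedWriter methods printCount and writeAllCount are assumed to return the number of bytes written, as the diffs suggest):

const std = @import("std");

const Point = struct {
    x: i32,
    y: i32,

    // New convention: format takes a concrete *BufferedWriter and returns the
    // number of bytes written, instead of an anytype writer and void.
    pub fn format(p: Point, bw: *std.io.BufferedWriter, comptime fmt: []const u8) anyerror!usize {
        comptime std.debug.assert(fmt.len == 0);
        return bw.printCount("({d}, {d})", .{ p.x, p.y });
    }
};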

View File

@@ -40,22 +40,17 @@ pub const Component = union(enum) {
};
}
pub fn format(
component: Component,
comptime fmt: []const u8,
options: std.fmt.Options,
writer: *std.io.BufferedWriter,
) anyerror!void {
_ = options;
pub fn format(component: Component, bw: *std.io.BufferedWriter, comptime fmt: []const u8) anyerror!usize {
var n: usize = 0;
if (fmt.len == 0) {
try writer.print("std.Uri.Component{{ .{s} = \"{}\" }}", .{
n += try bw.printCount("std.Uri.Component{{ .{s} = \"{}\" }}", .{
@tagName(component),
std.zig.fmtEscapes(switch (component) {
.raw, .percent_encoded => |string| string,
}),
});
} else if (comptime std.mem.eql(u8, fmt, "raw")) switch (component) {
.raw => |raw| try writer.writeAll(raw),
.raw => |raw| n += try bw.writeAllCount(raw),
.percent_encoded => |percent_encoded| {
var start: usize = 0;
var index: usize = 0;
@@ -64,51 +59,55 @@ pub const Component = union(enum) {
if (percent_encoded.len - index < 2) continue;
const percent_encoded_char =
std.fmt.parseInt(u8, percent_encoded[index..][0..2], 16) catch continue;
try writer.print("{s}{c}", .{
n += try bw.printCount("{s}{c}", .{
percent_encoded[start..percent],
percent_encoded_char,
});
start = percent + 3;
index = percent + 3;
}
try writer.writeAll(percent_encoded[start..]);
n += try bw.writeAllCount(percent_encoded[start..]);
},
} else if (comptime std.mem.eql(u8, fmt, "%")) switch (component) {
.raw => |raw| try percentEncode(writer, raw, isUnreserved),
.percent_encoded => |percent_encoded| try writer.writeAll(percent_encoded),
.raw => |raw| n += try percentEncode(bw, raw, isUnreserved),
.percent_encoded => |percent_encoded| n += try bw.writeAllCount(percent_encoded),
} else if (comptime std.mem.eql(u8, fmt, "user")) switch (component) {
.raw => |raw| try percentEncode(writer, raw, isUserChar),
.percent_encoded => |percent_encoded| try writer.writeAll(percent_encoded),
.raw => |raw| n += try percentEncode(bw, raw, isUserChar),
.percent_encoded => |percent_encoded| n += try bw.writeAllCount(percent_encoded),
} else if (comptime std.mem.eql(u8, fmt, "password")) switch (component) {
.raw => |raw| try percentEncode(writer, raw, isPasswordChar),
.percent_encoded => |percent_encoded| try writer.writeAll(percent_encoded),
.raw => |raw| n += try percentEncode(bw, raw, isPasswordChar),
.percent_encoded => |percent_encoded| n += try bw.writeAllCount(percent_encoded),
} else if (comptime std.mem.eql(u8, fmt, "host")) switch (component) {
.raw => |raw| try percentEncode(writer, raw, isHostChar),
.percent_encoded => |percent_encoded| try writer.writeAll(percent_encoded),
.raw => |raw| n += try percentEncode(bw, raw, isHostChar),
.percent_encoded => |percent_encoded| n += try bw.writeAllCount(percent_encoded),
} else if (comptime std.mem.eql(u8, fmt, "path")) switch (component) {
.raw => |raw| try percentEncode(writer, raw, isPathChar),
.percent_encoded => |percent_encoded| try writer.writeAll(percent_encoded),
.raw => |raw| n += try percentEncode(bw, raw, isPathChar),
.percent_encoded => |percent_encoded| n += try bw.writeAllCount(percent_encoded),
} else if (comptime std.mem.eql(u8, fmt, "query")) switch (component) {
.raw => |raw| try percentEncode(writer, raw, isQueryChar),
.percent_encoded => |percent_encoded| try writer.writeAll(percent_encoded),
.raw => |raw| n += try percentEncode(bw, raw, isQueryChar),
.percent_encoded => |percent_encoded| n += try bw.writeAllCount(percent_encoded),
} else if (comptime std.mem.eql(u8, fmt, "fragment")) switch (component) {
.raw => |raw| try percentEncode(writer, raw, isFragmentChar),
.percent_encoded => |percent_encoded| try writer.writeAll(percent_encoded),
.raw => |raw| n += try percentEncode(bw, raw, isFragmentChar),
.percent_encoded => |percent_encoded| n += try bw.writeAllCount(percent_encoded),
} else @compileError("invalid format string '" ++ fmt ++ "'");
return n;
}
pub fn percentEncode(
writer: *std.io.BufferedWriter,
bw: *std.io.BufferedWriter,
raw: []const u8,
comptime isValidChar: fn (u8) bool,
) anyerror!void {
) anyerror!usize {
var n: usize = 0;
var start: usize = 0;
for (raw, 0..) |char, index| {
if (isValidChar(char)) continue;
try writer.print("{s}%{X:0>2}", .{ raw[start..index], char });
n += try bw.printCount("{s}%{X:0>2}", .{ raw[start..index], char });
start = index + 1;
}
try writer.writeAll(raw[start..]);
n += try bw.writeAllCount(raw[start..]);
return n;
}
};
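Since percentEncode now reports bytes written, callers accumulate the count instead of discarding it; a hypothetical fragment in the style of the format method above (bw in scope as a *std.io.BufferedWriter):

n += try percentEncode(bw, "hello world", isUnreserved);
n += try bw.writeAllCount("\n");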

View File

@@ -34,28 +34,26 @@ pub const StackTrace = struct {
index: usize,
instruction_addresses: []usize,
pub fn format(
self: StackTrace,
comptime fmt: []const u8,
options: std.fmt.FormatOptions,
writer: anytype,
) !void {
if (fmt.len != 0) std.fmt.invalidFmtError(fmt, self);
pub fn format(st: StackTrace, bw: *std.io.BufferedWriter, comptime fmt: []const u8) anyerror!usize {
comptime std.debug.assert(fmt.len == 0);
// TODO: re-evaluate whether to use format() methods at all.
// Until then, avoid an error when using GeneralPurposeAllocator with WebAssembly
// Until then, avoid an error when using DebugAllocator with WebAssembly
// where it tries to call detectTTYConfig here.
if (builtin.os.tag == .freestanding) return;
if (builtin.os.tag == .freestanding) return 0;
_ = options;
const debug_info = std.debug.getSelfDebugInfo() catch |err| {
return writer.print("\nUnable to print stack trace: Unable to open debug info: {s}\n", .{@errorName(err)});
return bw.printCount("\nUnable to print stack trace: Unable to open debug info: {s}\n", .{
@errorName(err),
});
};
const tty_config = std.io.tty.detectConfig(.stderr());
try writer.writeAll("\n");
std.debug.writeStackTrace(self, writer, debug_info, tty_config) catch |err| {
try writer.print("Unable to print stack trace: {s}\n", .{@errorName(err)});
var n: usize = 0;
n += try bw.writeAllCount("\n");
n += std.debug.writeStackTrace(st, bw, debug_info, tty_config) catch |err|
try bw.printCount("Unable to print stack trace: {s}\n", .{@errorName(err)});
return n;
}
};

View File

@@ -1,66 +1,39 @@
const std = @import("../std.zig");
const deflate = @import("flate/deflate.zig");
const inflate = @import("flate/inflate.zig");
/// Decompress compressed data from reader and write plain data to the writer.
pub fn decompress(reader: anytype, writer: anytype) !void {
pub fn decompress(reader: *std.io.BufferedReader, writer: *std.io.BufferedWriter) !void {
try inflate.decompress(.gzip, reader, writer);
}
/// Decompressor type
pub fn Decompressor(comptime ReaderType: type) type {
return inflate.Decompressor(.gzip, ReaderType);
}
/// Create Decompressor which will read compressed data from reader.
pub fn decompressor(reader: anytype) Decompressor(@TypeOf(reader)) {
return inflate.decompressor(.gzip, reader);
}
pub const Decompressor = inflate.Decompressor(.gzip);
/// Compression level, trades between speed and compression size.
pub const Options = deflate.Options;
/// Compress plain data from reader and write compressed data to the writer.
pub fn compress(reader: anytype, writer: anytype, options: Options) !void {
pub fn compress(reader: *std.io.BufferedReader, writer: *std.io.BufferedWriter, options: Options) !void {
try deflate.compress(.gzip, reader, writer, options);
}
/// Compressor type
pub fn Compressor(comptime WriterType: type) type {
return deflate.Compressor(.gzip, WriterType);
}
/// Create Compressor which outputs compressed data to the writer.
pub fn compressor(writer: anytype, options: Options) !Compressor(@TypeOf(writer)) {
return try deflate.compressor(.gzip, writer, options);
}
pub const Compressor = deflate.Compressor(.gzip);
/// Huffman only compression. Without Lempel-Ziv match searching. Faster
/// compression, less memory requirements but bigger compressed sizes.
pub const huffman = struct {
pub fn compress(reader: anytype, writer: anytype) !void {
pub fn compress(reader: *std.io.BufferedReader, writer: *std.io.BufferedWriter) !void {
try deflate.huffman.compress(.gzip, reader, writer);
}
pub fn Compressor(comptime WriterType: type) type {
return deflate.huffman.Compressor(.gzip, WriterType);
}
pub fn compressor(writer: anytype) !huffman.Compressor(@TypeOf(writer)) {
return deflate.huffman.compressor(.gzip, writer);
}
pub const Compressor = deflate.huffman.Compressor(.gzip);
};
// No compression store only. Compressed size is slightly bigger than plain.
pub const store = struct {
pub fn compress(reader: anytype, writer: anytype) !void {
pub fn compress(reader: *std.io.BufferedReader, writer: *std.io.BufferedWriter) !void {
try deflate.store.compress(.gzip, reader, writer);
}
pub fn Compressor(comptime WriterType: type) type {
return deflate.store.Compressor(.gzip, WriterType);
}
pub fn compressor(writer: anytype) !store.Compressor(@TypeOf(writer)) {
return deflate.store.compressor(.gzip, writer);
}
pub const Compressor = deflate.store.Compressor(.gzip);
};
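With Compressor and Decompressor as concrete types, the streaming helpers take buffered reader/writer pointers directly; a sketch of a call site (gunzip is a hypothetical wrapper name):

const std = @import("std");

fn gunzip(in: *std.io.BufferedReader, out: *std.io.BufferedWriter) !void {
    // One gzip stream in, plain bytes out; no per-ReaderType instantiation needed.
    try std.compress.gzip.decompress(in, out);
}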

View File

@@ -7,21 +7,10 @@ pub const compressed_block = types.compressed_block;
pub const decompress = @import("zstandard/decompress.zig");
pub const DecompressorOptions = struct {
verify_checksum: bool = true,
window_buffer: []u8,
/// Recommended amount by the standard. Lower than this may result
/// in inability to decompress common streams.
pub const default_window_buffer_len = 8 * 1024 * 1024;
};
pub const Decompressor = struct {
const Self = @This();
const table_size_max = types.compressed_block.table_size_max;
source: std.io.CountingReader,
source: *std.io.BufferedReader,
state: enum { NewFrame, InFrame, LastBlock },
decode_state: decompress.block.DecodeState,
frame_context: decompress.FrameContext,
@@ -35,6 +24,15 @@ pub const Decompressor = struct {
checksum: ?u32,
current_frame_decompressed_size: usize,
pub const Options = struct {
verify_checksum: bool = true,
window_buffer: []u8,
/// Recommended amount by the standard. Lower than this may result
/// in inability to decompress common streams.
pub const default_window_buffer_len = 8 * 1024 * 1024;
};
const WindowBuffer = struct {
data: []u8 = undefined,
read_index: usize = 0,
@@ -49,9 +47,9 @@ pub const Decompressor = struct {
OutOfMemory,
};
pub fn init(source: *std.io.BufferedReader, options: DecompressorOptions) Self {
pub fn init(source: *std.io.BufferedReader, options: Options) Decompressor {
return .{
.source = std.io.countingReader(source),
.source = source,
.state = .NewFrame,
.decode_state = undefined,
.frame_context = undefined,
@@ -67,7 +65,7 @@ pub const Decompressor = struct {
};
}
fn frameInit(self: *Self) !void {
fn frameInit(self: *Decompressor) !void {
const source_reader = self.source;
switch (try decompress.decodeFrameHeader(source_reader)) {
.skippable => |header| {
@@ -98,11 +96,11 @@ pub const Decompressor = struct {
}
}
pub fn reader(self: *Self) std.io.Reader {
pub fn reader(self: *Decompressor) std.io.Reader {
return .{ .context = self };
}
pub fn read(self: *Self, buffer: []u8) Error!usize {
pub fn read(self: *Decompressor, buffer: []u8) Error!usize {
if (buffer.len == 0) return 0;
var size: usize = 0;
@@ -123,7 +121,7 @@ pub const Decompressor = struct {
return size;
}
fn readInner(self: *Self, buffer: []u8) Error!usize {
fn readInner(self: *Decompressor, buffer: []u8) Error!usize {
std.debug.assert(self.state != .NewFrame);
var ring_buffer = RingBuffer{
@@ -198,16 +196,12 @@ pub const Decompressor = struct {
}
};
pub fn decompressor(reader: anytype, options: DecompressorOptions) Decompressor(@TypeOf(reader)) {
return Decompressor(@TypeOf(reader)).init(reader, options);
}
fn testDecompress(data: []const u8) ![]u8 {
const window_buffer = try std.testing.allocator.alloc(u8, 1 << 23);
defer std.testing.allocator.free(window_buffer);
var in_stream = std.io.fixedBufferStream(data);
var zstd_stream = decompressor(in_stream.reader(), .{ .window_buffer = window_buffer });
var zstd_stream: Decompressor = .init(in_stream.reader(), .{ .window_buffer = window_buffer });
const result = zstd_stream.reader().readAllAlloc(std.testing.allocator, std.math.maxInt(usize));
return result;
}
@@ -260,7 +254,7 @@ fn expectEqualDecodedStreaming(expected: []const u8, input: []const u8) !void {
defer std.testing.allocator.free(window_buffer);
var in_stream = std.io.fixedBufferStream(input);
var stream = decompressor(in_stream.reader(), .{ .window_buffer = window_buffer });
var stream: Decompressor = .init(in_stream.reader(), .{ .window_buffer = window_buffer });
const result = try stream.reader().readAllAlloc(std.testing.allocator, std.math.maxInt(usize));
defer std.testing.allocator.free(result);
@@ -299,7 +293,7 @@ test "declared raw literals size too large" {
var fbs = std.io.fixedBufferStream(input_raw);
var window: [1024]u8 = undefined;
var stream = decompressor(fbs.reader(), .{ .window_buffer = &window });
var stream: Decompressor = .init(fbs.reader(), .{ .window_buffer = &window });
var buf: [1024]u8 = undefined;
try std.testing.expectError(error.MalformedBlock, stream.read(&buf));
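Construction now mirrors the updated tests: options live on the type as Decompressor.Options and initialization is a plain init call. A sketch, with compressed_bytes as a placeholder input and the fixedBufferStream reader passed as in the tests above:

var fbs = std.io.fixedBufferStream(compressed_bytes);
var window: [1024]u8 = undefined;
var stream: std.compress.zstd.Decompressor = .init(fbs.reader(), .{ .window_buffer = &window });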

View File

@@ -629,5 +629,7 @@ pub fn decodeZstandardHeader(
}
test {
std.testing.refAllDecls(@This());
_ = types;
_ = block;
_ = readers;
}

View File

@@ -31,11 +31,11 @@ pub const ReversedByteReader = struct {
/// FSE compressed data.
pub const ReverseBitReader = struct {
byte_reader: ReversedByteReader,
bit_reader: std.io.BitReader(.big, ReversedByteReader.Reader),
bit_reader: std.io.BitReader(.big),
pub fn init(self: *ReverseBitReader, bytes: []const u8) error{BitStreamHasNoStartBit}!void {
self.byte_reader = ReversedByteReader.init(bytes);
self.bit_reader = std.io.bitReader(.big, self.byte_reader.reader());
self.bit_reader = .init(self.byte_reader.reader());
if (bytes.len == 0) return;
var i: usize = 0;
while (i < 8 and 0 == self.readBitsNoEof(u1, 1) catch unreachable) : (i += 1) {}
@@ -59,24 +59,4 @@
}
};
pub fn BitReader(comptime Reader: type) type {
return struct {
underlying: std.io.BitReader(.little, Reader),
pub fn readBitsNoEof(self: *@This(), comptime U: type, num_bits: u16) !U {
return self.underlying.readBitsNoEof(U, num_bits);
}
pub fn readBits(self: *@This(), comptime U: type, num_bits: u16, out_bits: *u16) !U {
return self.underlying.readBits(U, num_bits, out_bits);
}
pub fn alignToByte(self: *@This()) void {
self.underlying.alignToByte();
}
};
}
pub fn bitReader(reader: anytype) BitReader(@TypeOf(reader)) {
return .{ .underlying = std.io.bitReader(.little, reader) };
}
pub const BitReader = std.io.BitReader(.little);
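The local wrapper gives way to a direct alias, so call sites construct and read from it the same way; a minimal sketch (source is assumed to be a *std.io.BufferedReader):

var bits: BitReader = .init(source);
const tag = try bits.readBitsNoEof(u2, 2); // little-endian bit order, as before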

View File

@@ -733,26 +733,28 @@ pub fn writeStackTrace(
writer: *std.io.BufferedWriter,
debug_info: *SelfInfo,
tty_config: io.tty.Config,
) !void {
) !usize {
if (builtin.strip_debug_info) return error.MissingDebugInfo;
var frame_index: usize = 0;
var frames_left: usize = @min(stack_trace.index, stack_trace.instruction_addresses.len);
var n: usize = 0;
while (frames_left != 0) : ({
frames_left -= 1;
frame_index = (frame_index + 1) % stack_trace.instruction_addresses.len;
}) {
const return_address = stack_trace.instruction_addresses[frame_index];
try printSourceAtAddress(debug_info, writer, return_address - 1, tty_config);
n += try printSourceAtAddress(debug_info, writer, return_address - 1, tty_config);
}
if (stack_trace.index > stack_trace.instruction_addresses.len) {
const dropped_frames = stack_trace.index - stack_trace.instruction_addresses.len;
tty_config.setColor(writer, .bold) catch {};
try writer.print("({d} additional stack frames skipped...)\n", .{dropped_frames});
tty_config.setColor(writer, .reset) catch {};
n += tty_config.setColor(writer, .bold) catch 0;
n += try writer.printCount("({d} additional stack frames skipped...)\n", .{dropped_frames});
n += tty_config.setColor(writer, .reset) catch 0;
}
return n;
}
pub const UnwindError = if (have_ucontext)
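Callers of writeStackTrace can now propagate the byte count as well; a hypothetical call site (trace, bw, debug_info, and tty_config named for illustration):

var total: usize = 0;
total += try std.debug.writeStackTrace(trace, bw, debug_info, tty_config);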
@@ -1100,7 +1102,12 @@ fn printUnwindError(debug_info: *SelfInfo, writer: *std.io.BufferedWriter, addre
try tty_config.setColor(writer, .reset);
}
pub fn printSourceAtAddress(debug_info: *SelfInfo, writer: *std.io.BufferedWriter, address: usize, tty_config: io.tty.Config) !void {
pub fn printSourceAtAddress(
debug_info: *SelfInfo,
writer: *std.io.BufferedWriter,
address: usize,
tty_config: io.tty.Config,
) !void {
const module = debug_info.getModuleForAddress(address) catch |err| switch (err) {
error.MissingDebugInfo, error.InvalidDebugInfo => return printUnknownSource(debug_info, writer, address, tty_config),
else => return err,

View File

@@ -436,7 +436,7 @@ pub fn DebugAllocator(comptime config: Config) type {
const stack_trace = bucketStackTrace(bucket, slot_count, slot_index, .alloc);
const page_addr = @intFromPtr(bucket) & ~(page_size - 1);
const addr = page_addr + slot_index * size_class;
log.err("memory address 0x{x} leaked: {}", .{ addr, stack_trace });
log.err("memory address 0x{x} leaked: {f}", .{ addr, stack_trace });
leaks = true;
}
}
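These {} to {f} substitutions assume the {f} specifier explicitly routes a value to its custom format method (matching the count-returning format signatures introduced earlier in this commit), while {x} and {d} keep their usual meaning; for example:

// stack_trace is rendered via StackTrace.format; addr stays a plain hex integer.
log.err("memory address 0x{x} leaked: {f}", .{ addr, stack_trace });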
@@ -463,7 +463,7 @@ pub fn DebugAllocator(comptime config: Config) type {
while (it.next()) |large_alloc| {
if (config.retain_metadata and large_alloc.freed) continue;
const stack_trace = large_alloc.getStackTrace(.alloc);
log.err("memory address 0x{x} leaked: {}", .{
log.err("memory address 0x{x} leaked: {f}", .{
@intFromPtr(large_alloc.bytes.ptr), stack_trace,
});
leaks = true;
@@ -522,7 +522,7 @@ pub fn DebugAllocator(comptime config: Config) type {
.index = 0,
};
std.debug.captureStackTrace(ret_addr, &second_free_stack_trace);
log.err("Double free detected. Allocation: {} First free: {} Second free: {}", .{
log.err("Double free detected. Allocation: {f} First free: {f} Second free: {f}", .{
alloc_stack_trace, free_stack_trace, second_free_stack_trace,
});
}
@@ -568,7 +568,7 @@ pub fn DebugAllocator(comptime config: Config) type {
.index = 0,
};
std.debug.captureStackTrace(ret_addr, &free_stack_trace);
log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {} Free: {}", .{
log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {f} Free: {f}", .{
entry.value_ptr.bytes.len,
old_mem.len,
entry.value_ptr.getStackTrace(.alloc),
@@ -678,7 +678,7 @@ pub fn DebugAllocator(comptime config: Config) type {
.index = 0,
};
std.debug.captureStackTrace(ret_addr, &free_stack_trace);
log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {} Free: {}", .{
log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {f} Free: {f}", .{
entry.value_ptr.bytes.len,
old_mem.len,
entry.value_ptr.getStackTrace(.alloc),
@@ -907,7 +907,7 @@ pub fn DebugAllocator(comptime config: Config) type {
};
std.debug.captureStackTrace(return_address, &free_stack_trace);
if (old_memory.len != requested_size) {
log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {} Free: {}", .{
log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {f} Free: {f}", .{
requested_size,
old_memory.len,
bucketStackTrace(bucket, slot_count, slot_index, .alloc),
@@ -915,7 +915,7 @@ pub fn DebugAllocator(comptime config: Config) type {
});
}
if (alignment != slot_alignment) {
log.err("Allocation alignment {d} does not match free alignment {d}. Allocation: {} Free: {}", .{
log.err("Allocation alignment {d} does not match free alignment {d}. Allocation: {f} Free: {f}", .{
slot_alignment.toByteUnits(),
alignment.toByteUnits(),
bucketStackTrace(bucket, slot_count, slot_index, .alloc),
@@ -1006,7 +1006,7 @@ pub fn DebugAllocator(comptime config: Config) type {
};
std.debug.captureStackTrace(return_address, &free_stack_trace);
if (memory.len != requested_size) {
log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {} Free: {}", .{
log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {f} Free: {f}", .{
requested_size,
memory.len,
bucketStackTrace(bucket, slot_count, slot_index, .alloc),
@@ -1014,7 +1014,7 @@ pub fn DebugAllocator(comptime config: Config) type {
});
}
if (alignment != slot_alignment) {
log.err("Allocation alignment {d} does not match free alignment {d}. Allocation: {} Free: {}", .{
log.err("Allocation alignment {d} does not match free alignment {d}. Allocation: {f} Free: {f}", .{
slot_alignment.toByteUnits(),
alignment.toByteUnits(),
bucketStackTrace(bucket, slot_count, slot_index, .alloc),

View File

@@ -130,13 +130,9 @@ pub const Request = struct {
write_error: anyerror,
pub const Compression = union(enum) {
pub const DeflateDecompressor = std.compress.zlib.Decompressor(std.io.AnyReader);
pub const GzipDecompressor = std.compress.gzip.Decompressor(std.io.AnyReader);
pub const ZstdDecompressor = std.compress.zstd.Decompressor(std.io.AnyReader);
deflate: DeflateDecompressor,
gzip: GzipDecompressor,
zstd: ZstdDecompressor,
deflate: std.compress.zlib.Decompressor,
gzip: std.compress.gzip.Decompressor,
zstd: std.compress.zstd.Decompressor,
none: void,
};

View File

@@ -9,7 +9,7 @@ const native_endian = builtin.cpu.arch.endian();
key: []const u8,
request: *std.http.Server.Request,
recv_fifo: std.fifo.LinearFifo(u8, .Slice),
reader: std.io.AnyReader,
reader: *std.io.BufferedReader,
response: std.http.Server.Response,
/// Number of bytes that have been peeked but not discarded yet.
outstanding_len: usize,

View File

@@ -30,8 +30,7 @@ pub const limitedReader = @import("io/limited_reader.zig").limitedReader;
pub const MultiWriter = @import("io/multi_writer.zig").MultiWriter;
pub const multiWriter = @import("io/multi_writer.zig").multiWriter;
pub const BitReader = @import("io/bit_reader.zig").BitReader;
pub const bitReader = @import("io/bit_reader.zig").bitReader;
pub const BitReader = @import("io/bit_reader.zig").Type;
pub const BitWriter = @import("io/bit_writer.zig").BitWriter;
pub const bitWriter = @import("io/bit_writer.zig").bitWriter;

View File

@@ -614,23 +614,23 @@ pub fn alignBufferOptions(bw: *BufferedWriter, buffer: []const u8, options: std.
return alignBuffer(bw, buffer, options.width orelse buffer.len, options.alignment, options.fill);
}
pub fn printAddress(bw: *BufferedWriter, value: anytype) anyerror!void {
pub fn printAddress(bw: *BufferedWriter, value: anytype) anyerror!usize {
const T = @TypeOf(value);
var n: usize = 0;
switch (@typeInfo(T)) {
.pointer => |info| {
try bw.writeAll(@typeName(info.child) ++ "@");
n += try bw.writeAllCount(@typeName(info.child) ++ "@");
if (info.size == .slice)
try printIntOptions(bw, @intFromPtr(value.ptr), 16, .lower, .{})
n += try printIntOptions(bw, @intFromPtr(value.ptr), 16, .lower, .{})
else
try printIntOptions(bw, @intFromPtr(value), 16, .lower, .{});
return;
n += try printIntOptions(bw, @intFromPtr(value), 16, .lower, .{});
return n;
},
.optional => |info| {
if (@typeInfo(info.child) == .pointer) {
try bw.writeAll(@typeName(info.child) ++ "@");
try printIntOptions(bw, @intFromPtr(value), 16, .lower, .{});
return;
n += try bw.writeAllCount(@typeName(info.child) ++ "@");
n += try printIntOptions(bw, @intFromPtr(value), 16, .lower, .{});
return n;
}
},
else => {},

View File

@@ -1,4 +1,5 @@
const std = @import("../std.zig");
const bit_reader = @This();
// General note on endianness:
//Big endian is packed starting in the most significant part of the byte and subsequent
@@ -13,11 +14,11 @@ const std = @import("../std.zig");
// of the byte.
/// Creates a bit reader which allows for reading bits from an underlying standard reader
pub fn BitReader(comptime endian: std.builtin.Endian) type {
pub fn Type(comptime endian: std.builtin.Endian) type {
return struct {
reader: *std.io.BufferedReader,
bits: u8 = 0,
count: u4 = 0,
bits: u8,
count: u4,
const low_bit_mask = [9]u8{
0b00000000,
@@ -31,11 +32,12 @@ pub fn BitReader(comptime endian: std.builtin.Endian) type {
0b11111111,
};
pub fn init(reader: *std.io.BufferedReader) @This() {
return .{ .reader = reader, .bits = 0, .count = 0 };
}
fn Bits(comptime T: type) type {
return struct {
T,
u16,
};
return struct { T, u16 };
}
fn initBits(comptime T: type, out: anytype, num: u16) Bits(T) {
@@ -82,7 +84,7 @@ pub fn BitReader(comptime endian: std.builtin.Endian) type {
const full_bytes_left = (num - out_count) / 8;
for (0..full_bytes_left) |_| {
const byte = self.reader.readByte() catch |err| switch (err) {
const byte = self.reader.takeByte() catch |err| switch (err) {
error.EndOfStream => return initBits(T, out, out_count),
else => |e| return e,
};
@@ -105,7 +107,7 @@ pub fn BitReader(comptime endian: std.builtin.Endian) type {
if (bits_left == 0) return initBits(T, out, out_count);
const final_byte = self.reader.readByte() catch |err| switch (err) {
const final_byte = self.reader.takeByte() catch |err| switch (err) {
error.EndOfStream => return initBits(T, out, out_count),
else => |e| return e,
};
@@ -157,10 +159,6 @@ pub fn BitReader(comptime endian: std.builtin.Endian) type {
};
}
pub fn bitReader(comptime endian: std.builtin.Endian, reader: *std.io.BufferedReader) BitReader(endian) {
return .{ .reader = reader };
}
///////////////////////////////
test "api coverage" {
@@ -168,7 +166,7 @@ test "api coverage" {
const mem_le = [_]u8{ 0b00011101, 0b10010101 };
var mem_in_be = std.io.fixedBufferStream(&mem_be);
var bit_stream_be = bitReader(.big, mem_in_be.reader());
var bit_stream_be: bit_reader.Type(.big) = .init(mem_in_be.reader());
var out_bits: u16 = undefined;
@@ -205,7 +203,7 @@ test "api coverage" {
try expectError(error.EndOfStream, bit_stream_be.readBitsNoEof(u1, 1));
var mem_in_le = std.io.fixedBufferStream(&mem_le);
var bit_stream_le = bitReader(.little, mem_in_le.reader());
var bit_stream_le: bit_reader.Type(.little) = .init(mem_in_le.reader());
try expect(1 == try bit_stream_le.readBits(u2, 1, &out_bits));
try expect(out_bits == 1);
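A usage sketch matching the updated tests (the fixedBufferStream reader is passed to init exactly as above):

const bytes = [_]u8{0b10110010};
var fbs = std.io.fixedBufferStream(&bytes);
var bits: bit_reader.Type(.big) = .init(fbs.reader());
const top = try bits.readBitsNoEof(u2, 2);
try expect(top == 0b10); // big endian reads the most significant bits first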