std: combine BufferedReader into Reader

parent 2ed47f1ed8
commit 3c98e2c826
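The change repeated throughout this diff is mechanical: the separate `std.io.BufferedReader` type is removed and `std.io.Reader` itself becomes the buffered interface. Call sites that previously declared an undefined `BufferedReader` and then called `initFixed` now initialize a `Reader` in one step with `.fixed`, which also accepts a const slice without `@constCast`. A minimal sketch of the before/after pattern (the function and parameter names are illustrative, not from any one file in this diff):

    const std = @import("std");

    // Before this commit (two-step init of a separate type):
    //     var br: std.io.BufferedReader = undefined;
    //     br.initFixed(@constCast(data));
    // After this commit (one step, no @constCast for const data):
    fn readBigU32(data: []const u8) !u32 {
        var br: std.io.Reader = .fixed(data);
        return br.takeInt(u32, .big);
    }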
@@ -1082,11 +1082,9 @@ const CorrespondingLines = struct {
 at_eof: bool = false,
 span: SourceMappings.CorrespondingSpan,
 file: std.fs.File,
-buffered_reader: BufferedReaderType,
+buffered_reader: *std.io.Reader,
 code_page: SupportedCodePage,

-const BufferedReaderType = std.io.BufferedReader(512, std.fs.File.Reader);
-
 pub fn init(cwd: std.fs.Dir, err_details: ErrorDetails, line_for_comparison: []const u8, corresponding_span: SourceMappings.CorrespondingSpan, corresponding_file: []const u8) !CorrespondingLines {
 // We don't do line comparison for this error, so don't print the note if the line
 // number is different
@@ -1105,7 +1103,7 @@
 .buffered_reader = undefined,
 .code_page = err_details.code_page,
 };
-corresponding_lines.buffered_reader = BufferedReaderType{
+corresponding_lines.buffered_reader = .{
 .unbuffered_reader = corresponding_lines.file.reader(),
 };
 errdefer corresponding_lines.deinit();
@@ -390,7 +390,7 @@ fn receiveWasmMessage(
 gpa: Allocator,
 arena: Allocator,
 context: *Context,
-br: *std.io.BufferedReader,
+br: *std.io.Reader,
 result: *?Cache.Path,
 result_error_bundle: *std.zig.ErrorBundle,
 ) !void {
@@ -778,8 +778,7 @@ export fn decl_type_html(decl_index: Decl.Index) String {
 const Oom = error{OutOfMemory};

 fn unpackInner(tar_bytes: []u8) !void {
-var br: std.io.BufferedReader = undefined;
-br.initFixed(tar_bytes);
+var br: std.io.Reader = .fixed(tar_bytes);
 var file_name_buffer: [1024]u8 = undefined;
 var link_name_buffer: [1024]u8 = undefined;
 var it = std.tar.Iterator.init(&br, .{
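The new `unpackInner` above reads a tar archive held entirely in memory through a fixed reader. A short sketch of that usage, assuming `std.tar.Iterator` option field names implied by the buffers declared in the hunk (they are an assumption, not confirmed by this diff):

    const std = @import("std");

    // Sketch of the post-commit pattern in unpackInner: iterate a tar
    // archive from a byte slice through a fixed std.io.Reader.
    fn listTarEntries(tar_bytes: []u8) !void {
        var br: std.io.Reader = .fixed(tar_bytes);
        var file_name_buffer: [1024]u8 = undefined;
        var link_name_buffer: [1024]u8 = undefined;
        var it = std.tar.Iterator.init(&br, .{
            .file_name_buffer = &file_name_buffer,
            .link_name_buffer = &link_name_buffer,
        });
        while (try it.next()) |entry| {
            std.debug.print("{s}\n", .{entry.name});
        }
    }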
@@ -1238,8 +1238,7 @@ const MachODumper = struct {
 }

 fn parseRebaseInfo(ctx: ObjectContext, data: []const u8, rebases: *std.ArrayList(u64)) !void {
-var br: std.io.BufferedReader = undefined;
-br.initFixed(@constCast(data));
+var br: std.io.Reader = .fixed(data);

 var seg_id: ?u8 = null;
 var offset: u64 = 0;
@@ -1349,7 +1348,7 @@
 }

 fn parseBindInfo(ctx: ObjectContext, data: []const u8, bindings: *std.ArrayList(Binding)) !void {
-var br: std.io.BufferedReader = undefined;
+var br: std.io.Reader = undefined;
 br.initFixed(@constCast(data));

 var seg_id: ?u8 = null;
@@ -1447,7 +1446,7 @@
 defer arena.deinit();

 var exports: std.ArrayList(Export) = .init(arena.allocator());
-var br: std.io.BufferedReader = undefined;
+var br: std.io.Reader = undefined;
 br.initFixed(@constCast(data));
 try parseTrieNode(arena.allocator(), &br, "", &exports);

@@ -1517,7 +1516,7 @@

 fn parseTrieNode(
 arena: Allocator,
-br: *std.io.BufferedReader,
+br: *std.io.Reader,
 prefix: []const u8,
 exports: *std.ArrayList(Export),
 ) !void {
@@ -1705,7 +1704,7 @@ const ElfDumper = struct {

 fn parseAndDumpArchive(step: *Step, check: Check, bytes: []const u8) ![]const u8 {
 const gpa = step.owner.allocator;
-var br: std.io.BufferedReader = undefined;
+var br: std.io.Reader = undefined;
 br.initFixed(@constCast(bytes));

 if (!mem.eql(u8, try br.takeArray(elf.ARMAG.len), elf.ARMAG)) return error.InvalidArchiveMagicNumber;
@@ -1780,7 +1779,7 @@
 }

 fn parseSymtab(ctx: *ArchiveContext, data: []const u8, ptr_width: enum { p32, p64 }) !void {
-var br: std.io.BufferedReader = undefined;
+var br: std.io.Reader = undefined;
 br.initFixed(@constCast(data));
 const num = switch (ptr_width) {
 .p32 => try br.takeInt(u32, .big),
@@ -1851,7 +1850,7 @@

 fn parseAndDumpObject(step: *Step, check: Check, bytes: []const u8) ![]const u8 {
 const gpa = step.owner.allocator;
-var br: std.io.BufferedReader = undefined;
+var br: std.io.Reader = undefined;
 br.initFixed(@constCast(bytes));

 const hdr = try br.takeStruct(elf.Elf64_Ehdr);
@@ -2354,7 +2353,7 @@ const WasmDumper = struct {

 fn parseAndDump(step: *Step, check: Check, bytes: []const u8) ![]const u8 {
 const gpa = step.owner.allocator;
-var br: std.io.BufferedReader = undefined;
+var br: std.io.Reader = undefined;
 br.initFixed(@constCast(bytes));

 const buf = try br.takeArray(8);
@@ -2376,10 +2375,10 @@
 fn parseAndDumpInner(
 step: *Step,
 check: Check,
-br: *std.io.BufferedReader,
+br: *std.io.Reader,
 bw: *std.io.BufferedWriter,
 ) !void {
-var section_br: std.io.BufferedReader = undefined;
+var section_br: std.io.Reader = undefined;
 switch (check.kind) {
 .headers => while (br.takeEnum(std.wasm.Section, .little)) |section| {
 section_br.initFixed(try br.take(try br.takeLeb128(u32)));
@@ -2396,7 +2395,7 @@
 fn parseAndDumpSection(
 step: *Step,
 section: std.wasm.Section,
-br: *std.io.BufferedReader,
+br: *std.io.Reader,
 bw: *std.io.BufferedWriter,
 ) !void {
 try bw.print(
@@ -2445,7 +2444,7 @@
 }
 }

-fn parseSection(step: *Step, section: std.wasm.Section, br: *std.io.BufferedReader, entries: u32, bw: *std.io.BufferedWriter) !void {
+fn parseSection(step: *Step, section: std.wasm.Section, br: *std.io.Reader, entries: u32, bw: *std.io.BufferedWriter) !void {
 switch (section) {
 .type => {
 var i: u32 = 0;
@@ -2576,7 +2575,7 @@
 }
 }

-fn parseDumpType(step: *Step, comptime E: type, br: *std.io.BufferedReader, bw: *std.io.BufferedWriter) !E {
+fn parseDumpType(step: *Step, comptime E: type, br: *std.io.Reader, bw: *std.io.BufferedWriter) !E {
 const tag = br.takeEnum(E, .little) catch |err| switch (err) {
 error.InvalidEnumTag => return step.fail("invalid wasm type value", .{}),
 else => |e| return e,
@@ -2585,7 +2584,7 @@
 return tag;
 }

-fn parseDumpLimits(br: *std.io.BufferedReader, bw: *std.io.BufferedWriter) !void {
+fn parseDumpLimits(br: *std.io.Reader, bw: *std.io.BufferedWriter) !void {
 const flags = try br.takeLeb128(u8);
 const min = try br.takeLeb128(u32);

@@ -2593,7 +2592,7 @@
 if (flags != 0) try bw.print("max {x}\n", .{try br.takeLeb128(u32)});
 }

-fn parseDumpInit(step: *Step, br: *std.io.BufferedReader, bw: *std.io.BufferedWriter) !void {
+fn parseDumpInit(step: *Step, br: *std.io.Reader, bw: *std.io.BufferedWriter) !void {
 const opcode = br.takeEnum(std.wasm.Opcode, .little) catch |err| switch (err) {
 error.InvalidEnumTag => return step.fail("invalid wasm opcode", .{}),
 else => |e| return e,
@@ -2613,8 +2612,8 @@
 }

 /// https://webassembly.github.io/spec/core/appendix/custom.html
-fn parseDumpNames(step: *Step, br: *std.io.BufferedReader, bw: *std.io.BufferedWriter) !void {
-var subsection_br: std.io.BufferedReader = undefined;
+fn parseDumpNames(step: *Step, br: *std.io.Reader, bw: *std.io.BufferedWriter) !void {
+var subsection_br: std.io.Reader = undefined;
 while (br.seek < br.buffer.len) {
 switch (try parseDumpType(step, std.wasm.NameSubsection, br, bw)) {
 // The module name subsection ... consists of a single name
@@ -2662,7 +2661,7 @@
 }
 }

-fn parseDumpProducers(br: *std.io.BufferedReader, bw: *std.io.BufferedWriter) !void {
+fn parseDumpProducers(br: *std.io.Reader, bw: *std.io.BufferedWriter) !void {
 const field_count = try br.takeLeb128(u32);
 try bw.print(
 \\fields {d}
@@ -2690,7 +2689,7 @@
 }
 }

-fn parseDumpFeatures(br: *std.io.BufferedReader, bw: *std.io.BufferedWriter) !void {
+fn parseDumpFeatures(br: *std.io.Reader, bw: *std.io.BufferedWriter) !void {
 const feature_count = try br.takeLeb128(u32);
 try bw.print(
 \\features {d}
@@ -1087,10 +1087,9 @@ pub const Coff = struct {
 const pe_pointer_offset = 0x3C;
 const pe_magic = "PE\x00\x00";

-var reader: std.io.BufferedReader = undefined;
-reader.initFixed(data[pe_pointer_offset..]);
+var reader: std.io.Reader = .fixed(data[pe_pointer_offset..]);
 const coff_header_offset = try reader.readInt(u32, .little);
-reader.initFixed(data[coff_header_offset..]);
+reader = .fixed(data[coff_header_offset..]);
 const magic = try reader.peek(4);
 const is_image = mem.eql(u8, pe_magic, magic);

@@ -1121,16 +1120,15 @@
 if (@intFromEnum(DirectoryEntry.DEBUG) >= data_dirs.len) return null;

 const debug_dir = data_dirs[@intFromEnum(DirectoryEntry.DEBUG)];
-var reader: std.io.BufferedReader = undefined;
-reader.initFixed(self.data);
+var reader: std.io.Reader = .fixed(self.data);

 if (self.is_loaded) {
-reader.initFixed(self.data[debug_dir.virtual_address..]);
+reader = .fixed(self.data[debug_dir.virtual_address..]);
 } else {
 // Find what section the debug_dir is in, in order to convert the RVA to a file offset
 for (self.getSectionHeaders()) |*sect| {
 if (debug_dir.virtual_address >= sect.virtual_address and debug_dir.virtual_address < sect.virtual_address + sect.virtual_size) {
-reader.initFixed(self.data[sect.pointer_to_raw_data + (debug_dir.virtual_address - sect.virtual_address) ..]);
+reader = .fixed(self.data[sect.pointer_to_raw_data + (debug_dir.virtual_address - sect.virtual_address) ..]);
 break;
 }
 } else return error.InvalidDebugDirectory;
@@ -1144,7 +1142,7 @@
 const debug_dir_entry = try reader.takeStruct(DebugDirectoryEntry);
 if (debug_dir_entry.type == .CODEVIEW) {
 const dir_offset = if (self.is_loaded) debug_dir_entry.address_of_raw_data else debug_dir_entry.pointer_to_raw_data;
-reader.initFixed(self.data[dir_offset..]);
+reader = .fixed(self.data[dir_offset..]);
 break;
 }
 } else return null;
@@ -229,8 +229,7 @@ test "compress/decompress" {

 // compress original stream to compressed stream
 {
-var original: std.io.BufferedReader = undefined;
-original.initFixed(@constCast(data));
+var original: std.io.Reader = .fixed(data);
 var compressed: std.io.BufferedWriter = undefined;
 compressed.initFixed(&cmp_buf);
 var compress: Compress = .init(&original, .raw);
@@ -246,8 +245,7 @@
 }
 // decompress compressed stream to decompressed stream
 {
-var compressed: std.io.BufferedReader = undefined;
-compressed.initFixed(cmp_buf[0..compressed_size]);
+var compressed: std.io.Reader = .fixed(cmp_buf[0..compressed_size]);
 var decompressed: std.io.BufferedWriter = undefined;
 decompressed.initFixed(&dcm_buf);
 try Decompress.pump(container, &compressed, &decompressed);
@@ -267,8 +265,7 @@
 }
 // decompressor reader interface
 {
-var compressed: std.io.BufferedReader = undefined;
-compressed.initFixed(cmp_buf[0..compressed_size]);
+var compressed: std.io.Reader = .fixed(cmp_buf[0..compressed_size]);
 var dcm = Decompress.pump(container, &compressed);
 var dcm_rdr = dcm.reader();
 const n = try dcm_rdr.readAll(&dcm_buf);
@@ -287,8 +284,7 @@

 // compress original stream to compressed stream
 {
-var original: std.io.BufferedReader = undefined;
-original.initFixed(data);
+var original: std.io.Reader = .fixed(data);
 var compressed: std.io.BufferedWriter = undefined;
 compressed.initFixed(&cmp_buf);
 var cmp = try Compress.Huffman.init(container, &compressed);
@@ -303,8 +299,7 @@
 }
 // decompress compressed stream to decompressed stream
 {
-var compressed: std.io.BufferedReader = undefined;
-compressed.initFixed(cmp_buf[0..compressed_size]);
+var compressed: std.io.Reader = .fixed(cmp_buf[0..compressed_size]);
 var decompressed: std.io.BufferedWriter = undefined;
 decompressed.initFixed(&dcm_buf);
 try Decompress.pump(container, &compressed, &decompressed);
@@ -323,8 +318,7 @@

 // compress original stream to compressed stream
 {
-var original: std.io.BufferedReader = undefined;
-original.initFixed(data);
+var original: std.io.Reader = .fixed(data);
 var compressed: std.io.BufferedWriter = undefined;
 compressed.initFixed(&cmp_buf);
 var cmp = try Compress.SimpleCompressor(.store, container).init(&compressed);
@@ -340,8 +334,7 @@
 }
 // decompress compressed stream to decompressed stream
 {
-var compressed: std.io.BufferedReader = undefined;
-compressed.initFixed(cmp_buf[0..compressed_size]);
+var compressed: std.io.Reader = .fixed(cmp_buf[0..compressed_size]);
 var decompressed: std.io.BufferedWriter = undefined;
 decompressed.initFixed(&dcm_buf);
 try Decompress.pump(container, &compressed, &decompressed);
@@ -353,8 +346,7 @@
 }

 fn testDecompress(comptime container: Container, compressed: []const u8, expected_plain: []const u8) !void {
-var in: std.io.BufferedReader = undefined;
-in.initFixed(compressed);
+var in: std.io.Reader = .fixed(compressed);
 var out: std.io.AllocatingWriter = undefined;
 out.init(testing.allocator);
 defer out.deinit();
@@ -502,8 +494,7 @@ fn testInterface(comptime pkg: type, gzip_data: []const u8, plain_data: []const
 var plain: std.io.BufferedWriter = undefined;
 plain.initFixed(&buffer2);

-var in: std.io.BufferedReader = undefined;
-in.initFixed(gzip_data);
+var in: std.io.Reader = .fixed(gzip_data);
 try pkg.decompress(&in, &plain);
 try testing.expectEqualSlices(u8, plain_data, plain.getWritten());
 }
@@ -515,12 +506,10 @@
 var compressed: std.io.BufferedWriter = undefined;
 compressed.initFixed(&buffer1);

-var in: std.io.BufferedReader = undefined;
-in.initFixed(plain_data);
+var in: std.io.Reader = .fixed(plain_data);
 try pkg.compress(&in, &compressed, .{});

-var compressed_br: std.io.BufferedReader = undefined;
-compressed_br.initFixed(&buffer1);
+var compressed_br: std.io.Reader = .fixed(&buffer1);
 try pkg.decompress(&compressed_br, &plain);
 try testing.expectEqualSlices(u8, plain_data, plain.getWritten());
 }
@@ -532,14 +521,12 @@
 var compressed: std.io.BufferedWriter = undefined;
 compressed.initFixed(&buffer1);

-var in: std.io.BufferedReader = undefined;
-in.initFixed(plain_data);
+var in: std.io.Reader = .fixed(plain_data);
 var cmp = try pkg.compressor(&compressed, .{});
 try cmp.compress(&in);
 try cmp.finish();

-var compressed_br: std.io.BufferedReader = undefined;
-compressed_br.initFixed(&buffer1);
+var compressed_br: std.io.Reader = .fixed(&buffer1);
 var dcp = pkg.decompressor(&compressed_br);
 try dcp.decompress(&plain);
 try testing.expectEqualSlices(u8, plain_data, plain.getWritten());
@@ -554,12 +541,10 @@
 var compressed: std.io.BufferedWriter = undefined;
 compressed.initFixed(&buffer1);

-var in: std.io.BufferedReader = undefined;
-in.initFixed(plain_data);
+var in: std.io.Reader = .fixed(plain_data);
 try pkg.huffman.compress(&in, &compressed);

-var compressed_br: std.io.BufferedReader = undefined;
-compressed_br.initFixed(&buffer1);
+var compressed_br: std.io.Reader = .fixed(&buffer1);
 try pkg.decompress(&compressed_br, &plain);
 try testing.expectEqualSlices(u8, plain_data, plain.getWritten());
 }
@@ -571,14 +556,12 @@
 var compressed: std.io.BufferedWriter = undefined;
 compressed.initFixed(&buffer1);

-var in: std.io.BufferedReader = undefined;
-in.initFixed(plain_data);
+var in: std.io.Reader = .fixed(plain_data);
 var cmp = try pkg.huffman.compressor(&compressed);
 try cmp.compress(&in);
 try cmp.finish();

-var compressed_br: std.io.BufferedReader = undefined;
-compressed_br.initFixed(&buffer1);
+var compressed_br: std.io.Reader = .fixed(&buffer1);
 try pkg.decompress(&compressed_br, &plain);
 try testing.expectEqualSlices(u8, plain_data, plain.getWritten());
 }
@@ -593,12 +576,10 @@
 var compressed: std.io.BufferedWriter = undefined;
 compressed.initFixed(&buffer1);

-var in: std.io.BufferedReader = undefined;
-in.initFixed(plain_data);
+var in: std.io.Reader = .fixed(plain_data);
 try pkg.store.compress(&in, &compressed);

-var compressed_br: std.io.BufferedReader = undefined;
-compressed_br.initFixed(&buffer1);
+var compressed_br: std.io.Reader = .fixed(&buffer1);
 try pkg.decompress(&compressed_br, &plain);
 try testing.expectEqualSlices(u8, plain_data, plain.getWritten());
 }
@@ -610,14 +591,12 @@
 var compressed: std.io.BufferedWriter = undefined;
 compressed.initFixed(&buffer1);

-var in: std.io.BufferedReader = undefined;
-in.initFixed(plain_data);
+var in: std.io.Reader = .fixed(plain_data);
 var cmp = try pkg.store.compressor(&compressed);
 try cmp.compress(&in);
 try cmp.finish();

-var compressed_br: std.io.BufferedReader = undefined;
-compressed_br.initFixed(&buffer1);
+var compressed_br: std.io.Reader = .fixed(&buffer1);
 try pkg.decompress(&compressed_br, &plain);
 try testing.expectEqualSlices(u8, plain_data, plain.getWritten());
 }
@@ -650,8 +629,7 @@ test "zlib should not overshoot" {
 0x03, 0x00, 0x8b, 0x61, 0x0f, 0xa4, 0x52, 0x5a, 0x94, 0x12,
 };

-var stream: std.io.BufferedReader = undefined;
-stream.initFixed(&data);
+var stream: std.io.Reader = .fixed(&data);
 const reader = stream.reader();

 var dcp = Decompress.init(reader);
@@ -59,7 +59,7 @@ const huffman = flate.huffman;
 lookup: Lookup = .{},
 tokens: Tokens = .{},
 /// Asserted to have a buffer capacity of at least `flate.max_window_len`.
-input: *std.io.BufferedReader,
+input: *std.io.Reader,
 block_writer: BlockWriter,
 level: LevelArgs,
 hasher: Container.Hasher,
@@ -69,7 +69,7 @@ hasher: Container.Hasher,
 prev_match: ?Token = null,
 prev_literal: ?u8 = null,

-pub fn readable(c: *Compress, buffer: []u8) std.io.BufferedReader {
+pub fn readable(c: *Compress, buffer: []u8) std.io.Reader {
 return .{
 .unbuffered_reader = .{
 .context = c,
@@ -126,7 +126,7 @@ const LevelArgs = struct {
 }
 };

-pub fn init(input: *std.io.BufferedReader, options: Options) Compress {
+pub fn init(input: *std.io.Reader, options: Options) Compress {
 return .{
 .input = input,
 .block_writer = undefined,
@@ -1147,7 +1147,7 @@ test "file tokenization" {
 const data = case.data;

 for (levels, 0..) |level, i| { // for each compression level
-var original: std.io.BufferedReader = undefined;
+var original: std.io.Reader = undefined;
 original.initFixed(data);

 // buffer for decompressed data
@@ -1222,7 +1222,7 @@ test "store simple compressor" {
 //0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x21,
 };

-var fbs: std.io.BufferedReader = undefined;
+var fbs: std.io.Reader = undefined;
 fbs.initFixed(data);
 var al = std.ArrayList(u8).init(testing.allocator);
 defer al.deinit();
@@ -24,7 +24,7 @@ const Token = @import("Token.zig");
 const testing = std.testing;
 const Decompress = @This();

-input: *std.io.BufferedReader,
+input: *std.io.Reader,
 // Hashes, produces checksum, of uncompressed data for gzip/zlib footer.
 hasher: Container.Hasher,

@@ -67,7 +67,7 @@ pub const Error = Container.Error || error{
 MissingEndOfBlockCode,
 };

-pub fn init(input: *std.io.BufferedReader, container: Container) Decompress {
+pub fn init(input: *std.io.Reader, container: Container) Decompress {
 return .{
 .input = input,
 .hasher = .init(container),
@@ -361,7 +361,7 @@ pub fn reader(self: *Decompress) std.io.Reader {
 };
 }

-pub fn readable(self: *Decompress, buffer: []u8) std.io.BufferedReader {
+pub fn readable(self: *Decompress, buffer: []u8) std.io.Reader {
 return reader(self).buffered(buffer);
 }
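Taken together, `init` and `readable` give the post-commit calling pattern for decompressing a buffer held in memory; the test "reading into empty buffer" later in this diff uses exactly this shape. A condensed sketch, with `compressed` standing in for a raw-deflate byte slice:

    var in: std.io.Reader = .fixed(compressed);
    var decomp: Decompress = .init(&in, .raw);
    // Adapt the decompressor into a plain std.io.Reader; per the test
    // below, even a zero-length buffer is accepted.
    var decompress_br = decomp.readable(&.{});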
@@ -727,7 +727,7 @@ test "decompress" {
 },
 };
 for (cases) |c| {
-var fb: std.io.BufferedReader = undefined;
+var fb: std.io.Reader = undefined;
 fb.initFixed(@constCast(c.in));
 var aw: std.io.AllocatingWriter = undefined;
 aw.init(testing.allocator);
@@ -788,7 +788,7 @@ test "gzip decompress" {
 },
 };
 for (cases) |c| {
-var fb: std.io.BufferedReader = undefined;
+var fb: std.io.Reader = undefined;
 fb.initFixed(@constCast(c.in));
 var aw: std.io.AllocatingWriter = undefined;
 aw.init(testing.allocator);
@@ -818,7 +818,7 @@ test "zlib decompress" {
 },
 };
 for (cases) |c| {
-var fb: std.io.BufferedReader = undefined;
+var fb: std.io.Reader = undefined;
 fb.initFixed(@constCast(c.in));
 var aw: std.io.AllocatingWriter = undefined;
 aw.init(testing.allocator);
@@ -880,7 +880,7 @@ test "fuzzing tests" {
 };

 inline for (cases, 0..) |c, case_no| {
-var in: std.io.BufferedReader = undefined;
+var in: std.io.Reader = undefined;
 in.initFixed(@constCast(@embedFile("testdata/fuzz/" ++ c.input ++ ".input")));
 var aw: std.io.AllocatingWriter = undefined;
 aw.init(testing.allocator);
@@ -903,7 +903,7 @@ test "bug 18966" {
 const input = @embedFile("testdata/fuzz/bug_18966.input");
 const expect = @embedFile("testdata/fuzz/bug_18966.expect");

-var in: std.io.BufferedReader = undefined;
+var in: std.io.Reader = undefined;
 in.initFixed(@constCast(input));
 var aw: std.io.AllocatingWriter = undefined;
 aw.init(testing.allocator);
@@ -921,7 +921,7 @@ test "reading into empty buffer" {
 0b0000_0001, 0b0000_1100, 0x00, 0b1111_0011, 0xff, // deflate fixed buffer header len, nlen
 'H', 'e', 'l', 'l', 'o', ' ', 'w', 'o', 'r', 'l', 'd', 0x0a, // non compressed data
 };
-var in: std.io.BufferedReader = undefined;
+var in: std.io.Reader = undefined;
 in.initFixed(@constCast(input));
 var decomp: Decompress = .init(&in, .raw);
 var decompress_br = decomp.readable(&.{});
@@ -11,7 +11,7 @@ pub const RangeDecoder = struct {
 range: u32,
 code: u32,

-pub fn init(rd: *RangeDecoder, br: *std.io.BufferedReader) std.io.Reader.Error!usize {
+pub fn init(rd: *RangeDecoder, br: *std.io.Reader) std.io.Reader.Error!usize {
 const reserved = try br.takeByte();
 if (reserved != 0) return error.CorruptInput;
 rd.* = .{
@@ -25,14 +25,14 @@
 return self.code == 0;
 }

-inline fn normalize(self: *RangeDecoder, br: *std.io.BufferedReader) !void {
+inline fn normalize(self: *RangeDecoder, br: *std.io.Reader) !void {
 if (self.range < 0x0100_0000) {
 self.range <<= 8;
 self.code = (self.code << 8) ^ @as(u32, try br.takeByte());
 }
 }

-inline fn getBit(self: *RangeDecoder, br: *std.io.BufferedReader) !bool {
+inline fn getBit(self: *RangeDecoder, br: *std.io.Reader) !bool {
 self.range >>= 1;

 const bit = self.code >= self.range;
@@ -43,7 +43,7 @@
 return bit;
 }

-pub fn get(self: *RangeDecoder, br: *std.io.BufferedReader, count: usize) !u32 {
+pub fn get(self: *RangeDecoder, br: *std.io.Reader, count: usize) !u32 {
 var result: u32 = 0;
 var i: usize = 0;
 while (i < count) : (i += 1)
@@ -51,7 +51,7 @@
 return result;
 }

-pub inline fn decodeBit(self: *RangeDecoder, br: *std.io.BufferedReader, prob: *u16, update: bool) !bool {
+pub inline fn decodeBit(self: *RangeDecoder, br: *std.io.Reader, prob: *u16, update: bool) !bool {
 const bound = (self.range >> 11) * prob.*;

 if (self.code < bound) {
@@ -74,7 +74,7 @@

 fn parseBitTree(
 self: *RangeDecoder,
-br: *std.io.BufferedReader,
+br: *std.io.Reader,
 num_bits: u5,
 probs: []u16,
 update: bool,
@@ -90,7 +90,7 @@

 pub fn parseReverseBitTree(
 self: *RangeDecoder,
-br: *std.io.BufferedReader,
+br: *std.io.Reader,
 num_bits: u5,
 probs: []u16,
 offset: usize,
@@ -117,7 +117,7 @@ pub const LenDecoder = struct {

 pub fn decode(
 self: *LenDecoder,
-br: *std.io.BufferedReader,
+br: *std.io.Reader,
 decoder: *RangeDecoder,
 pos_state: usize,
 update: bool,
@@ -148,7 +148,7 @@ pub fn BitTree(comptime num_bits: usize) type {

 pub fn parse(
 self: *Self,
-br: *std.io.BufferedReader,
+br: *std.io.Reader,
 decoder: *RangeDecoder,
 update: bool,
 ) !u32 {
@@ -157,7 +157,7 @@

 pub fn parseReverse(
 self: *Self,
-br: *std.io.BufferedReader,
+br: *std.io.Reader,
 decoder: *RangeDecoder,
 update: bool,
 ) !u32 {
@@ -222,7 +222,7 @@ pub const Decode = struct {
 dict_size: u32,
 unpacked_size: ?u64,

-pub fn readHeader(br: *std.io.BufferedReader, options: Options) std.io.Reader.Error!Params {
+pub fn readHeader(br: *std.io.Reader, options: Options) std.io.Reader.Error!Params {
 var props = try br.readByte();
 if (props >= 225) {
 return error.CorruptInput;
@@ -319,7 +319,7 @@
 fn processNextInner(
 self: *Decode,
 allocator: Allocator,
-br: *std.io.BufferedReader,
+br: *std.io.Reader,
 bw: *std.io.BufferedWriter,
 buffer: anytype,
 decoder: *RangeDecoder,
@@ -416,7 +416,7 @@
 fn processNext(
 self: *Decode,
 allocator: Allocator,
-br: *std.io.BufferedReader,
+br: *std.io.Reader,
 bw: *std.io.BufferedWriter,
 buffer: anytype,
 decoder: *RangeDecoder,
@@ -428,7 +428,7 @@
 pub fn process(
 self: *Decode,
 allocator: Allocator,
-br: *std.io.BufferedReader,
+br: *std.io.Reader,
 bw: *std.io.BufferedWriter,
 buffer: anytype,
 decoder: *RangeDecoder,
@@ -460,7 +460,7 @@

 fn decodeLiteral(
 self: *Decode,
-br: *std.io.BufferedReader,
+br: *std.io.Reader,
 buffer: anytype,
 decoder: *RangeDecoder,
 update: bool,
@@ -502,7 +502,7 @@

 fn decodeDistance(
 self: *Decode,
-br: *std.io.BufferedReader,
+br: *std.io.Reader,
 decoder: *RangeDecoder,
 length: usize,
 update: bool,
@@ -542,19 +542,19 @@ pub const Decompress = struct {
 error{ CorruptInput, EndOfStream, Overflow };

 allocator: Allocator,
-in_reader: *std.io.BufferedReader,
+in_reader: *std.io.Reader,
 to_read: std.ArrayListUnmanaged(u8),

 buffer: LzCircularBuffer,
 decoder: RangeDecoder,
 state: Decode,

-pub fn initOptions(allocator: Allocator, br: *std.io.BufferedReader, options: Decode.Options) !Decompress {
+pub fn initOptions(allocator: Allocator, br: *std.io.Reader, options: Decode.Options) !Decompress {
 const params = try Decode.Params.readHeader(br, options);
 return init(allocator, br, params, options.memlimit);
 }

-pub fn init(allocator: Allocator, source: *std.io.BufferedReader, params: Decode.Params, memlimit: ?usize) !Decompress {
+pub fn init(allocator: Allocator, source: *std.io.Reader, params: Decode.Params, memlimit: ?usize) !Decompress {
 return .{
 .allocator = allocator,
 .in_reader = source,
@@ -839,7 +839,7 @@ test "Vec2D get addition overflow" {

 fn testDecompress(compressed: []const u8) ![]u8 {
 const allocator = std.testing.allocator;
-var br: std.io.BufferedReader = undefined;
+var br: std.io.Reader = undefined;
 br.initFixed(compressed);
 var decompressor = try Decompress.initOptions(allocator, &br, .{});
 defer decompressor.deinit();
@@ -927,7 +927,7 @@ test "too small uncompressed size in header" {

 test "reading one byte" {
 const compressed = @embedFile("testdata/good-known_size-with_eopm.lzma");
-var br: std.io.BufferedReader = undefined;
+var br: std.io.Reader = undefined;
 br.initFixed(compressed);
 var decompressor = try Decompress.initOptions(std.testing.allocator, &br, .{});
 defer decompressor.deinit();
@@ -2,7 +2,7 @@ const std = @import("../std.zig");
 const Allocator = std.mem.Allocator;
 const lzma = std.compress.lzma;

-pub fn decompress(gpa: Allocator, reader: *std.io.BufferedReader, writer: *std.io.BufferedWriter) std.io.Reader.StreamError!void {
+pub fn decompress(gpa: Allocator, reader: *std.io.Reader, writer: *std.io.BufferedWriter) std.io.Reader.StreamError!void {
 var decoder = try Decode.init(gpa);
 defer decoder.deinit(gpa);
 return decoder.decompress(gpa, reader, writer);
@@ -33,7 +33,7 @@ pub const Decode = struct {
 pub fn decompress(
 self: *Decode,
 allocator: Allocator,
-reader: *std.io.BufferedReader,
+reader: *std.io.Reader,
 writer: *std.io.BufferedWriter,
 ) !void {
 var accum = LzAccumBuffer.init(std.math.maxInt(usize));
@@ -56,7 +56,7 @@
 fn parseLzma(
 self: *Decode,
 allocator: Allocator,
-br: *std.io.BufferedReader,
+br: *std.io.Reader,
 writer: *std.io.BufferedWriter,
 accum: *LzAccumBuffer,
 status: u8,
@@ -149,7 +149,7 @@

 fn parseUncompressed(
 allocator: Allocator,
-reader: *std.io.BufferedReader,
+reader: *std.io.Reader,
 writer: *std.io.BufferedWriter,
 accum: *LzAccumBuffer,
 reset_dict: bool,
@@ -276,7 +276,7 @@ test decompress {
 0x01, 0x00, 0x05, 0x48, 0x65, 0x6C, 0x6C, 0x6F, 0x0A, 0x02,
 0x00, 0x06, 0x57, 0x6F, 0x72, 0x6C, 0x64, 0x21, 0x0A, 0x00,
 };
-var stream: std.io.BufferedReader = undefined;
+var stream: std.io.Reader = undefined;
 stream.initFixed(&compressed);
 var decomp: std.io.AllocatingWriter = undefined;
 const decomp_bw = decomp.init(std.testing.allocator);
@@ -3,10 +3,9 @@ const testing = std.testing;
 const xz = std.compress.xz;

 fn decompress(data: []const u8) ![]u8 {
-var in_stream: std.io.BufferedReader = undefined;
-in_stream.initFixed(data);
+var r: std.io.Reader = .fixed(data);

-var xz_stream = try xz.decompress(testing.allocator, &in_stream);
+var xz_stream = try xz.decompress(testing.allocator, &r);
 defer xz_stream.deinit();

 return xz_stream.reader().readAllAlloc(testing.allocator, std.math.maxInt(usize));
@@ -82,8 +82,7 @@ fn testDecompress(gpa: std.mem.Allocator, compressed: []const u8) ![]u8 {
 var out: std.ArrayListUnmanaged(u8) = .empty;
 defer out.deinit(gpa);

-var in: std.io.BufferedReader = undefined;
-in.initFixed(@constCast(compressed));
+var in: std.io.Reader = .fixed(compressed);
 var zstd_stream: Decompress = .init(&in, .{});
 try zstd_stream.reader().readRemainingArrayList(gpa, null, &out, .unlimited, default_window_len);

@@ -103,8 +102,7 @@ fn testExpectDecompressError(err: anyerror, compressed: []const u8) !void {
 var out: std.ArrayListUnmanaged(u8) = .empty;
 defer out.deinit(gpa);

-var in: std.io.BufferedReader = undefined;
-in.initFixed(@constCast(compressed));
+var in: std.io.Reader = .fixed(compressed);
 var zstd_stream: Decompress = .init(&in, .{});
 try std.testing.expectError(
 error.ReadFailed,
@@ -4,10 +4,9 @@ const assert = std.debug.assert;
 const Reader = std.io.Reader;
 const Limit = std.io.Limit;
 const BufferedWriter = std.io.BufferedWriter;
-const BufferedReader = std.io.BufferedReader;
 const zstd = @import("../zstd.zig");

-input: *BufferedReader,
+input: *Reader,
 state: State,
 verify_checksum: bool,
 err: ?Error = null,
@@ -63,7 +62,7 @@ pub const Error = error{
 WindowSizeUnknown,
 };

-pub fn init(input: *BufferedReader, options: Options) Decompress {
+pub fn init(input: *Reader, options: Options) Decompress {
 return .{
 .input = input,
 .state = .new_frame,
@@ -305,7 +304,7 @@ pub const Frame = struct {

 pub const DecodeError = Reader.Error || error{ReservedBitSet};

-pub fn decode(in: *BufferedReader) DecodeError!Header {
+pub fn decode(in: *Reader) DecodeError!Header {
 const descriptor: Descriptor = @bitCast(try in.takeByte());

 if (descriptor.reserved) return error.ReservedBitSet;
@@ -446,7 +445,7 @@
 /// FSE tables from `in`.
 pub fn prepare(
 self: *Decode,
-in: *BufferedReader,
+in: *Reader,
 remaining: *Limit,
 literals: LiteralsSection,
 sequences_header: SequencesSection.Header,
@@ -536,7 +535,7 @@
 /// TODO: don't use `@field`
 fn updateFseTable(
 self: *Decode,
-in: *BufferedReader,
+in: *Reader,
 remaining: *Limit,
 comptime choice: DataType,
 mode: SequencesSection.Header.Mode,
@@ -858,7 +857,7 @@ pub const LiteralsSection = struct {
 compressed_size: ?u18,

 /// Decode a literals section header.
-pub fn decode(in: *BufferedReader, remaining: *Limit) !Header {
+pub fn decode(in: *Reader, remaining: *Limit) !Header {
 remaining.* = remaining.subtract(1) orelse return error.EndOfStream;
 const byte0 = try in.takeByte();
 const block_type: BlockType = @enumFromInt(byte0 & 0b11);
@@ -965,7 +964,7 @@
 MissingStartBit,
 };

-pub fn decode(in: *BufferedReader, remaining: *Limit) HuffmanTree.DecodeError!HuffmanTree {
+pub fn decode(in: *Reader, remaining: *Limit) HuffmanTree.DecodeError!HuffmanTree {
 remaining.* = remaining.subtract(1) orelse return error.EndOfStream;
 const header = try in.takeByte();
 if (header < 128) {
@@ -976,7 +975,7 @@
 }

 fn decodeDirect(
-in: *BufferedReader,
+in: *Reader,
 remaining: *Limit,
 encoded_symbol_count: usize,
 ) HuffmanTree.DecodeError!HuffmanTree {
@@ -993,7 +992,7 @@
 }

 fn decodeFse(
-in: *BufferedReader,
+in: *Reader,
 remaining: *Limit,
 compressed_size: usize,
 ) HuffmanTree.DecodeError!HuffmanTree {
@@ -1162,7 +1161,7 @@
 MissingStartBit,
 };

-pub fn decode(in: *BufferedReader, remaining: *Limit, buffer: []u8) DecodeError!LiteralsSection {
+pub fn decode(in: *Reader, remaining: *Limit, buffer: []u8) DecodeError!LiteralsSection {
 const header = try Header.decode(in, remaining);
 switch (header.block_type) {
 .raw => {
@@ -1233,7 +1232,7 @@ pub const SequencesSection = struct {
 ReadFailed,
 };

-pub fn decode(in: *BufferedReader, remaining: *Limit) DecodeError!Header {
+pub fn decode(in: *Reader, remaining: *Limit) DecodeError!Header {
 var sequence_count: u24 = undefined;

 remaining.* = remaining.subtract(1) orelse return error.EndOfStream;
@@ -154,8 +154,7 @@ pub const Tag = struct {

 test Tag {
 const buf = [_]u8{0xa3};
-var stream: std.io.BufferedReader = undefined;
-stream.initFixed(&buf);
+var stream: std.io.Reader = .fixed(&buf);
 const t = Tag.decode(stream.reader());
 try std.testing.expectEqual(Tag.init(@enumFromInt(3), true, .context_specific), t);
 }
@@ -185,8 +184,7 @@ pub const Element = struct {
 /// - Ensures length is within `bytes`
 /// - Ensures length is less than `std.math.maxInt(Index)`
 pub fn decode(bytes: []const u8, index: Index) DecodeError!Element {
-var reader: std.io.BufferedReader = undefined;
-reader.initFixed(bytes[index..]);
+var reader: std.io.Reader = .fixed(bytes[index..]);

 const tag = try Tag.decode(reader);
 const size_or_len_size = try reader.readByte();
@@ -155,20 +155,20 @@ pub fn Ecdsa(comptime Curve: type, comptime Hash: type) type {
 }

 // Read a DER-encoded integer.
-// Asserts `br` has storage capacity >= 2.
-fn readDerInt(out: []u8, br: *std.io.BufferedReader) EncodingError!void {
-const buf = br.take(2) catch return error.InvalidEncoding;
+// Asserts `r` has storage capacity >= 2.
+fn readDerInt(out: []u8, r: *std.io.Reader) EncodingError!void {
+const buf = r.take(2) catch return error.InvalidEncoding;
 if (buf[0] != 0x02) return error.InvalidEncoding;
 var expected_len: usize = buf[1];
 if (expected_len == 0 or expected_len > 1 + out.len) return error.InvalidEncoding;
 var has_top_bit = false;
 if (expected_len == 1 + out.len) {
-if ((br.takeByte() catch return error.InvalidEncoding) != 0) return error.InvalidEncoding;
+if ((r.takeByte() catch return error.InvalidEncoding) != 0) return error.InvalidEncoding;
 expected_len -= 1;
 has_top_bit = true;
 }
 const out_slice = out[out.len - expected_len ..];
-br.readSlice(out_slice) catch return error.InvalidEncoding;
+r.readSlice(out_slice) catch return error.InvalidEncoding;
 if (@intFromBool(has_top_bit) != out[0] >> 7) return error.InvalidEncoding;
 }
@@ -176,14 +176,13 @@ pub fn Ecdsa(comptime Curve: type, comptime Hash: type) type {
 /// Returns InvalidEncoding if the DER encoding is invalid.
 pub fn fromDer(der: []const u8) EncodingError!Signature {
 if (der.len < 2) return error.InvalidEncoding;
-var br: std.io.BufferedReader = undefined;
-br.initFixed(@constCast(der));
-const buf = br.take(2) catch return error.InvalidEncoding;
+var r: std.io.Reader = .fixed(der);
+const buf = r.take(2) catch return error.InvalidEncoding;
 if (buf[0] != 0x30 or @as(usize, buf[1]) + 2 != der.len) return error.InvalidEncoding;
 var sig: Signature = mem.zeroInit(Signature, .{});
-try readDerInt(&sig.r, &br);
-try readDerInt(&sig.s, &br);
-if (br.seek != der.len) return error.InvalidEncoding;
+try readDerInt(&sig.r, &r);
+try readDerInt(&sig.s, &r);
+if (r.seek != der.len) return error.InvalidEncoding;
 return sig;
 }
 };
@@ -655,7 +655,7 @@ pub const Decoder = struct {
 }

 /// Use this function to increase `their_end`.
-pub fn readAtLeast(d: *Decoder, stream: *std.io.BufferedReader, their_amt: usize) !void {
+pub fn readAtLeast(d: *Decoder, stream: *std.io.Reader, their_amt: usize) !void {
 assert(!d.disable_reads);
 const existing_amt = d.cap - d.idx;
 d.their_end = d.idx + their_amt;
@@ -672,7 +672,7 @@

 /// Same as `readAtLeast` but also increases `our_end` by exactly `our_amt`.
 /// Use when `our_amt` is calculated by us, not by them.
-pub fn readAtLeastOurAmt(d: *Decoder, stream: *std.io.BufferedReader, our_amt: usize) !void {
+pub fn readAtLeastOurAmt(d: *Decoder, stream: *std.io.Reader, our_amt: usize) !void {
 assert(!d.disable_reads);
 try readAtLeast(d, stream, our_amt);
 d.our_end = d.idx + our_amt;
@@ -21,7 +21,7 @@ const array = tls.array;
 /// here via `reader`.
 ///
 /// The buffer is asserted to have capacity at least `min_buffer_len`.
-input: *std.io.BufferedReader,
+input: *std.io.Reader,

 /// The encrypted stream from the client to the server. Bytes are pushed here
 /// via `writer`.
@@ -85,7 +85,7 @@ pub const SslKeyLog = struct {
 }
 };

-/// The `std.io.BufferedReader` supplied to `init` requires a buffer capacity
+/// The `std.io.Reader` supplied to `init` requires a buffer capacity
 /// at least this amount.
 pub const min_buffer_len = tls.max_ciphertext_record_len;

@@ -175,7 +175,7 @@ const InitError = error{
 /// `input` is asserted to have buffer capacity at least `min_buffer_len`.
 pub fn init(
 client: *Client,
-input: *std.io.BufferedReader,
+input: *std.io.Reader,
 output: *std.io.BufferedWriter,
 options: Options,
 ) InitError!void {
@@ -2235,8 +2235,7 @@ pub const ElfModule = struct {

 const section_bytes = try chopSlice(mapped_mem, shdr.sh_offset, shdr.sh_size);
 sections[section_index.?] = if ((shdr.sh_flags & elf.SHF_COMPRESSED) > 0) blk: {
-var section_reader: std.io.BufferedReader = undefined;
-section_reader.initFixed(@constCast(section_bytes));
+var section_reader: std.io.Reader = .fixed(section_bytes);
 const chdr = section_reader.takeStruct(elf.Chdr) catch continue;
 if (chdr.ch_type != .ZLIB) continue;
 const ch_size = chdr.ch_size;
@@ -136,7 +136,7 @@ pub const Instruction = union(Opcode) {
 },

 pub fn read(
-reader: *std.io.BufferedReader,
+reader: *std.io.Reader,
 addr_size_bytes: u8,
 endian: std.builtin.Endian,
 ) !Instruction {
@@ -774,8 +774,7 @@ pub fn StackMachine(comptime options: Options) type {
 }

 fn nextLeb128(expression: []const u8, i: *usize, comptime I: type) !I {
-var br: std.io.BufferedReader = undefined;
-br.initFixed(@constCast(expression));
+var br: std.io.Reader = .fixed(expression);
 br.seek = i.*;
 assert(br.seek <= br.end);
 const result = br.takeLeb128(I) catch |err| switch (err) {
@@ -1,6 +1,6 @@
 //! Optimized for performance in debug builds.

-// TODO I'm pretty sure this can be deleted thanks to the new std.io.BufferedReader semantics
+// TODO I'm pretty sure this can be deleted thanks to the new std.io.Reader semantics

 const std = @import("../std.zig");
 const MemoryAccessor = std.debug.MemoryAccessor;
@@ -52,7 +52,7 @@ pub fn readIntChecked(
 }

 pub fn readLeb128(fbr: *FixedBufferReader, comptime T: type) Error!T {
-var br: std.io.BufferedReader = undefined;
+var br: std.io.Reader = undefined;
 br.initFixed(@constCast(fbr.buf));
 br.seek = fbr.pos;
 const result = br.takeLeb128(T);
@@ -2025,9 +2025,10 @@ pub const VirtualMachine = struct {
 assert(self.cie_row == null);
 if (pc < fde.pc_begin or pc >= fde.pc_begin + fde.pc_range) return error.AddressOutOfRange;

-var readers: [2]std.io.BufferedReader = undefined;
-readers[0].initFixed(@constCast(cie.initial_instructions));
-readers[1].initFixed(@constCast(fde.instructions));
+var readers: [2]std.io.Reader = .{
+.fixed(cie.initial_instructions),
+.fixed(fde.instructions),
+};

 var prev_row: Row = self.current_row;
 for (&readers, [2]bool{ true, false }) |*reader, is_initial| {
@@ -510,10 +510,10 @@ pub const Header = struct {

 pub const ReadError = std.io.Reader.Error || ParseError;

-pub fn read(br: *std.io.BufferedReader) ReadError!Header {
-const buf = try br.peek(@sizeOf(Elf64_Ehdr));
+pub fn read(r: *std.io.Reader) ReadError!Header {
+const buf = try r.peek(@sizeOf(Elf64_Ehdr));
 const result = try parse(@ptrCast(buf));
-br.toss(if (result.is_64) @sizeOf(Elf64_Ehdr) else @sizeOf(Elf32_Ehdr));
+r.toss(if (result.is_64) @sizeOf(Elf64_Ehdr) else @sizeOf(Elf32_Ehdr));
 return result;
 }
@@ -904,6 +904,7 @@ pub const Reader = struct {
 size: ?u64 = null,
 size_err: ?GetEndPosError = null,
 seek_err: ?Reader.SeekError = null,
+interface: std.io.Reader,

 pub const SeekError = File.SeekError || error{
 /// Seeking fell back to reading, and reached the end before the requested seek position.
@@ -940,18 +941,24 @@
 }
 };

-pub fn interface(r: *Reader) std.io.Reader {
+pub fn initInterface(buffer: []u8) std.io.Reader {
 return .{
-.context = r,
+.context = undefined,
 .vtable = &.{
-.read = Reader.stream,
+.stream = Reader.stream,
 .discard = Reader.discard,
 },
+.buffer = buffer,
+.seek = 0,
+.end = 0,
 };
 }

-pub fn readable(r: *Reader, buffer: []u8) std.io.BufferedReader {
-return interface(r).buffered(buffer);
+pub fn init(file: File, buffer: []u8) Reader {
+return .{
+.file = file,
+.interface = initInterface(buffer),
+};
 }

 pub fn getSize(r: *Reader) GetEndPosError!u64 {
@@ -1021,11 +1028,11 @@
 const max_buffers_len = 16;

 fn stream(
-context: ?*anyopaque,
+io_reader: *std.io.Reader,
 bw: *BufferedWriter,
 limit: std.io.Limit,
 ) std.io.Reader.StreamError!usize {
-const r: *Reader = @ptrCast(@alignCast(context));
+const r: *Reader = @fieldParentPtr("interface", io_reader);
 switch (r.mode) {
 .positional, .streaming => return bw.writeFile(r, limit, &.{}, 0) catch |write_err| switch (write_err) {
 error.ReadFailed => return error.ReadFailed,
@@ -1051,8 +1058,8 @@
 }
 }

-fn discard(context: ?*anyopaque, limit: std.io.Limit) std.io.Reader.Error!usize {
-const r: *Reader = @ptrCast(@alignCast(context));
+fn discard(io_reader: *std.io.Reader, limit: std.io.Limit) std.io.Reader.Error!usize {
+const r: *Reader = @fieldParentPtr("interface", io_reader);
 const file = r.file;
 const pos = r.pos;
 switch (r.mode) {
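The two vtable hunks above show the other half of the unification: implementations no longer stash a type-erased `?*anyopaque` context and `@ptrCast` it back, but embed the `std.io.Reader` as an `interface` field and recover the containing struct with `@fieldParentPtr`. A minimal sketch of a custom implementation following the same convention (the type and its bookkeeping are illustrative, and only the recovery step is demonstrated):

    const std = @import("std");

    const CountingReader = struct {
        interface: std.io.Reader,
        discard_calls: usize = 0,

        fn discard(io_reader: *std.io.Reader, limit: std.io.Limit) std.io.Reader.Error!usize {
            // Recover the containing struct from its embedded interface
            // field, exactly as File.Reader.discard does above.
            const cr: *CountingReader = @fieldParentPtr("interface", io_reader);
            cr.discard_calls += 1;
            // A real implementation would skip up to `limit` bytes and
            // return the count; this sketch only shows the recovery.
            _ = limit;
            return 0;
        }
    };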
@@ -1357,8 +1364,8 @@ pub const Writer = struct {
 ///
 /// Positional is more threadsafe, since the global seek position is not
 /// affected.
-pub fn reader(file: File) Reader {
-return .{ .file = file };
+pub fn reader(file: File, buffer: []u8) Reader {
+return .init(file, buffer);
 }

 /// Positional is more threadsafe, since the global seek position is not
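With buffering folded into the reader, opening a buffered file reader is now a single call that supplies the buffer, and reads go through the embedded `interface` field. A short sketch of the post-commit calling convention (the buffer size and the use of `takeByte` are illustrative; `file` is an open `std.fs.File`):

    var buffer: [4096]u8 = undefined;
    var file_reader = file.reader(&buffer); // was: file.reader() with no buffer
    const r: *std.io.Reader = &file_reader.interface;
    const first_byte = try r.takeByte();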
@@ -325,7 +325,7 @@ pub const Header = struct {
 };

 pub const Reader = struct {
-in: *std.io.BufferedReader,
+in: *std.io.Reader,
 /// Keeps track of whether the stream is ready to accept a new request,
 /// making invalid API usage cause assertion failures rather than HTTP
 /// protocol violations.
@@ -703,7 +703,7 @@ pub const Reader = struct {

 pub const Decompressor = struct {
 compression: Compression,
-buffered_reader: std.io.BufferedReader,
+buffered_reader: std.io.Reader,

 pub const Compression = union(enum) {
 deflate: std.compress.flate.Decompressor,
@@ -232,7 +232,7 @@ pub const Connection = struct {
 writer: std.io.BufferedWriter,
 /// HTTP protocol from server to client.
 /// This either comes directly from `stream_reader`, or from a TLS client.
-reader: std.io.BufferedReader,
+reader: std.io.Reader,
 /// Entry in `ConnectionPool.used` or `ConnectionPool.free`.
 pool_node: std.DoublyLinkedList.Node,
 port: u16,
@@ -299,7 +299,7 @@
 /// Data from `client` to `Connection.stream`.
 writer: std.io.BufferedWriter,
 /// Data from `Connection.stream` to `client`.
-reader: std.io.BufferedReader,
+reader: std.io.Reader,
 client: std.crypto.tls.Client,
 connection: Connection,
@@ -20,7 +20,7 @@ reader: http.Reader,
 /// header, otherwise `receiveHead` returns `error.HttpHeadersOversize`.
 ///
 /// The returned `Server` is ready for `receiveHead` to be called.
-pub fn init(in: *std.io.BufferedReader, out: *std.io.BufferedWriter) Server {
+pub fn init(in: *std.io.Reader, out: *std.io.BufferedWriter) Server {
 return .{
 .reader = .{
 .in = in,
@@ -610,7 +610,7 @@ pub const Request = struct {
 /// See https://tools.ietf.org/html/rfc6455
 pub const WebSocket = struct {
 key: []const u8,
-input: *std.io.BufferedReader,
+input: *std.io.Reader,
 output: *std.io.BufferedWriter,

 pub const Header0 = packed struct(u8) {
@@ -72,8 +72,6 @@ pub const Limit = enum(usize) {
 pub const Reader = @import("io/Reader.zig");
 pub const Writer = @import("io/Writer.zig");

-pub const BufferedReader = @import("io/BufferedReader.zig");
 pub const BufferedWriter = @import("io/BufferedWriter.zig");
 pub const AllocatingWriter = @import("io/AllocatingWriter.zig");

 pub const ChangeDetectionStream = @import("io/change_detection_stream.zig").ChangeDetectionStream;
@@ -131,7 +129,7 @@ pub fn Poller(comptime StreamEnum: type) type {
 const PollFd = if (is_windows) void else posix.pollfd;

 gpa: Allocator,
-readers: [enum_fields.len]BufferedReader,
+readers: [enum_fields.len]Reader,
 poll_fds: [enum_fields.len]PollFd,
 windows: if (is_windows) struct {
 first_read_done: bool,
@@ -163,7 +161,7 @@
 _ = windows.kernel32.CancelIo(h);
 }
 }
-inline for (&self.readers) |*br| gpa.free(br.buffer);
+inline for (&self.readers) |*r| gpa.free(r.buffer);
 self.* = undefined;
 }

@@ -183,7 +181,7 @@
 }
 }

-pub inline fn reader(self: *Self, comptime which: StreamEnum) *BufferedReader {
+pub inline fn reader(self: *Self, comptime which: StreamEnum) *Reader {
 return &self.readers[@intFromEnum(which)];
 }

@@ -295,18 +293,18 @@
 }

 var keep_polling = false;
-inline for (&self.poll_fds, &self.readers) |*poll_fd, *br| {
+inline for (&self.poll_fds, &self.readers) |*poll_fd, *r| {
 // Try reading whatever is available before checking the error
 // conditions.
 // It's still possible to read after a POLL.HUP is received,
 // always check if there's some data waiting to be read first.
 if (poll_fd.revents & posix.POLL.IN != 0) {
-const buf = try br.writableSliceGreedyAlloc(gpa, bump_amt);
+const buf = try r.writableSliceGreedyAlloc(gpa, bump_amt);
 const amt = posix.read(poll_fd.fd, buf) catch |err| switch (err) {
 error.BrokenPipe => 0, // Handle the same as EOF.
 else => |e| return e,
 };
-br.advanceBufferEnd(amt);
+r.advanceBufferEnd(amt);
 if (amt == 0) {
 // Remove the fd when the EOF condition is met.
 poll_fd.fd = -1;
@@ -337,14 +335,14 @@ var win_dummy_bytes_read: u32 = undefined;
 fn windowsAsyncReadToFifoAndQueueSmallRead(
 handle: windows.HANDLE,
 overlapped: *windows.OVERLAPPED,
-br: *BufferedReader,
+r: *Reader,
 small_buf: *[128]u8,
 bump_amt: usize,
 ) !enum { empty, populated, closed_populated, closed } {
 var read_any_data = false;
 while (true) {
 const fifo_read_pending = while (true) {
-const buf = try br.writableWithSize(bump_amt);
+const buf = try r.writableWithSize(bump_amt);
 const buf_len = math.cast(u32, buf.len) orelse math.maxInt(u32);

 if (0 == windows.kernel32.ReadFile(
@@ -366,7 +364,7 @@ fn windowsAsyncReadToFifoAndQueueSmallRead(
 };

 read_any_data = true;
-br.update(num_bytes_read);
+r.update(num_bytes_read);

 if (num_bytes_read == buf_len) {
 // We filled the buffer, so there's probably more data available.
@@ -396,7 +394,7 @@ fn windowsAsyncReadToFifoAndQueueSmallRead(
 .aborted => break :cancel_read,
 };
 read_any_data = true;
-br.update(num_bytes_read);
+r.update(num_bytes_read);
 }

 // Try to queue the 1-byte read.
@@ -421,7 +419,7 @@ fn windowsAsyncReadToFifoAndQueueSmallRead(
 .closed => return if (read_any_data) .closed_populated else .closed,
 .aborted => unreachable,
 };
-try br.write(small_buf[0..num_bytes_read]);
+try r.write(small_buf[0..num_bytes_read]);
 read_any_data = true;
 }
 }
@@ -488,8 +486,6 @@ pub fn PollFiles(comptime StreamEnum: type) type {

 test {
 _ = AllocatingWriter;
-_ = BufferedReader;
 _ = BufferedWriter;
 _ = Reader;
 _ = Writer;
 _ = @import("io/test.zig");
@@ -6,7 +6,7 @@
 //! `std.io.BufferedWriter` state such that it writes to the unused capacity of
 //! an array list, filling it up completely before making a call through the
 //! vtable, causing a resize. Consequently, the same, optimized, non-generic
-//! machine code that uses `std.io.BufferedReader`, such as formatted printing,
+//! machine code that uses `std.io.Reader`, such as formatted printing,
 //! takes the hot paths when using this API.

 const std = @import("../std.zig");
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -5,21 +5,27 @@ const Reader = std.io.Reader;
const BufferedWriter = std.io.BufferedWriter;
const Limit = std.io.Limit;

unlimited_reader: Reader,
unlimited: *Reader,
remaining: Limit,
interface: Reader,

pub fn reader(l: *Limited) Reader {
pub fn init(reader: *Reader, limit: Limit, buffer: []u8) Limited {
return .{
.context = l,
.vtable = &.{
.read = passthruRead,
.readVec = passthruReadVec,
.discard = passthruDiscard,
.unlimited = reader,
.remaining = limit,
.interface = .{
.vtable = &.{
.stream = stream,
.discard = discard,
},
.buffer = buffer,
.seek = 0,
.end = 0,
},
};
}

fn passthruRead(context: ?*anyopaque, bw: *BufferedWriter, limit: Limit) Reader.StreamError!usize {
fn stream(context: ?*anyopaque, bw: *BufferedWriter, limit: Limit) Reader.StreamError!usize {
const l: *Limited = @alignCast(@ptrCast(context));
const combined_limit = limit.min(l.remaining);
const n = try l.unlimited_reader.read(bw, combined_limit);
@ -27,30 +33,10 @@ fn passthruRead(context: ?*anyopaque, bw: *BufferedWriter, limit: Limit) Reader.
return n;
}

fn passthruDiscard(context: ?*anyopaque, limit: Limit) Reader.Error!usize {
fn discard(context: ?*anyopaque, limit: Limit) Reader.Error!usize {
const l: *Limited = @alignCast(@ptrCast(context));
const combined_limit = limit.min(l.remaining);
const n = try l.unlimited_reader.discard(combined_limit);
l.remaining = l.remaining.subtract(n).?;
return n;
}

fn passthruReadVec(context: ?*anyopaque, data: []const []u8) Reader.Error!usize {
const l: *Limited = @alignCast(@ptrCast(context));
if (data.len == 0) return 0;
if (data[0].len >= @intFromEnum(l.remaining)) {
const n = try l.unlimited_reader.readVec(&.{l.remaining.slice(data[0])});
l.remaining = l.remaining.subtract(n).?;
return n;
}
var total: usize = 0;
for (data, 0..) |buf, i| {
total += buf.len;
if (total > @intFromEnum(l.remaining)) {
const n = try l.unlimited_reader.readVec(data[0..i]);
l.remaining = l.remaining.subtract(n).?;
return n;
}
}
return 0;
}

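With the rewrite above, `Limited` no longer builds a vtable around a context pointer; it embeds a complete `Reader` as its `interface` field and is constructed with an explicit buffer. A hedged usage sketch (the `Limit.limited` constructor and the enclosing `Limited` declaration are assumptions, not shown in this hunk):

// Minimal sketch, not part of the diff.
var buf: [64]u8 = undefined;
var limited: Limited = .init(&underlying_reader, .limited(16), &buf);
const r: *Reader = &limited.interface; // generic Reader view
const byte = try r.takeByte(); // at most 16 bytes will ever be pulled through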
@ -114,32 +114,27 @@ test writeSignedFixed {
}

fn test_read_stream_ileb128(comptime T: type, encoded: []const u8) !T {
var br: std.io.BufferedReader = undefined;
br.initFixed(encoded);
var br: std.io.Reader = .fixed(encoded);
return br.takeIleb128(T);
}

fn test_read_stream_uleb128(comptime T: type, encoded: []const u8) !T {
var br: std.io.BufferedReader = undefined;
br.initFixed(encoded);
var br: std.io.Reader = .fixed(encoded);
return br.takeUleb128(T);
}

fn test_read_ileb128(comptime T: type, encoded: []const u8) !T {
var br: std.io.BufferedReader = undefined;
br.initFixed(encoded);
var br: std.io.Reader = .fixed(encoded);
return br.readIleb128(T);
}

fn test_read_uleb128(comptime T: type, encoded: []const u8) !T {
var br: std.io.BufferedReader = undefined;
br.initFixed(encoded);
var br: std.io.Reader = .fixed(encoded);
return br.readUleb128(T);
}

fn test_read_ileb128_seq(comptime T: type, comptime N: usize, encoded: []const u8) !void {
var br: std.io.BufferedReader = undefined;
br.initFixed(encoded);
var br: std.io.Reader = .fixed(encoded);
var i: usize = 0;
while (i < N) : (i += 1) {
_ = try br.readIleb128(T);
@ -147,8 +142,7 @@ fn test_read_ileb128_seq(comptime T: type, comptime N: usize, encoded: []const u
}

fn test_read_uleb128_seq(comptime T: type, comptime N: usize, encoded: []const u8) !void {
var br: std.io.BufferedReader = undefined;
br.initFixed(encoded);
var br: std.io.Reader = .fixed(encoded);
var i: usize = 0;
while (i < N) : (i += 1) {
_ = try br.readUleb128(T);
@ -248,7 +242,7 @@ fn test_write_leb128(value: anytype) !void {
const t_signed = signedness == .signed;

const writeStream = if (t_signed) std.io.BufferedWriter.writeIleb128 else std.io.BufferedWriter.writeUleb128;
const readStream = if (t_signed) std.io.BufferedReader.readIleb128 else std.io.BufferedReader.readUleb128;
const readStream = if (t_signed) std.io.Reader.readIleb128 else std.io.Reader.readUleb128;

// decode to a larger bit size too, to ensure sign extension
// is working as expected
@ -275,8 +269,7 @@ fn test_write_leb128(value: anytype) !void {
try testing.expect(bw.buffer.items.len == bytes_needed);

// stream read
var br: std.io.BufferedReader = undefined;
br.initFixed(&buf);
var br: std.io.Reader = .fixed(&buf);
const sr = try readStream(&br, T);
try testing.expect(br.seek == bytes_needed);
try testing.expect(sr == value);

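The `.fixed` initializer plus `takeUleb128` is now the whole decode-from-slice story. A self-contained sketch (not part of the diff) using the textbook three-byte encoding of 624485:

const std = @import("std");

test "takeUleb128 sketch" {
    // 0xE5 0x8E 0x26 is the standard ULEB128 encoding of 624485.
    var r: std.io.Reader = .fixed(&.{ 0xE5, 0x8E, 0x26 });
    try std.testing.expectEqual(@as(u64, 624485), try r.takeUleb128(u64));
    try std.testing.expectEqual(@as(usize, 3), r.seek);
}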
@ -1378,7 +1378,7 @@ fn parseHosts(
name: []const u8,
family: posix.sa_family_t,
port: u16,
br: *std.io.BufferedReader,
br: *std.io.Reader,
) error{ OutOfMemory, ReadFailed }!void {
while (true) {
const line = br.takeDelimiterExclusive('\n') catch |err| switch (err) {
@ -1592,7 +1592,7 @@ const ResolvConf = struct {
};
}

fn parse(rc: *ResolvConf, br: *std.io.BufferedReader) !void {
fn parse(rc: *ResolvConf, br: *std.io.Reader) !void {
const gpa = rc.gpa;
while (br.takeSentinel('\n')) |line_with_comment| {
const line = line: {

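Both parsers consume their input line by line. A hedged sketch of the `takeDelimiterExclusive` loop used by `parseHosts` (termination via `error.EndOfStream` is an assumption from `std.io.Reader` conventions; the error set is not visible in this hunk):

// Minimal sketch, not part of the diff.
while (true) {
    const line = br.takeDelimiterExclusive('\n') catch |err| switch (err) {
        error.EndOfStream => break, // assumed terminal condition
        else => |e| return e,
    };
    _ = line; // one newline-delimited record, delimiter excluded
}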
@ -348,15 +348,15 @@ pub const RunResult = struct {
stderr: []u8,
};

fn writeBufferedReaderToArrayList(allocator: Allocator, list: *std.ArrayListUnmanaged(u8), br: *std.io.BufferedReader) !void {
assert(br.seek == 0);
fn writeBufferedReaderToArrayList(allocator: Allocator, list: *std.ArrayListUnmanaged(u8), r: *std.io.Reader) !void {
assert(r.seek == 0);
if (list.capacity == 0) {
list.* = .{
.items = br.bufferContents(),
.capacity = br.buffer.len,
.items = r.bufferContents(),
.capacity = r.buffer.len,
};
} else {
try list.appendSlice(allocator, br.bufferContents());
try list.appendSlice(allocator, r.bufferContents());
}
}

@ -302,7 +302,7 @@ pub const FileKind = enum {

/// Iterator over entries in the tar file represented by reader.
pub const Iterator = struct {
reader: *std.io.BufferedReader,
reader: *std.io.Reader,
diagnostics: ?*Diagnostics = null,

// buffers for header and file attributes
@ -328,7 +328,7 @@ pub const Iterator = struct {

/// Iterates over files in tar archive.
/// `next` returns each file in tar archive.
pub fn init(reader: *std.io.BufferedReader, options: Options) Iterator {
pub fn init(reader: *std.io.Reader, options: Options) Iterator {
return .{
.reader = reader,
.diagnostics = options.diagnostics,
@ -345,7 +345,7 @@ pub const Iterator = struct {
kind: FileKind = .file,

unread_bytes: *u64,
parent_reader: *std.io.BufferedReader,
parent_reader: *std.io.Reader,

pub fn reader(self: *File) std.io.Reader {
return .{
@ -537,14 +537,14 @@ const pax_max_size_attr_len = 64;

pub const PaxIterator = struct {
size: usize, // cumulative size of all pax attributes
reader: *std.io.BufferedReader,
reader: *std.io.Reader,

const Self = @This();

const Attribute = struct {
kind: PaxAttributeKind,
len: usize, // length of the attribute value
reader: *std.io.BufferedReader, // reader positioned at value start
reader: *std.io.Reader, // reader positioned at value start

// Copies pax attribute value into destination buffer.
// Must be called with destination buffer of size at least Attribute.len.
@ -611,13 +611,13 @@ pub const PaxIterator = struct {
}

// Checks that each record ends with new line.
fn validateAttributeEnding(reader: *std.io.BufferedReader) !void {
fn validateAttributeEnding(reader: *std.io.Reader) !void {
if (try reader.takeByte() != '\n') return error.PaxInvalidAttributeEnd;
}
};

/// Saves tar file content to the file system.
pub fn pipeToFileSystem(dir: std.fs.Dir, reader: *std.io.BufferedReader, options: PipeOptions) !void {
pub fn pipeToFileSystem(dir: std.fs.Dir, reader: *std.io.Reader, options: PipeOptions) !void {
var file_name_buffer: [std.fs.max_path_bytes]u8 = undefined;
var link_name_buffer: [std.fs.max_path_bytes]u8 = undefined;
var iter: Iterator = .init(reader, .{
@ -818,7 +818,7 @@ test PaxIterator {
var buffer: [1024]u8 = undefined;

outer: for (cases) |case| {
var br: std.io.BufferedReader = undefined;
var br: std.io.Reader = undefined;
br.initFixed(case.data);
var iter: PaxIterator = .init(&br, case.data.len);

@ -955,7 +955,7 @@ test Iterator {
// example/empty/

const data = @embedFile("tar/testdata/example.tar");
var br: std.io.BufferedReader = undefined;
var br: std.io.Reader = undefined;
br.initFixed(data);

// User provided buffers to the iterator
@ -1015,7 +1015,7 @@ test pipeToFileSystem {
// example/empty/

const data = @embedFile("tar/testdata/example.tar");
var br: std.io.BufferedReader = undefined;
var br: std.io.Reader = undefined;
br.initFixed(data);

var tmp = testing.tmpDir(.{ .no_follow = true });
@ -1047,7 +1047,7 @@ test pipeToFileSystem {

test "pipeToFileSystem root_dir" {
const data = @embedFile("tar/testdata/example.tar");
var br: std.io.BufferedReader = undefined;
var br: std.io.Reader = undefined;
br.initFixed(data);

// with strip_components = 1
@ -1096,7 +1096,7 @@ test "pipeToFileSystem root_dir" {

test "findRoot with single file archive" {
const data = @embedFile("tar/testdata/22752.tar");
var br: std.io.BufferedReader = undefined;
var br: std.io.Reader = undefined;
br.initFixed(data);

var tmp = testing.tmpDir(.{});
@ -1111,7 +1111,7 @@ test "findRoot with single file archive" {

test "findRoot without explicit root dir" {
const data = @embedFile("tar/testdata/19820.tar");
var br: std.io.BufferedReader = undefined;
var br: std.io.Reader = undefined;
br.initFixed(data);

var tmp = testing.tmpDir(.{});
@ -1126,7 +1126,7 @@ test "findRoot without explicit root dir" {

test "pipeToFileSystem strip_components" {
const data = @embedFile("tar/testdata/example.tar");
var br: std.io.BufferedReader = undefined;
var br: std.io.Reader = undefined;
br.initFixed(data);

var tmp = testing.tmpDir(.{ .no_follow = true });
@ -1188,7 +1188,7 @@ test "executable bit" {
const data = @embedFile("tar/testdata/example.tar");

for ([_]PipeOptions.ModeMode{ .ignore, .executable_bit_only }) |opt| {
var br: std.io.BufferedReader = undefined;
var br: std.io.Reader = undefined;
br.initFixed(data);

var tmp = testing.tmpDir(.{ .no_follow = true });

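For reference, iterating an in-memory archive after this change looks like the following hedged sketch (`tar_bytes` is illustrative; `next` returning an optional entry is inferred from the doc comment above, not shown in these hunks):

// Minimal sketch, not part of the diff.
var r: std.io.Reader = .fixed(tar_bytes);
var file_name_buffer: [std.fs.max_path_bytes]u8 = undefined;
var link_name_buffer: [std.fs.max_path_bytes]u8 = undefined;
var it = std.tar.Iterator.init(&r, .{
    .file_name_buffer = &file_name_buffer,
    .link_name_buffer = &link_name_buffer,
});
while (try it.next()) |file| {
    _ = file; // inspect file.name / file.kind / file.size here
}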
@ -67,7 +67,7 @@ pub fn writeFileStream(
w: *Writer,
sub_path: []const u8,
size: usize,
reader: *std.io.BufferedReader,
reader: *std.io.Reader,
options: Options,
) std.io.Reader.StreamError!void {
try w.writeHeader(.regular, sub_path, "", @intCast(size), options);
@ -441,7 +441,7 @@ test "write files" {
for (files) |file|
try wrt.writeFileBytes(file.path, file.content, .{});

var input: std.io.BufferedReader = undefined;
var input: std.io.Reader = undefined;
input.initFixed(output.getWritten());
var iter = std.tar.iterator(&input, .{
.file_name_buffer = &file_name_buffer,
@ -476,12 +476,12 @@ test "write files" {
var wrt: Writer = .{ .underlying_writer = &output.buffered_writer };
defer output.deinit();
for (files) |file| {
var content: std.io.BufferedReader = undefined;
var content: std.io.Reader = undefined;
content.initFixed(file.content);
try wrt.writeFileStream(file.path, file.content.len, &content, .{});
}

var input: std.io.BufferedReader = undefined;
var input: std.io.Reader = undefined;
input.initFixed(output.getWritten());
var iter = std.tar.iterator(&input, .{
.file_name_buffer = &file_name_buffer,

@ -346,8 +346,7 @@ test "run test cases" {
|
||||
var link_name_buffer: [std.fs.max_path_bytes]u8 = undefined;
|
||||
|
||||
for (cases) |case| {
|
||||
var br: std.io.BufferedReader = undefined;
|
||||
br.initFixed(case.data);
|
||||
var br: std.io.Reader = .fixed(case.data);
|
||||
var iter = tar.iterator(&br, .{
|
||||
.file_name_buffer = &file_name_buffer,
|
||||
.link_name_buffer = &link_name_buffer,
|
||||
@ -391,8 +390,7 @@ test "pax/gnu long names with small buffer" {
|
||||
const long_name_cases = [_]Case{ cases[11], cases[25], cases[28] };
|
||||
|
||||
for (long_name_cases) |case| {
|
||||
var br: std.io.BufferedReader = undefined;
|
||||
br.initFixed(case.data);
|
||||
var br: std.io.Reader = .fixed(case.data);
|
||||
var iter = tar.iterator(&br, .{
|
||||
.file_name_buffer = &min_file_name_buffer,
|
||||
.link_name_buffer = &min_link_name_buffer,
|
||||
@ -413,8 +411,7 @@ test "insufficient buffer in Header name filed" {
|
||||
var min_file_name_buffer: [9]u8 = undefined;
|
||||
var min_link_name_buffer: [100]u8 = undefined;
|
||||
|
||||
var br: std.io.BufferedReader = undefined;
|
||||
br.initFixed(cases[0].data);
|
||||
var br: std.io.Reader = .fixed(cases[0].data);
|
||||
var iter = tar.iterator(&br, .{
|
||||
.file_name_buffer = &min_file_name_buffer,
|
||||
.link_name_buffer = &min_link_name_buffer,
|
||||
@ -469,22 +466,21 @@ test "should not overwrite existing file" {
|
||||
// This ensures that file is not overwritten.
|
||||
//
|
||||
const data = @embedFile("testdata/overwrite_file.tar");
|
||||
var br: std.io.BufferedReader = undefined;
|
||||
br.initFixed(data);
|
||||
var r: std.io.Reader = .fixed(data);
|
||||
|
||||
// Unpack with strip_components = 1 should fail
|
||||
var root = std.testing.tmpDir(.{});
|
||||
defer root.cleanup();
|
||||
try testing.expectError(
|
||||
error.PathAlreadyExists,
|
||||
tar.pipeToFileSystem(root.dir, &br, .{ .mode_mode = .ignore, .strip_components = 1 }),
|
||||
tar.pipeToFileSystem(root.dir, &r, .{ .mode_mode = .ignore, .strip_components = 1 }),
|
||||
);
|
||||
|
||||
// Unpack with strip_components = 0 should pass
|
||||
br.initFixed(data);
|
||||
r = .fixed(data);
|
||||
var root2 = std.testing.tmpDir(.{});
|
||||
defer root2.cleanup();
|
||||
try tar.pipeToFileSystem(root2.dir, &br, .{ .mode_mode = .ignore, .strip_components = 0 });
|
||||
try tar.pipeToFileSystem(root2.dir, &r, .{ .mode_mode = .ignore, .strip_components = 0 });
|
||||
}
|
||||
|
||||
test "case sensitivity" {
|
||||
@ -498,13 +494,12 @@ test "case sensitivity" {
|
||||
// 18089/alacritty/Darkermatrix.yml
|
||||
//
|
||||
const data = @embedFile("testdata/18089.tar");
|
||||
var br: std.io.BufferedReader = undefined;
|
||||
br.initFixed(data);
|
||||
var r: std.io.Reader = .fixed(data);
|
||||
|
||||
var root = std.testing.tmpDir(.{});
|
||||
defer root.cleanup();
|
||||
|
||||
tar.pipeToFileSystem(root.dir, &br, .{ .mode_mode = .ignore, .strip_components = 1 }) catch |err| {
|
||||
tar.pipeToFileSystem(root.dir, &r, .{ .mode_mode = .ignore, .strip_components = 1 }) catch |err| {
|
||||
// on case insensitive fs we fail on overwrite existing file
|
||||
try testing.expectEqual(error.PathAlreadyExists, err);
|
||||
return;
|
||||
|
||||
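One detail worth noting in the test above: because `.fixed` is plain value initialization, rewinding a fixed `Reader` is just reassignment. A minimal sketch, not part of the diff:

var r: std.io.Reader = .fixed(data);
// ... consume r ...
r = .fixed(data); // start over from the beginning of the same buffer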
@ -54,7 +54,7 @@ pub const Tz = struct {
},
};

pub fn parse(allocator: std.mem.Allocator, reader: *std.io.BufferedReader) !Tz {
pub fn parse(allocator: std.mem.Allocator, reader: *std.io.Reader) !Tz {
var legacy_header = try reader.takeStruct(Header);
if (!std.mem.eql(u8, &legacy_header.magic, "TZif")) return error.BadHeader;
if (legacy_header.version != 0 and legacy_header.version != '2' and legacy_header.version != '3') return error.BadVersion;
@ -215,7 +215,7 @@ pub const Tz = struct {

test "slim" {
const data = @embedFile("tz/asia_tokyo.tzif");
var in_stream: std.io.BufferedReader = undefined;
var in_stream: std.io.Reader = undefined;
in_stream.initFixed(data);

var tz = try std.Tz.parse(std.testing.allocator, &in_stream);
@ -229,7 +229,7 @@ test "slim" {

test "fat" {
const data = @embedFile("tz/antarctica_davis.tzif");
var in_stream: std.io.BufferedReader = undefined;
var in_stream: std.io.Reader = undefined;
in_stream.initFixed(data);

var tz = try std.Tz.parse(std.testing.allocator, &in_stream);
@ -243,7 +243,7 @@ test "fat" {
test "legacy" {
// Taken from Slackware 8.0, from 2001
const data = @embedFile("tz/europe_vatican.tzif");
var in_stream: std.io.BufferedReader = undefined;
var in_stream: std.io.Reader = undefined;
in_stream.initFixed(data);

var tz = try std.Tz.parse(std.testing.allocator, &in_stream);

@ -1,4 +1,4 @@
in: *std.io.BufferedReader,
in: *std.io.Reader,
out: *std.io.BufferedWriter,

pub const Message = struct {
@ -93,7 +93,7 @@ pub const Message = struct {
};

pub const Options = struct {
in: *std.io.BufferedReader,
in: *std.io.Reader,
out: *std.io.BufferedWriter,
zig_version: []const u8,
};

@ -1,6 +1,6 @@
allocator: std.mem.Allocator,
record_arena: std.heap.ArenaAllocator.State,
br: *std.io.BufferedReader,
reader: *std.io.Reader,
keep_names: bool,
bit_buffer: u32,
bit_offset: u5,
@ -93,14 +93,14 @@ pub const Record = struct {
};

pub const InitOptions = struct {
br: *std.io.BufferedReader,
reader: *std.io.Reader,
keep_names: bool = false,
};
pub fn init(allocator: std.mem.Allocator, options: InitOptions) BitcodeReader {
return .{
.allocator = allocator,
.record_arena = .{},
.br = options.br,
.reader = options.reader,
.keep_names = options.keep_names,
.bit_buffer = 0,
.bit_offset = 0,
@ -172,7 +172,7 @@ pub fn next(bc: *BitcodeReader) !?Item {

pub fn skipBlock(bc: *BitcodeReader, block: Block) !void {
assert(bc.bit_offset == 0);
try bc.br.discard(4 * @as(u34, block.len));
try bc.reader.discard(4 * @as(u34, block.len));
try bc.endBlock();
}

@ -371,17 +371,17 @@ fn align32Bits(bc: *BitcodeReader) void {

fn read32Bits(bc: *BitcodeReader) !u32 {
assert(bc.bit_offset == 0);
return bc.br.takeInt(u32, .little);
return bc.reader.takeInt(u32, .little);
}

fn readBytes(bc: *BitcodeReader, bytes: []u8) !void {
assert(bc.bit_offset == 0);
try bc.br.read(bytes);
try bc.reader.read(bytes);

const trailing_bytes = bytes.len % 4;
if (trailing_bytes > 0) {
var bit_buffer: [4]u8 = @splat(0);
try bc.br.read(bit_buffer[trailing_bytes..]);
try bc.reader.read(bit_buffer[trailing_bytes..]);
bc.bit_buffer = std.mem.readInt(u32, &bit_buffer, .little);
bc.bit_offset = @intCast(8 * trailing_bytes);
}

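`takeInt` reads a fixed-width integer with explicit endianness, as `read32Bits` does above. A self-contained sketch (not part of the diff):

const std = @import("std");

test "takeInt sketch" {
    var r: std.io.Reader = .fixed(&.{ 0x78, 0x56, 0x34, 0x12 });
    try std.testing.expectEqual(@as(u32, 0x12345678), try r.takeInt(u32, .little));
}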
@ -342,9 +342,8 @@ fn testParser(
expected_model: *const Target.Cpu.Model,
input: []const u8,
) !void {
var br: std.io.BufferedReader = undefined;
br.initFixed(@constCast(input));
const result = try parser.parse(arch, &br);
var r: std.io.Reader = .fixed(input);
const result = try parser.parse(arch, &r);
try testing.expectEqual(expected_model, result.?.model);
try testing.expect(expected_model.features.eql(result.?.features));
}

@ -161,14 +161,14 @@ pub const EndRecord = extern struct {

pub const Decompress = union {
inflate: std.compress.flate.Decompress,
store: *std.io.BufferedReader,
store: *std.io.Reader,

fn readable(
d: *Decompress,
reader: *std.io.BufferedReader,
reader: *std.io.Reader,
method: CompressionMethod,
buffer: []u8,
) std.io.BufferedReader {
) std.io.Reader {
switch (method) {
.store => {
d.* = .{ .store = reader };

@ -51,7 +51,7 @@ const FileStore = struct {
uncompressed_size: usize,
};

fn makeZip(file_writer: *std.fs.File.Writer, files: []const File, options: WriteZipOptions) !std.io.BufferedReader {
fn makeZip(file_writer: *std.fs.File.Writer, files: []const File, options: WriteZipOptions) !std.io.Reader {
const store = try std.testing.allocator.alloc(FileStore, files.len);
defer std.testing.allocator.free(store);
return makeZipWithStore(file_writer, files, options, store);
@ -198,7 +198,7 @@ const Zipper = struct {
},
.deflate => {
const offset = writer.count;
var br: std.io.BufferedReader = undefined;
var br: std.io.Reader = undefined;
br.initFixed(@constCast(opt.content));
var compress: std.compress.flate.Compress = .init(&br, .{});
var compress_br = compress.readable(&.{});

@ -1076,9 +1076,8 @@ pub const CObject = struct {
var buffer: [1024]u8 = undefined;
const file = try std.fs.cwd().openFile(path, .{});
defer file.close();
var br: std.io.BufferedReader = undefined;
br.init(file.reader(), &buffer);
var bc = std.zig.llvm.BitcodeReader.init(gpa, .{ .br = &br });
var file_reader = file.reader(&buffer);
var bc = std.zig.llvm.BitcodeReader.init(gpa, .{ .reader = &file_reader.interface });
defer bc.deinit();

var file_names: std.AutoArrayHashMapUnmanaged(u32, []const u8) = .empty;

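The hunk above shows the new file plumbing: `std.fs.File.reader` now takes the buffer directly, and the generic `std.io.Reader` is reached through the returned value's `interface` field. A hedged restatement of the same pattern:

// Minimal sketch, not part of the diff; mirrors the CObject hunk above.
var buffer: [1024]u8 = undefined;
var file_reader = file.reader(&buffer);
const r: *std.io.Reader = &file_reader.interface;
_ = try r.takeByte();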
@ -79,7 +79,7 @@ pub const Oid = union(Format) {
};
}

pub fn readBytes(oid_format: Format, reader: *std.io.BufferedReader) std.io.Reader.Error!Oid {
pub fn readBytes(oid_format: Format, reader: *std.io.Reader) std.io.Reader.Error!Oid {
return switch (oid_format) {
.sha1 => {
var result: Oid = .{ .sha1 = undefined };
@ -593,7 +593,7 @@ const Packet = union(enum) {
const max_data_length = 65516;

/// Reads a packet in pkt-line format.
fn read(reader: *std.io.BufferedReader, buf: *[max_data_length]u8) !Packet {
fn read(reader: *std.io.Reader, buf: *[max_data_length]u8) !Packet {
const length = std.fmt.parseUnsigned(u16, &try reader.readBytesNoEof(4), 16) catch return error.InvalidPacket;
switch (length) {
0 => return .flush,
@ -1107,7 +1107,7 @@ const PackHeader = struct {
const signature = "PACK";
const supported_version = 2;

fn read(reader: *std.io.BufferedReader) !PackHeader {
fn read(reader: *std.io.Reader) !PackHeader {
const actual_signature = try reader.take(4);
if (!mem.eql(u8, actual_signature, signature)) return error.InvalidHeader;
const version = try reader.takeInt(u32, .big);
@ -1161,7 +1161,7 @@ const EntryHeader = union(Type) {
};
}

fn read(format: Oid.Format, reader: *std.io.BufferedReader) !EntryHeader {
fn read(format: Oid.Format, reader: *std.io.Reader) !EntryHeader {
const InitialByte = packed struct { len: u4, type: u3, has_next: bool };
const initial: InitialByte = @bitCast(try reader.takeByte());
const rest_len = if (initial.has_next) try readSizeVarInt(reader) else 0;
@ -1187,7 +1187,7 @@ const EntryHeader = union(Type) {
}
};

fn readSizeVarInt(r: *std.io.BufferedReader) !u64 {
fn readSizeVarInt(r: *std.io.Reader) !u64 {
const Byte = packed struct { value: u7, has_next: bool };
var b: Byte = @bitCast(try r.takeByte());
var value: u64 = b.value;
@ -1200,7 +1200,7 @@ fn readSizeVarInt(r: *std.io.BufferedReader) !u64 {
return value;
}

fn readOffsetVarInt(r: *std.io.BufferedReader) !u64 {
fn readOffsetVarInt(r: *std.io.Reader) !u64 {
const Byte = packed struct { value: u7, has_next: bool };
var b: Byte = @bitCast(try r.takeByte());
var value: u64 = b.value;
@ -1219,7 +1219,7 @@ const IndexHeader = struct {
const supported_version = 2;
const size = 4 + 4 + @sizeOf([256]u32);

fn read(index_header: *IndexHeader, br: *std.io.BufferedReader) !void {
fn read(index_header: *IndexHeader, br: *std.io.Reader) !void {
const sig = try br.take(4);
if (!mem.eql(u8, sig, signature)) return error.InvalidHeader;
const version = try br.takeInt(u32, .big);
@ -1493,7 +1493,7 @@ fn resolveDeltaChain(
}

/// Reads the complete contents of an object from `reader`.
fn readObjectRaw(gpa: Allocator, reader: *std.io.BufferedReader, size: u64) ![]u8 {
fn readObjectRaw(gpa: Allocator, reader: *std.io.Reader, size: u64) ![]u8 {
const alloc_size = std.math.cast(usize, size) orelse return error.ObjectTooLarge;
var decompress: zlib.Decompressor = .init(reader);
var buffer: std.ArrayListUnmanaged(u8) = .empty;
@ -1505,7 +1505,7 @@ fn readObjectRaw(gpa: Allocator, reader: *std.io.BufferedReader, size: u64) ![]u

/// The format of the delta data is documented in
/// [pack-format](https://git-scm.com/docs/pack-format).
fn expandDelta(base_object: []const u8, delta_reader: *std.io.BufferedReader, writer: *std.io.BufferedWriter) !void {
fn expandDelta(base_object: []const u8, delta_reader: *std.io.Reader, writer: *std.io.BufferedWriter) !void {
var base_offset: u32 = 0;
while (true) {
const inst: packed struct { value: u7, copy: bool } = @bitCast(delta_reader.takeByte() catch |e| switch (e) {

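`takeByte` plus a packed-struct `@bitCast` is the recurring decode idiom in this file (entry headers, varints, delta instructions). A self-contained sketch; in Zig packed structs, the first field occupies the least significant bits:

const std = @import("std");

test "packed byte decode sketch" {
    var r: std.io.Reader = .fixed(&.{0x8F}); // 0b1000_1111
    const Byte = packed struct { value: u7, has_next: bool };
    const b: Byte = @bitCast(try r.takeByte());
    try std.testing.expectEqual(@as(u7, 0x0F), b.value);
    try std.testing.expect(b.has_next);
}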
@ -2828,7 +2828,7 @@ pub fn loadZirCache(gpa: Allocator, cache_file: std.fs.File) !Zir {
};
}

pub fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_br: *std.io.BufferedReader) !Zir {
pub fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_br: *std.io.Reader) !Zir {
var instructions: std.MultiArrayList(Zir.Inst) = .{};
errdefer instructions.deinit(gpa);

@ -2947,7 +2947,7 @@ pub fn saveZoirCache(cache_file: std.fs.File, stat: std.fs.File.Stat, zoir: Zoir
};
}

pub fn loadZoirCacheBody(gpa: Allocator, header: Zoir.Header, cache_br: *std.io.BufferedReader) !Zoir {
pub fn loadZoirCacheBody(gpa: Allocator, header: Zoir.Header, cache_br: *std.io.Reader) !Zoir {
var zoir: Zoir = .{
.nodes = .empty,
.extra = &.{},

@ -372,7 +372,7 @@ fn parseGpRegister(low_enc: u3, is_extended: bool, rex: Rex, bit_size: u64) Regi
}

fn parseImm(dis: *Disassembler, kind: Encoding.Op) !Immediate {
var br: std.io.BufferedReader = undefined;
var br: std.io.Reader = undefined;
br.initFixed(dis.code[dis.pos..]);
defer dis.pos += br.seek;
return switch (kind) {
@ -388,7 +388,7 @@ fn parseImm(dis: *Disassembler, kind: Encoding.Op) !Immediate {
}

fn parseOffset(dis: *Disassembler) !u64 {
var br: std.io.BufferedReader = undefined;
var br: std.io.Reader = undefined;
br.initFixed(dis.code[dis.pos..]);
defer dis.pos += br.seek;
return br.takeInt(u64, .little);
@ -464,8 +464,7 @@ fn parseSibByte(dis: *Disassembler) !Sib {
}

fn parseDisplacement(dis: *Disassembler, modrm: ModRm, sib: ?Sib) !i32 {
var br: std.io.BufferedReader = undefined;
br.initFixed(dis.code[dis.pos..]);
var br: std.io.Reader = .fixed(dis.code[dis.pos..]);
defer dis.pos += br.seek;
if (sib) |info| {
if (info.base == 0b101 and modrm.mod == 0) {

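All three parsers follow one cursor pattern: wrap the remaining bytes in a fixed `Reader`, decode, then advance the outer position by `seek`, the number of bytes the `Reader` consumed. A hedged fragment (`code` and `pos` stand in for the disassembler state above):

// Minimal sketch, not part of the diff.
var r: std.io.Reader = .fixed(code[pos..]);
defer pos += r.seek; // commit exactly the bytes that were decoded
const disp = try r.takeInt(i32, .little);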
@ -793,8 +793,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
// twice, which causes a "duplicate symbol" assembler error.
var versions_written = std.AutoArrayHashMap(Version, void).init(arena);

var inc_br: std.io.BufferedReader = undefined;
inc_br.initFixed(metadata.inclusions);
var inc_br: std.io.Reader = .fixed(metadata.inclusions);

const fn_inclusions_len = try inc_br.takeInt(u16, .little);

@ -142,8 +142,7 @@ const DebugInfo = struct {
&abbrev_code_buf,
debug_info.section.off(dwarf) + unit_ptr.off + unit_ptr.header_len + entry_ptr.off,
) != abbrev_code_buf.len) return error.InputOutput;
var abbrev_code_br: std.io.BufferedReader = undefined;
abbrev_code_br.initFixed(&abbrev_code_buf);
var abbrev_code_br: std.io.Reader = .fixed(&abbrev_code_buf);
return @enumFromInt(abbrev_code_br.takeLeb128(@typeInfo(AbbrevCode).@"enum".tag_type) catch unreachable);
}

@ -2757,7 +2756,7 @@ fn finishWipNavFuncInner(
try dibw.writeLeb128(@intFromEnum(AbbrevCode.null));
} else {
const abbrev_code_buf = wip_nav.debug_info.getWritten()[0..AbbrevCode.decl_bytes];
var abbrev_code_br: std.io.BufferedReader = undefined;
var abbrev_code_br: std.io.Reader = undefined;
abbrev_code_br.initFixed(abbrev_code_buf);
const abbrev_code: AbbrevCode = @enumFromInt(abbrev_code_br.takeLeb128(@typeInfo(AbbrevCode).@"enum".tag_type) catch unreachable);
std.leb.writeUnsignedFixed(

@ -1192,18 +1192,17 @@ pub fn codeDecompressAlloc(self: *Object, elf_file: *Elf, atom_index: Atom.Index
const atom_ptr = self.atom(atom_index).?;
const shdr = atom_ptr.inputShdr(elf_file);
const handle = elf_file.fileHandle(self.file_handle);
var br: std.io.BufferedReader = undefined;
br.initFixed(try self.preadShdrContentsAlloc(gpa, handle, atom_ptr.input_section_index));
defer if (shdr.sh_flags & elf.SHF_COMPRESSED != 0) gpa.free(br.storageBuffer());
var r: std.io.Reader = .fixed(try self.preadShdrContentsAlloc(gpa, handle, atom_ptr.input_section_index));
defer if (shdr.sh_flags & elf.SHF_COMPRESSED != 0) gpa.free(r.storageBuffer());

if (shdr.sh_flags & elf.SHF_COMPRESSED != 0) {
const chdr = (try br.takeStruct(elf.Elf64_Chdr)).*;
const chdr = (try r.takeStruct(elf.Elf64_Chdr)).*;
switch (chdr.ch_type) {
.ZLIB => {
var bw: std.io.BufferedWriter = undefined;
bw.initFixed(try gpa.alloc(u8, std.math.cast(usize, chdr.ch_size) orelse return error.Overflow));
errdefer gpa.free(bw.buffer);
try std.compress.zlib.decompress(&br, &bw);
try std.compress.zlib.decompress(&r, &bw);
if (bw.end != bw.buffer.len) return error.InputOutput;
return bw.buffer;
},
@ -1211,7 +1210,7 @@ pub fn codeDecompressAlloc(self: *Object, elf_file: *Elf, atom_index: Atom.Index
}
}

return br.storageBuffer();
return r.storageBuffer();
}

fn locals(self: *Object) []Symbol {

@ -187,7 +187,7 @@ pub const Cie = struct {
};

pub const Iterator = struct {
br: std.io.BufferedReader,
reader: std.io.Reader,

pub const Record = struct {
tag: enum { fde, cie },
@ -196,18 +196,18 @@ pub const Iterator = struct {
};

pub fn next(it: *Iterator) !?Record {
if (it.br.seek >= it.br.storageBuffer().len) return null;
if (it.reader.seek >= it.reader.storageBuffer().len) return null;

const size = try it.br.takeInt(u32, .little);
const size = try it.reader.takeInt(u32, .little);
if (size == 0xFFFFFFFF) @panic("DWARF CFI is 32bit on macOS");

const id = try it.br.takeInt(u32, .little);
const id = try it.reader.takeInt(u32, .little);
const record: Record = .{
.tag = if (id == 0) .cie else .fde,
.offset = it.br.seek,
.offset = it.reader.seek,
.size = size,
};
try it.br.discard(size);
try it.reader.discard(size);

return record;
}

@ -273,10 +273,9 @@ pub const InfoReader = struct {
}

pub fn readLeb128(p: *InfoReader, comptime Type: type) !Type {
var br: std.io.BufferedReader = undefined;
br.initFixed(p.bytes()[p.pos..]);
defer p.pos += br.seek;
return br.takeLeb128(Type);
var r: std.io.Reader = .fixed(p.bytes()[p.pos..]);
defer p.pos += r.seek;
return r.takeLeb128(Type);
}

pub fn seekTo(p: *InfoReader, off: u64) !void {
@ -331,10 +330,9 @@ pub const AbbrevReader = struct {
}

pub fn readLeb128(p: *AbbrevReader, comptime Type: type) !Type {
var br: std.io.BufferedReader = undefined;
br.initFixed(p.bytes()[p.pos..]);
defer p.pos += br.seek;
return br.takeLeb128(Type);
var r: std.io.Reader = .fixed(p.bytes()[p.pos..]);
defer p.pos += r.seek;
return r.takeLeb128(Type);
}

pub fn seekTo(p: *AbbrevReader, off: u64) !void {

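Given the `Iterator` above, walking an eh_frame section is a value-initialized loop. A hedged sketch (construction by struct literal is assumed from the field layout shown; `eh_frame_bytes` is illustrative):

// Minimal sketch, not part of the diff.
var it: Iterator = .{ .reader = .fixed(eh_frame_bytes) };
while (try it.next()) |rec| {
    _ = rec; // rec.tag distinguishes CIE from FDE; rec.offset/rec.size locate the body
}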
@ -167,7 +167,7 @@ pub fn addExport(self: *Dylib, allocator: Allocator, name: []const u8, flags: Ex

fn parseTrieNode(
self: *Dylib,
br: *std.io.BufferedReader,
br: *std.io.Reader,
allocator: Allocator,
arena: Allocator,
prefix: []const u8,
@ -216,9 +216,8 @@ fn parseTrie(self: *Dylib, data: []const u8, macho_file: *MachO) !void {
var arena = std.heap.ArenaAllocator.init(gpa);
defer arena.deinit();

var br: std.io.BufferedReader = undefined;
br.initFixed(data);
try self.parseTrieNode(&br, gpa, arena.allocator(), "");
var r: std.io.Reader = .fixed(data);
try self.parseTrieNode(&r, gpa, arena.allocator(), "");
}

fn parseTbd(self: *Dylib, macho_file: *MachO) !void {

@ -12,34 +12,33 @@ pub const Cie = struct {
const tracy = trace(@src());
defer tracy.end();

var br: std.io.BufferedReader = undefined;
br.initFixed(cie.getData(macho_file));
var r: std.io.Reader = .fixed(cie.getData(macho_file));

try br.discard(9);
const aug = try br.takeSentinel(0);
try r.discard(9);
const aug = try r.takeSentinel(0);
if (aug[0] != 'z') return; // TODO should we error out?

_ = try br.takeLeb128(u64); // code alignment factor
_ = try br.takeLeb128(u64); // data alignment factor
_ = try br.takeLeb128(u64); // return address register
_ = try br.takeLeb128(u64); // augmentation data length
_ = try r.takeLeb128(u64); // code alignment factor
_ = try r.takeLeb128(u64); // data alignment factor
_ = try r.takeLeb128(u64); // return address register
_ = try r.takeLeb128(u64); // augmentation data length

for (aug[1..]) |ch| switch (ch) {
'R' => {
const enc = try br.takeByte();
const enc = try r.takeByte();
if (enc != DW_EH_PE.pcrel | DW_EH_PE.absptr) {
@panic("unexpected pointer encoding"); // TODO error
}
},
'P' => {
const enc = try br.takeByte();
const enc = try r.takeByte();
if (enc != DW_EH_PE.pcrel | DW_EH_PE.indirect | DW_EH_PE.sdata4) {
@panic("unexpected personality pointer encoding"); // TODO error
}
_ = try br.takeInt(u32, .little); // personality pointer
_ = try r.takeInt(u32, .little); // personality pointer
},
'L' => {
const enc = try br.takeByte();
const enc = try r.takeByte();
switch (enc & DW_EH_PE.type_mask) {
DW_EH_PE.sdata4 => cie.lsda_size = .p32,
DW_EH_PE.absptr => cie.lsda_size = .p64,
@ -143,7 +142,7 @@ pub const Fde = struct {
const object = fde.getObject(macho_file);
const sect = object.sections.items(.header)[object.eh_frame_sect_index.?];

var br: std.io.BufferedReader = undefined;
var br: std.io.Reader = undefined;
br.initFixed(fde.getData(macho_file));

try br.discard(4);
@ -267,7 +266,7 @@ pub const Fde = struct {
};

pub const Iterator = struct {
br: std.io.BufferedReader,
br: std.io.Reader,

pub const Record = struct {
tag: enum { fde, cie },

@ -2087,10 +2087,9 @@ pub const Expr = enum(u32) {
pub const end = @intFromEnum(std.wasm.Opcode.end);

pub fn slice(index: Expr, wasm: *const Wasm) [:end]const u8 {
var br: std.io.BufferedReader = undefined;
br.initFixed(wasm.string_bytes.items[@intFromEnum(index)..]);
Object.skipInit(&br) catch unreachable;
return br.storageBuffer()[0 .. br.seek - 1 :end];
var r: std.io.Reader = .fixed(wasm.string_bytes.items[@intFromEnum(index)..]);
Object.skipInit(&r) catch unreachable;
return r.storageBuffer()[0 .. r.seek - 1 :end];
}
};

@ -3038,7 +3037,7 @@ fn parseObject(wasm: *Wasm, obj: link.Input.Object) !void {
const stat = try obj.file.stat();
const size = std.math.cast(usize, stat.size) orelse return error.FileTooBig;

var br: std.io.BufferedReader = undefined;
var br: std.io.Reader = undefined;
br.initFixed(try gpa.alloc(u8, size));
defer gpa.free(br.storageBuffer());

@ -167,10 +167,8 @@ pub fn parseObject(
};

const object_file_size = try header.parsedSize();
var br: std.io.BufferedReader = undefined;
br.initFixed(file_contents[object_offset + @sizeOf(Header) ..][0..object_file_size]);

return Object.parse(wasm, &br, path, object_name, host_name, scratch_space, must_link, gc_sections);
var r: std.io.Reader = .fixed(file_contents[object_offset + @sizeOf(Header) ..][0..object_file_size]);
return Object.parse(wasm, &r, path, object_name, host_name, scratch_space, must_link, gc_sections);
}

const Archive = @This();

@ -252,7 +252,7 @@ pub const ScratchSpace = struct {

pub fn parse(
wasm: *Wasm,
br: *std.io.BufferedReader,
br: *std.io.Reader,
path: Path,
archive_member_name: ?[]const u8,
host_name: Wasm.OptionalString,
@ -1402,7 +1402,7 @@ pub fn parse(
/// Based on the "features" custom section, parses it into a list of
/// features that tell the linker what features were enabled and may be mandatory
/// to be able to link.
fn parseFeatures(wasm: *Wasm, br: *std.io.BufferedReader, path: Path) error{ OutOfMemory, LinkFailure }!Wasm.Feature.Set {
fn parseFeatures(wasm: *Wasm, br: *std.io.Reader, path: Path) error{ OutOfMemory, LinkFailure }!Wasm.Feature.Set {
const gpa = wasm.base.comp.gpa;
const diags = &wasm.base.comp.link_diags;
const features_len = try br.takeLeb128(u32);
@ -1430,7 +1430,7 @@ fn parseFeatures(wasm: *Wasm, br: *std.io.BufferedReader, path: Path) error{ Out
return .fromString(try wasm.internString(@ptrCast(feature_buffer)));
}

fn readLimits(br: *std.io.BufferedReader) std.io.Reader.Error!std.wasm.Limits {
fn readLimits(br: *std.io.Reader) std.io.Reader.Error!std.wasm.Limits {
const flags: std.wasm.Limits.Flags = @bitCast(try br.takeByte());
const min = try br.takeLeb128(u32);
const max = if (flags.has_max) try br.takeLeb128(u32) else 0;
@ -1441,13 +1441,13 @@ fn readLimits(br: *std.io.BufferedReader) std.io.Reader.Error!std.wasm.Limits {
};
}

fn readInit(wasm: *Wasm, br: *std.io.BufferedReader) std.io.Reader.Error!Wasm.Expr {
fn readInit(wasm: *Wasm, br: *std.io.Reader) std.io.Reader.Error!Wasm.Expr {
const start = br.seek;
try skipInit(br); // one after the end opcode
return wasm.addExpr(br.storageBuffer()[start..br.seek]);
}

pub fn skipInit(br: *std.io.BufferedReader) std.io.Reader.Error!void {
pub fn skipInit(br: *std.io.Reader) std.io.Reader.Error!void {
switch (try br.takeEnum(std.wasm.Opcode, .little)) {
.i32_const => _ = try br.takeLeb128(i32),
.i64_const => _ = try br.takeLeb128(i64),

@ -13,9 +13,8 @@ pub fn writeSetSubUleb(comptime op: enum { set, sub }, addend: i64, bw: *std.io.
switch (op) {
.set => try overwriteUleb(@intCast(addend), bw),
.sub => {
var br: std.io.BufferedReader = undefined;
br.initFixed(try bw.writableArray(1));
const old_value = try br.takeLeb128(u64);
var r: std.io.Reader = .fixed(try bw.writableArray(1));
const old_value = try r.takeLeb128(u64);
try overwriteUleb(old_value -% @as(u64, @intCast(addend)), bw);
},
}

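`skipInit` above pairs `takeEnum` with `takeLeb128` to step over a constant expression. A self-contained sketch decoding `i32.const 42` followed by `end` (opcode values per the wasm spec; `takeEnum`'s signature is taken from the hunk above):

const std = @import("std");

test "skipInit-style decode sketch" {
    var r: std.io.Reader = .fixed(&.{ 0x41, 0x2A, 0x0B }); // i32.const 42, end
    try std.testing.expectEqual(std.wasm.Opcode.i32_const, try r.takeEnum(std.wasm.Opcode, .little));
    try std.testing.expectEqual(@as(i32, 42), try r.takeLeb128(i32));
}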