std: fix some compilation errors
parent bc9db143c9
commit c9915e949e
@@ -2802,8 +2802,8 @@ pub fn dumpBadGetPathHelp(
     src_builder: *Build,
     asking_step: ?*Step,
 ) anyerror!void {
-    var fw = stderr.writer();
-    var bw = fw.interface().unbuffered();
+    var fw = stderr.writer(&.{});
+    const bw = &fw.interface;
     try bw.print(
         \\getPath() was called on a GeneratedFile that wasn't built yet.
         \\ source package path: {s}
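Note: the hunks in this commit follow one migration pattern for the in-flight std.Io rewrite: `File.writer()` now takes an explicit buffer (an empty `&.{}` means unbuffered), and the generic `std.io.Writer` is the `interface` field of the returned concrete writer rather than the result of an `interface()` call. A minimal sketch of the new shape, assuming `stderr` is a `std.fs.File` already in scope, as in this hunk:

    var fw = stderr.writer(&.{}); // &.{} -> zero-length buffer, i.e. unbuffered
    const bw = &fw.interface; // *std.io.Writer, the generic interface
    try bw.print("{s}\n", .{"some diagnostic"});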
@@ -661,9 +661,9 @@ pub const Manifest = struct {
     } {
         const gpa = self.cache.gpa;
         const input_file_count = self.files.entries.len;
-        var manifest_reader = self.manifest_file.?.reader(); // Reads positionally from zero.
+        var manifest_reader = self.manifest_file.?.reader(&.{}); // Reads positionally from zero.
         const limit: std.io.Limit = .limited(manifest_file_size_max);
-        const file_contents = manifest_reader.interface().allocRemaining(gpa, limit) catch |err| switch (err) {
+        const file_contents = manifest_reader.interface.allocRemaining(gpa, limit) catch |err| switch (err) {
            error.OutOfMemory => return error.OutOfMemory,
            error.StreamTooLong => return error.OutOfMemory,
            error.ReadFailed => {
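Note: the reader side mirrors the writer pattern: `file.reader(&.{})` returns a concrete file reader whose generic `std.io.Reader` is its `interface` field, and `allocRemaining` reads everything up to a `std.io.Limit` into an allocated slice. A sketch of the shape used here, where `some_file`, `gpa`, and `some_maximum` are hypothetical stand-ins:

    var file_reader = some_file.reader(&.{}); // `some_file` assumed to be an open std.fs.File
    const limit: std.io.Limit = .limited(some_maximum); // hypothetical size cap
    const contents = file_reader.interface.allocRemaining(gpa, limit) catch |err| switch (err) {
        error.OutOfMemory => return error.OutOfMemory,
        error.StreamTooLong => return error.OutOfMemory, // over-limit treated as OOM at this call site
        error.ReadFailed => return file_reader.err.?, // concrete cause lives on the concrete reader
    };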
@@ -287,8 +287,8 @@ pub fn cast(step: *Step, comptime T: type) ?*T {

 /// For debugging purposes, prints identifying information about this Step.
 pub fn dump(step: *Step, file: std.fs.File) void {
-    var fw = file.writer();
-    var bw = fw.interface().unbuffered();
+    var fw = file.writer(&.{});
+    const bw = &fw.interface;
     const tty_config = std.io.tty.detectConfig(file);
     const debug_info = std.debug.getSelfDebugInfo() catch |err| {
         bw.print("Unable to dump stack trace: Unable to open debug info: {s}\n", .{
@@ -753,17 +753,17 @@ fn testReplaceVariablesAutoconfAt(
     expected: []const u8,
     values: std.StringArrayHashMap(Value),
 ) !void {
-    var output: std.ArrayList(u8) = .init(allocator);
+    var output: std.io.Writer.Allocating = .init(allocator);
     defer output.deinit();

     const used = try allocator.alloc(bool, values.count());
     for (used) |*u| u.* = false;
     defer allocator.free(used);

-    try expand_variables_autoconf_at(&output, contents, values, used);
+    try expand_variables_autoconf_at(&output.interface, contents, values, used);

     for (used) |u| if (!u) return error.UnusedValue;
-    try std.testing.expectEqualStrings(expected, output.items);
+    try std.testing.expectEqualStrings(expected, output.getWritten());
 }

 fn testReplaceVariablesCMake(
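Note: `std.io.Writer.Allocating` stands in for the old `std.ArrayList(u8)` sink here: it grows with an allocator, exposes the generic writer as its `interface` field, and the accumulated bytes are read back with `getWritten()` instead of `.items`. A minimal sketch, assuming a test-allocator context:

    var output: std.io.Writer.Allocating = .init(std.testing.allocator);
    defer output.deinit();
    try output.interface.writeAll("abc");
    try std.testing.expectEqualStrings("abc", output.getWritten());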
@@ -667,5 +667,5 @@ test Options {
     \\
     , options.contents.items);

-    _ = try std.zig.Ast.parse(arena.allocator(), try options.contents.toOwnedSliceSentinel(0), .zig);
+    _ = try std.zig.Ast.parse(arena.allocator(), try options.contents.toOwnedSliceSentinel(arena.allocator(), 0), .zig);
 }
@@ -163,11 +163,12 @@ pub fn setName(self: Thread, name: []const u8) SetNameError!void {
         } else {
             var buf: [32]u8 = undefined;
             const path = try std.fmt.bufPrint(&buf, "/proc/self/task/{d}/comm", .{self.getHandle()});

             const file = try std.fs.cwd().openFile(path, .{ .mode = .write_only });
             defer file.close();

-            try file.writer().writeAll(name);
+            var fw = file.writer(&.{});
+            fw.interface.writeAll(name) catch |err| switch (err) {
+                error.WriteFailed => return fw.err.?,
+            };
             return;
         },
         .windows => {
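Note: the generic interface can only report `error.WriteFailed`; the concrete cause is stored on the concrete writer and recovered via `fw.err.?`, which is why the migrated code switches on the error instead of using a plain `try`. Restating the idiom from this hunk:

    fw.interface.writeAll(bytes) catch |err| switch (err) {
        // The vtable layer only says "write failed"; the concrete writer
        // remembers why (e.g. the underlying file-system error).
        error.WriteFailed => return fw.err.?,
    };

The read side below uses the same idiom with `error.ReadFailed` and `fr.err.?`.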
@@ -277,13 +278,13 @@ pub fn getName(self: Thread, buffer_ptr: *[max_name_len:0]u8) GetNameError!?[]co
         } else {
             var buf: [32]u8 = undefined;
             const path = try std.fmt.bufPrint(&buf, "/proc/self/task/{d}/comm", .{self.getHandle()});

             const file = try std.fs.cwd().openFile(path, .{});
             defer file.close();

-            const data_len = try file.reader().readAll(buffer_ptr[0 .. max_name_len + 1]);
-
-            return if (data_len >= 1) buffer[0 .. data_len - 1] else null;
+            var fr = file.reader(&.{});
+            const n = fr.interface.readSliceShort(buffer_ptr[0 .. max_name_len + 1]) catch |err| switch (err) {
+                error.ReadFailed => return fr.err.?,
+            };
+            return if (n == 0) null else buffer[0 .. n - 1];
         },
         .windows => {
             const buf_capacity = @sizeOf(windows.UNICODE_STRING) + (@sizeOf(u16) * max_name_len);
@@ -232,16 +232,15 @@ test "compress/decompress" {
         {
             var original: std.io.Reader = .fixed(data);
             var compressed: Writer = .fixed(&cmp_buf);
-            var compress: Compress = .init(&original, .raw);
-            var compress_br = compress.readable(&.{});
-            const n = try compress_br.readRemaining(&compressed, .{ .level = level });
+            var compress: Compress = .init(&original, &.{}, .{ .container = .raw, .level = level });
+            const n = try compress.reader.streamRemaining(&compressed);
             if (compressed_size == 0) {
                 if (container == .gzip)
                     print("case {d} gzip level {} compressed size: {d}\n", .{ case_no, level, compressed.pos });
-                compressed_size = compressed.pos;
+                compressed_size = compressed.end;
             }
             try testing.expectEqual(compressed_size, n);
-            try testing.expectEqual(compressed_size, compressed.pos);
+            try testing.expectEqual(compressed_size, compressed.end);
         }
         // decompress compressed stream to decompressed stream
         {
@@ -450,12 +449,15 @@ test "gzip header" {
 }

 test "public interface" {
-    const plain_data = [_]u8{ 'H', 'e', 'l', 'l', 'o', ' ', 'w', 'o', 'r', 'l', 'd', 0x0a };
+    const plain_data_buf = [_]u8{ 'H', 'e', 'l', 'l', 'o', ' ', 'w', 'o', 'r', 'l', 'd', 0x0a };

     // deflate final stored block, header + plain (stored) data
     const deflate_block = [_]u8{
         0b0000_0001, 0b0000_1100, 0x00, 0b1111_0011, 0xff, // deflate fixed buffer header len, nlen
-    } ++ plain_data;
+    } ++ plain_data_buf;

+    const plain_data: []const u8 = &plain_data_buf;
+    const gzip_data: []const u8 = &deflate_block;
+
     //// gzip header/footer + deflate block
     //const gzip_data =
@@ -471,23 +473,21 @@ test "public interface" {
     // TODO
     //const gzip = @import("gzip.zig");
     //const zlib = @import("zlib.zig");
     const flate = @This();

     //try testInterface(gzip, &gzip_data, &plain_data);
     //try testInterface(zlib, &zlib_data, &plain_data);
     try testInterface(flate, &deflate_block, &plain_data);
 }

 fn testInterface(comptime pkg: type, gzip_data: []const u8, plain_data: []const u8) !void {
     var buffer1: [64]u8 = undefined;
     var buffer2: [64]u8 = undefined;

+    // TODO These used to be functions, need to migrate the tests
+    const decompress = void;
+    const compress = void;
+    const store = void;
+
     // decompress
     {
         var plain: Writer = .fixed(&buffer2);

         var in: std.io.Reader = .fixed(gzip_data);
-        try pkg.decompress(&in, &plain);
+        try decompress(&in, &plain);
         try testing.expectEqualSlices(u8, plain_data, plain.getWritten());
     }
@@ -497,10 +497,10 @@ fn testInterface(comptime pkg: type, gzip_data: []const u8, plain_data: []const
         var compressed: Writer = .fixed(&buffer1);

         var in: std.io.Reader = .fixed(plain_data);
-        try pkg.compress(&in, &compressed, .{});
+        try compress(&in, &compressed, .{});

         var compressed_br: std.io.Reader = .fixed(&buffer1);
-        try pkg.decompress(&compressed_br, &plain);
+        try decompress(&compressed_br, &plain);
         try testing.expectEqualSlices(u8, plain_data, plain.getWritten());
     }
@@ -510,12 +510,12 @@ fn testInterface(comptime pkg: type, gzip_data: []const u8, plain_data: []const
         var compressed: Writer = .fixed(&buffer1);

         var in: std.io.Reader = .fixed(plain_data);
-        var cmp = try pkg.compressor(&compressed, .{});
+        var cmp = try Compress(&compressed, .{});
         try cmp.compress(&in);
         try cmp.finish();

         var compressed_br: std.io.Reader = .fixed(&buffer1);
-        var dcp = pkg.decompressor(&compressed_br);
+        var dcp = Decompress(&compressed_br);
         try dcp.decompress(&plain);
         try testing.expectEqualSlices(u8, plain_data, plain.getWritten());
     }
@@ -528,10 +528,10 @@ fn testInterface(comptime pkg: type, gzip_data: []const u8, plain_data: []const
         var compressed: Writer = .fixed(&buffer1);

         var in: std.io.Reader = .fixed(plain_data);
-        try pkg.huffman.compress(&in, &compressed);
+        try huffman.compress(&in, &compressed);

         var compressed_br: std.io.Reader = .fixed(&buffer1);
-        try pkg.decompress(&compressed_br, &plain);
+        try decompress(&compressed_br, &plain);
         try testing.expectEqualSlices(u8, plain_data, plain.getWritten());
     }
@@ -541,12 +541,12 @@ fn testInterface(comptime pkg: type, gzip_data: []const u8, plain_data: []const
         var compressed: Writer = .fixed(&buffer1);

         var in: std.io.Reader = .fixed(plain_data);
-        var cmp = try pkg.huffman.compressor(&compressed);
+        var cmp = try huffman.Compressor(&compressed);
         try cmp.compress(&in);
         try cmp.finish();

         var compressed_br: std.io.Reader = .fixed(&buffer1);
-        try pkg.decompress(&compressed_br, &plain);
+        try decompress(&compressed_br, &plain);
         try testing.expectEqualSlices(u8, plain_data, plain.getWritten());
     }
 }
@@ -559,10 +559,10 @@ fn testInterface(comptime pkg: type, gzip_data: []const u8, plain_data: []const
         var compressed: Writer = .fixed(&buffer1);

         var in: std.io.Reader = .fixed(plain_data);
-        try pkg.store.compress(&in, &compressed);
+        try store.compress(&in, &compressed);

         var compressed_br: std.io.Reader = .fixed(&buffer1);
-        try pkg.decompress(&compressed_br, &plain);
+        try decompress(&compressed_br, &plain);
         try testing.expectEqualSlices(u8, plain_data, plain.getWritten());
     }
@@ -572,12 +572,12 @@ fn testInterface(comptime pkg: type, gzip_data: []const u8, plain_data: []const
         var compressed: Writer = .fixed(&buffer1);

         var in: std.io.Reader = .fixed(plain_data);
-        var cmp = try pkg.store.compressor(&compressed);
+        var cmp = try store.compressor(&compressed);
         try cmp.compress(&in);
         try cmp.finish();

         var compressed_br: std.io.Reader = .fixed(&buffer1);
-        try pkg.decompress(&compressed_br, &plain);
+        try decompress(&compressed_br, &plain);
         try testing.expectEqualSlices(u8, plain_data, plain.getWritten());
     }
 }
@@ -48,6 +48,7 @@ const expect = testing.expect;
 const mem = std.mem;
 const math = std.math;
 const Writer = std.io.Writer;
+const Reader = std.io.Reader;

 const Compress = @This();
 const Token = @import("Token.zig");
@@ -64,20 +65,13 @@ input: *std.io.Reader,
 block_writer: BlockWriter,
 level: LevelArgs,
 hasher: Container.Hasher,
+reader: std.io.Reader,

 // Match and literal at the previous position.
 // Used for lazy match finding in processWindow.
 prev_match: ?Token = null,
 prev_literal: ?u8 = null,

-pub fn reader(c: *Compress, buffer: []u8) std.io.Reader {
-    return .{
-        .context = c,
-        .vtable = .{ .read = read },
-        .buffer = buffer,
-    };
-}
-
 pub const Options = struct {
     level: Level = .default,
     container: Container = .raw,
@@ -125,13 +119,17 @@ const LevelArgs = struct {
     }
 };

-pub fn init(input: *std.io.Reader, options: Options) Compress {
+pub fn init(input: *std.io.Reader, buffer: []u8, options: Options) Compress {
     return .{
         .input = input,
         .block_writer = undefined,
         .level = .get(options.level),
         .hasher = .init(options.container),
         .state = .header,
+        .reader = .{
+            .buffer = buffer,
+            .stream = stream,
+        },
     };
 }

@@ -758,16 +756,12 @@ fn byFreq(context: void, a: LiteralNode, b: LiteralNode) bool {
     return a.freq < b.freq;
 }

-fn read(
-    context: ?*anyopaque,
-    bw: *Writer,
-    limit: std.io.Limit,
-) std.io.Reader.StreamError!usize {
-    const c: *Compress = @ptrCast(@alignCast(context));
+fn stream(r: *Reader, w: *Writer, limit: std.io.Limit) Reader.StreamError!usize {
+    const c: *Compress = @fieldParentPtr("reader", r);
     switch (c.state) {
         .header => |i| {
             const header = c.hasher.container().header();
-            const n = try bw.write(header[i..]);
+            const n = try w.write(header[i..]);
             if (header.len - i - n == 0) {
                 c.state = .middle;
             } else {
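Note: this is the structural half of the Compress change: instead of building a `std.io.Reader` around an opaque `context` pointer and a `read` callback, the stream type embeds a `std.io.Reader` as a field (the `reader: std.io.Reader` field added above, wired up in `init` with `.buffer` and `.stream`), and the `stream` callback recovers its parent with `@fieldParentPtr`. A reduced sketch with a hypothetical `MyStream` type, mirroring the signatures used in this commit:

    const MyStream = struct {
        reader: std.io.Reader,

        fn stream(r: *std.io.Reader, w: *std.io.Writer, limit: std.io.Limit) std.io.Reader.StreamError!usize {
            // Recover the containing stream object from its embedded interface field.
            const s: *MyStream = @fieldParentPtr("reader", r);
            _ = s; // a real implementation reads from its own state here
            _ = limit; // ... and caps its output at `limit`
            return w.write("hello"); // toy payload; returns the byte count moved
        }
    };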
@@ -788,18 +782,18 @@ fn read(
             const history_plus_lookahead_len = flate.history_len + min_lookahead;
             if (buffer_contents.len < history_plus_lookahead_len) return 0;
             const lookahead = buffer_contents[flate.history_len..];
-            const start = bw.count;
-            const n = try c.tokenizeSlice(bw, limit, lookahead) catch |err| switch (err) {
+            const start = w.count;
+            const n = try c.tokenizeSlice(w, limit, lookahead) catch |err| switch (err) {
                 error.WriteFailed => return error.WriteFailed,
             };
             c.hasher.update(lookahead[0..n]);
             c.input.toss(n);
-            return bw.count - start;
+            return w.count - start;
         },
         .final => {
             const buffer_contents = c.input.buffered();
-            const start = bw.count;
-            const n = c.tokenizeSlice(bw, limit, buffer_contents) catch |err| switch (err) {
+            const start = w.count;
+            const n = c.tokenizeSlice(w, limit, buffer_contents) catch |err| switch (err) {
                 error.WriteFailed => return error.WriteFailed,
             };
             if (buffer_contents.len - n == 0) {
@@ -840,12 +834,12 @@ fn read(
                     },
                 }
             }
-            return bw.count - start;
+            return w.count - start;
         },
         .ended => return error.EndOfStream,
         .footer => |i| {
             const remaining = c.footer_buffer[i..];
-            const n = try bw.write(limit.slice(remaining));
+            const n = try w.write(limit.slice(remaining));
             c.state = if (n == remaining) .ended else .{ .footer = i - n };
             return n;
         },
@@ -708,7 +708,7 @@ test "decompress" {

         var decompress: Decompress = .init(&fb, .raw);
         var decompress_br = decompress.readable(&.{});
-        _ = try decompress_br.readRemaining(&aw.interface);
+        _ = try decompress_br.streamRemaining(&aw.interface);
         try testing.expectEqualStrings(c.out, aw.getWritten());
     }
 }
@@ -767,7 +767,7 @@ test "gzip decompress" {

         var decompress: Decompress = .init(&fb, .gzip);
         var decompress_br = decompress.readable(&.{});
-        _ = try decompress_br.readRemaining(&aw.interface);
+        _ = try decompress_br.streamRemaining(&aw.interface);
         try testing.expectEqualStrings(c.out, aw.getWritten());
     }
 }
@@ -795,7 +795,7 @@ test "zlib decompress" {

         var decompress: Decompress = .init(&fb, .zlib);
         var decompress_br = decompress.readable(&.{});
-        _ = try decompress_br.readRemaining(&aw.interface);
+        _ = try decompress_br.streamRemaining(&aw.interface);
         try testing.expectEqualStrings(c.out, aw.getWritten());
     }
 }
@@ -857,10 +857,10 @@ test "fuzzing tests" {
         var decompress: Decompress = .init(&in, .raw);
         var decompress_br = decompress.readable(&.{});
         if (c.err) |expected_err| {
-            try testing.expectError(error.ReadFailed, decompress_br.readRemaining(&aw.interface));
+            try testing.expectError(error.ReadFailed, decompress_br.streamRemaining(&aw.interface));
             try testing.expectError(expected_err, decompress.read_err.?);
         } else {
-            _ = try decompress_br.readRemaining(&aw.interface);
+            _ = try decompress_br.streamRemaining(&aw.interface);
             try testing.expectEqualStrings(c.out, aw.getWritten());
         }
     }
@@ -876,7 +876,7 @@ test "bug 18966" {

     var decompress: Decompress = .init(&in, .gzip);
     var decompress_br = decompress.readable(&.{});
-    _ = try decompress_br.readRemaining(&aw.interface);
+    _ = try decompress_br.streamRemaining(&aw.interface);
     try testing.expectEqualStrings(expect, aw.getWritten());
 }
@@ -2030,14 +2030,14 @@ pub fn readFileIntoArrayList(
     var file = try dir.openFile(file_path, .{});
     defer file.close();

-    var file_reader = file.reader();
+    var file_reader = file.reader(&.{});

     // Apply size hint by adjusting the array list's capacity.
     if (size_hint) |size| {
         try list.ensureUnusedCapacity(gpa, size);
         file_reader.size = size;
     } else if (file_reader.getSize()) |size| {
-        // If the file size doesn't fit a usize it'll be certainly exceed the limit.
+        // If the file size doesn't fit a usize it will certainly exceed the limit.
         try list.ensureUnusedCapacity(gpa, std.math.cast(usize, size) orelse return error.StreamTooLong);
     } else |err| switch (err) {
         // Ignore most errors; size hint is only an optimization.
@@ -2045,7 +2045,7 @@ pub fn readFileIntoArrayList(
         else => |e| return e,
     }

-    file_reader.interface().readRemainingArrayList(gpa, alignment, list, limit, 128) catch |err| switch (err) {
+    file_reader.interface.appendRemaining(gpa, alignment, list, limit) catch |err| switch (err) {
        error.OutOfMemory => return error.OutOfMemory,
        error.StreamTooLong => return error.StreamTooLong,
        error.ReadFailed => return file_reader.err.?,
@@ -170,6 +170,12 @@ pub fn defaultDiscard(r: *Reader, limit: Limit) Error!usize {
     return n;
 }

+/// "Pump" exactly `n` bytes from the reader to the writer.
+pub fn streamExact(r: *Reader, w: *Writer, n: usize) StreamError!void {
+    var remaining = n;
+    while (remaining != 0) remaining -= try r.stream(w, .limited(remaining));
+}
+
 /// "Pump" data from the reader to the writer, handling `error.EndOfStream` as
 /// a success case.
 ///
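Note: `streamExact` (added above) loops `r.stream(w, .limited(remaining))` until exactly `n` bytes have been pumped, since each `stream` call may move fewer bytes than asked for. A hypothetical usage sketch against fixed in-memory endpoints, using the API as it appears in this commit:

    var in: std.io.Reader = .fixed("hello world");
    var buf: [5]u8 = undefined;
    var out: std.io.Writer = .fixed(&buf);
    try in.streamExact(&out, 5); // pumps exactly "hello"
    try std.testing.expectEqualStrings("hello", out.getWritten());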
@@ -381,15 +387,6 @@ pub fn readVecAll(r: *Reader, data: [][]u8) Error!void {
     }
 }

-/// "Pump" data from the reader to the writer.
-pub fn readAll(r: *Reader, w: *Writer, limit: Limit) StreamError!void {
-    var remaining = limit;
-    while (remaining.nonzero()) {
-        const n = try r.stream(w, remaining);
-        remaining = remaining.subtract(n).?;
-    }
-}
-
 /// Returns the next `len` bytes from the stream, filling the buffer as
 /// necessary.
 ///
@@ -381,8 +381,8 @@ pub const Iterator = struct {
         return n;
     }

-    pub fn readRemaining(file: *File, out: *std.io.Writer) std.io.Reader.StreamRemainingError!void {
-        return file.reader().readRemaining(out);
+    pub fn streamRemaining(file: *File, out: *std.io.Writer) std.io.Reader.StreamRemainingError!usize {
+        return file.reader().streamRemaining(out);
     }
 };
@@ -305,8 +305,7 @@ pub const Iterator = struct {
         if (locator_end_offset > stream_len)
             return error.ZipTruncated;
         try input.seekTo(stream_len - locator_end_offset);
-        var br = input.interface().unbuffered();
-        const locator = br.takeStructEndian(EndLocator64, .little) catch |err| switch (err) {
+        const locator = input.interface.takeStructEndian(EndLocator64, .little) catch |err| switch (err) {
            error.ReadFailed => return input.err.?,
            error.EndOfStream => return error.EndOfStream,
        };
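Note: `takeStructEndian(T, .little)` reads a fixed-layout struct directly off the `std.io.Reader` interface in the given byte order, which is why the separate buffered-reader wrapper can be dropped throughout the zip iterator. A toy sketch with a hypothetical fixed-layout header (not a real zip structure):

    const ToyHeader = extern struct { magic: u32, count: u32 };
    var r: std.io.Reader = .fixed(&.{ 0x50, 0x4b, 0x05, 0x06, 0x02, 0x00, 0x00, 0x00 });
    const h = try r.takeStructEndian(ToyHeader, .little);
    // h.magic == 0x06054b50 and h.count == 2 regardless of host endianness.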
@@ -319,7 +318,7 @@ pub const Iterator = struct {

         try input.seekTo(locator.record_file_offset);

-        const record64 = br.takeStructEndian(EndRecord64, .little) catch |err| switch (err) {
+        const record64 = input.interface.takeStructEndian(EndRecord64, .little) catch |err| switch (err) {
            error.ReadFailed => return input.err.?,
            error.EndOfStream => return error.EndOfStream,
        };
@@ -375,8 +374,7 @@ pub const Iterator = struct {
         const header_zip_offset = self.cd_zip_offset + self.cd_record_offset;
         const input = self.input;
         try input.seekTo(header_zip_offset);
-        var br = input.interface().unbuffered();
-        const header = br.takeStructEndian(CentralDirectoryFileHeader, .little) catch |err| switch (err) {
+        const header = input.interface.takeStructEndian(CentralDirectoryFileHeader, .little) catch |err| switch (err) {
            error.ReadFailed => return input.err.?,
            error.EndOfStream => return error.EndOfStream,
        };
@@ -407,7 +405,7 @@ pub const Iterator = struct {
         const extra = extra_buf[0..header.extra_len];

         try input.seekTo(header_zip_offset + @sizeOf(CentralDirectoryFileHeader) + header.filename_len);
-        br.readSlice(extra) catch |err| switch (err) {
+        input.interface.readSlice(extra) catch |err| switch (err) {
            error.ReadFailed => return input.err.?,
            error.EndOfStream => return error.EndOfStream,
        };
@@ -472,16 +470,13 @@ pub const Iterator = struct {
         const filename = filename_buf[0..self.filename_len];
         {
             try stream.seekTo(self.header_zip_offset + @sizeOf(CentralDirectoryFileHeader));
-            var stream_br = stream.readable(&.{});
-            try stream_br.readSlice(filename);
+            try stream.interface.readSlice(filename);
         }

         const local_data_header_offset: u64 = local_data_header_offset: {
             const local_header = blk: {
                 try stream.seekTo(self.file_offset);
-                var read_buffer: [@sizeOf(LocalFileHeader)]u8 = undefined;
-                var stream_br = stream.readable(&read_buffer);
-                break :blk try stream_br.takeStructEndian(LocalFileHeader, .little);
+                break :blk try stream.interface.takeStructEndian(LocalFileHeader, .little);
             };
             if (!std.mem.eql(u8, &local_header.signature, &local_file_header_sig))
                 return error.ZipBadFileOffset;
@@ -507,8 +502,7 @@ pub const Iterator = struct {

         {
             try stream.seekTo(self.file_offset + @sizeOf(LocalFileHeader) + local_header.filename_len);
-            var stream_br = stream.readable(&.{});
-            try stream_br.readSlice(extra);
+            try stream.interface.readSlice(extra);
         }

         var extra_offset: usize = 0;
@@ -577,7 +571,7 @@ pub const Iterator = struct {
             @as(u64, @sizeOf(LocalFileHeader)) +
             local_data_header_offset;
         try stream.seekTo(local_data_file_offset);
-        var limited_file_reader = stream.interface().limited(.limited(self.compressed_size));
+        var limited_file_reader = stream.interface.limited(.limited(self.compressed_size));
         var file_read_buffer: [1000]u8 = undefined;
         var decompress_read_buffer: [1000]u8 = undefined;
         var limited_br = limited_file_reader.reader().buffered(&file_read_buffer);
@@ -63,9 +63,7 @@ fn makeZipWithStore(
     options: WriteZipOptions,
     store: []FileStore,
 ) !void {
-    var buffer: [200]u8 = undefined;
-    var bw = file_writer.writer(&buffer);
-    try writeZip(&bw, files, store, options);
+    try writeZip(&file_writer.interface, files, store, options);
 }

 const WriteZipOptions = struct {
@@ -201,7 +199,7 @@ const Zipper = struct {
         var br: std.io.Reader = .fixed(opt.content);
         var compress: std.compress.flate.Compress = .init(&br, .{});
         var compress_br = compress.reader(&.{});
-        const n = try compress_br.readRemaining(writer);
+        const n = try compress_br.streamRemaining(writer);
         assert(br.seek == opt.content.len);
         try testing.expectEqual(n, writer.count - offset);
         compressed_size = @intCast(n);
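Note: the `readRemaining` -> `streamRemaining` renames throughout this commit also change the return type from `!void` to `!usize` (see the `Iterator` hunk above), so call sites like this one can use the returned byte count directly as the compressed size. A minimal sketch of the counting behaviour, using the fixed in-memory endpoints seen elsewhere in this commit:

    var in: std.io.Reader = .fixed("abc");
    var buf: [8]u8 = undefined;
    var out: std.io.Writer = .fixed(&buf);
    const n = try in.streamRemaining(&out); // pumps until EndOfStream
    // n == 3 and out.getWritten() is "abc"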