mirror of
https://github.com/ziglang/zig.git
synced 2025-12-06 06:13:07 +00:00
zstandard: fix division by zero when using RingBuffer
This change fixes some division-by-zero bugs introduced by the optimized ring buffer read/write functions in d8c067966. There are edge cases where decompression can use a length-zero ring buffer, as the size of the ring buffer used is exactly the window size specified by a Zstandard frame, and this can be zero. Switching away from loops to mem copies means that we need to ensure ring buffers do not have length zero when attempting to read/write from them.
This commit is contained in:
parent
9ad03b628f
commit
138a35df8f
@ -70,13 +70,11 @@ pub fn DecompressStream(
|
||||
self.state = .NewFrame;
|
||||
},
|
||||
.zstandard => |header| {
|
||||
const frame_context = context: {
|
||||
break :context try decompress.FrameContext.init(
|
||||
header,
|
||||
options.window_size_max,
|
||||
options.verify_checksum,
|
||||
);
|
||||
};
|
||||
const frame_context = try decompress.FrameContext.init(
|
||||
header,
|
||||
options.window_size_max,
|
||||
options.verify_checksum,
|
||||
);
|
||||
|
||||
const literal_fse_buffer = try self.allocator.alloc(
|
||||
types.compressed_block.Table.Fse,
|
||||
@ -219,7 +217,9 @@ pub fn DecompressStream(
|
||||
}
|
||||
|
||||
const size = @min(self.buffer.len(), buffer.len);
|
||||
self.buffer.readFirstAssumeLength(buffer, size);
|
||||
if (size > 0) {
|
||||
self.buffer.readFirstAssumeLength(buffer, size);
|
||||
}
|
||||
if (self.state == .LastBlock and self.buffer.len() == 0) {
|
||||
self.state = .NewFrame;
|
||||
self.allocator.free(self.literal_fse_buffer);
|
||||
@ -282,3 +282,48 @@ test "zstandard decompression" {
|
||||
try testReader(compressed3, uncompressed);
|
||||
try testReader(compressed19, uncompressed);
|
||||
}
|
||||
|
||||
/// Asserts that `input` (a complete Zstandard frame) decompresses to exactly
/// `expected`, exercising all three public decompression entry points:
/// the allocating one-shot API, the caller-buffer one-shot API, and the
/// streaming reader API.
/// NOTE(review): `decompress` / `decompressStream` are siblings in this file,
/// visible only in the surrounding diff context.
fn expectEqualDecoded(expected: []const u8, input: []const u8) !void {
    // std.testing.allocator fails the test on leaks, so every allocation
    // below is paired with a defer-free.
    const allocator = std.testing.allocator;

    {
        // One-shot allocating decode; 1 << 23 is the window_size_max limit
        // passed through to frame validation.
        const result = try decompress.decodeAlloc(allocator, input, false, 1 << 23);
        defer allocator.free(result);
        try std.testing.expectEqualStrings(expected, result);
    }

    {
        // One-shot decode into a caller-provided buffer. 2x the expected
        // length gives headroom; `decode` reports the bytes actually written.
        // (A zero-length alloc is valid, covering the empty-output case.)
        var buffer = try allocator.alloc(u8, 2 * expected.len);
        defer allocator.free(buffer);

        const size = try decompress.decode(buffer, input, false);
        try std.testing.expectEqualStrings(expected, buffer[0..size]);
    }

    {
        // Streaming decode: wrap the input in a fixed buffer stream and read
        // everything back through the DecompressStream reader.
        var in_stream = std.io.fixedBufferStream(input);
        var stream = decompressStream(allocator, in_stream.reader());
        defer stream.deinit();

        const result = try stream.reader().readAllAlloc(allocator, std.math.maxInt(usize));
        defer allocator.free(result);

        try std.testing.expectEqualStrings(expected, result);
    }
}
|
||||
|
||||
// Regression test for the division-by-zero fixed by this commit: a frame
// whose content size (and therefore ring buffer / block size) is zero must
// decode to the empty string rather than crash in the RingBuffer mem-copy
// fast paths. Covers both raw and RLE zero-sized last blocks.
test "zero sized block" {
    const input_raw =
        "\x28\xb5\x2f\xfd" ++ // zstandard frame magic number
        "\x20\x00" ++ // frame header: only single_segment_flag set, frame_content_size zero
        "\x01\x00\x00"; // block header with: last_block set, block_type raw, block_size zero

    const input_rle =
        "\x28\xb5\x2f\xfd" ++ // zstandard frame magic number
        "\x20\x00" ++ // frame header: only single_segment_flag set, frame_content_size zero
        "\x03\x00\x00" ++ // block header with: last_block set, block_type rle, block_size zero
        "\xaa"; // block_content

    // Both inputs must round-trip to an empty output without erroring.
    try expectEqualDecoded("", input_raw);
    try expectEqualDecoded("", input_rle);
}
|
||||
|
||||
@ -713,10 +713,14 @@ pub fn decodeBlockRingBuffer(
|
||||
switch (block_header.block_type) {
|
||||
.raw => {
|
||||
if (src.len < block_size) return error.MalformedBlockSize;
|
||||
const data = src[0..block_size];
|
||||
dest.writeSliceAssumeCapacity(data);
|
||||
consumed_count.* += block_size;
|
||||
decode_state.written_count += block_size;
|
||||
// dest may have length zero if block_size == 0, causing division by zero in
|
||||
// writeSliceAssumeCapacity()
|
||||
if (block_size > 0) {
|
||||
const data = src[0..block_size];
|
||||
dest.writeSliceAssumeCapacity(data);
|
||||
consumed_count.* += block_size;
|
||||
decode_state.written_count += block_size;
|
||||
}
|
||||
return block_size;
|
||||
},
|
||||
.rle => {
|
||||
@ -934,7 +938,7 @@ pub fn decodeLiteralsSectionSlice(
|
||||
switch (header.block_type) {
|
||||
.raw => {
|
||||
if (src.len < bytes_read + header.regenerated_size) return error.MalformedLiteralsSection;
|
||||
const stream = src[bytes_read .. bytes_read + header.regenerated_size];
|
||||
const stream = src[bytes_read..][0..header.regenerated_size];
|
||||
consumed_count.* += header.regenerated_size + bytes_read;
|
||||
return LiteralsSection{
|
||||
.header = header,
|
||||
@ -944,7 +948,7 @@ pub fn decodeLiteralsSectionSlice(
|
||||
},
|
||||
.rle => {
|
||||
if (src.len < bytes_read + 1) return error.MalformedLiteralsSection;
|
||||
const stream = src[bytes_read .. bytes_read + 1];
|
||||
const stream = src[bytes_read..][0..1];
|
||||
consumed_count.* += 1 + bytes_read;
|
||||
return LiteralsSection{
|
||||
.header = header,
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user