Repository: https://github.com/ziglang/zig.git
Merge pull request #24699 from ziglang/bounded

remove RingBuffer; remove BoundedArray; use `@memmove`

Commit: d8cecffe31
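
The recurring migration in this diff replaces `std.BoundedArray(T, capacity)` with a plain `[capacity]T` buffer wrapped by `std.ArrayListUnmanaged(T).initBuffer`, using the new `*Bounded` methods that return `error.OutOfMemory` once the fixed capacity is exhausted. A minimal sketch of that pattern, not taken from the diff itself (element type and values are invented):

    const std = @import("std");

    test "fixed-buffer ArrayListUnmanaged in place of BoundedArray" {
        // The buffer's length plays the role of BoundedArray's comptime capacity.
        var buffer: [4]u32 = undefined;
        var list = std.ArrayListUnmanaged(u32).initBuffer(&buffer);

        // The *Bounded methods fail with error.OutOfMemory instead of reallocating.
        try list.appendBounded(1);
        try list.appendSliceBounded(&.{ 2, 3 });
        try std.testing.expectEqualSlices(u32, &.{ 1, 2, 3 }, list.items);
    }

The same shape recurs in the language-reference example, the Markdown parser, the reader, and the base64 changes below.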
@@ -8,20 +8,22 @@ const Instruction = enum {
 };
 
 fn evaluate(initial_stack: []const i32, code: []const Instruction) !i32 {
-    var stack = try std.BoundedArray(i32, 8).fromSlice(initial_stack);
+    var buffer: [8]i32 = undefined;
+    var stack = std.ArrayListUnmanaged(i32).initBuffer(&buffer);
+    try stack.appendSliceBounded(initial_stack);
     var ip: usize = 0;
 
     return vm: switch (code[ip]) {
         // Because all code after `continue` is unreachable, this branch does
         // not provide a result.
         .add => {
-            try stack.append(stack.pop().? + stack.pop().?);
+            try stack.appendBounded(stack.pop().? + stack.pop().?);
 
             ip += 1;
             continue :vm code[ip];
         },
         .mul => {
-            try stack.append(stack.pop().? * stack.pop().?);
+            try stack.appendBounded(stack.pop().? * stack.pop().?);
 
             ip += 1;
             continue :vm code[ip];
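
One observable consequence of the change above: an oversized initial stack now fails with `error.OutOfMemory` from `appendSliceBounded`, where `BoundedArray.fromSlice` previously reported `error.Overflow`. A hedged sketch of that behavior (the test is illustrative, assuming the example's `std` import and `evaluate` are in scope):

    test "evaluate rejects an initial stack larger than its fixed buffer" {
        const too_big = [_]i32{0} ** 9; // the buffer above holds only 8 elements
        try std.testing.expectError(error.OutOfMemory, evaluate(&too_big, &.{}));
    }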
@@ -29,13 +29,14 @@ const Node = Document.Node;
 const ExtraIndex = Document.ExtraIndex;
 const ExtraData = Document.ExtraData;
 const StringIndex = Document.StringIndex;
+const ArrayList = std.ArrayListUnmanaged;
 
 nodes: Node.List = .{},
-extra: std.ArrayListUnmanaged(u32) = .empty,
-scratch_extra: std.ArrayListUnmanaged(u32) = .empty,
-string_bytes: std.ArrayListUnmanaged(u8) = .empty,
-scratch_string: std.ArrayListUnmanaged(u8) = .empty,
-pending_blocks: std.ArrayListUnmanaged(Block) = .empty,
+extra: ArrayList(u32) = .empty,
+scratch_extra: ArrayList(u32) = .empty,
+string_bytes: ArrayList(u8) = .empty,
+scratch_string: ArrayList(u8) = .empty,
+pending_blocks: ArrayList(Block) = .empty,
 allocator: Allocator,
 
 const Parser = @This();
@@ -86,7 +87,8 @@ const Block = struct {
             continuation_indent: usize,
         },
         table: struct {
-            column_alignments: std.BoundedArray(Node.TableCellAlignment, max_table_columns) = .{},
+            column_alignments_buffer: [max_table_columns]Node.TableCellAlignment,
+            column_alignments_len: usize,
         },
         heading: struct {
             /// Between 1 and 6, inclusive.
@@ -354,7 +356,8 @@ const BlockStart = struct {
             continuation_indent: usize,
         },
         table_row: struct {
-            cells: std.BoundedArray([]const u8, max_table_columns),
+            cells_buffer: [max_table_columns][]const u8,
+            cells_len: usize,
         },
         heading: struct {
             /// Between 1 and 6, inclusive.
@@ -422,7 +425,8 @@ fn appendBlockStart(p: *Parser, block_start: BlockStart) !void {
             try p.pending_blocks.append(p.allocator, .{
                 .tag = .table,
                 .data = .{ .table = .{
-                    .column_alignments = .{},
+                    .column_alignments_buffer = undefined,
+                    .column_alignments_len = 0,
                 } },
                 .string_start = p.scratch_string.items.len,
                 .extra_start = p.scratch_extra.items.len,
@@ -431,15 +435,19 @@ fn appendBlockStart(p: *Parser, block_start: BlockStart) !void {
 
             const current_row = p.scratch_extra.items.len - p.pending_blocks.getLast().extra_start;
             if (current_row <= 1) {
-                if (parseTableHeaderDelimiter(block_start.data.table_row.cells)) |alignments| {
-                    p.pending_blocks.items[p.pending_blocks.items.len - 1].data.table.column_alignments = alignments;
+                var buffer: [max_table_columns]Node.TableCellAlignment = undefined;
+                const table_row = &block_start.data.table_row;
+                if (parseTableHeaderDelimiter(table_row.cells_buffer[0..table_row.cells_len], &buffer)) |alignments| {
+                    const table = &p.pending_blocks.items[p.pending_blocks.items.len - 1].data.table;
+                    @memcpy(table.column_alignments_buffer[0..alignments.len], alignments);
+                    table.column_alignments_len = alignments.len;
                     if (current_row == 1) {
                         // We need to go back and mark the header row and its column
                         // alignments.
                         const datas = p.nodes.items(.data);
                         const header_data = datas[p.scratch_extra.getLast()];
                         for (p.extraChildren(header_data.container.children), 0..) |header_cell, i| {
-                            const alignment = if (i < alignments.len) alignments.buffer[i] else .unset;
+                            const alignment = if (i < alignments.len) alignments[i] else .unset;
                             const cell_data = &datas[@intFromEnum(header_cell)].table_cell;
                             cell_data.info.alignment = alignment;
                             cell_data.info.header = true;
@@ -480,8 +488,10 @@ fn appendBlockStart(p: *Parser, block_start: BlockStart) !void {
             // available in the BlockStart. We can immediately parse and append
             // these children now.
             const containing_table = p.pending_blocks.items[p.pending_blocks.items.len - 2];
-            const column_alignments = containing_table.data.table.column_alignments.slice();
-            for (block_start.data.table_row.cells.slice(), 0..) |cell_content, i| {
+            const table = &containing_table.data.table;
+            const column_alignments = table.column_alignments_buffer[0..table.column_alignments_len];
+            const table_row = &block_start.data.table_row;
+            for (table_row.cells_buffer[0..table_row.cells_len], 0..) |cell_content, i| {
                 const cell_children = try p.parseInlines(cell_content);
                 const alignment = if (i < column_alignments.len) column_alignments[i] else .unset;
                 const cell = try p.addNode(.{
@@ -523,7 +533,8 @@ fn startBlock(p: *Parser, line: []const u8) !?BlockStart {
         return .{
             .tag = .table_row,
             .data = .{ .table_row = .{
-                .cells = table_row.cells,
+                .cells_buffer = table_row.cells_buffer,
+                .cells_len = table_row.cells_len,
             } },
             .rest = "",
         };
@@ -606,7 +617,8 @@ fn startListItem(unindented_line: []const u8) ?ListItemStart {
 }
 
 const TableRowStart = struct {
-    cells: std.BoundedArray([]const u8, max_table_columns),
+    cells_buffer: [max_table_columns][]const u8,
+    cells_len: usize,
 };
 
 fn startTableRow(unindented_line: []const u8) ?TableRowStart {
@@ -615,7 +627,8 @@ fn startTableRow(unindented_line: []const u8) ?TableRowStart {
         mem.endsWith(u8, unindented_line, "\\|") or
         !mem.endsWith(u8, unindented_line, "|")) return null;
 
-    var cells: std.BoundedArray([]const u8, max_table_columns) = .{};
+    var cells_buffer: [max_table_columns][]const u8 = undefined;
+    var cells: ArrayList([]const u8) = .initBuffer(&cells_buffer);
     const table_row_content = unindented_line[1 .. unindented_line.len - 1];
     var cell_start: usize = 0;
     var i: usize = 0;
@@ -623,7 +636,7 @@ fn startTableRow(unindented_line: []const u8) ?TableRowStart {
         switch (table_row_content[i]) {
            '\\' => i += 1,
            '|' => {
-                cells.append(table_row_content[cell_start..i]) catch return null;
+                cells.appendBounded(table_row_content[cell_start..i]) catch return null;
                cell_start = i + 1;
            },
            '`' => {
@@ -641,20 +654,21 @@ fn startTableRow(unindented_line: []const u8) ?TableRowStart {
             else => {},
         }
     }
-    cells.append(table_row_content[cell_start..]) catch return null;
+    cells.appendBounded(table_row_content[cell_start..]) catch return null;
 
-    return .{ .cells = cells };
+    return .{ .cells_buffer = cells_buffer, .cells_len = cells.items.len };
 }
 
 fn parseTableHeaderDelimiter(
-    row_cells: std.BoundedArray([]const u8, max_table_columns),
-) ?std.BoundedArray(Node.TableCellAlignment, max_table_columns) {
-    var alignments: std.BoundedArray(Node.TableCellAlignment, max_table_columns) = .{};
-    for (row_cells.slice()) |content| {
+    row_cells: []const []const u8,
+    buffer: []Node.TableCellAlignment,
+) ?[]Node.TableCellAlignment {
+    var alignments: ArrayList(Node.TableCellAlignment) = .initBuffer(buffer);
+    for (row_cells) |content| {
         const alignment = parseTableHeaderDelimiterCell(content) orelse return null;
         alignments.appendAssumeCapacity(alignment);
     }
-    return alignments;
+    return alignments.items;
 }
 
 fn parseTableHeaderDelimiterCell(content: []const u8) ?Node.TableCellAlignment {
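
The new `parseTableHeaderDelimiter` signature moves storage to the caller: it fills a caller-owned buffer and returns a slice of it (or `null` on a malformed row). A rough sketch of the calling convention inside this file; the delimiter cells below are invented for illustration and may not match the exact syntax the cell parser accepts:

    fn exampleHeaderDelimiterCall() void {
        var buffer: [max_table_columns]Node.TableCellAlignment = undefined;
        const cells = [_][]const u8{ ":--", ":-:", "--:" };
        if (parseTableHeaderDelimiter(&cells, &buffer)) |alignments| {
            // `alignments` aliases `buffer`, so it is only valid while `buffer` lives.
            std.debug.assert(alignments.len == cells.len);
        }
    }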
@@ -928,8 +942,8 @@ const InlineParser = struct {
     parent: *Parser,
     content: []const u8,
     pos: usize = 0,
-    pending_inlines: std.ArrayListUnmanaged(PendingInline) = .empty,
-    completed_inlines: std.ArrayListUnmanaged(CompletedInline) = .empty,
+    pending_inlines: ArrayList(PendingInline) = .empty,
+    completed_inlines: ArrayList(CompletedInline) = .empty,
 
     const PendingInline = struct {
         tag: Tag,
@@ -231,21 +231,6 @@ pub fn GenericReader(
             return @errorCast(self.any().readBytesNoEof(num_bytes));
         }
 
-        pub inline fn readIntoBoundedBytes(
-            self: Self,
-            comptime num_bytes: usize,
-            bounded: *std.BoundedArray(u8, num_bytes),
-        ) Error!void {
-            return @errorCast(self.any().readIntoBoundedBytes(num_bytes, bounded));
-        }
-
-        pub inline fn readBoundedBytes(
-            self: Self,
-            comptime num_bytes: usize,
-        ) Error!std.BoundedArray(u8, num_bytes) {
-            return @errorCast(self.any().readBoundedBytes(num_bytes));
-        }
-
         pub inline fn readInt(self: Self, comptime T: type, endian: std.builtin.Endian) NoEofError!T {
             return @errorCast(self.any().readInt(T, endian));
         }
@@ -249,33 +249,6 @@ pub fn readBytesNoEof(self: Self, comptime num_bytes: usize) anyerror![num_bytes
     return bytes;
 }
 
-/// Reads bytes until `bounded.len` is equal to `num_bytes`,
-/// or the stream ends.
-///
-/// * it is assumed that `num_bytes` will not exceed `bounded.capacity()`
-pub fn readIntoBoundedBytes(
-    self: Self,
-    comptime num_bytes: usize,
-    bounded: *std.BoundedArray(u8, num_bytes),
-) anyerror!void {
-    while (bounded.len < num_bytes) {
-        // get at most the number of bytes free in the bounded array
-        const bytes_read = try self.read(bounded.unusedCapacitySlice());
-        if (bytes_read == 0) return;
-
-        // bytes_read will never be larger than @TypeOf(bounded.len)
-        // due to `self.read` being bounded by `bounded.unusedCapacitySlice()`
-        bounded.len += @as(@TypeOf(bounded.len), @intCast(bytes_read));
-    }
-}
-
-/// Reads at most `num_bytes` and returns as a bounded array.
-pub fn readBoundedBytes(self: Self, comptime num_bytes: usize) anyerror!std.BoundedArray(u8, num_bytes) {
-    var result = std.BoundedArray(u8, num_bytes){};
-    try self.readIntoBoundedBytes(num_bytes, &result);
-    return result;
-}
-
 pub inline fn readInt(self: Self, comptime T: type, endian: std.builtin.Endian) anyerror!T {
     const bytes = try self.readBytesNoEof(@divExact(@typeInfo(T).int.bits, 8));
     return mem.readInt(T, &bytes, endian);
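
With `readIntoBoundedBytes` and `readBoundedBytes` removed, the same effect is available with a plain array and the remaining reader API. A hedged sketch, assuming `readAll` keeps its current behavior of reading until the buffer is full or the stream ends:

    const std = @import("std");

    test "read at most N bytes into a fixed array" {
        var fis = std.io.fixedBufferStream("abcdefg");
        const reader = fis.reader();

        var buf: [16]u8 = undefined;
        const n = try reader.readAll(&buf); // stops early at end of stream
        try std.testing.expectEqualStrings("abcdefg", buf[0..n]);
    }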
@@ -349,24 +349,3 @@ test "streamUntilDelimiter writes all bytes without delimiter to the output" {
 
     try std.testing.expectError(error.StreamTooLong, reader.streamUntilDelimiter(writer, '!', 5));
 }
-
-test "readBoundedBytes correctly reads into a new bounded array" {
-    const test_string = "abcdefg";
-    var fis = std.io.fixedBufferStream(test_string);
-    const reader = fis.reader();
-
-    var array = try reader.readBoundedBytes(10000);
-    try testing.expectEqualStrings(array.slice(), test_string);
-}
-
-test "readIntoBoundedBytes correctly reads into a provided bounded array" {
-    const test_string = "abcdefg";
-    var fis = std.io.fixedBufferStream(test_string);
-    const reader = fis.reader();
-
-    var bounded_array = std.BoundedArray(u8, 10000){};
-
-    // compile time error if the size is not the same at the provided `bounded.capacity()`
-    try reader.readIntoBoundedBytes(10000, &bounded_array);
-    try testing.expectEqualStrings(bounded_array.slice(), test_string);
-}
@@ -1006,7 +1006,7 @@ fn serializeIpc(start_serialized_len: usize, serialized_buffer: *Serialized.Buff
                 continue;
             }
             const src = pipe_buf[m.remaining_read_trash_bytes..n];
-            std.mem.copyForwards(u8, &pipe_buf, src);
+            @memmove(pipe_buf[0..src.len], src);
             m.remaining_read_trash_bytes = 0;
             bytes_read = src.len;
             continue;
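
`@memmove` differs from `@memcpy` in that the source and destination may overlap, which is exactly the case here: `src` is a tail of `pipe_buf` being shifted to its front. A small self-contained illustration of that property (independent of the surrounding code):

    const std = @import("std");

    test "@memmove tolerates overlapping source and destination" {
        var buf = "xxhello".*;
        const src = buf[2..]; // overlaps the destination below
        @memmove(buf[0..src.len], src);
        try std.testing.expectEqualStrings("hello", buf[0..5]);
    }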
@@ -1,230 +0,0 @@
-//! This ring buffer stores read and write indices while being able to utilise
-//! the full backing slice by incrementing the indices modulo twice the slice's
-//! length and reducing indices modulo the slice's length on slice access. This
-//! means that whether the ring buffer is full or empty can be distinguished by
-//! looking at the difference between the read and write indices without adding
-//! an extra boolean flag or having to reserve a slot in the buffer.
-//!
-//! This ring buffer has not been implemented with thread safety in mind, and
-//! therefore should not be assumed to be suitable for use cases involving
-//! separate reader and writer threads.
-
-const Allocator = @import("std").mem.Allocator;
-const assert = @import("std").debug.assert;
-const copyForwards = @import("std").mem.copyForwards;
-
-const RingBuffer = @This();
-
-data: []u8,
-read_index: usize,
-write_index: usize,
-
-pub const Error = error{ Full, ReadLengthInvalid };
-
-/// Allocate a new `RingBuffer`; `deinit()` should be called to free the buffer.
-pub fn init(allocator: Allocator, capacity: usize) Allocator.Error!RingBuffer {
-    const bytes = try allocator.alloc(u8, capacity);
-    return RingBuffer{
-        .data = bytes,
-        .write_index = 0,
-        .read_index = 0,
-    };
-}
-
-/// Free the data backing a `RingBuffer`; must be passed the same `Allocator` as
-/// `init()`.
-pub fn deinit(self: *RingBuffer, allocator: Allocator) void {
-    allocator.free(self.data);
-    self.* = undefined;
-}
-
-/// Returns `index` modulo the length of the backing slice.
-pub fn mask(self: RingBuffer, index: usize) usize {
-    return index % self.data.len;
-}
-
-/// Returns `index` modulo twice the length of the backing slice.
-pub fn mask2(self: RingBuffer, index: usize) usize {
-    return index % (2 * self.data.len);
-}
-
-/// Write `byte` into the ring buffer. Returns `error.Full` if the ring
-/// buffer is full.
-pub fn write(self: *RingBuffer, byte: u8) Error!void {
-    if (self.isFull()) return error.Full;
-    self.writeAssumeCapacity(byte);
-}
-
-/// Write `byte` into the ring buffer. If the ring buffer is full, the
-/// oldest byte is overwritten.
-pub fn writeAssumeCapacity(self: *RingBuffer, byte: u8) void {
-    self.data[self.mask(self.write_index)] = byte;
-    self.write_index = self.mask2(self.write_index + 1);
-}
-
-/// Write `bytes` into the ring buffer. Returns `error.Full` if the ring
-/// buffer does not have enough space, without writing any data.
-/// Uses memcpy and so `bytes` must not overlap ring buffer data.
-pub fn writeSlice(self: *RingBuffer, bytes: []const u8) Error!void {
-    if (self.len() + bytes.len > self.data.len) return error.Full;
-    self.writeSliceAssumeCapacity(bytes);
-}
-
-/// Write `bytes` into the ring buffer. If there is not enough space, older
-/// bytes will be overwritten.
-/// Uses memcpy and so `bytes` must not overlap ring buffer data.
-pub fn writeSliceAssumeCapacity(self: *RingBuffer, bytes: []const u8) void {
-    assert(bytes.len <= self.data.len);
-    const data_start = self.mask(self.write_index);
-    const part1_data_end = @min(data_start + bytes.len, self.data.len);
-    const part1_len = part1_data_end - data_start;
-    @memcpy(self.data[data_start..part1_data_end], bytes[0..part1_len]);
-
-    const remaining = bytes.len - part1_len;
-    const to_write = @min(remaining, remaining % self.data.len + self.data.len);
-    const part2_bytes_start = bytes.len - to_write;
-    const part2_bytes_end = @min(part2_bytes_start + self.data.len, bytes.len);
-    const part2_len = part2_bytes_end - part2_bytes_start;
-    @memcpy(self.data[0..part2_len], bytes[part2_bytes_start..part2_bytes_end]);
-    if (part2_bytes_end != bytes.len) {
-        const part3_len = bytes.len - part2_bytes_end;
-        @memcpy(self.data[0..part3_len], bytes[part2_bytes_end..bytes.len]);
-    }
-    self.write_index = self.mask2(self.write_index + bytes.len);
-}
-
-/// Write `bytes` into the ring buffer. Returns `error.Full` if the ring
-/// buffer does not have enough space, without writing any data.
-/// Uses copyForwards and can write slices from this RingBuffer into itself.
-pub fn writeSliceForwards(self: *RingBuffer, bytes: []const u8) Error!void {
-    if (self.len() + bytes.len > self.data.len) return error.Full;
-    self.writeSliceForwardsAssumeCapacity(bytes);
-}
-
-/// Write `bytes` into the ring buffer. If there is not enough space, older
-/// bytes will be overwritten.
-/// Uses copyForwards and can write slices from this RingBuffer into itself.
-pub fn writeSliceForwardsAssumeCapacity(self: *RingBuffer, bytes: []const u8) void {
-    assert(bytes.len <= self.data.len);
-    const data_start = self.mask(self.write_index);
-    const part1_data_end = @min(data_start + bytes.len, self.data.len);
-    const part1_len = part1_data_end - data_start;
-    copyForwards(u8, self.data[data_start..], bytes[0..part1_len]);
-
-    const remaining = bytes.len - part1_len;
-    const to_write = @min(remaining, remaining % self.data.len + self.data.len);
-    const part2_bytes_start = bytes.len - to_write;
-    const part2_bytes_end = @min(part2_bytes_start + self.data.len, bytes.len);
-    copyForwards(u8, self.data[0..], bytes[part2_bytes_start..part2_bytes_end]);
-    if (part2_bytes_end != bytes.len)
-        copyForwards(u8, self.data[0..], bytes[part2_bytes_end..bytes.len]);
-    self.write_index = self.mask2(self.write_index + bytes.len);
-}
-
-/// Consume a byte from the ring buffer and return it. Returns `null` if the
-/// ring buffer is empty.
-pub fn read(self: *RingBuffer) ?u8 {
-    if (self.isEmpty()) return null;
-    return self.readAssumeLength();
-}
-
-/// Consume a byte from the ring buffer and return it; asserts that the buffer
-/// is not empty.
-pub fn readAssumeLength(self: *RingBuffer) u8 {
-    assert(!self.isEmpty());
-    const byte = self.data[self.mask(self.read_index)];
-    self.read_index = self.mask2(self.read_index + 1);
-    return byte;
-}
-
-/// Reads first `length` bytes written to the ring buffer into `dest`; Returns
-/// Error.ReadLengthInvalid if length greater than ring or dest length
-/// Uses memcpy and so `dest` must not overlap ring buffer data.
-pub fn readFirst(self: *RingBuffer, dest: []u8, length: usize) Error!void {
-    if (length > self.len() or length > dest.len) return error.ReadLengthInvalid;
-    self.readFirstAssumeLength(dest, length);
-}
-
-/// Reads first `length` bytes written to the ring buffer into `dest`;
-/// Asserts that length not greater than ring buffer or dest length
-/// Uses memcpy and so `dest` must not overlap ring buffer data.
-pub fn readFirstAssumeLength(self: *RingBuffer, dest: []u8, length: usize) void {
-    assert(length <= self.len() and length <= dest.len);
-    const slice = self.sliceAt(self.read_index, length);
-    slice.copyTo(dest);
-    self.read_index = self.mask2(self.read_index + length);
-}
-
-/// Reads last `length` bytes written to the ring buffer into `dest`; Returns
-/// Error.ReadLengthInvalid if length greater than ring or dest length
-/// Uses memcpy and so `dest` must not overlap ring buffer data.
-/// Reduces write index by `length`.
-pub fn readLast(self: *RingBuffer, dest: []u8, length: usize) Error!void {
-    if (length > self.len() or length > dest.len) return error.ReadLengthInvalid;
-    self.readLastAssumeLength(dest, length);
-}
-
-/// Reads last `length` bytes written to the ring buffer into `dest`;
-/// Asserts that length not greater than ring buffer or dest length
-/// Uses memcpy and so `dest` must not overlap ring buffer data.
-/// Reduces write index by `length`.
-pub fn readLastAssumeLength(self: *RingBuffer, dest: []u8, length: usize) void {
-    assert(length <= self.len() and length <= dest.len);
-    const slice = self.sliceLast(length);
-    slice.copyTo(dest);
-    self.write_index = if (self.write_index >= self.data.len)
-        self.write_index - length
-    else
-        self.mask(self.write_index + self.data.len - length);
-}
-
-/// Returns `true` if the ring buffer is empty and `false` otherwise.
-pub fn isEmpty(self: RingBuffer) bool {
-    return self.write_index == self.read_index;
-}
-
-/// Returns `true` if the ring buffer is full and `false` otherwise.
-pub fn isFull(self: RingBuffer) bool {
-    return self.mask2(self.write_index + self.data.len) == self.read_index;
-}
-
-/// Returns the length of data available for reading
-pub fn len(self: RingBuffer) usize {
-    const wrap_offset = 2 * self.data.len * @intFromBool(self.write_index < self.read_index);
-    const adjusted_write_index = self.write_index + wrap_offset;
-    return adjusted_write_index - self.read_index;
-}
-
-/// A `Slice` represents a region of a ring buffer. The region is split into two
-/// sections as the ring buffer data will not be contiguous if the desired
-/// region wraps to the start of the backing slice.
-pub const Slice = struct {
-    first: []u8,
-    second: []u8,
-
-    /// Copy data from `self` into `dest`
-    pub fn copyTo(self: Slice, dest: []u8) void {
-        @memcpy(dest[0..self.first.len], self.first);
-        @memcpy(dest[self.first.len..][0..self.second.len], self.second);
-    }
-};
-
-/// Returns a `Slice` for the region of the ring buffer starting at
-/// `self.mask(start_unmasked)` with the specified length.
-pub fn sliceAt(self: RingBuffer, start_unmasked: usize, length: usize) Slice {
-    assert(length <= self.data.len);
-    const slice1_start = self.mask(start_unmasked);
-    const slice1_end = @min(self.data.len, slice1_start + length);
-    const slice1 = self.data[slice1_start..slice1_end];
-    const slice2 = self.data[0 .. length - slice1.len];
-    return Slice{
-        .first = slice1,
-        .second = slice2,
-    };
-}
-
-/// Returns a `Slice` for the last `length` bytes written to the ring buffer.
-/// Does not check that any bytes have been written into the region.
-pub fn sliceLast(self: RingBuffer, length: usize) Slice {
-    return self.sliceAt(self.write_index + self.data.len - length, length);
-}
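
The removed file's doc comment relies on the classic trick of letting the read and write indices run modulo twice the buffer length, so full and empty states stay distinguishable without an extra flag. A tiny standalone illustration of why that works (numbers are arbitrary; this is not the deleted implementation):

    const std = @import("std");

    test "indices kept modulo twice the length distinguish full from empty" {
        const cap = 4;
        const read: usize = 0;
        var write: usize = 0;
        // Empty: the indices are equal.
        try std.testing.expect(write == read);

        // After writing `cap` bytes the buffer is full: the indices differ by
        // `cap`, even though both reduce to slot 0 modulo `cap`.
        write = (write + cap) % (2 * cap);
        try std.testing.expect(write != read);
        try std.testing.expect(write % cap == read % cap);
    }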
@@ -158,7 +158,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?mem.Alignment) ty
            assert(self.items.len < self.capacity);
            self.items.len += 1;

-            mem.copyBackwards(T, self.items[i + 1 .. self.items.len], self.items[i .. self.items.len - 1]);
+            @memmove(self.items[i + 1 .. self.items.len], self.items[i .. self.items.len - 1]);
            self.items[i] = item;
        }

@@ -216,7 +216,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?mem.Alignment) ty
            assert(self.capacity >= new_len);
            const to_move = self.items[index..];
            self.items.len = new_len;
-            mem.copyBackwards(T, self.items[index + count ..], to_move);
+            @memmove(self.items[index + count ..][0..to_move.len], to_move);
            const result = self.items[index..][0..count];
            @memset(result, undefined);
            return result;
@@ -657,6 +657,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?mem.Alig

        /// Initialize with externally-managed memory. The buffer determines the
        /// capacity, and the length is set to zero.
+        ///
        /// When initialized this way, all functions that accept an Allocator
        /// argument cause illegal behavior.
        pub fn initBuffer(buffer: Slice) Self {
@@ -738,18 +739,37 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?mem.Alig
        }

        /// Insert `item` at index `i`. Moves `list[i .. list.len]` to higher indices to make room.
-        /// If in` is equal to the length of the list this operation is equivalent to append.
+        ///
+        /// If `i` is equal to the length of the list this operation is equivalent to append.
+        ///
        /// This operation is O(N).
+        ///
        /// Asserts that the list has capacity for one additional item.
+        ///
        /// Asserts that the index is in bounds or equal to the length.
        pub fn insertAssumeCapacity(self: *Self, i: usize, item: T) void {
            assert(self.items.len < self.capacity);
            self.items.len += 1;

-            mem.copyBackwards(T, self.items[i + 1 .. self.items.len], self.items[i .. self.items.len - 1]);
+            @memmove(self.items[i + 1 .. self.items.len], self.items[i .. self.items.len - 1]);
            self.items[i] = item;
        }

+        /// Insert `item` at index `i`, moving `list[i .. list.len]` to higher indices to make room.
+        ///
+        /// If `i` is equal to the length of the list this operation is equivalent to append.
+        ///
+        /// This operation is O(N).
+        ///
+        /// If the list lacks unused capacity for the additional item, returns
+        /// `error.OutOfMemory`.
+        ///
+        /// Asserts that the index is in bounds or equal to the length.
+        pub fn insertBounded(self: *Self, i: usize, item: T) error{OutOfMemory}!void {
+            if (self.capacity - self.items.len == 0) return error.OutOfMemory;
+            return insertAssumeCapacity(self, i, item);
+        }
+
        /// Add `count` new elements at position `index`, which have
        /// `undefined` values. Returns a slice pointing to the newly allocated
        /// elements, which becomes invalid after various `ArrayList`
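
A usage sketch of the new `insertBounded` on a fixed-buffer list (values are illustrative):

    const std = @import("std");

    test "insertBounded shifts elements but never grows" {
        var buffer: [3]u8 = undefined;
        var list = std.ArrayListUnmanaged(u8).initBuffer(&buffer);
        try list.appendSliceBounded("ac");
        try list.insertBounded(1, 'b'); // exactly one slot left
        try std.testing.expectEqualStrings("abc", list.items);
        // The buffer is now full, so a further insert is rejected.
        try std.testing.expectError(error.OutOfMemory, list.insertBounded(0, 'x'));
    }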
@@ -782,12 +802,29 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?mem.Alig
            assert(self.capacity >= new_len);
            const to_move = self.items[index..];
            self.items.len = new_len;
-            mem.copyBackwards(T, self.items[index + count ..], to_move);
+            @memmove(self.items[index + count ..][0..to_move.len], to_move);
            const result = self.items[index..][0..count];
            @memset(result, undefined);
            return result;
        }

+        /// Add `count` new elements at position `index`, which have
+        /// `undefined` values, returning a slice pointing to the newly
+        /// allocated elements, which becomes invalid after various `ArrayList`
+        /// operations.
+        ///
+        /// Invalidates pre-existing pointers to elements at and after `index`, but
+        /// does not invalidate any before that.
+        ///
+        /// If the list lacks unused capacity for the additional items, returns
+        /// `error.OutOfMemory`.
+        ///
+        /// Asserts that the index is in bounds or equal to the length.
+        pub fn addManyAtBounded(self: *Self, index: usize, count: usize) error{OutOfMemory}![]T {
+            if (self.capacity - self.items.len < count) return error.OutOfMemory;
+            return addManyAtAssumeCapacity(self, index, count);
+        }
+
        /// Insert slice `items` at index `i` by moving `list[i .. list.len]` to make room.
        /// This operation is O(N).
        /// Invalidates pre-existing pointers to elements at and after `index`.
@@ -831,7 +868,9 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?mem.Alig
        }

        /// Grows or shrinks the list as necessary.
+        ///
        /// Never invalidates element pointers.
+        ///
        /// Asserts the capacity is enough for additional items.
        pub fn replaceRangeAssumeCapacity(self: *Self, start: usize, len: usize, new_items: []const T) void {
            const after_range = start + len;
@@ -848,16 +887,24 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?mem.Alig
            } else {
                const extra = range.len - new_items.len;
                @memcpy(range[0..new_items.len], new_items);
-                std.mem.copyForwards(
-                    T,
-                    self.items[after_range - extra ..],
-                    self.items[after_range..],
-                );
+                const src = self.items[after_range..];
+                @memmove(self.items[after_range - extra ..][0..src.len], src);
                @memset(self.items[self.items.len - extra ..], undefined);
                self.items.len -= extra;
            }
        }

+        /// Grows or shrinks the list as necessary.
+        ///
+        /// Never invalidates element pointers.
+        ///
+        /// If the unused capacity is insufficient for additional items,
+        /// returns `error.OutOfMemory`.
+        pub fn replaceRangeBounded(self: *Self, start: usize, len: usize, new_items: []const T) error{OutOfMemory}!void {
+            if (self.capacity - self.items.len < new_items.len -| len) return error.OutOfMemory;
+            return replaceRangeAssumeCapacity(self, start, len, new_items);
+        }
+
        /// Extend the list by 1 element. Allocates more memory as necessary.
        /// Invalidates element pointers if additional memory is needed.
        pub fn append(self: *Self, gpa: Allocator, item: T) Allocator.Error!void {
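
A sketch of `replaceRangeBounded`, including the shrink path that now goes through `@memmove` (contents are illustrative):

    const std = @import("std");

    test "replaceRangeBounded grows and shrinks within the fixed capacity" {
        var buffer: [8]u8 = undefined;
        var list = std.ArrayListUnmanaged(u8).initBuffer(&buffer);
        try list.appendSliceBounded("abcdef");

        // Replacing "cd" with the shorter "X" shifts the tail left in place.
        try list.replaceRangeBounded(2, 2, "X");
        try std.testing.expectEqualStrings("abXef", list.items);

        // Growing past the remaining capacity is rejected.
        try std.testing.expectError(error.OutOfMemory, list.replaceRangeBounded(0, 0, "0123"));
    }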
@@ -866,12 +913,25 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?mem.Alig
        }

        /// Extend the list by 1 element.
+        ///
        /// Never invalidates element pointers.
+        ///
        /// Asserts that the list can hold one additional item.
        pub fn appendAssumeCapacity(self: *Self, item: T) void {
            self.addOneAssumeCapacity().* = item;
        }

+        /// Extend the list by 1 element.
+        ///
+        /// Never invalidates element pointers.
+        ///
+        /// If the list lacks unused capacity for the additional item, returns
+        /// `error.OutOfMemory`.
+        pub fn appendBounded(self: *Self, item: T) error{OutOfMemory}!void {
+            if (self.capacity - self.items.len == 0) return error.OutOfMemory;
+            return appendAssumeCapacity(self, item);
+        }
+
        /// Remove the element at index `i` from the list and return its value.
        /// Invalidates pointers to the last element.
        /// This operation is O(N).
@@ -906,6 +966,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?mem.Alig
        }

        /// Append the slice of items to the list.
+        ///
        /// Asserts that the list can hold the additional items.
        pub fn appendSliceAssumeCapacity(self: *Self, items: []const T) void {
            const old_len = self.items.len;
@@ -915,6 +976,14 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?mem.Alig
            @memcpy(self.items[old_len..][0..items.len], items);
        }

+        /// Append the slice of items to the list.
+        ///
+        /// If the list lacks unused capacity for the additional items, returns `error.OutOfMemory`.
+        pub fn appendSliceBounded(self: *Self, items: []const T) error{OutOfMemory}!void {
+            if (self.capacity - self.items.len < items.len) return error.OutOfMemory;
+            return appendSliceAssumeCapacity(self, items);
+        }
+
        /// Append the slice of items to the list. Allocates more
        /// memory as necessary. Only call this function if a call to `appendSlice` instead would
        /// be a compile error.
@@ -925,8 +994,10 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?mem.Alig
        }

        /// Append an unaligned slice of items to the list.
-        /// Only call this function if a call to `appendSliceAssumeCapacity`
-        /// instead would be a compile error.
+        ///
+        /// Intended to be used only when `appendSliceAssumeCapacity` would be
+        /// a compile error.
+        ///
        /// Asserts that the list can hold the additional items.
        pub fn appendUnalignedSliceAssumeCapacity(self: *Self, items: []align(1) const T) void {
            const old_len = self.items.len;
@@ -936,6 +1007,18 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?mem.Alig
            @memcpy(self.items[old_len..][0..items.len], items);
        }

+        /// Append an unaligned slice of items to the list.
+        ///
+        /// Intended to be used only when `appendSliceAssumeCapacity` would be
+        /// a compile error.
+        ///
+        /// If the list lacks unused capacity for the additional items, returns
+        /// `error.OutOfMemory`.
+        pub fn appendUnalignedSliceBounded(self: *Self, items: []align(1) const T) error{OutOfMemory}!void {
+            if (self.capacity - self.items.len < items.len) return error.OutOfMemory;
+            return appendUnalignedSliceAssumeCapacity(self, items);
+        }
+
        pub fn print(self: *Self, gpa: Allocator, comptime fmt: []const u8, args: anytype) error{OutOfMemory}!void {
            comptime assert(T == u8);
            try self.ensureUnusedCapacity(gpa, fmt.len);
@@ -953,6 +1036,13 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?mem.Alig
            self.items.len += w.end;
        }

+        pub fn printBounded(self: *Self, comptime fmt: []const u8, args: anytype) error{OutOfMemory}!void {
+            comptime assert(T == u8);
+            var w: std.io.Writer = .fixed(self.unusedCapacitySlice());
+            w.print(fmt, args) catch return error.OutOfMemory;
+            self.items.len += w.end;
+        }
+
        /// Deprecated in favor of `print` or `std.io.Writer.Allocating`.
        pub const WriterContext = struct {
            self: *Self,
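
A sketch of `printBounded`, which formats into the list's unused capacity and reports `error.OutOfMemory` when the output does not fit (format strings and values are illustrative):

    const std = @import("std");

    test "printBounded formats into the remaining capacity" {
        var buffer: [16]u8 = undefined;
        var list = std.ArrayListUnmanaged(u8).initBuffer(&buffer);
        try list.printBounded("x = {d}", .{42});
        try std.testing.expectEqualStrings("x = 42", list.items);

        // Output longer than the 10 remaining bytes is rejected.
        try std.testing.expectError(error.OutOfMemory, list.printBounded("{s}", .{"this is far too long"}));
    }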
@@ -1007,9 +1097,12 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?mem.Alig
        }

        /// Append a value to the list `n` times.
+        ///
        /// Never invalidates element pointers.
+        ///
        /// The function is inline so that a comptime-known `value` parameter will
        /// have better memset codegen in case it has a repeated byte pattern.
+        ///
        /// Asserts that the list can hold the additional items.
        pub inline fn appendNTimesAssumeCapacity(self: *Self, value: T, n: usize) void {
            const new_len = self.items.len + n;
@@ -1018,6 +1111,22 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?mem.Alig
            self.items.len = new_len;
        }

+        /// Append a value to the list `n` times.
+        ///
+        /// Never invalidates element pointers.
+        ///
+        /// The function is inline so that a comptime-known `value` parameter will
+        /// have better memset codegen in case it has a repeated byte pattern.
+        ///
+        /// If the list lacks unused capacity for the additional items, returns
+        /// `error.OutOfMemory`.
+        pub inline fn appendNTimesBounded(self: *Self, value: T, n: usize) error{OutOfMemory}!void {
+            const new_len = self.items.len + n;
+            if (self.capacity < new_len) return error.OutOfMemory;
+            @memset(self.items.ptr[self.items.len..new_len], value);
+            self.items.len = new_len;
+        }
+
        /// Adjust the list length to `new_len`.
        /// Additional elements contain the value `undefined`.
        /// Invalidates element pointers if additional memory is needed.
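
A sketch of `appendNTimesBounded` (values are illustrative):

    const std = @import("std");

    test "appendNTimesBounded respects the fixed capacity" {
        var buffer: [4]u8 = undefined;
        var list = std.ArrayListUnmanaged(u8).initBuffer(&buffer);
        try list.appendNTimesBounded(0, 3);
        try std.testing.expectEqualSlices(u8, &.{ 0, 0, 0 }, list.items);
        // Only one slot remains, so two more repetitions fail.
        try std.testing.expectError(error.OutOfMemory, list.appendNTimesBounded(0, 2));
    }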
@@ -1143,8 +1252,11 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?mem.Alig
        }

        /// Increase length by 1, returning pointer to the new item.
+        ///
        /// Never invalidates element pointers.
+        ///
        /// The returned element pointer becomes invalid when the list is resized.
+        ///
        /// Asserts that the list can hold one additional item.
        pub fn addOneAssumeCapacity(self: *Self) *T {
            assert(self.items.len < self.capacity);
@@ -1153,6 +1265,18 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?mem.Alig
            return &self.items[self.items.len - 1];
        }

+        /// Increase length by 1, returning pointer to the new item.
+        ///
+        /// Never invalidates element pointers.
+        ///
+        /// The returned element pointer becomes invalid when the list is resized.
+        ///
+        /// If the list lacks unused capacity for the additional item, returns `error.OutOfMemory`.
+        pub fn addOneBounded(self: *Self) error{OutOfMemory}!*T {
+            if (self.capacity - self.items.len < 1) return error.OutOfMemory;
+            return addOneAssumeCapacity(self);
+        }
+
        /// Resize the array, adding `n` new elements, which have `undefined` values.
        /// The return value is an array pointing to the newly allocated elements.
        /// The returned pointer becomes invalid when the list is resized.
@@ -1163,9 +1287,13 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?mem.Alig
        }

        /// Resize the array, adding `n` new elements, which have `undefined` values.
+        ///
        /// The return value is an array pointing to the newly allocated elements.
+        ///
        /// Never invalidates element pointers.
+        ///
        /// The returned pointer becomes invalid when the list is resized.
+        ///
        /// Asserts that the list can hold the additional items.
        pub fn addManyAsArrayAssumeCapacity(self: *Self, comptime n: usize) *[n]T {
            assert(self.items.len + n <= self.capacity);
@@ -1174,6 +1302,21 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?mem.Alig
            return self.items[prev_len..][0..n];
        }

+        /// Resize the array, adding `n` new elements, which have `undefined` values.
+        ///
+        /// The return value is an array pointing to the newly allocated elements.
+        ///
+        /// Never invalidates element pointers.
+        ///
+        /// The returned pointer becomes invalid when the list is resized.
+        ///
+        /// If the list lacks unused capacity for the additional items, returns
+        /// `error.OutOfMemory`.
+        pub fn addManyAsArrayBounded(self: *Self, comptime n: usize) error{OutOfMemory}!*[n]T {
+            if (self.capacity - self.items.len < n) return error.OutOfMemory;
+            return addManyAsArrayAssumeCapacity(self, n);
+        }
+
        /// Resize the array, adding `n` new elements, which have `undefined` values.
        /// The return value is a slice pointing to the newly allocated elements.
        /// The returned pointer becomes invalid when the list is resized.
@@ -1184,10 +1327,12 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?mem.Alig
            return self.items[prev_len..][0..n];
        }

-        /// Resize the array, adding `n` new elements, which have `undefined` values.
-        /// The return value is a slice pointing to the newly allocated elements.
-        /// Never invalidates element pointers.
-        /// The returned pointer becomes invalid when the list is resized.
+        /// Resizes the array, adding `n` new elements, which have `undefined`
+        /// values, returning a slice pointing to the newly allocated elements.
+        ///
+        /// Never invalidates element pointers. The returned pointer becomes
+        /// invalid when the list is resized.
+        ///
        /// Asserts that the list can hold the additional items.
        pub fn addManyAsSliceAssumeCapacity(self: *Self, n: usize) []T {
            assert(self.items.len + n <= self.capacity);
@@ -1196,6 +1341,19 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?mem.Alig
            return self.items[prev_len..][0..n];
        }

+        /// Resizes the array, adding `n` new elements, which have `undefined`
+        /// values, returning a slice pointing to the newly allocated elements.
+        ///
+        /// Never invalidates element pointers. The returned pointer becomes
+        /// invalid when the list is resized.
+        ///
+        /// If the list lacks unused capacity for the additional items, returns
+        /// `error.OutOfMemory`.
+        pub fn addManyAsSliceBounded(self: *Self, n: usize) error{OutOfMemory}![]T {
+            if (self.capacity - self.items.len < n) return error.OutOfMemory;
+            return addManyAsSliceAssumeCapacity(self, n);
+        }
+
        /// Remove and return the last element from the list.
        /// If the list is empty, returns `null`.
        /// Invalidates pointers to last element.
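
A sketch of the `addManyAs{Array,Slice}Bounded` variants, which hand back uninitialized space inside the fixed buffer for the caller to fill (shown here for the slice form; values are illustrative):

    const std = @import("std");

    test "addManyAsSliceBounded returns writable space in the fixed buffer" {
        var buffer: [4]u8 = undefined;
        var list = std.ArrayListUnmanaged(u8).initBuffer(&buffer);

        const dst = try list.addManyAsSliceBounded(3);
        @memcpy(dst, "abc");
        try std.testing.expectEqualStrings("abc", list.items);

        // Only one slot remains, so asking for two more fails.
        try std.testing.expectError(error.OutOfMemory, list.addManyAsSliceBounded(2));
    }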
@@ -118,22 +118,6 @@ pub const Base64Encoder = struct {
            }
        }

-    // destWriter must be compatible with std.io.GenericWriter's writeAll interface
-    // sourceReader must be compatible with `std.io.GenericReader` read interface
-    pub fn encodeFromReaderToWriter(encoder: *const Base64Encoder, destWriter: anytype, sourceReader: anytype) !void {
-        while (true) {
-            var tempSource: [3]u8 = undefined;
-            const bytesRead = try sourceReader.read(&tempSource);
-            if (bytesRead == 0) {
-                break;
-            }
-
-            var temp: [5]u8 = undefined;
-            const s = encoder.encode(&temp, tempSource[0..bytesRead]);
-            try destWriter.writeAll(s);
-        }
-    }
-
    /// dest.len must at least be what you get from ::calcSize.
    pub fn encode(encoder: *const Base64Encoder, dest: []u8, source: []const u8) []const u8 {
        const out_len = encoder.calcSize(source.len);
@@ -517,17 +501,13 @@ fn testAllApis(codecs: Codecs, expected_decoded: []const u8, expected_encoded: [
        var buffer: [0x100]u8 = undefined;
        const encoded = codecs.Encoder.encode(&buffer, expected_decoded);
        try testing.expectEqualSlices(u8, expected_encoded, encoded);
-
+    }
+    {
        // stream encode
-        var list = try std.BoundedArray(u8, 0x100).init(0);
-        try codecs.Encoder.encodeWriter(list.writer(), expected_decoded);
-        try testing.expectEqualSlices(u8, expected_encoded, list.slice());
-
-        // reader to writer encode
-        var stream = std.io.fixedBufferStream(expected_decoded);
-        list = try std.BoundedArray(u8, 0x100).init(0);
-        try codecs.Encoder.encodeFromReaderToWriter(list.writer(), stream.reader());
-        try testing.expectEqualSlices(u8, expected_encoded, list.slice());
+        var buffer: [0x100]u8 = undefined;
+        var writer: std.Io.Writer = .fixed(&buffer);
+        try codecs.Encoder.encodeWriter(&writer, expected_decoded);
+        try testing.expectEqualSlices(u8, expected_encoded, writer.buffered());
    }

    // Base64Decoder
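
The test above now drives `encodeWriter` through a fixed `std.Io.Writer` instead of a `BoundedArray` writer. The general pattern, independent of base64, looks roughly like this, assuming `writeAll` and `buffered` keep their current meanings:

    const std = @import("std");

    test "a fixed Writer collects output in a caller-owned buffer" {
        var buffer: [32]u8 = undefined;
        var writer: std.Io.Writer = .fixed(&buffer);
        try writer.writeAll("hello ");
        try writer.writeAll("world");
        try std.testing.expectEqualStrings("hello world", writer.buffered());
    }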
@@ -1,412 +0,0 @@
-const std = @import("std.zig");
-const assert = std.debug.assert;
-const mem = std.mem;
-const testing = std.testing;
-const Alignment = std.mem.Alignment;
-
-/// A structure with an array and a length, that can be used as a slice.
-///
-/// Useful to pass around small arrays whose exact size is only known at
-/// runtime, but whose maximum size is known at comptime, without requiring
-/// an `Allocator`.
-///
-/// ```zig
-/// var actual_size = 32;
-/// var a = try BoundedArray(u8, 64).init(actual_size);
-/// var slice = a.slice(); // a slice of the 64-byte array
-/// var a_clone = a; // creates a copy - the structure doesn't use any internal pointers
-/// ```
-pub fn BoundedArray(comptime T: type, comptime buffer_capacity: usize) type {
-    return BoundedArrayAligned(T, .of(T), buffer_capacity);
-}
-
-/// A structure with an array, length and alignment, that can be used as a
-/// slice.
-///
-/// Useful to pass around small explicitly-aligned arrays whose exact size is
-/// only known at runtime, but whose maximum size is known at comptime, without
-/// requiring an `Allocator`.
-/// ```zig
-// var a = try BoundedArrayAligned(u8, 16, 2).init(0);
-// try a.append(255);
-// try a.append(255);
-// const b = @ptrCast(*const [1]u16, a.constSlice().ptr);
-// try testing.expectEqual(@as(u16, 65535), b[0]);
-/// ```
-pub fn BoundedArrayAligned(
-    comptime T: type,
-    comptime alignment: Alignment,
-    comptime buffer_capacity: usize,
-) type {
-    return struct {
-        const Self = @This();
-        buffer: [buffer_capacity]T align(alignment.toByteUnits()) = undefined,
-        len: usize = 0,
-
-        /// Set the actual length of the slice.
-        /// Returns error.Overflow if it exceeds the length of the backing array.
-        pub fn init(len: usize) error{Overflow}!Self {
-            if (len > buffer_capacity) return error.Overflow;
-            return Self{ .len = len };
-        }
-
-        /// View the internal array as a slice whose size was previously set.
-        pub fn slice(self: anytype) switch (@TypeOf(&self.buffer)) {
-            *align(alignment.toByteUnits()) [buffer_capacity]T => []align(alignment.toByteUnits()) T,
-            *align(alignment.toByteUnits()) const [buffer_capacity]T => []align(alignment.toByteUnits()) const T,
-            else => unreachable,
-        } {
-            return self.buffer[0..self.len];
-        }
-
-        /// View the internal array as a constant slice whose size was previously set.
-        pub fn constSlice(self: *const Self) []align(alignment.toByteUnits()) const T {
-            return self.slice();
-        }
-
-        /// Adjust the slice's length to `len`.
-        /// Does not initialize added items if any.
-        pub fn resize(self: *Self, len: usize) error{Overflow}!void {
-            if (len > buffer_capacity) return error.Overflow;
-            self.len = len;
-        }
-
-        /// Remove all elements from the slice.
-        pub fn clear(self: *Self) void {
-            self.len = 0;
-        }
-
-        /// Copy the content of an existing slice.
-        pub fn fromSlice(m: []const T) error{Overflow}!Self {
-            var list = try init(m.len);
-            @memcpy(list.slice(), m);
-            return list;
-        }
-
-        /// Return the element at index `i` of the slice.
-        pub fn get(self: Self, i: usize) T {
-            return self.constSlice()[i];
-        }
-
-        /// Set the value of the element at index `i` of the slice.
-        pub fn set(self: *Self, i: usize, item: T) void {
-            self.slice()[i] = item;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Return the maximum length of a slice.
|
|
||||||
pub fn capacity(self: Self) usize {
|
|
||||||
return self.buffer.len;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Check that the slice can hold at least `additional_count` items.
|
|
||||||
pub fn ensureUnusedCapacity(self: Self, additional_count: usize) error{Overflow}!void {
|
|
||||||
if (self.len + additional_count > buffer_capacity) {
|
|
||||||
return error.Overflow;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Increase length by 1, returning a pointer to the new item.
|
|
||||||
pub fn addOne(self: *Self) error{Overflow}!*T {
|
|
||||||
try self.ensureUnusedCapacity(1);
|
|
||||||
return self.addOneAssumeCapacity();
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Increase length by 1, returning pointer to the new item.
|
|
||||||
/// Asserts that there is space for the new item.
|
|
||||||
pub fn addOneAssumeCapacity(self: *Self) *T {
|
|
||||||
assert(self.len < buffer_capacity);
|
|
||||||
self.len += 1;
|
|
||||||
return &self.slice()[self.len - 1];
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Resize the slice, adding `n` new elements, which have `undefined` values.
|
|
||||||
/// The return value is a pointer to the array of uninitialized elements.
|
|
||||||
pub fn addManyAsArray(self: *Self, comptime n: usize) error{Overflow}!*align(alignment.toByteUnits()) [n]T {
|
|
||||||
const prev_len = self.len;
|
|
||||||
try self.resize(self.len + n);
|
|
||||||
return self.slice()[prev_len..][0..n];
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Resize the slice, adding `n` new elements, which have `undefined` values.
|
|
||||||
/// The return value is a slice pointing to the uninitialized elements.
|
|
||||||
pub fn addManyAsSlice(self: *Self, n: usize) error{Overflow}![]align(alignment.toByteUnits()) T {
|
|
||||||
const prev_len = self.len;
|
|
||||||
try self.resize(self.len + n);
|
|
||||||
return self.slice()[prev_len..][0..n];
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Remove and return the last element from the slice, or return `null` if the slice is empty.
|
|
||||||
pub fn pop(self: *Self) ?T {
|
|
||||||
if (self.len == 0) return null;
|
|
||||||
const item = self.get(self.len - 1);
|
|
||||||
self.len -= 1;
|
|
||||||
return item;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Return a slice of only the extra capacity after items.
|
|
||||||
/// This can be useful for writing directly into it.
|
|
||||||
/// Note that such an operation must be followed up with a
|
|
||||||
/// call to `resize()`
|
|
||||||
pub fn unusedCapacitySlice(self: *Self) []align(alignment.toByteUnits()) T {
|
|
||||||
return self.buffer[self.len..];
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Insert `item` at index `i` by moving `slice[n .. slice.len]` to make room.
|
|
||||||
/// This operation is O(N).
|
|
||||||
pub fn insert(
|
|
||||||
self: *Self,
|
|
||||||
i: usize,
|
|
||||||
item: T,
|
|
||||||
) error{Overflow}!void {
|
|
||||||
if (i > self.len) {
|
|
||||||
return error.Overflow;
|
|
||||||
}
|
|
||||||
_ = try self.addOne();
|
|
||||||
var s = self.slice();
|
|
||||||
mem.copyBackwards(T, s[i + 1 .. s.len], s[i .. s.len - 1]);
|
|
||||||
self.buffer[i] = item;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Insert slice `items` at index `i` by moving `slice[i .. slice.len]` to make room.
|
|
||||||
/// This operation is O(N).
|
|
||||||
pub fn insertSlice(self: *Self, i: usize, items: []const T) error{Overflow}!void {
|
|
||||||
try self.ensureUnusedCapacity(items.len);
|
|
||||||
self.len += items.len;
|
|
||||||
mem.copyBackwards(T, self.slice()[i + items.len .. self.len], self.constSlice()[i .. self.len - items.len]);
|
|
||||||
@memcpy(self.slice()[i..][0..items.len], items);
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Replace range of elements `slice[start..][0..len]` with `new_items`.
|
|
||||||
/// Grows slice if `len < new_items.len`.
|
|
||||||
/// Shrinks slice if `len > new_items.len`.
|
|
||||||
pub fn replaceRange(
|
|
||||||
self: *Self,
|
|
||||||
start: usize,
|
|
||||||
len: usize,
|
|
||||||
new_items: []const T,
|
|
||||||
) error{Overflow}!void {
|
|
||||||
const after_range = start + len;
|
|
||||||
var range = self.slice()[start..after_range];
|
|
||||||
|
|
||||||
if (range.len == new_items.len) {
|
|
||||||
@memcpy(range[0..new_items.len], new_items);
|
|
||||||
} else if (range.len < new_items.len) {
|
|
||||||
const first = new_items[0..range.len];
|
|
||||||
const rest = new_items[range.len..];
|
|
||||||
@memcpy(range[0..first.len], first);
|
|
||||||
try self.insertSlice(after_range, rest);
|
|
||||||
} else {
|
|
||||||
@memcpy(range[0..new_items.len], new_items);
|
|
||||||
const after_subrange = start + new_items.len;
|
|
||||||
for (self.constSlice()[after_range..], 0..) |item, i| {
|
|
||||||
self.slice()[after_subrange..][i] = item;
|
|
||||||
}
|
|
||||||
self.len -= len - new_items.len;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Extend the slice by 1 element.
|
|
||||||
pub fn append(self: *Self, item: T) error{Overflow}!void {
|
|
||||||
const new_item_ptr = try self.addOne();
|
|
||||||
new_item_ptr.* = item;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Extend the slice by 1 element, asserting the capacity is already
|
|
||||||
/// enough to store the new item.
|
|
||||||
pub fn appendAssumeCapacity(self: *Self, item: T) void {
|
|
||||||
const new_item_ptr = self.addOneAssumeCapacity();
|
|
||||||
new_item_ptr.* = item;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Remove the element at index `i`, shift elements after index
|
|
||||||
/// `i` forward, and return the removed element.
|
|
||||||
/// Asserts the slice has at least one item.
|
|
||||||
/// This operation is O(N).
|
|
||||||
pub fn orderedRemove(self: *Self, i: usize) T {
|
|
||||||
const newlen = self.len - 1;
|
|
||||||
if (newlen == i) return self.pop().?;
|
|
||||||
const old_item = self.get(i);
|
|
||||||
for (self.slice()[i..newlen], 0..) |*b, j| b.* = self.get(i + 1 + j);
|
|
||||||
self.set(newlen, undefined);
|
|
||||||
self.len = newlen;
|
|
||||||
return old_item;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Remove the element at the specified index and return it.
|
|
||||||
/// The empty slot is filled from the end of the slice.
|
|
||||||
/// This operation is O(1).
|
|
||||||
pub fn swapRemove(self: *Self, i: usize) T {
|
|
||||||
if (self.len - 1 == i) return self.pop().?;
|
|
||||||
const old_item = self.get(i);
|
|
||||||
self.set(i, self.pop().?);
|
|
||||||
return old_item;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Append the slice of items to the slice.
|
|
||||||
pub fn appendSlice(self: *Self, items: []const T) error{Overflow}!void {
|
|
||||||
try self.ensureUnusedCapacity(items.len);
|
|
||||||
self.appendSliceAssumeCapacity(items);
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Append the slice of items to the slice, asserting the capacity is already
|
|
||||||
/// enough to store the new items.
|
|
||||||
pub fn appendSliceAssumeCapacity(self: *Self, items: []const T) void {
|
|
||||||
const old_len = self.len;
|
|
||||||
self.len += items.len;
|
|
||||||
@memcpy(self.slice()[old_len..][0..items.len], items);
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Append a value to the slice `n` times.
|
|
||||||
/// Allocates more memory as necessary.
|
|
||||||
pub fn appendNTimes(self: *Self, value: T, n: usize) error{Overflow}!void {
|
|
||||||
const old_len = self.len;
|
|
||||||
try self.resize(old_len + n);
|
|
||||||
@memset(self.slice()[old_len..self.len], value);
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Append a value to the slice `n` times.
|
|
||||||
/// Asserts the capacity is enough.
|
|
||||||
pub fn appendNTimesAssumeCapacity(self: *Self, value: T, n: usize) void {
|
|
||||||
const old_len = self.len;
|
|
||||||
self.len += n;
|
|
||||||
assert(self.len <= buffer_capacity);
|
|
||||||
@memset(self.slice()[old_len..self.len], value);
|
|
||||||
}
|
|
||||||
|
|
||||||
pub const Writer = if (T != u8)
|
|
||||||
@compileError("The Writer interface is only defined for BoundedArray(u8, ...) " ++
|
|
||||||
"but the given type is BoundedArray(" ++ @typeName(T) ++ ", ...)")
|
|
||||||
else
|
|
||||||
std.io.GenericWriter(*Self, error{Overflow}, appendWrite);
|
|
||||||
|
|
||||||
/// Initializes a writer which will write into the array.
|
|
||||||
pub fn writer(self: *Self) Writer {
|
|
||||||
return .{ .context = self };
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Same as `appendSlice` except it returns the number of bytes written, which is always the same
|
|
||||||
/// as `m.len`. The purpose of this function existing is to match `std.io.GenericWriter` API.
|
|
||||||
fn appendWrite(self: *Self, m: []const u8) error{Overflow}!usize {
|
|
||||||
try self.appendSlice(m);
|
|
||||||
return m.len;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
test BoundedArray {
|
|
||||||
var a = try BoundedArray(u8, 64).init(32);
|
|
||||||
|
|
||||||
try testing.expectEqual(a.capacity(), 64);
|
|
||||||
try testing.expectEqual(a.slice().len, 32);
|
|
||||||
try testing.expectEqual(a.constSlice().len, 32);
|
|
||||||
|
|
||||||
try a.resize(48);
|
|
||||||
try testing.expectEqual(a.len, 48);
|
|
||||||
|
|
||||||
const x = [_]u8{1} ** 10;
|
|
||||||
a = try BoundedArray(u8, 64).fromSlice(&x);
|
|
||||||
try testing.expectEqualSlices(u8, &x, a.constSlice());
|
|
||||||
|
|
||||||
var a2 = a;
|
|
||||||
try testing.expectEqualSlices(u8, a.constSlice(), a2.constSlice());
|
|
||||||
a2.set(0, 0);
|
|
||||||
try testing.expect(a.get(0) != a2.get(0));
|
|
||||||
|
|
||||||
try testing.expectError(error.Overflow, a.resize(100));
|
|
||||||
try testing.expectError(error.Overflow, BoundedArray(u8, x.len - 1).fromSlice(&x));
|
|
||||||
|
|
||||||
try a.resize(0);
|
|
||||||
try a.ensureUnusedCapacity(a.capacity());
|
|
||||||
(try a.addOne()).* = 0;
|
|
||||||
try a.ensureUnusedCapacity(a.capacity() - 1);
|
|
||||||
try testing.expectEqual(a.len, 1);
|
|
||||||
|
|
||||||
const uninitialized = try a.addManyAsArray(4);
|
|
||||||
try testing.expectEqual(uninitialized.len, 4);
|
|
||||||
try testing.expectEqual(a.len, 5);
|
|
||||||
|
|
||||||
try a.append(0xff);
|
|
||||||
try testing.expectEqual(a.len, 6);
|
|
||||||
try testing.expectEqual(a.pop(), 0xff);
|
|
||||||
|
|
||||||
a.appendAssumeCapacity(0xff);
|
|
||||||
try testing.expectEqual(a.len, 6);
|
|
||||||
try testing.expectEqual(a.pop(), 0xff);
|
|
||||||
|
|
||||||
try a.resize(1);
|
|
||||||
try testing.expectEqual(a.pop(), 0);
|
|
||||||
try testing.expectEqual(a.pop(), null);
|
|
||||||
var unused = a.unusedCapacitySlice();
|
|
||||||
@memset(unused[0..8], 2);
|
|
||||||
unused[8] = 3;
|
|
||||||
unused[9] = 4;
|
|
||||||
try testing.expectEqual(unused.len, a.capacity());
|
|
||||||
try a.resize(10);
|
|
||||||
|
|
||||||
try a.insert(5, 0xaa);
|
|
||||||
try testing.expectEqual(a.len, 11);
|
|
||||||
try testing.expectEqual(a.get(5), 0xaa);
|
|
||||||
try testing.expectEqual(a.get(9), 3);
|
|
||||||
try testing.expectEqual(a.get(10), 4);
|
|
||||||
|
|
||||||
try a.insert(11, 0xbb);
|
|
||||||
try testing.expectEqual(a.len, 12);
|
|
||||||
try testing.expectEqual(a.pop(), 0xbb);
|
|
||||||
|
|
||||||
try a.appendSlice(&x);
|
|
||||||
try testing.expectEqual(a.len, 11 + x.len);
|
|
||||||
|
|
||||||
try a.appendNTimes(0xbb, 5);
|
|
||||||
try testing.expectEqual(a.len, 11 + x.len + 5);
|
|
||||||
try testing.expectEqual(a.pop(), 0xbb);
|
|
||||||
|
|
||||||
a.appendNTimesAssumeCapacity(0xcc, 5);
|
|
||||||
try testing.expectEqual(a.len, 11 + x.len + 5 - 1 + 5);
|
|
||||||
try testing.expectEqual(a.pop(), 0xcc);
|
|
||||||
|
|
||||||
try testing.expectEqual(a.len, 29);
|
|
||||||
try a.replaceRange(1, 20, &x);
|
|
||||||
try testing.expectEqual(a.len, 29 + x.len - 20);
|
|
||||||
|
|
||||||
try a.insertSlice(0, &x);
|
|
||||||
try testing.expectEqual(a.len, 29 + x.len - 20 + x.len);
|
|
||||||
|
|
||||||
try a.replaceRange(1, 5, &x);
|
|
||||||
try testing.expectEqual(a.len, 29 + x.len - 20 + x.len + x.len - 5);
|
|
||||||
|
|
||||||
try a.append(10);
|
|
||||||
try testing.expectEqual(a.pop(), 10);
|
|
||||||
|
|
||||||
try a.append(20);
|
|
||||||
const removed = a.orderedRemove(5);
|
|
||||||
try testing.expectEqual(removed, 1);
|
|
||||||
try testing.expectEqual(a.len, 34);
|
|
||||||
|
|
||||||
a.set(0, 0xdd);
|
|
||||||
a.set(a.len - 1, 0xee);
|
|
||||||
const swapped = a.swapRemove(0);
|
|
||||||
try testing.expectEqual(swapped, 0xdd);
|
|
||||||
try testing.expectEqual(a.get(0), 0xee);
|
|
||||||
|
|
||||||
const added_slice = try a.addManyAsSlice(3);
|
|
||||||
try testing.expectEqual(added_slice.len, 3);
|
|
||||||
try testing.expectEqual(a.len, 36);
|
|
||||||
|
|
||||||
while (a.pop()) |_| {}
|
|
||||||
const w = a.writer();
|
|
||||||
const s = "hello, this is a test string";
|
|
||||||
try w.writeAll(s);
|
|
||||||
try testing.expectEqualStrings(s, a.constSlice());
|
|
||||||
}
|
|
||||||
|
|
||||||
test "BoundedArrayAligned" {
|
|
||||||
var a = try BoundedArrayAligned(u8, .@"16", 4).init(0);
|
|
||||||
try a.append(0);
|
|
||||||
try a.append(0);
|
|
||||||
try a.append(255);
|
|
||||||
try a.append(255);
|
|
||||||
|
|
||||||
const b = @as(*const [2]u16, @ptrCast(a.constSlice().ptr));
|
|
||||||
try testing.expectEqual(@as(u16, 0), b[0]);
|
|
||||||
try testing.expectEqual(@as(u16, 65535), b[1]);
|
|
||||||
}
|
|
||||||
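With `bounded_array.zig` gone, the replacement idiom used throughout this commit is a caller-owned array wrapped by `ArrayListUnmanaged.initBuffer`. A minimal migration sketch (mine, not part of the diff), assuming the `appendBounded`/`appendSliceBounded` helpers this commit relies on:

```zig
const std = @import("std");

test "fixed-capacity list without BoundedArray" {
    // Before: var stack = try std.BoundedArray(i32, 8).fromSlice(&.{ 1, 2 });
    var buffer: [8]i32 = undefined;
    var stack = std.ArrayListUnmanaged(i32).initBuffer(&buffer);
    try stack.appendSliceBounded(&.{ 1, 2 });
    try stack.appendBounded(3);
    try std.testing.expectEqualSlices(i32, &.{ 1, 2, 3 }, stack.items);
}
```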
@ -1710,7 +1710,7 @@ pub const Mutable = struct {

if (xy_trailing != 0 and r.limbs[r.len - 1] != 0) {
// Manually shift here since we know its limb aligned.
mem.copyBackwards(Limb, r.limbs[xy_trailing..], r.limbs[0..r.len]);
@memmove(r.limbs[xy_trailing..][0..r.len], r.limbs[0..r.len]);
@memset(r.limbs[0..xy_trailing], 0);
r.len += xy_trailing;
}
@ -3836,8 +3836,7 @@ fn llshl(r: []Limb, a: []const Limb, shift: usize) usize {
std.debug.assert(@intFromPtr(r.ptr) >= @intFromPtr(a.ptr));

if (shift == 0) {
if (a.ptr != r.ptr)
if (a.ptr != r.ptr) @memmove(r[0..a.len], a);
std.mem.copyBackwards(Limb, r[0..a.len], a);
return a.len;
}
if (shift >= limb_bits) {
@ -3891,8 +3890,7 @@ fn llshr(r: []Limb, a: []const Limb, shift: usize) usize {
if (shift == 0) {
std.debug.assert(r.len >= a.len);

if (a.ptr != r.ptr)
if (a.ptr != r.ptr) @memmove(r[0..a.len], a);
std.mem.copyForwards(Limb, r[0..a.len], a);
return a.len;
}
if (shift >= limb_bits) {
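These hunks replace direction-sensitive `copyForwards`/`copyBackwards` calls with `@memmove`, which is defined for overlapping source and destination. A minimal illustrative sketch (mine, not from the diff) of shifting a slice's contents in place:

```zig
const std = @import("std");

test "@memmove handles overlapping ranges" {
    var limbs = [_]u32{ 1, 2, 3, 0, 0 };
    // Shift the first three limbs up by two; source and destination overlap.
    @memmove(limbs[2..][0..3], limbs[0..3]);
    @memset(limbs[0..2], 0);
    try std.testing.expectEqualSlices(u32, &[_]u32{ 0, 0, 1, 2, 3 }, &limbs);
}
```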
@ -1332,7 +1332,7 @@ pub fn GetFinalPathNameByHandle(
// dropping the \Device\Mup\ and making sure the path begins with \\
if (mem.eql(u16, device_name_u16, std.unicode.utf8ToUtf16LeStringLiteral("Mup"))) {
out_buffer[0] = '\\';
mem.copyForwards(u16, out_buffer[1..][0..file_name_u16.len], file_name_u16);
@memmove(out_buffer[1..][0..file_name_u16.len], file_name_u16);
return out_buffer[0 .. 1 + file_name_u16.len];
}
@ -1400,7 +1400,7 @@ pub fn GetFinalPathNameByHandle(
if (out_buffer.len < drive_letter.len + file_name_u16.len) return error.NameTooLong;

@memcpy(out_buffer[0..drive_letter.len], drive_letter);
mem.copyForwards(u16, out_buffer[drive_letter.len..][0..file_name_u16.len], file_name_u16);
@memmove(out_buffer[drive_letter.len..][0..file_name_u16.len], file_name_u16);
const total_len = drive_letter.len + file_name_u16.len;

// Validate that DOS does not contain any spurious nul bytes.
@ -1449,12 +1449,7 @@ pub fn GetFinalPathNameByHandle(
// to copy backwards. We also need to do this before copying the volume path because
// it could overwrite the file_name_u16 memory.
const file_name_dest = out_buffer[volume_path.len..][0..file_name_u16.len];
const file_name_byte_offset = @intFromPtr(file_name_u16.ptr) - @intFromPtr(out_buffer.ptr);
@memmove(file_name_dest, file_name_u16);
const file_name_index = file_name_byte_offset / @sizeOf(u16);
if (volume_path.len > file_name_index)
mem.copyBackwards(u16, file_name_dest, file_name_u16)
else
mem.copyForwards(u16, file_name_dest, file_name_u16);
@memcpy(out_buffer[0..volume_path.len], volume_path);
const total_len = volume_path.len + file_name_u16.len;
@ -9,8 +9,6 @@ pub const AutoArrayHashMapUnmanaged = array_hash_map.AutoArrayHashMapUnmanaged;
pub const AutoHashMap = hash_map.AutoHashMap;
pub const AutoHashMapUnmanaged = hash_map.AutoHashMapUnmanaged;
pub const BitStack = @import("BitStack.zig");
pub const BoundedArray = @import("bounded_array.zig").BoundedArray;
pub const BoundedArrayAligned = @import("bounded_array.zig").BoundedArrayAligned;
pub const Build = @import("Build.zig");
pub const BufMap = @import("buf_map.zig").BufMap;
pub const BufSet = @import("buf_set.zig").BufSet;
@ -31,7 +29,6 @@ pub const PriorityQueue = @import("priority_queue.zig").PriorityQueue;
pub const PriorityDequeue = @import("priority_dequeue.zig").PriorityDequeue;
pub const Progress = @import("Progress.zig");
pub const Random = @import("Random.zig");
pub const RingBuffer = @import("RingBuffer.zig");
pub const SegmentedList = @import("segmented_list.zig").SegmentedList;
pub const SemanticVersion = @import("SemanticVersion.zig");
pub const SinglyLinkedList = @import("SinglyLinkedList.zig");
@ -2103,6 +2103,8 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
.local_zir_cache = local_zir_cache,
.error_limit = error_limit,
.llvm_object = null,
.analysis_roots_buffer = undefined,
.analysis_roots_len = 0,
};
try zcu.init(options.thread_pool.getIdCount());
break :blk zcu;
@ -2933,22 +2935,26 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
try comp.appendFileSystemInput(embed_file.path);
}

zcu.analysis_roots.clear();
zcu.analysis_roots_len = 0;

zcu.analysis_roots.appendAssumeCapacity(zcu.std_mod);
zcu.analysis_roots_buffer[zcu.analysis_roots_len] = zcu.std_mod;
zcu.analysis_roots_len += 1;

// Normally we rely on importing std to in turn import the root source file in the start code.
// However, the main module is distinct from the root module in tests, so that won't happen there.
if (comp.config.is_test and zcu.main_mod != zcu.std_mod) {
zcu.analysis_roots.appendAssumeCapacity(zcu.main_mod);
zcu.analysis_roots_buffer[zcu.analysis_roots_len] = zcu.main_mod;
zcu.analysis_roots_len += 1;
}

if (zcu.root_mod.deps.get("compiler_rt")) |compiler_rt_mod| {
zcu.analysis_roots.appendAssumeCapacity(compiler_rt_mod);
zcu.analysis_roots_buffer[zcu.analysis_roots_len] = compiler_rt_mod;
zcu.analysis_roots_len += 1;
}

if (zcu.root_mod.deps.get("ubsan_rt")) |ubsan_rt_mod| {
zcu.analysis_roots.appendAssumeCapacity(ubsan_rt_mod);
zcu.analysis_roots_buffer[zcu.analysis_roots_len] = ubsan_rt_mod;
zcu.analysis_roots_len += 1;
}
}
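The BoundedArray field is replaced here by a plain fixed array plus a length. A minimal standalone sketch of that pattern (the type and values are illustrative, not the compiler's):

```zig
const std = @import("std");

const Roots = struct {
    buffer: [4]u32 = undefined,
    len: usize = 0,

    fn append(self: *Roots, item: u32) void {
        // Mirrors the appendAssumeCapacity-style usage above: the caller
        // guarantees at most four roots are ever added.
        self.buffer[self.len] = item;
        self.len += 1;
    }

    fn slice(self: *Roots) []u32 {
        return self.buffer[0..self.len];
    }
};

test "fixed buffer plus length" {
    var roots: Roots = .{};
    roots.append(1);
    roots.append(2);
    try std.testing.expectEqualSlices(u32, &[_]u32{ 1, 2 }, roots.slice());
}
```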
@ -4745,7 +4751,7 @@ fn performAllTheWork(
try zcu.flushRetryableFailures();

// It's analysis time! Queue up our initial analysis.
for (zcu.analysis_roots.slice()) |mod| {
for (zcu.analysisRoots()) |mod| {
try comp.queueJob(.{ .analyze_mod = mod });
}
@ -2631,7 +2631,7 @@ fn reparentOwnedErrorMsg(

const orig_notes = msg.notes.len;
msg.notes = try sema.gpa.realloc(msg.notes, orig_notes + 1);
std.mem.copyBackwards(Zcu.ErrorMsg, msg.notes[1..], msg.notes[0..orig_notes]);
@memmove(msg.notes[1..][0..orig_notes], msg.notes[0..orig_notes]);
msg.notes[0] = .{
.src_loc = msg.src_loc,
.msg = msg.msg,
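This hunk grows a slice by one and shifts the existing elements up to free index 0; because source and destination overlap, `@memmove` is the right builtin. A small sketch of the same prepend-by-shift idea (hypothetical data, not the compiler's ErrorMsg type):

```zig
const std = @import("std");

test "make room at the front with @memmove" {
    const gpa = std.testing.allocator;
    var notes = try gpa.alloc(u8, 3);
    defer gpa.free(notes);
    @memcpy(notes, "abc");

    notes = try gpa.realloc(notes, 4);
    // Shift the old contents one slot to the right; the ranges overlap.
    @memmove(notes[1..][0..3], notes[0..3]);
    notes[0] = 'x';
    try std.testing.expectEqualStrings("xabc", notes);
}
```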
@ -14464,8 +14464,8 @@ fn analyzeTupleMul(
}
}
for (0..factor) |i| {
mem.copyForwards(InternPool.Index, types[tuple_len * i ..], types[0..tuple_len]);
@memmove(types[tuple_len * i ..][0..tuple_len], types[0..tuple_len]);
mem.copyForwards(InternPool.Index, values[tuple_len * i ..], values[0..tuple_len]);
@memmove(values[tuple_len * i ..][0..tuple_len], values[0..tuple_len]);
}
break :rs runtime_src;
};
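In this loop the `i == 0` iteration copies the prefix onto itself, which `@memcpy` would reject as aliasing but `@memmove` allows. A small sketch of the repeat-a-prefix pattern (illustrative values, mine):

```zig
const std = @import("std");

test "repeat a prefix in place" {
    var items = [_]u8{ 7, 8, 0, 0, 0, 0 };
    const prefix_len = 2;
    const factor = 3;
    for (0..factor) |i| {
        // The i == 0 iteration copies the prefix onto itself; @memmove
        // permits exact aliasing, unlike @memcpy.
        @memmove(items[prefix_len * i ..][0..prefix_len], items[0..prefix_len]);
    }
    try std.testing.expectEqualSlices(u8, &[_]u8{ 7, 8, 7, 8, 7, 8 }, &items);
}
```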
src/Zcu.zig
@ -268,7 +268,8 @@ nav_val_analysis_queued: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, voi

/// These are the modules which we initially queue for analysis in `Compilation.update`.
/// `resolveReferences` will use these as the root of its reachability traversal.
analysis_roots: std.BoundedArray(*Package.Module, 4) = .{},
analysis_roots_buffer: [4]*Package.Module,
analysis_roots_len: usize = 0,
/// This is the cached result of `Zcu.resolveReferences`. It is computed on-demand, and
/// reset to `null` when any semantic analysis occurs (since this invalidates the data).
/// Allocated into `gpa`.
@ -4013,8 +4014,8 @@ fn resolveReferencesInner(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolv
// This is not a sufficient size, but a lower bound.
try result.ensureTotalCapacity(gpa, @intCast(zcu.reference_table.count()));

try type_queue.ensureTotalCapacity(gpa, zcu.analysis_roots.len);
try type_queue.ensureTotalCapacity(gpa, zcu.analysis_roots_len);
for (zcu.analysis_roots.slice()) |mod| {
for (zcu.analysisRoots()) |mod| {
const file = zcu.module_roots.get(mod).?.unwrap() orelse continue;
const root_ty = zcu.fileRootType(file);
if (root_ty == .none) continue;
@ -4202,6 +4203,10 @@ fn resolveReferencesInner(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolv
return result;
}

pub fn analysisRoots(zcu: *Zcu) []*Package.Module {
return zcu.analysis_roots_buffer[0..zcu.analysis_roots_len];
}

pub fn fileByIndex(zcu: *const Zcu, file_index: File.Index) *File {
return zcu.intern_pool.filePtr(file_index);
}
@ -2116,8 +2116,9 @@ pub fn computeAliveFiles(pt: Zcu.PerThread) Allocator.Error!bool {
// multi-threaded environment (where things like file indices could differ between compiler runs).

// The roots of our file liveness analysis will be the analysis roots.
try zcu.alive_files.ensureTotalCapacity(gpa, zcu.analysis_roots.len);
const analysis_roots = zcu.analysisRoots();
for (zcu.analysis_roots.slice()) |mod| {
try zcu.alive_files.ensureTotalCapacity(gpa, analysis_roots.len);
for (analysis_roots) |mod| {
const file_index = zcu.module_roots.get(mod).?.unwrap() orelse continue;
const file = zcu.fileByIndex(file_index);