Mirror of https://github.com/ziglang/zig.git, synced 2026-01-21 06:45:24 +00:00.
Merge pull request #24968 from ifreund/deque

std: add a Deque data structure

Commit bfda12efcf

lib/std/deque.zig (new file, 433 lines)

@@ -0,0 +1,433 @@
const std = @import("std");
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;

/// A contiguous, growable, double-ended queue.
///
/// Pushing/popping items from either end of the queue is O(1).
pub fn Deque(comptime T: type) type {
    return struct {
        const Self = @This();

        /// A ring buffer.
        buffer: []T,
        /// The index in buffer where the first item in the logical deque is stored.
        head: usize,
        /// The number of items stored in the logical deque.
        len: usize,

        /// A Deque containing no elements.
        pub const empty: Self = .{
            .buffer = &.{},
            .head = 0,
            .len = 0,
        };

        /// Initialize with capacity to hold `capacity` elements.
        /// The resulting capacity will equal `capacity` exactly.
        /// Deinitialize with `deinit`.
        pub fn initCapacity(gpa: Allocator, capacity: usize) Allocator.Error!Self {
            var deque: Self = .empty;
            try deque.ensureTotalCapacityPrecise(gpa, capacity);
            return deque;
        }

        /// Initialize with externally-managed memory. The buffer determines the
        /// capacity and the deque is initially empty.
        ///
        /// When initialized this way, all functions that accept an Allocator
        /// argument cause illegal behavior.
        pub fn initBuffer(buffer: []T) Self {
            return .{
                .buffer = buffer,
                .head = 0,
                .len = 0,
            };
        }

        /// Release all allocated memory.
        pub fn deinit(deque: *Self, gpa: Allocator) void {
            gpa.free(deque.buffer);
            deque.* = undefined;
        }

        /// Modify the deque so that it can hold at least `new_capacity` items.
        /// Implements super-linear growth to achieve amortized O(1) push/pop operations.
        /// Invalidates element pointers if additional memory is needed.
        pub fn ensureTotalCapacity(deque: *Self, gpa: Allocator, new_capacity: usize) Allocator.Error!void {
            if (deque.buffer.len >= new_capacity) return;
            return deque.ensureTotalCapacityPrecise(gpa, growCapacity(deque.buffer.len, new_capacity));
        }

        /// If the current capacity is less than `new_capacity`, this function will
        /// modify the deque so that it can hold exactly `new_capacity` items.
        /// Invalidates element pointers if additional memory is needed.
        pub fn ensureTotalCapacityPrecise(deque: *Self, gpa: Allocator, new_capacity: usize) Allocator.Error!void {
            if (deque.buffer.len >= new_capacity) return;
            const old_buffer = deque.buffer;
            if (gpa.remap(old_buffer, new_capacity)) |new_buffer| {
                // If the items wrap around the end of the buffer we need to do
                // a memcpy to prevent a gap after resizing the buffer.
                if (deque.head > old_buffer.len - deque.len) {
                    // The gap splits the items in the deque into head and tail parts.
                    // Choose the shorter part to copy.
                    const head = new_buffer[deque.head..old_buffer.len];
                    const tail = new_buffer[0 .. deque.len - head.len];
                    if (head.len > tail.len and new_buffer.len - old_buffer.len > tail.len) {
                        @memcpy(new_buffer[old_buffer.len..][0..tail.len], tail);
                    } else {
                        // In this case overlap is possible if e.g. the capacity increase is 1
                        // and head.len is greater than 1.
                        deque.head = new_buffer.len - head.len;
                        @memmove(new_buffer[deque.head..][0..head.len], head);
                    }
                }
                deque.buffer = new_buffer;
            } else {
                const new_buffer = try gpa.alloc(T, new_capacity);
                if (deque.head < old_buffer.len - deque.len) {
                    @memcpy(new_buffer[0..deque.len], old_buffer[deque.head..][0..deque.len]);
                } else {
                    const head = old_buffer[deque.head..];
                    const tail = old_buffer[0 .. deque.len - head.len];
                    @memcpy(new_buffer[0..head.len], head);
                    @memcpy(new_buffer[head.len..][0..tail.len], tail);
                }
                deque.head = 0;
                deque.buffer = new_buffer;
                gpa.free(old_buffer);
            }
        }

        /// Modify the deque so that it can hold at least `additional_count` **more** items.
        /// Invalidates element pointers if additional memory is needed.
        pub fn ensureUnusedCapacity(
            deque: *Self,
            gpa: Allocator,
            additional_count: usize,
        ) Allocator.Error!void {
            return deque.ensureTotalCapacity(gpa, try addOrOom(deque.len, additional_count));
        }

        /// Add one item to the front of the deque.
        ///
        /// Invalidates element pointers if additional memory is needed.
        pub fn pushFront(deque: *Self, gpa: Allocator, item: T) error{OutOfMemory}!void {
            try deque.ensureUnusedCapacity(gpa, 1);
            deque.pushFrontAssumeCapacity(item);
        }

        /// Add one item to the front of the deque.
        ///
        /// Never invalidates element pointers.
        ///
        /// If the deque lacks unused capacity for the additional item, returns
        /// `error.OutOfMemory`.
        pub fn pushFrontBounded(deque: *Self, item: T) error{OutOfMemory}!void {
            if (deque.buffer.len - deque.len == 0) return error.OutOfMemory;
            return deque.pushFrontAssumeCapacity(item);
        }

        /// Add one item to the front of the deque.
        ///
        /// Never invalidates element pointers.
        ///
        /// Asserts that the deque can hold one additional item.
        pub fn pushFrontAssumeCapacity(deque: *Self, item: T) void {
            assert(deque.len < deque.buffer.len);
            if (deque.head == 0) {
                deque.head = deque.buffer.len;
            }
            deque.head -= 1;
            deque.buffer[deque.head] = item;
            deque.len += 1;
        }

        /// Add one item to the back of the deque.
        ///
        /// Invalidates element pointers if additional memory is needed.
        pub fn pushBack(deque: *Self, gpa: Allocator, item: T) error{OutOfMemory}!void {
            try deque.ensureUnusedCapacity(gpa, 1);
            deque.pushBackAssumeCapacity(item);
        }

        /// Add one item to the back of the deque.
        ///
        /// Never invalidates element pointers.
        ///
        /// If the deque lacks unused capacity for the additional item, returns
        /// `error.OutOfMemory`.
        pub fn pushBackBounded(deque: *Self, item: T) error{OutOfMemory}!void {
            if (deque.buffer.len - deque.len == 0) return error.OutOfMemory;
            deque.pushBackAssumeCapacity(item);
        }

        /// Add one item to the back of the deque.
        ///
        /// Never invalidates element pointers.
        ///
        /// Asserts that the deque can hold one additional item.
        pub fn pushBackAssumeCapacity(deque: *Self, item: T) void {
            assert(deque.len < deque.buffer.len);
            const buffer_index = deque.bufferIndex(deque.len);
            deque.buffer[buffer_index] = item;
            deque.len += 1;
        }

        /// Return the first item in the deque or null if empty.
        pub fn front(deque: *const Self) ?T {
            if (deque.len == 0) return null;
            return deque.buffer[deque.head];
        }

        /// Return the last item in the deque or null if empty.
        pub fn back(deque: *const Self) ?T {
            if (deque.len == 0) return null;
            return deque.buffer[deque.bufferIndex(deque.len - 1)];
        }

        /// Return the item at the given index in the deque.
        ///
        /// The first item in the queue is at index 0.
        ///
        /// Asserts that the index is in-bounds.
        pub fn at(deque: *const Self, index: usize) T {
            assert(index < deque.len);
            return deque.buffer[deque.bufferIndex(index)];
        }

        /// Remove and return the first item in the deque or null if empty.
        pub fn popFront(deque: *Self) ?T {
            if (deque.len == 0) return null;
            const pop_index = deque.head;
            deque.head = deque.bufferIndex(1);
            deque.len -= 1;
            return deque.buffer[pop_index];
        }

        /// Remove and return the last item in the deque or null if empty.
        pub fn popBack(deque: *Self) ?T {
            if (deque.len == 0) return null;
            deque.len -= 1;
            return deque.buffer[deque.bufferIndex(deque.len)];
        }

        pub const Iterator = struct {
            deque: *const Self,
            index: usize,

            pub fn next(it: *Iterator) ?T {
                if (it.index < it.deque.len) {
                    defer it.index += 1;
                    return it.deque.at(it.index);
                } else {
                    return null;
                }
            }
        };

        /// Iterates over all items in the deque in order from front to back.
        pub fn iterator(deque: *const Self) Iterator {
            return .{ .deque = deque, .index = 0 };
        }

        /// Returns the index in `buffer` where the element at the given
        /// index in the logical deque is stored.
        fn bufferIndex(deque: *const Self, index: usize) usize {
            // This function is written in this way to avoid overflow and
            // expensive division.
            const head_len = deque.buffer.len - deque.head;
            if (index < head_len) {
                return deque.head + index;
            } else {
                return index - head_len;
            }
        }

        const init_capacity: comptime_int = @max(1, std.atomic.cache_line / @sizeOf(T));

        /// Called when memory growth is necessary. Returns a capacity larger than
        /// minimum that grows super-linearly.
        fn growCapacity(current: usize, minimum: usize) usize {
            var new = current;
            while (true) {
                new +|= new / 2 + init_capacity;
                if (new >= minimum) return new;
            }
        }
    };
}

/// Integer addition returning `error.OutOfMemory` on overflow.
fn addOrOom(a: usize, b: usize) error{OutOfMemory}!usize {
    const result, const overflow = @addWithOverflow(a, b);
    if (overflow != 0) return error.OutOfMemory;
    return result;
}

test "basic" {
    const testing = std.testing;
    const gpa = testing.allocator;

    var q: Deque(u32) = .empty;
    defer q.deinit(gpa);

    try testing.expectEqual(null, q.popFront());
    try testing.expectEqual(null, q.popBack());

    try q.pushBack(gpa, 1);
    try q.pushBack(gpa, 2);
    try q.pushBack(gpa, 3);
    try q.pushFront(gpa, 0);

    try testing.expectEqual(0, q.popFront());
    try testing.expectEqual(1, q.popFront());
    try testing.expectEqual(3, q.popBack());
    try testing.expectEqual(2, q.popFront());
    try testing.expectEqual(null, q.popFront());
    try testing.expectEqual(null, q.popBack());
}

test "buffer" {
    const testing = std.testing;

    var buffer: [4]u32 = undefined;
    var q: Deque(u32) = .initBuffer(&buffer);

    try testing.expectEqual(null, q.popFront());
    try testing.expectEqual(null, q.popBack());

    try q.pushBackBounded(1);
    try q.pushBackBounded(2);
    try q.pushBackBounded(3);
    try q.pushFrontBounded(0);
    try testing.expectError(error.OutOfMemory, q.pushBackBounded(4));

    try testing.expectEqual(0, q.popFront());
    try testing.expectEqual(1, q.popFront());
    try testing.expectEqual(3, q.popBack());
    try testing.expectEqual(2, q.popFront());
    try testing.expectEqual(null, q.popFront());
    try testing.expectEqual(null, q.popBack());
}

test "slow growth" {
    const testing = std.testing;
    const gpa = testing.allocator;

    var q: Deque(i32) = .empty;
    defer q.deinit(gpa);

    try q.ensureTotalCapacityPrecise(gpa, 1);
    q.pushBackAssumeCapacity(1);
    try q.ensureTotalCapacityPrecise(gpa, 2);
    q.pushFrontAssumeCapacity(0);
    try q.ensureTotalCapacityPrecise(gpa, 3);
    q.pushBackAssumeCapacity(2);
    try q.ensureTotalCapacityPrecise(gpa, 5);
    q.pushBackAssumeCapacity(3);
    q.pushFrontAssumeCapacity(-1);
    try q.ensureTotalCapacityPrecise(gpa, 6);
    q.pushFrontAssumeCapacity(-2);

    try testing.expectEqual(-2, q.popFront());
    try testing.expectEqual(-1, q.popFront());
    try testing.expectEqual(3, q.popBack());
    try testing.expectEqual(0, q.popFront());
    try testing.expectEqual(2, q.popBack());
    try testing.expectEqual(1, q.popBack());
    try testing.expectEqual(null, q.popFront());
    try testing.expectEqual(null, q.popBack());
}

test "fuzz against ArrayList oracle" {
    try std.testing.fuzz({}, fuzzAgainstArrayList, .{});
}

test "dumb fuzz against ArrayList oracle" {
    const testing = std.testing;
    const gpa = testing.allocator;

    const input = try gpa.alloc(u8, 1024);
    defer gpa.free(input);

    var prng = std.Random.DefaultPrng.init(testing.random_seed);
    prng.random().bytes(input);

    try fuzzAgainstArrayList({}, input);
}

fn fuzzAgainstArrayList(_: void, input: []const u8) anyerror!void {
    const testing = std.testing;
    const gpa = testing.allocator;

    var q: Deque(u32) = .empty;
    defer q.deinit(gpa);
    var l: std.ArrayList(u32) = .empty;
    defer l.deinit(gpa);

    if (input.len < 2) return;

    var prng = std.Random.DefaultPrng.init(input[0]);
    const random = prng.random();

    const Action = enum {
        push_back,
        push_front,
        pop_back,
        pop_front,
        grow,
        /// Sentinel to avoid hardcoding the cast below
        max,
    };
    for (input[1..]) |byte| {
        switch (@as(Action, @enumFromInt(byte % (@intFromEnum(Action.max))))) {
            .push_back => {
                const item = random.int(u8);
                try testing.expectEqual(
                    l.appendBounded(item),
                    q.pushBackBounded(item),
                );
            },
            .push_front => {
                const item = random.int(u8);
                try testing.expectEqual(
                    l.insertBounded(0, item),
                    q.pushFrontBounded(item),
                );
            },
            .pop_back => {
                try testing.expectEqual(l.pop(), q.popBack());
            },
            .pop_front => {
                try testing.expectEqual(
                    if (l.items.len > 0) l.orderedRemove(0) else null,
                    q.popFront(),
                );
            },
            // Growing by small, random, linear amounts seems to better test
            // ensureTotalCapacityPrecise(), which is the most complex part
            // of the Deque implementation.
            .grow => {
                const growth = random.int(u3);
                try l.ensureTotalCapacityPrecise(gpa, l.items.len + growth);
                try q.ensureTotalCapacityPrecise(gpa, q.len + growth);
            },
            .max => unreachable,
        }
        try testing.expectEqual(l.getLastOrNull(), q.back());
        try testing.expectEqual(
            if (l.items.len > 0) l.items[0] else null,
            q.front(),
        );
        try testing.expectEqual(l.items.len, q.len);
        try testing.expectEqual(l.capacity, q.buffer.len);
        {
            var it = q.iterator();
            for (l.items) |item| {
                try testing.expectEqual(item, it.next());
            }
            try testing.expectEqual(null, it.next());
        }
    }
}
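The central invariant above is the logical-to-physical index mapping: item `i` of the deque lives at `buffer[head + i]`, wrapping past the end of the buffer, which `bufferIndex` computes with a comparison rather than a modulo. A minimal illustration of a wrapped state, using only the API from this commit (not part of the change itself):

const std = @import("std");

test "wrap-around illustration" {
    var storage: [4]u32 = undefined;
    var q: std.Deque(u32) = .initBuffer(&storage);

    q.pushBackAssumeCapacity(10); // stored at buffer[0]
    q.pushBackAssumeCapacity(20); // stored at buffer[1]
    q.pushFrontAssumeCapacity(30); // head wraps from 0 to buffer.len - 1

    // Logical order is 30, 10, 20, but the physical run straddles the end.
    try std.testing.expectEqual(@as(usize, 3), q.head);
    try std.testing.expectEqual(30, q.at(0)); // buffer[3]
    try std.testing.expectEqual(10, q.at(1)); // buffer[0], wrapped
    try std.testing.expectEqual(20, q.at(2)); // buffer[1]
}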
lib/std/std.zig:

@@ -10,6 +10,7 @@ pub const BufMap = @import("buf_map.zig").BufMap;
 pub const BufSet = @import("buf_set.zig").BufSet;
 pub const StaticStringMap = static_string_map.StaticStringMap;
 pub const StaticStringMapWithEql = static_string_map.StaticStringMapWithEql;
+pub const Deque = @import("deque.zig").Deque;
 pub const DoublyLinkedList = @import("DoublyLinkedList.zig");
 pub const DynLib = @import("dynamic_library.zig").DynLib;
 pub const DynamicBitSet = bit_set.DynamicBitSet;
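With this export the type is reachable as `std.Deque`. A minimal heap-allocated usage sketch (illustrative, not part of the commit):

const std = @import("std");

test "std.Deque basic usage" {
    const gpa = std.testing.allocator;

    var q: std.Deque(u8) = .empty;
    defer q.deinit(gpa);

    try q.pushBack(gpa, 'a');
    try q.pushFront(gpa, 'b'); // 'b' now precedes 'a'
    try std.testing.expectEqual('b', q.popFront());
    try std.testing.expectEqual('a', q.popBack());
}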
src/Compilation.zig:

@@ -45,8 +45,6 @@ const Builtin = @import("Builtin.zig");
 const LlvmObject = @import("codegen/llvm.zig").Object;
 const dev = @import("dev.zig");
 
-const DeprecatedLinearFifo = @import("deprecated.zig").LinearFifo;
-
 pub const Config = @import("Compilation/Config.zig");
 
 /// General-purpose allocator. Used for both temporary and long-term storage.

@@ -124,20 +122,21 @@ work_queues: [
         }
         break :len len;
     }
-]DeprecatedLinearFifo(Job),
+]std.Deque(Job),
 
 /// These jobs are to invoke the Clang compiler to create an object file, which
 /// gets linked with the Compilation.
-c_object_work_queue: DeprecatedLinearFifo(*CObject),
+c_object_work_queue: std.Deque(*CObject),
 
 /// These jobs are to invoke the RC compiler to create a compiled resource file (.res), which
 /// gets linked with the Compilation.
-win32_resource_work_queue: if (dev.env.supports(.win32_resource)) DeprecatedLinearFifo(*Win32Resource) else struct {
-    pub fn ensureUnusedCapacity(_: @This(), _: u0) error{}!void {}
-    pub fn readItem(_: @This()) ?noreturn {
+win32_resource_work_queue: if (dev.env.supports(.win32_resource)) std.Deque(*Win32Resource) else struct {
+    pub const empty: @This() = .{};
+    pub fn ensureUnusedCapacity(_: @This(), _: Allocator, _: u0) error{}!void {}
+    pub fn popFront(_: @This()) ?noreturn {
         return null;
     }
-    pub fn deinit(_: @This()) void {}
+    pub fn deinit(_: @This(), _: Allocator) void {}
 },
 
 /// The ErrorMsg memory is owned by the `CObject`, using Compilation's general purpose allocator.

@@ -2236,9 +2235,9 @@ pub fn create(gpa: Allocator, arena: Allocator, diag: *CreateDiagnostic, options
         .root_mod = options.root_mod,
         .config = options.config,
         .dirs = options.dirs,
-        .work_queues = @splat(.init(gpa)),
-        .c_object_work_queue = .init(gpa),
-        .win32_resource_work_queue = if (dev.env.supports(.win32_resource)) .init(gpa) else .{},
+        .work_queues = @splat(.empty),
+        .c_object_work_queue = .empty,
+        .win32_resource_work_queue = .empty,
         .c_source_files = options.c_source_files,
         .rc_source_files = options.rc_source_files,
         .cache_parent = cache,

@@ -2702,9 +2701,9 @@ pub fn destroy(comp: *Compilation) void {
     if (comp.zcu) |zcu| zcu.deinit();
     comp.cache_use.deinit();
 
-    for (&comp.work_queues) |*work_queue| work_queue.deinit();
-    comp.c_object_work_queue.deinit();
-    comp.win32_resource_work_queue.deinit();
+    for (&comp.work_queues) |*work_queue| work_queue.deinit(gpa);
+    comp.c_object_work_queue.deinit(gpa);
+    comp.win32_resource_work_queue.deinit(gpa);
 
     for (comp.windows_libs.keys()) |windows_lib| gpa.free(windows_lib);
     comp.windows_libs.deinit(gpa);

@@ -3019,17 +3018,17 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) UpdateE
 
     // For compiling C objects, we rely on the cache hash system to avoid duplicating work.
     // Add a Job for each C object.
-    try comp.c_object_work_queue.ensureUnusedCapacity(comp.c_object_table.count());
+    try comp.c_object_work_queue.ensureUnusedCapacity(gpa, comp.c_object_table.count());
     for (comp.c_object_table.keys()) |c_object| {
-        comp.c_object_work_queue.writeItemAssumeCapacity(c_object);
+        comp.c_object_work_queue.pushBackAssumeCapacity(c_object);
         try comp.appendFileSystemInput(try .fromUnresolved(arena, comp.dirs, &.{c_object.src.src_path}));
     }
 
     // For compiling Win32 resources, we rely on the cache hash system to avoid duplicating work.
     // Add a Job for each Win32 resource file.
-    try comp.win32_resource_work_queue.ensureUnusedCapacity(comp.win32_resource_table.count());
+    try comp.win32_resource_work_queue.ensureUnusedCapacity(gpa, comp.win32_resource_table.count());
     for (comp.win32_resource_table.keys()) |win32_resource| {
-        comp.win32_resource_work_queue.writeItemAssumeCapacity(win32_resource);
+        comp.win32_resource_work_queue.pushBackAssumeCapacity(win32_resource);
         switch (win32_resource.src) {
             .rc => |f| {
                 try comp.appendFileSystemInput(try .fromUnresolved(arena, comp.dirs, &.{f.src_path}));

@@ -4871,14 +4870,14 @@ fn performAllTheWork(
         }
     }
 
-    while (comp.c_object_work_queue.readItem()) |c_object| {
+    while (comp.c_object_work_queue.popFront()) |c_object| {
         comp.link_task_queue.startPrelinkItem();
         comp.thread_pool.spawnWg(&comp.link_task_wait_group, workerUpdateCObject, .{
            comp, c_object, main_progress_node,
        });
    }
 
-    while (comp.win32_resource_work_queue.readItem()) |win32_resource| {
+    while (comp.win32_resource_work_queue.popFront()) |win32_resource| {
         comp.link_task_queue.startPrelinkItem();
         comp.thread_pool.spawnWg(&comp.link_task_wait_group, workerUpdateWin32Resource, .{
            comp, win32_resource, main_progress_node,

@@ -4998,7 +4997,7 @@ fn performAllTheWork(
     }
 
     work: while (true) {
-        for (&comp.work_queues) |*work_queue| if (work_queue.readItem()) |job| {
+        for (&comp.work_queues) |*work_queue| if (work_queue.popFront()) |job| {
             try processOneJob(@intFromEnum(Zcu.PerThread.Id.main), comp, job);
             continue :work;
         };

@@ -5027,7 +5026,7 @@ fn performAllTheWork(
 const JobError = Allocator.Error;
 
 pub fn queueJob(comp: *Compilation, job: Job) !void {
-    try comp.work_queues[Job.stage(job)].writeItem(job);
+    try comp.work_queues[Job.stage(job)].pushBack(comp.gpa, job);
 }
 
 pub fn queueJobs(comp: *Compilation, jobs: []const Job) !void {
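The migration pattern in the hunks above is mechanical: the deprecated LinearFifo captured its allocator at init time, while Deque is unmanaged, so the allocator is passed explicitly to every call that may allocate or free, and readItem/writeItem become popFront/pushBack. A hedged before/after sketch of the pattern:

const std = @import("std");

test "LinearFifo to Deque migration pattern" {
    const gpa = std.testing.allocator;

    // Before (deprecated): var q = LinearFifo(u32).init(gpa);
    //                      try q.writeItem(1); _ = q.readItem(); q.deinit();
    // After: unmanaged, so the allocator travels with each mutating call.
    var q: std.Deque(u32) = .empty;
    defer q.deinit(gpa);

    try q.pushBack(gpa, 1); // was writeItem(1)
    try std.testing.expectEqual(1, q.popFront()); // was readItem()
}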
deprecated.zig (deleted file; all 169 lines below are removed):

@@ -1,169 +0,0 @@
//! Deprecated. Stop using this API

const std = @import("std");
const math = std.math;
const mem = std.mem;
const Allocator = mem.Allocator;
const assert = std.debug.assert;
const testing = std.testing;

pub fn LinearFifo(comptime T: type) type {
    return struct {
        allocator: Allocator,
        buf: []T,
        head: usize,
        count: usize,

        const Self = @This();

        pub fn init(allocator: Allocator) Self {
            return .{
                .allocator = allocator,
                .buf = &.{},
                .head = 0,
                .count = 0,
            };
        }

        pub fn deinit(self: *Self) void {
            self.allocator.free(self.buf);
            self.* = undefined;
        }

        pub fn realign(self: *Self) void {
            if (self.buf.len - self.head >= self.count) {
                mem.copyForwards(T, self.buf[0..self.count], self.buf[self.head..][0..self.count]);
                self.head = 0;
            } else {
                var tmp: [4096 / 2 / @sizeOf(T)]T = undefined;

                while (self.head != 0) {
                    const n = @min(self.head, tmp.len);
                    const m = self.buf.len - n;
                    @memcpy(tmp[0..n], self.buf[0..n]);
                    mem.copyForwards(T, self.buf[0..m], self.buf[n..][0..m]);
                    @memcpy(self.buf[m..][0..n], tmp[0..n]);
                    self.head -= n;
                }
            }
            { // set unused area to undefined
                const unused = mem.sliceAsBytes(self.buf[self.count..]);
                @memset(unused, undefined);
            }
        }

        /// Ensure that the buffer can fit at least `size` items
        pub fn ensureTotalCapacity(self: *Self, size: usize) !void {
            if (self.buf.len >= size) return;
            self.realign();
            const new_size = math.ceilPowerOfTwo(usize, size) catch return error.OutOfMemory;
            self.buf = try self.allocator.realloc(self.buf, new_size);
        }

        /// Makes sure at least `size` items are unused
        pub fn ensureUnusedCapacity(self: *Self, size: usize) error{OutOfMemory}!void {
            if (self.writableLength() >= size) return;

            return try self.ensureTotalCapacity(math.add(usize, self.count, size) catch return error.OutOfMemory);
        }

        /// Returns a writable slice from the 'read' end of the fifo
        fn readableSliceMut(self: Self, offset: usize) []T {
            if (offset > self.count) return &[_]T{};

            var start = self.head + offset;
            if (start >= self.buf.len) {
                start -= self.buf.len;
                return self.buf[start .. start + (self.count - offset)];
            } else {
                const end = @min(self.head + self.count, self.buf.len);
                return self.buf[start..end];
            }
        }

        /// Discard first `count` items in the fifo
        pub fn discard(self: *Self, count: usize) void {
            assert(count <= self.count);
            { // set old range to undefined. Note: may be wrapped around
                const slice = self.readableSliceMut(0);
                if (slice.len >= count) {
                    const unused = mem.sliceAsBytes(slice[0..count]);
                    @memset(unused, undefined);
                } else {
                    const unused = mem.sliceAsBytes(slice[0..]);
                    @memset(unused, undefined);
                    const unused2 = mem.sliceAsBytes(self.readableSliceMut(slice.len)[0 .. count - slice.len]);
                    @memset(unused2, undefined);
                }
            }
            var head = self.head + count;
            // Note it is safe to do a wrapping subtract as
            // bitwise & with all 1s is a noop
            head &= self.buf.len -% 1;
            self.head = head;
            self.count -= count;
        }

        /// Read the next item from the fifo
        pub fn readItem(self: *Self) ?T {
            if (self.count == 0) return null;

            const c = self.buf[self.head];
            self.discard(1);
            return c;
        }

        /// Returns number of items available in fifo
        pub fn writableLength(self: Self) usize {
            return self.buf.len - self.count;
        }

        /// Returns the first section of writable buffer.
        /// Note that this may be of length 0
        pub fn writableSlice(self: Self, offset: usize) []T {
            if (offset > self.buf.len) return &[_]T{};

            const tail = self.head + offset + self.count;
            if (tail < self.buf.len) {
                return self.buf[tail..];
            } else {
                return self.buf[tail - self.buf.len ..][0 .. self.writableLength() - offset];
            }
        }

        /// Update the tail location of the buffer (usually follows use of writable/writableWithSize)
        pub fn update(self: *Self, count: usize) void {
            assert(self.count + count <= self.buf.len);
            self.count += count;
        }

        /// Appends the data in `src` to the fifo.
        /// You must have ensured there is enough space.
        pub fn writeAssumeCapacity(self: *Self, src: []const T) void {
            assert(self.writableLength() >= src.len);

            var src_left = src;
            while (src_left.len > 0) {
                const writable_slice = self.writableSlice(0);
                assert(writable_slice.len != 0);
                const n = @min(writable_slice.len, src_left.len);
                @memcpy(writable_slice[0..n], src_left[0..n]);
                self.update(n);
                src_left = src_left[n..];
            }
        }

        /// Write a single item to the fifo
        pub fn writeItem(self: *Self, item: T) !void {
            try self.ensureUnusedCapacity(1);
            return self.writeItemAssumeCapacity(item);
        }

        pub fn writeItemAssumeCapacity(self: *Self, item: T) void {
            var tail = self.head + self.count;
            tail &= self.buf.len - 1;
            self.buf[tail] = item;
            self.update(1);
        }
    };
}
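One structural difference worth noting: LinearFifo keeps its capacity a power of two (ceilPowerOfTwo in ensureTotalCapacity), so wrapping reduces to the bit masks seen in discard and writeItemAssumeCapacity, whereas Deque supports exact capacities and wraps with a comparison in bufferIndex. A small sketch contrasting the two strategies, with hypothetical helper names:

const std = @import("std");

// Hypothetical helpers, not from the commit.
fn wrapMask(index: usize, capacity: usize) usize {
    // Valid only when capacity is a power of two (LinearFifo's invariant).
    return index & (capacity - 1);
}

fn wrapCompare(index: usize, capacity: usize) usize {
    // Works for any capacity, provided index < 2 * capacity, which holds in
    // Deque because head + i is always less than twice the buffer length.
    return if (index < capacity) index else index - capacity;
}

test "wrap strategies agree on power-of-two capacities" {
    for (0..16) |i| {
        try std.testing.expectEqual(wrapCompare(i, 8), wrapMask(i, 8));
    }
}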