de-genericify SinglyLinkedList

By making it always intrusive, we make it a more broadly useful API and
avoid binary bloat.
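
The node type no longer carries a data payload: users embed a
`std.SinglyLinkedList.Node` inside their own struct and recover the containing
struct with `@fieldParentPtr`. A minimal sketch of the new usage (the `Task`
type is illustrative; the shape mirrors the test added in this commit):

const std = @import("std");

// Any user type embeds the intrusive node alongside its data.
const Task = struct {
    id: u32,
    node: std.SinglyLinkedList.Node = .{},
};

test "intrusive usage" {
    var list: std.SinglyLinkedList = .{};
    var task: Task = .{ .id = 1 };
    list.prepend(&task.node);

    // Recover the containing struct from the embedded node pointer.
    const t: *Task = @fieldParentPtr("node", list.first.?);
    try std.testing.expect(t.id == 1);
}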
Andrew Kelley 2025-04-03 14:11:38 -07:00
parent e9220525e8
commit 1639fcea43
6 changed files with 207 additions and 202 deletions

CMakeLists.txt

@@ -444,7 +444,6 @@ set(ZIG_STAGE2_SOURCES
lib/std/json.zig
lib/std/json/stringify.zig
lib/std/leb128.zig
-lib/std/linked_list.zig
lib/std/log.zig
lib/std/macho.zig
lib/std/math.zig

lib/std/SinglyLinkedList.zig (new file)

@@ -0,0 +1,166 @@
//! A singly-linked list is headed by a single forward pointer. The elements
//! are singly-linked for minimum space and pointer manipulation overhead at
//! the expense of O(n) removal for arbitrary elements. New elements can be
//! added to the list after an existing element or at the head of the list.
//!
//! A singly-linked list may only be traversed in the forward direction.
//!
//! Singly-linked lists are useful under these conditions:
//! * Ability to preallocate elements / requirement of infallibility for
//! insertion.
//! * Ability to allocate elements intrusively along with other data.
//! * Homogeneous elements.
const std = @import("std.zig");
const debug = std.debug;
const assert = debug.assert;
const testing = std.testing;
const SinglyLinkedList = @This();
first: ?*Node = null,
/// This struct contains only a next pointer and not any data payload. The
/// intended usage is to embed it intrusively into another data structure and
/// access the data with `@fieldParentPtr`.
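///
/// For example (an illustrative sketch; the test at the bottom of this file
/// shows the full pattern):
///
///     const Item = struct {
///         data: u32,
///         node: SinglyLinkedList.Node = .{},
///     };
///     // Given some `node: *SinglyLinkedList.Node` stored in a list:
///     const item: *Item = @fieldParentPtr("node", node);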
pub const Node = struct {
next: ?*Node = null,
pub fn insertAfter(node: *Node, new_node: *Node) void {
new_node.next = node.next;
node.next = new_node;
}
/// Remove the node after the one provided, returning it.
pub fn removeNext(node: *Node) ?*Node {
const next_node = node.next orelse return null;
node.next = next_node.next;
return next_node;
}
/// Iterate over the singly-linked list from this node, until the final
/// node is found.
///
/// This operation is O(N). Instead of calling this function, consider
/// using a different data structure.
pub fn findLast(node: *Node) *Node {
var it = node;
while (true) {
it = it.next orelse return it;
}
}
/// Iterate over each next node, returning the count of all nodes except
/// the starting one.
///
/// This operation is O(N). Instead of calling this function, consider
/// using a different data structure.
pub fn countChildren(node: *const Node) usize {
var count: usize = 0;
var it: ?*const Node = node.next;
while (it) |n| : (it = n.next) {
count += 1;
}
return count;
}
/// Reverse the list starting from this node in-place.
///
/// This operation is O(N). Instead of calling this function, consider
/// using a different data structure.
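///
/// For example, if `list.first` points at the chain {1, 2, 3}, then after
/// `SinglyLinkedList.Node.reverse(&list.first)` the chain is {3, 2, 1}.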
pub fn reverse(indirect: *?*Node) void {
if (indirect.* == null) {
return;
}
var current: *Node = indirect.*.?;
while (current.next) |next| {
current.next = next.next;
next.next = indirect.*;
indirect.* = next;
}
}
};
pub fn prepend(list: *SinglyLinkedList, new_node: *Node) void {
new_node.next = list.first;
list.first = new_node;
}
pub fn remove(list: *SinglyLinkedList, node: *Node) void {
if (list.first == node) {
list.first = node.next;
} else {
var current_elm = list.first.?;
while (current_elm.next != node) {
current_elm = current_elm.next.?;
}
current_elm.next = node.next;
}
}
/// Remove and return the first node in the list.
pub fn popFirst(list: *SinglyLinkedList) ?*Node {
const first = list.first orelse return null;
list.first = first.next;
return first;
}
/// Iterate over all nodes, returning the count.
///
/// This operation is O(N). Consider tracking the length separately rather than
/// computing it.
pub fn len(list: SinglyLinkedList) usize {
if (list.first) |n| {
return 1 + n.countChildren();
} else {
return 0;
}
}
test "basics" {
const L = struct {
data: u32,
node: SinglyLinkedList.Node = .{},
};
var list: SinglyLinkedList = .{};
try testing.expect(list.len() == 0);
var one: L = .{ .data = 1 };
var two: L = .{ .data = 2 };
var three: L = .{ .data = 3 };
var four: L = .{ .data = 4 };
var five: L = .{ .data = 5 };
list.prepend(&two.node); // {2}
two.node.insertAfter(&five.node); // {2, 5}
list.prepend(&one.node); // {1, 2, 5}
two.node.insertAfter(&three.node); // {1, 2, 3, 5}
three.node.insertAfter(&four.node); // {1, 2, 3, 4, 5}
try testing.expect(list.len() == 5);
// Traverse forwards.
{
var it = list.first;
var index: u32 = 1;
while (it) |node| : (it = node.next) {
const l: *L = @fieldParentPtr("node", node);
try testing.expect(l.data == index);
index += 1;
}
}
_ = list.popFirst(); // {2, 3, 4, 5}
_ = list.remove(&five.node); // {2, 3, 4}
_ = two.node.removeNext(); // {2, 4}
try testing.expect(@as(*L, @fieldParentPtr("node", list.first.?)).data == 2);
try testing.expect(@as(*L, @fieldParentPtr("node", list.first.?.next.?)).data == 4);
try testing.expect(list.first.?.next.?.next == null);
SinglyLinkedList.Node.reverse(&list.first);
try testing.expect(@as(*L, @fieldParentPtr("node", list.first.?)).data == 4);
try testing.expect(@as(*L, @fieldParentPtr("node", list.first.?.next.?)).data == 2);
try testing.expect(list.first.?.next.?.next == null);
}

lib/std/Thread/Pool.zig

@@ -5,7 +5,7 @@ const WaitGroup = @import("WaitGroup.zig");
mutex: std.Thread.Mutex = .{},
cond: std.Thread.Condition = .{},
-run_queue: RunQueue = .{},
+run_queue: std.SinglyLinkedList = .{},
is_running: bool = true,
allocator: std.mem.Allocator,
threads: if (builtin.single_threaded) [0]std.Thread else []std.Thread,
@@ -16,9 +16,9 @@ ids: if (builtin.single_threaded) struct {
}
} else std.AutoArrayHashMapUnmanaged(std.Thread.Id, void),
-const RunQueue = std.SinglyLinkedList(Runnable);
const Runnable = struct {
runFn: RunProto,
+node: std.SinglyLinkedList.Node = .{},
};
const RunProto = *const fn (*Runnable, id: ?usize) void;
@@ -110,12 +110,11 @@ pub fn spawnWg(pool: *Pool, wait_group: *WaitGroup, comptime func: anytype, args
const Closure = struct {
arguments: Args,
pool: *Pool,
-run_node: RunQueue.Node = .{ .data = .{ .runFn = runFn } },
+runnable: Runnable = .{ .runFn = runFn },
wait_group: *WaitGroup,
fn runFn(runnable: *Runnable, _: ?usize) void {
-const run_node: *RunQueue.Node = @fieldParentPtr("data", runnable);
-const closure: *@This() = @alignCast(@fieldParentPtr("run_node", run_node));
+const closure: *@This() = @alignCast(@fieldParentPtr("runnable", runnable));
@call(.auto, func, closure.arguments);
closure.wait_group.finish();
@@ -143,7 +142,7 @@ pub fn spawnWg(pool: *Pool, wait_group: *WaitGroup, comptime func: anytype, args
.wait_group = wait_group,
};
-pool.run_queue.prepend(&closure.run_node);
+pool.run_queue.prepend(&closure.runnable.node);
pool.mutex.unlock();
}
@@ -173,12 +172,11 @@ pub fn spawnWgId(pool: *Pool, wait_group: *WaitGroup, comptime func: anytype, ar
const Closure = struct {
arguments: Args,
pool: *Pool,
-run_node: RunQueue.Node = .{ .data = .{ .runFn = runFn } },
+runnable: Runnable = .{ .runFn = runFn },
wait_group: *WaitGroup,
fn runFn(runnable: *Runnable, id: ?usize) void {
-const run_node: *RunQueue.Node = @fieldParentPtr("data", runnable);
-const closure: *@This() = @alignCast(@fieldParentPtr("run_node", run_node));
+const closure: *@This() = @alignCast(@fieldParentPtr("runnable", runnable));
@call(.auto, func, .{id.?} ++ closure.arguments);
closure.wait_group.finish();
@@ -207,7 +205,7 @@ pub fn spawnWgId(pool: *Pool, wait_group: *WaitGroup, comptime func: anytype, ar
.wait_group = wait_group,
};
-pool.run_queue.prepend(&closure.run_node);
+pool.run_queue.prepend(&closure.runnable.node);
pool.mutex.unlock();
}
@@ -225,11 +223,10 @@ pub fn spawn(pool: *Pool, comptime func: anytype, args: anytype) !void {
const Closure = struct {
arguments: Args,
pool: *Pool,
-run_node: RunQueue.Node = .{ .data = .{ .runFn = runFn } },
+runnable: Runnable = .{ .runFn = runFn },
fn runFn(runnable: *Runnable, _: ?usize) void {
-const run_node: *RunQueue.Node = @fieldParentPtr("data", runnable);
-const closure: *@This() = @alignCast(@fieldParentPtr("run_node", run_node));
+const closure: *@This() = @alignCast(@fieldParentPtr("runnable", runnable));
@call(.auto, func, closure.arguments);
// The thread pool's allocator is protected by the mutex.
@@ -251,7 +248,7 @@ pub fn spawn(pool: *Pool, comptime func: anytype, args: anytype) !void {
.pool = pool,
};
-pool.run_queue.prepend(&closure.run_node);
+pool.run_queue.prepend(&closure.runnable.node);
}
// Notify waiting threads outside the lock to try and keep the critical section small.
@@ -292,7 +289,8 @@ fn worker(pool: *Pool) void {
pool.mutex.unlock();
defer pool.mutex.lock();
-run_node.data.runFn(&run_node.data, id);
+const runnable: *Runnable = @fieldParentPtr("node", run_node);
+runnable.runFn(runnable, id);
}
// Stop executing instead of waiting if the thread pool is no longer running.
@@ -312,7 +310,8 @@ pub fn waitAndWork(pool: *Pool, wait_group: *WaitGroup) void {
if (pool.run_queue.popFirst()) |run_node| {
id = id orelse pool.ids.getIndex(std.Thread.getCurrentId());
pool.mutex.unlock();
-run_node.data.runFn(&run_node.data, id);
+const runnable: *Runnable = @fieldParentPtr("node", run_node);
+runnable.runFn(runnable, id);
continue;
}

lib/std/heap/arena_allocator.zig

@@ -14,7 +14,7 @@ pub const ArenaAllocator = struct {
/// Inner state of ArenaAllocator. Can be stored rather than the entire ArenaAllocator
/// as a memory-saving optimization.
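///
/// For example (an illustrative sketch, not part of this diff):
///
///     var state: ArenaAllocator.State = .{};
///     var arena = state.promote(child_allocator);
///     defer state = arena.state; // persist the updated state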
pub const State = struct {
-buffer_list: std.SinglyLinkedList(usize) = .{},
+buffer_list: std.SinglyLinkedList = .{},
end_index: usize = 0,
pub fn promote(self: State, child_allocator: Allocator) ArenaAllocator {
@@ -37,7 +37,10 @@
};
}
-const BufNode = std.SinglyLinkedList(usize).Node;
+const BufNode = struct {
+data: usize,
+node: std.SinglyLinkedList.Node = .{},
+};
const BufNode_alignment: mem.Alignment = .fromByteUnits(@alignOf(BufNode));
pub fn init(child_allocator: Allocator) ArenaAllocator {
@@ -51,7 +54,8 @@
while (it) |node| {
// this has to occur before the free because the free frees node
const next_it = node.next;
-const alloc_buf = @as([*]u8, @ptrCast(node))[0..node.data];
+const buf_node: *BufNode = @fieldParentPtr("node", node);
+const alloc_buf = @as([*]u8, @ptrCast(buf_node))[0..buf_node.data];
self.child_allocator.rawFree(alloc_buf, BufNode_alignment, @returnAddress());
it = next_it;
}
@@ -78,7 +82,8 @@
while (it) |node| : (it = node.next) {
// Compute the actually allocated size excluding the
// linked list node.
-size += node.data - @sizeOf(BufNode);
+const buf_node: *BufNode = @fieldParentPtr("node", node);
+size += buf_node.data - @sizeOf(BufNode);
}
return size;
}
@@ -130,7 +135,8 @@
const next_it = node.next;
if (next_it == null)
break node;
-const alloc_buf = @as([*]u8, @ptrCast(node))[0..node.data];
+const buf_node: *BufNode = @fieldParentPtr("node", node);
+const alloc_buf = @as([*]u8, @ptrCast(buf_node))[0..buf_node.data];
self.child_allocator.rawFree(alloc_buf, BufNode_alignment, @returnAddress());
it = next_it;
} else null;
@@ -140,12 +146,13 @@
if (maybe_first_node) |first_node| {
self.state.buffer_list.first = first_node;
// perfect, no need to invoke the child_allocator
-if (first_node.data == total_size)
+const first_buf_node: *BufNode = @fieldParentPtr("node", first_node);
+if (first_buf_node.data == total_size)
return true;
-const first_alloc_buf = @as([*]u8, @ptrCast(first_node))[0..first_node.data];
+const first_alloc_buf = @as([*]u8, @ptrCast(first_buf_node))[0..first_buf_node.data];
if (self.child_allocator.rawResize(first_alloc_buf, BufNode_alignment, total_size, @returnAddress())) {
// successful resize
-first_node.data = total_size;
+first_buf_node.data = total_size;
} else {
// manual realloc
const new_ptr = self.child_allocator.rawAlloc(total_size, BufNode_alignment, @returnAddress()) orelse {
@@ -153,9 +160,9 @@
return false;
};
self.child_allocator.rawFree(first_alloc_buf, BufNode_alignment, @returnAddress());
-const node: *BufNode = @ptrCast(@alignCast(new_ptr));
-node.* = .{ .data = total_size };
-self.state.buffer_list.first = node;
+const buf_node: *BufNode = @ptrCast(@alignCast(new_ptr));
+buf_node.* = .{ .data = total_size };
+self.state.buffer_list.first = &buf_node.node;
}
}
return true;
@@ -169,7 +176,7 @@
return null;
const buf_node: *BufNode = @ptrCast(@alignCast(ptr));
buf_node.* = .{ .data = len };
-self.state.buffer_list.prepend(buf_node);
+self.state.buffer_list.prepend(&buf_node.node);
self.state.end_index = 0;
return buf_node;
}
@@ -179,8 +186,8 @@
_ = ra;
const ptr_align = alignment.toByteUnits();
-var cur_node = if (self.state.buffer_list.first) |first_node|
-first_node
+var cur_node: *BufNode = if (self.state.buffer_list.first) |first_node|
+@fieldParentPtr("node", first_node)
else
(self.createNode(0, n + ptr_align) orelse return null);
while (true) {
@@ -213,7 +220,8 @@
_ = ret_addr;
const cur_node = self.state.buffer_list.first orelse return false;
-const cur_buf = @as([*]u8, @ptrCast(cur_node))[@sizeOf(BufNode)..cur_node.data];
+const cur_buf_node: *BufNode = @fieldParentPtr("node", cur_node);
+const cur_buf = @as([*]u8, @ptrCast(cur_buf_node))[@sizeOf(BufNode)..cur_buf_node.data];
if (@intFromPtr(cur_buf.ptr) + self.state.end_index != @intFromPtr(buf.ptr) + buf.len) {
// It's not the most recent allocation, so it cannot be expanded,
// but it's fine if they want to make it smaller.
@@ -248,7 +256,8 @@
const self: *ArenaAllocator = @ptrCast(@alignCast(ctx));
const cur_node = self.state.buffer_list.first orelse return;
-const cur_buf = @as([*]u8, @ptrCast(cur_node))[@sizeOf(BufNode)..cur_node.data];
+const cur_buf_node: *BufNode = @fieldParentPtr("node", cur_node);
+const cur_buf = @as([*]u8, @ptrCast(cur_buf_node))[@sizeOf(BufNode)..cur_buf_node.data];
if (@intFromPtr(cur_buf.ptr) + self.state.end_index == @intFromPtr(buf.ptr) + buf.len) {
self.state.end_index -= buf.len;

lib/std/linked_list.zig (generic SinglyLinkedList removed)

@@ -3,174 +3,6 @@ const debug = std.debug;
const assert = debug.assert;
const testing = std.testing;
/// A singly-linked list is headed by a single forward pointer. The elements
/// are singly-linked for minimum space and pointer manipulation overhead at
/// the expense of O(n) removal for arbitrary elements. New elements can be
/// added to the list after an existing element or at the head of the list.
/// A singly-linked list may only be traversed in the forward direction.
/// Singly-linked lists are ideal for applications with large datasets and
/// few or no removals or for implementing a LIFO queue.
pub fn SinglyLinkedList(comptime T: type) type {
return struct {
const Self = @This();
/// Node inside the linked list wrapping the actual data.
pub const Node = struct {
next: ?*Node = null,
data: T,
pub const Data = T;
/// Insert a new node after the current one.
///
/// Arguments:
/// new_node: Pointer to the new node to insert.
pub fn insertAfter(node: *Node, new_node: *Node) void {
new_node.next = node.next;
node.next = new_node;
}
/// Remove a node from the list.
///
/// Arguments:
/// node: Pointer to the node to be removed.
/// Returns:
/// node removed
pub fn removeNext(node: *Node) ?*Node {
const next_node = node.next orelse return null;
node.next = next_node.next;
return next_node;
}
/// Iterate over the singly-linked list from this node, until the final node is found.
/// This operation is O(N).
pub fn findLast(node: *Node) *Node {
var it = node;
while (true) {
it = it.next orelse return it;
}
}
/// Iterate over each next node, returning the count of all nodes except the starting one.
/// This operation is O(N).
pub fn countChildren(node: *const Node) usize {
var count: usize = 0;
var it: ?*const Node = node.next;
while (it) |n| : (it = n.next) {
count += 1;
}
return count;
}
/// Reverse the list starting from this node in-place.
/// This operation is O(N).
pub fn reverse(indirect: *?*Node) void {
if (indirect.* == null) {
return;
}
var current: *Node = indirect.*.?;
while (current.next) |next| {
current.next = next.next;
next.next = indirect.*;
indirect.* = next;
}
}
};
first: ?*Node = null,
/// Insert a new node at the head.
///
/// Arguments:
/// new_node: Pointer to the new node to insert.
pub fn prepend(list: *Self, new_node: *Node) void {
new_node.next = list.first;
list.first = new_node;
}
/// Remove a node from the list.
///
/// Arguments:
/// node: Pointer to the node to be removed.
pub fn remove(list: *Self, node: *Node) void {
if (list.first == node) {
list.first = node.next;
} else {
var current_elm = list.first.?;
while (current_elm.next != node) {
current_elm = current_elm.next.?;
}
current_elm.next = node.next;
}
}
/// Remove and return the first node in the list.
///
/// Returns:
/// A pointer to the first node in the list.
pub fn popFirst(list: *Self) ?*Node {
const first = list.first orelse return null;
list.first = first.next;
return first;
}
/// Iterate over all nodes, returning the count.
/// This operation is O(N).
pub fn len(list: Self) usize {
if (list.first) |n| {
return 1 + n.countChildren();
} else {
return 0;
}
}
};
}
test "basic SinglyLinkedList test" {
const L = SinglyLinkedList(u32);
var list = L{};
try testing.expect(list.len() == 0);
var one = L.Node{ .data = 1 };
var two = L.Node{ .data = 2 };
var three = L.Node{ .data = 3 };
var four = L.Node{ .data = 4 };
var five = L.Node{ .data = 5 };
list.prepend(&two); // {2}
two.insertAfter(&five); // {2, 5}
list.prepend(&one); // {1, 2, 5}
two.insertAfter(&three); // {1, 2, 3, 5}
three.insertAfter(&four); // {1, 2, 3, 4, 5}
try testing.expect(list.len() == 5);
// Traverse forwards.
{
var it = list.first;
var index: u32 = 1;
while (it) |node| : (it = node.next) {
try testing.expect(node.data == index);
index += 1;
}
}
_ = list.popFirst(); // {2, 3, 4, 5}
_ = list.remove(&five); // {2, 3, 4}
_ = two.removeNext(); // {2, 4}
try testing.expect(list.first.?.data == 2);
try testing.expect(list.first.?.next.?.data == 4);
try testing.expect(list.first.?.next.?.next == null);
L.Node.reverse(&list.first);
try testing.expect(list.first.?.data == 4);
try testing.expect(list.first.?.next.?.data == 2);
try testing.expect(list.first.?.next.?.next == null);
}
/// A doubly-linked list has a pair of pointers to both the head and
/// tail of the list. List elements have pointers to both the previous
/// and next elements in the sequence. The list can be traversed both

lib/std/std.zig

@@ -33,7 +33,7 @@ pub const Random = @import("Random.zig");
pub const RingBuffer = @import("RingBuffer.zig");
pub const SegmentedList = @import("segmented_list.zig").SegmentedList;
pub const SemanticVersion = @import("SemanticVersion.zig");
pub const SinglyLinkedList = @import("linked_list.zig").SinglyLinkedList;
pub const SinglyLinkedList = @import("SinglyLinkedList.zig");
pub const StaticBitSet = bit_set.StaticBitSet;
pub const StringHashMap = hash_map.StringHashMap;
pub const StringHashMapUnmanaged = hash_map.StringHashMapUnmanaged;