From 42dbd35d3e16247ee68d7e3ace0da3778a1f5d37 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 8 Feb 2025 16:46:56 -0800
Subject: [PATCH] std.heap.SmpAllocator: back to simple free

In practice this is fine because eventually alloc wins the race and
grabs that massive freelist.
---
 lib/std/heap/SmpAllocator.zig | 46 +++--------------------------------
 1 file changed, 4 insertions(+), 42 deletions(-)

diff --git a/lib/std/heap/SmpAllocator.zig b/lib/std/heap/SmpAllocator.zig
index 62b90e8dbe..63f1e38a2a 100644
--- a/lib/std/heap/SmpAllocator.zig
+++ b/lib/std/heap/SmpAllocator.zig
@@ -51,10 +51,6 @@ const slab_len: usize = @max(std.heap.page_size_max, 64 * 1024);
 /// Because of storing free list pointers, the minimum size class is 3.
 const min_class = math.log2(@sizeOf(usize));
 const size_class_count = math.log2(slab_len) - min_class;
-/// When a freelist length exceeds this number of slabs, a `free` will rotate
-/// up to `max_free_search` times before pushing.
-const max_freelist_len = 255;
-const max_free_search = 1;
 /// Before mapping a fresh page, `alloc` will rotate this many times.
 const max_alloc_search = 1;
 
@@ -73,8 +69,6 @@ const Thread = struct {
     next_addrs: [size_class_count]usize = @splat(0),
     /// For each size class, points to the freed pointer.
     frees: [size_class_count]usize = @splat(0),
-    /// For each size class, tracks the number of items in the freelist.
-    freelist_lens: [size_class_count]u8 = @splat(0),
 
     fn lock() *Thread {
         var index = thread_index;
@@ -159,7 +153,6 @@ fn alloc(context: *anyopaque, len: usize, alignment: mem.Alignment, ra: usize) ?
             // slab alignment here ensures the % slab len earlier catches the end of slots.
             const slab = PageAllocator.map(slab_len, .fromByteUnits(slab_len)) orelse return null;
             t.next_addrs[class] = @intFromPtr(slab) + slot_size;
-            t.freelist_lens[class] = 0;
             return slab;
         }
 
@@ -213,43 +206,12 @@ fn free(context: *anyopaque, memory: []u8, alignment: mem.Alignment, ra: usize)
     }
 
     const node: *usize = @alignCast(@ptrCast(memory.ptr));
-    var search_count: u8 = 0;
 
-    var t = Thread.lock();
+    const t = Thread.lock();
+    defer t.unlock();
 
-    outer: while (true) {
-        const freelist_len = t.freelist_lens[class];
-        if (freelist_len < max_freelist_len) {
-            @branchHint(.likely);
-            defer t.unlock();
-            t.freelist_lens[class] = freelist_len +| @intFromBool((@intFromPtr(node) % slab_len) == 0);
-            node.* = t.frees[class];
-            t.frees[class] = @intFromPtr(node);
-            return;
-        }
-
-        if (search_count >= max_free_search) {
-            defer t.unlock();
-            t.freelist_lens[class] = freelist_len +| @intFromBool((@intFromPtr(node) % slab_len) == 0);
-            node.* = t.frees[class];
-            t.frees[class] = @intFromPtr(node);
-            return;
-        }
-
-        t.unlock();
-        const cpu_count = getCpuCount();
-        assert(cpu_count != 0);
-        var index = thread_index;
-        while (true) {
-            index = (index + 1) % cpu_count;
-            t = &global.threads[index];
-            if (t.mutex.tryLock()) {
-                thread_index = index;
-                search_count += 1;
-                continue :outer;
-            }
-        }
-    }
+    node.* = t.frees[class];
+    t.frees[class] = @intFromPtr(node);
 }
 
 fn sizeClassIndex(len: usize, alignment: mem.Alignment) usize {
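
Note (not part of the patch): the simplified `free` above is an unconditional
push onto an intrusive singly linked freelist, with the freed slot itself
holding the next pointer, and alloc's fast path is the matching pop; that pop
is what eventually drains a freelist grown large by repeated frees once alloc
"wins the race" on that thread. Below is a minimal standalone sketch of that
push/pop idea only; the names `frees`, `push`, and `pop` are hypothetical, and
none of SmpAllocator's per-thread, per-size-class, or locking machinery is
reproduced.

// Illustrative sketch with hypothetical names; not code from SmpAllocator.zig.
const std = @import("std");

/// Head of a single freelist; 0 means empty, mirroring the
/// `frees: [size_class_count]usize = @splat(0)` convention in the file.
var frees: usize = 0;

/// Push a freed slot: the slot's first word stores the previous head,
/// which is all the simplified `free` does after taking the thread lock.
fn push(slot: []align(@alignOf(usize)) u8) void {
    const node: *usize = @alignCast(@ptrCast(slot.ptr));
    node.* = frees;
    frees = @intFromPtr(node);
}

/// Pop the most recently freed slot, as alloc's fast path does; this is
/// what consumes a long freelist built up by many frees.
fn pop() ?[*]u8 {
    if (frees == 0) return null;
    const node: *usize = @ptrFromInt(frees);
    frees = node.*;
    return @as([*]u8, @ptrCast(node));
}

pub fn main() void {
    var slot_a: [16]u8 align(@alignOf(usize)) = undefined;
    var slot_b: [16]u8 align(@alignOf(usize)) = undefined;
    push(&slot_a);
    push(&slot_b);
    // LIFO order: the most recently freed slot is reused first.
    std.debug.assert(pop().? == @as([*]u8, &slot_b));
    std.debug.assert(pop().? == @as([*]u8, &slot_a));
    std.debug.assert(pop() == null);
}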