revert GPA to before this branch

Andrew Kelley 2025-01-29 21:48:05 -08:00
parent 284de7d957
commit 95a0474dc6


@@ -48,7 +48,7 @@
//!
//! ## Basic Design:
//!
-//! Small allocations are divided into buckets. For a max page size of 4K:
+//! Small allocations are divided into buckets:
//!
//! ```
//! index obj_size
@@ -75,9 +75,6 @@
//! BucketHeader, followed by "used bits", and two stack traces for each slot
//! (allocation trace and free trace).
//!
-//! The buckets array contains buckets for every size class below `page_size_max`.
-//! At runtime, only size classes below `pageSize()` will actually be used for allocations.
-//!
//! The "used bits" are 1 bit per slot representing whether the slot is used.
//! Allocations use the data to iterate to find a free slot. Frees assert that the
//! corresponding bit is 1 and set it to 0.
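To make the doc comment above concrete, here is a minimal sketch (not the allocator's own code; the helper names `bucketIndex`, `isSlotUsed`, and `setSlotUsed` are hypothetical, and the 4096-byte page in the test is an assumption) of the two ideas it describes: a power-of-two size class selects its bucket by log2, and each slot's state lives in one bit of the "used bits" region.

```zig
const std = @import("std");

/// A power-of-two size class selects its bucket by log2, mirroring the
/// `index`/`obj_size` table in the doc comment.
fn bucketIndex(size_class: usize) usize {
    return std.math.log2(size_class);
}

/// One bit per slot: test whether a slot is currently allocated.
fn isSlotUsed(used_bits: []const u8, slot: usize) bool {
    return ((used_bits[slot / 8] >> @as(u3, @intCast(slot % 8))) & 1) != 0;
}

/// Mark a slot as allocated; a free would assert the bit is 1 and clear it.
fn setSlotUsed(used_bits: []u8, slot: usize) void {
    used_bits[slot / 8] |= @as(u8, 1) << @as(u3, @intCast(slot % 8));
}

test "size class to bucket index and used bits" {
    try std.testing.expectEqual(@as(usize, 6), bucketIndex(64));
    // Assuming a 4096-byte page of 64-byte objects: 64 slots -> 8 bytes of used bits.
    var used_bits = [_]u8{0} ** 8;
    try std.testing.expect(!isSlotUsed(&used_bits, 5));
    setSlotUsed(&used_bits, 5);
    try std.testing.expect(isSlotUsed(&used_bits, 5));
}
```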
@@ -102,13 +99,11 @@ const math = std.math;
const assert = std.debug.assert;
const mem = std.mem;
const Allocator = std.mem.Allocator;
-const page_size_min = std.heap.page_size_min;
-const page_size_max = std.heap.page_size_max;
-const pageSize = std.heap.pageSize;
+const page_size = std.mem.page_size;
const StackTrace = std.builtin.StackTrace;
/// Integer type for pointing to slots in a small allocation
-const SlotIndex = std.meta.Int(.unsigned, math.log2(page_size_max) + 1);
+const SlotIndex = std.meta.Int(.unsigned, math.log2(page_size) + 1);
const default_test_stack_trace_frames: usize = if (builtin.is_test) 10 else 6;
const default_sys_stack_trace_frames: usize = if (std.debug.sys_can_stack_trace) default_test_stack_trace_frames else 0;
@@ -162,9 +157,6 @@ pub const Config = struct {
pub const Check = enum { ok, leak };
-var used_small_bucket_count_cache = std.atomic.Value(usize).init(0);
-var largest_used_bucket_object_size_cache = std.atomic.Value(usize).init(0);
/// Default initialization of this struct is deprecated; use `.init` instead.
pub fn GeneralPurposeAllocator(comptime config: Config) type {
return struct {
@@ -214,27 +206,9 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
pub const Error = mem.Allocator.Error;
-const small_bucket_count = math.log2(page_size_max);
+const small_bucket_count = math.log2(page_size);
const largest_bucket_object_size = 1 << (small_bucket_count - 1);
const LargestSizeClassInt = std.math.IntFittingRange(0, largest_bucket_object_size);
-fn used_small_bucket_count() usize {
-const cached = used_small_bucket_count_cache.load(.monotonic);
-if (cached != 0) {
-return cached;
-}
-const val = math.log2(pageSize());
-used_small_bucket_count_cache.store(val, .monotonic);
-return val;
-}
-fn largest_used_bucket_object_size() usize {
-const cached = largest_used_bucket_object_size_cache.load(.monotonic);
-if (cached != 0) {
-return cached;
-}
-const val = @as(usize, 1) << @truncate(used_small_bucket_count() - 1);
-largest_used_bucket_object_size_cache.store(val, .monotonic);
-return val;
-}
const bucketCompare = struct {
fn compare(a: *BucketHeader, b: *BucketHeader) std.math.Order {
@@ -287,7 +261,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
// * stack_trace_addresses: [N]usize, // traces_per_slot for every allocation
const BucketHeader = struct {
-page: [*]align(page_size_min) u8,
+page: [*]align(page_size) u8,
alloc_cursor: SlotIndex,
used_count: SlotIndex,
@@ -299,14 +273,14 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
if (!config.safety) @compileError("requested size is only stored when safety is enabled");
const start_ptr = @as([*]u8, @ptrCast(bucket)) + bucketRequestedSizesStart(size_class);
const sizes = @as([*]LargestSizeClassInt, @ptrCast(@alignCast(start_ptr)));
-const slot_count = @divExact(pageSize(), size_class);
+const slot_count = @divExact(page_size, size_class);
return sizes[0..slot_count];
}
fn log2PtrAligns(bucket: *BucketHeader, size_class: usize) []u8 {
if (!config.safety) @compileError("requested size is only stored when safety is enabled");
const aligns_ptr = @as([*]u8, @ptrCast(bucket)) + bucketAlignsStart(size_class);
-const slot_count = @divExact(pageSize(), size_class);
+const slot_count = @divExact(page_size, size_class);
return aligns_ptr[0..slot_count];
}
@@ -338,7 +312,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
/// Only valid for buckets within `empty_buckets`, and relies on the `alloc_cursor`
/// of empty buckets being set to `slot_count` when they are added to `empty_buckets`
fn emptyBucketSizeClass(bucket: *BucketHeader) usize {
-return @divExact(pageSize(), bucket.alloc_cursor);
+return @divExact(page_size, bucket.alloc_cursor);
}
};
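As a quick worked example of the `emptyBucketSizeClass` trick above (assuming a 4096-byte `page_size`): a bucket parked in `empty_buckets` with `alloc_cursor` set to 64 must have had 64 slots per page, so its size class is recovered as 4096 / 64 = 64 bytes without storing it separately.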
@@ -381,13 +355,13 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
fn bucketAlignsStart(size_class: usize) usize {
if (!config.safety) @compileError("requested sizes are not stored unless safety is enabled");
-const slot_count = @divExact(pageSize(), size_class);
+const slot_count = @divExact(page_size, size_class);
return bucketRequestedSizesStart(size_class) + (@sizeOf(LargestSizeClassInt) * slot_count);
}
fn bucketStackFramesStart(size_class: usize) usize {
const unaligned_start = if (config.safety) blk: {
-const slot_count = @divExact(pageSize(), size_class);
+const slot_count = @divExact(page_size, size_class);
break :blk bucketAlignsStart(size_class) + slot_count;
} else @sizeOf(BucketHeader) + usedBitsCount(size_class);
return mem.alignForward(
@@ -398,12 +372,12 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
}
fn bucketSize(size_class: usize) usize {
-const slot_count = @divExact(pageSize(), size_class);
+const slot_count = @divExact(page_size, size_class);
return bucketStackFramesStart(size_class) + one_trace_size * traces_per_slot * slot_count;
}
fn usedBitsCount(size_class: usize) usize {
-const slot_count = @divExact(pageSize(), size_class);
+const slot_count = @divExact(page_size, size_class);
if (slot_count < 8) return 1;
return @divExact(slot_count, 8);
}
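For example (again assuming `page_size` is 4096): a `size_class` of 64 gives `slot_count = 64`, so the used bits occupy 64 / 8 = 8 bytes, while a `size_class` of 1024 gives only 4 slots, which is below 8, so a single byte is still reserved.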
@@ -442,8 +416,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
pub fn detectLeaks(self: *Self) bool {
var leaks = false;
-for (0..used_small_bucket_count()) |bucket_i| {
-const buckets = &self.buckets[bucket_i];
+for (&self.buckets, 0..) |*buckets, bucket_i| {
if (buckets.root == null) continue;
const size_class = @as(usize, 1) << @as(math.Log2Int(usize), @intCast(bucket_i));
const used_bits_count = usedBitsCount(size_class);
@@ -491,7 +464,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
var bucket = node.key;
if (config.never_unmap) {
// free page that was intentionally leaked by never_unmap
-self.backing_allocator.free(bucket.page[0..pageSize()]);
+self.backing_allocator.free(bucket.page[0..page_size]);
}
// alloc_cursor was set to slot count when bucket added to empty_buckets
self.freeBucket(bucket, bucket.emptyBucketSizeClass());
@@ -558,7 +531,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
fn allocSlot(self: *Self, size_class: usize, trace_addr: usize) Error!Slot {
const bucket_index = math.log2(size_class);
var buckets = &self.buckets[bucket_index];
-const slot_count = @divExact(pageSize(), size_class);
+const slot_count = @divExact(page_size, size_class);
if (self.cur_buckets[bucket_index] == null or self.cur_buckets[bucket_index].?.alloc_cursor == slot_count) {
const new_bucket = try self.createBucket(size_class);
errdefer self.freeBucket(new_bucket, size_class);
@@ -591,7 +564,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
addr: usize,
current_bucket: ?*BucketHeader,
) ?*BucketHeader {
-const search_page: [*]align(page_size_min) u8 = @ptrFromInt(mem.alignBackward(usize, addr, pageSize()));
+const search_page: [*]align(page_size) u8 = @ptrFromInt(mem.alignBackward(usize, addr, page_size));
if (current_bucket != null and current_bucket.?.page == search_page) {
return current_bucket;
}
@@ -756,14 +729,14 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
assert(old_mem.len != 0);
const aligned_size = @max(old_mem.len, @as(usize, 1) << log2_old_align);
-if (aligned_size > largest_used_bucket_object_size()) {
+if (aligned_size > largest_bucket_object_size) {
return self.resizeLarge(old_mem, log2_old_align, new_size, ret_addr);
}
const size_class_hint = math.ceilPowerOfTwoAssert(usize, aligned_size);
var bucket_index = math.log2(size_class_hint);
var size_class: usize = size_class_hint;
-const bucket = while (bucket_index < used_small_bucket_count()) : (bucket_index += 1) {
+const bucket = while (bucket_index < small_bucket_count) : (bucket_index += 1) {
if (searchBucket(&self.buckets[bucket_index], @intFromPtr(old_mem.ptr), self.cur_buckets[bucket_index])) |bucket| {
break bucket;
}
@@ -874,7 +847,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
assert(old_mem.len != 0);
const aligned_size = @max(old_mem.len, @as(usize, 1) << log2_old_align);
-if (aligned_size > largest_used_bucket_object_size()) {
+if (aligned_size > largest_bucket_object_size) {
self.freeLarge(old_mem, log2_old_align, ret_addr);
return;
}
@@ -882,7 +855,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
var bucket_index = math.log2(size_class_hint);
var size_class: usize = size_class_hint;
-const bucket = while (bucket_index < used_small_bucket_count()) : (bucket_index += 1) {
+const bucket = while (bucket_index < small_bucket_count) : (bucket_index += 1) {
if (searchBucket(&self.buckets[bucket_index], @intFromPtr(old_mem.ptr), self.cur_buckets[bucket_index])) |bucket| {
break bucket;
}
@@ -971,14 +944,14 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
self.cur_buckets[bucket_index] = null;
}
if (!config.never_unmap) {
-self.backing_allocator.free(bucket.page[0..pageSize()]);
+self.backing_allocator.free(bucket.page[0..page_size]);
}
if (!config.retain_metadata) {
self.freeBucket(bucket, size_class);
self.bucket_node_pool.destroy(node);
} else {
// move alloc_cursor to end so we can tell size_class later
-const slot_count = @divExact(pageSize(), size_class);
+const slot_count = @divExact(page_size, size_class);
bucket.alloc_cursor = @as(SlotIndex, @truncate(slot_count));
var empty_entry = self.empty_buckets.getEntryFor(node.key);
empty_entry.set(node);
@@ -1019,7 +992,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
ret_addr: usize,
) Allocator.Error![*]u8 {
const new_aligned_size = @max(len, @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align)));
-if (new_aligned_size > largest_used_bucket_object_size()) {
+if (new_aligned_size > largest_bucket_object_size) {
try self.large_allocations.ensureUnusedCapacity(self.backing_allocator, 1);
const ptr = self.backing_allocator.rawAlloc(len, log2_ptr_align, ret_addr) orelse
return error.OutOfMemory;
@@ -1062,7 +1035,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
}
fn createBucket(self: *Self, size_class: usize) Error!*BucketHeader {
-const page = try self.backing_allocator.alignedAlloc(u8, page_size_min, pageSize());
+const page = try self.backing_allocator.alignedAlloc(u8, page_size, page_size);
errdefer self.backing_allocator.free(page);
const bucket_size = bucketSize(size_class);
@@ -1206,17 +1179,17 @@ test "large object - grow" {
defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
const allocator = gpa.allocator();
-var slice1 = try allocator.alloc(u8, pageSize() * 2 - 20);
+var slice1 = try allocator.alloc(u8, page_size * 2 - 20);
defer allocator.free(slice1);
const old = slice1;
-slice1 = try allocator.realloc(slice1, pageSize() * 2 - 10);
+slice1 = try allocator.realloc(slice1, page_size * 2 - 10);
try std.testing.expect(slice1.ptr == old.ptr);
-slice1 = try allocator.realloc(slice1, pageSize() * 2);
+slice1 = try allocator.realloc(slice1, page_size * 2);
try std.testing.expect(slice1.ptr == old.ptr);
-slice1 = try allocator.realloc(slice1, pageSize() * 2 + 1);
+slice1 = try allocator.realloc(slice1, page_size * 2 + 1);
}
test "realloc small object to large object" {
@@ -1230,7 +1203,7 @@ test "realloc small object to large object" {
slice[60] = 0x34;
// This requires upgrading to a large object
-const large_object_size = pageSize() * 2 + 50;
+const large_object_size = page_size * 2 + 50;
slice = try allocator.realloc(slice, large_object_size);
try std.testing.expect(slice[0] == 0x12);
try std.testing.expect(slice[60] == 0x34);
@@ -1241,22 +1214,22 @@ test "shrink large object to large object" {
defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
const allocator = gpa.allocator();
-var slice = try allocator.alloc(u8, pageSize() * 2 + 50);
+var slice = try allocator.alloc(u8, page_size * 2 + 50);
defer allocator.free(slice);
slice[0] = 0x12;
slice[60] = 0x34;
-if (!allocator.resize(slice, pageSize() * 2 + 1)) return;
-slice = slice.ptr[0 .. pageSize() * 2 + 1];
+if (!allocator.resize(slice, page_size * 2 + 1)) return;
+slice = slice.ptr[0 .. page_size * 2 + 1];
try std.testing.expect(slice[0] == 0x12);
try std.testing.expect(slice[60] == 0x34);
-try std.testing.expect(allocator.resize(slice, pageSize() * 2 + 1));
-slice = slice[0 .. pageSize() * 2 + 1];
+try std.testing.expect(allocator.resize(slice, page_size * 2 + 1));
+slice = slice[0 .. page_size * 2 + 1];
try std.testing.expect(slice[0] == 0x12);
try std.testing.expect(slice[60] == 0x34);
-slice = try allocator.realloc(slice, pageSize() * 2);
+slice = try allocator.realloc(slice, page_size * 2);
try std.testing.expect(slice[0] == 0x12);
try std.testing.expect(slice[60] == 0x34);
}
@@ -1272,13 +1245,13 @@ test "shrink large object to large object with larger alignment" {
var fba = std.heap.FixedBufferAllocator.init(&debug_buffer);
const debug_allocator = fba.allocator();
-const alloc_size = pageSize() * 2 + 50;
+const alloc_size = page_size * 2 + 50;
var slice = try allocator.alignedAlloc(u8, 16, alloc_size);
defer allocator.free(slice);
const big_alignment: usize = switch (builtin.os.tag) {
-.windows => pageSize() * 32, // Windows aligns to 64K.
-else => pageSize() * 2,
+.windows => page_size * 32, // Windows aligns to 64K.
+else => page_size * 2,
};
// This loop allocates until we find a page that is not aligned to the big
// alignment. Then we shrink the allocation after the loop, but increase the
@@ -1304,7 +1277,7 @@ test "realloc large object to small object" {
defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
const allocator = gpa.allocator();
-var slice = try allocator.alloc(u8, pageSize() * 2 + 50);
+var slice = try allocator.alloc(u8, page_size * 2 + 50);
defer allocator.free(slice);
slice[0] = 0x12;
slice[16] = 0x34;
@@ -1346,18 +1319,18 @@ test "realloc large object to larger alignment" {
var fba = std.heap.FixedBufferAllocator.init(&debug_buffer);
const debug_allocator = fba.allocator();
-var slice = try allocator.alignedAlloc(u8, 16, pageSize() * 2 + 50);
+var slice = try allocator.alignedAlloc(u8, 16, page_size * 2 + 50);
defer allocator.free(slice);
const big_alignment: usize = switch (builtin.os.tag) {
-.windows => pageSize() * 32, // Windows aligns to 64K.
-else => pageSize() * 2,
+.windows => page_size * 32, // Windows aligns to 64K.
+else => page_size * 2,
};
// This loop allocates until we find a page that is not aligned to the big alignment.
var stuff_to_free = std.ArrayList([]align(16) u8).init(debug_allocator);
while (mem.isAligned(@intFromPtr(slice.ptr), big_alignment)) {
try stuff_to_free.append(slice);
-slice = try allocator.alignedAlloc(u8, 16, pageSize() * 2 + 50);
+slice = try allocator.alignedAlloc(u8, 16, page_size * 2 + 50);
}
while (stuff_to_free.popOrNull()) |item| {
allocator.free(item);
@@ -1365,15 +1338,15 @@ test "realloc large object to larger alignment" {
slice[0] = 0x12;
slice[16] = 0x34;
-slice = try allocator.reallocAdvanced(slice, 32, pageSize() * 2 + 100);
+slice = try allocator.reallocAdvanced(slice, 32, page_size * 2 + 100);
try std.testing.expect(slice[0] == 0x12);
try std.testing.expect(slice[16] == 0x34);
-slice = try allocator.reallocAdvanced(slice, 32, pageSize() * 2 + 25);
+slice = try allocator.reallocAdvanced(slice, 32, page_size * 2 + 25);
try std.testing.expect(slice[0] == 0x12);
try std.testing.expect(slice[16] == 0x34);
-slice = try allocator.reallocAdvanced(slice, big_alignment, pageSize() * 2 + 100);
+slice = try allocator.reallocAdvanced(slice, big_alignment, page_size * 2 + 100);
try std.testing.expect(slice[0] == 0x12);
try std.testing.expect(slice[16] == 0x34);
}
@@ -1389,7 +1362,7 @@ test "large object shrinks to small but allocation fails during shrink" {
defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
const allocator = gpa.allocator();
-var slice = try allocator.alloc(u8, pageSize() * 2 + 50);
+var slice = try allocator.alloc(u8, page_size * 2 + 50);
defer allocator.free(slice);
slice[0] = 0x12;
slice[3] = 0x34;
@@ -1460,7 +1433,7 @@ test "double frees" {
try std.testing.expect(GPA.searchBucket(&gpa.empty_buckets, @intFromPtr(small.ptr), null) != null);
// detect a large allocation double free
-const large = try allocator.alloc(u8, 2 * pageSize());
+const large = try allocator.alloc(u8, 2 * page_size);
try std.testing.expect(gpa.large_allocations.contains(@intFromPtr(large.ptr)));
try std.testing.expectEqual(gpa.large_allocations.getEntry(@intFromPtr(large.ptr)).?.value_ptr.bytes, large);
allocator.free(large);
@@ -1469,7 +1442,7 @@ test "double frees" {
const normal_small = try allocator.alloc(u8, size_class);
defer allocator.free(normal_small);
-const normal_large = try allocator.alloc(u8, 2 * pageSize());
+const normal_large = try allocator.alloc(u8, 2 * page_size);
defer allocator.free(normal_large);
// check that flushing retained metadata doesn't disturb live allocations
@@ -1502,8 +1475,8 @@ test "bug 9995 fix, large allocs count requested size not backing size" {
var gpa = GeneralPurposeAllocator(.{ .enable_memory_limit = true }){};
const allocator = gpa.allocator();
-var buf = try allocator.alignedAlloc(u8, 1, pageSize() + 1);
-try std.testing.expect(gpa.total_requested_bytes == pageSize() + 1);
+var buf = try allocator.alignedAlloc(u8, 1, page_size + 1);
+try std.testing.expect(gpa.total_requested_bytes == page_size + 1);
buf = try allocator.realloc(buf, 1);
try std.testing.expect(gpa.total_requested_bytes == 1);
buf = try allocator.realloc(buf, 2);