mirror of https://github.com/ziglang/zig.git
gpa: fix memory limit accounting for large allocations

commit 5ff01bd820 (parent 544d7d9982)
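Context: with `.enable_memory_limit = true`, the GPA tracks `total_requested_bytes` against `requested_memory_limit`. Before this commit, large allocations were charged at whatever length the backing allocator returned, which for a page-granular backing allocator combined with `.at_least` can be far more than the caller asked for (issue 9995). The hunks below make the accounting use the requested size throughout. A minimal sketch of the intended post-fix behavior, using the era's API as seen in the test at the bottom of this diff (setting `requested_memory_limit` directly is an assumption; the field is referenced throughout the diff):

const std = @import("std");

test "limit is enforced against requested, not backing, size" {
    var gpa = std.heap.GeneralPurposeAllocator(.{ .enable_memory_limit = true }){};
    defer _ = gpa.deinit();
    // Allow a little more than one page. Before this commit, an `.at_least`
    // request for page_size + 1 bytes was charged at the backing size (two
    // whole pages) and tripped this limit; afterwards it is charged at the
    // requested page_size + 1 bytes and succeeds.
    gpa.requested_memory_limit = std.mem.page_size + 16;
    const buf = try gpa.allocator.allocAdvanced(u8, null, std.mem.page_size + 1, .at_least);
    gpa.allocator.free(buf);
}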
@@ -210,6 +210,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
 
         const LargeAlloc = struct {
             bytes: []u8,
+            requested_size: if (config.enable_memory_limit) usize else void,
             stack_addresses: [trace_n][stack_n]usize,
             freed: if (config.retain_metadata) bool else void,
             ptr_align: if (config.never_unmap and config.retain_metadata) u29 else void,

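The new `requested_size` field follows the struct's existing pattern for feature-gated data: when the feature is disabled the field has type `void` and occupies no storage. A standalone sketch of the pattern (the names `Tracked` and its fields are illustrative, not from the commit):

const std = @import("std");

// A comptime-conditional field costs zero bytes when the feature is off,
// because a `void` field occupies no storage in the struct layout.
fn Tracked(comptime enable_memory_limit: bool) type {
    return struct {
        bytes: []u8,
        requested_size: if (enable_memory_limit) usize else void,
    };
}

test "void fields occupy no storage" {
    try std.testing.expect(@sizeOf(Tracked(false)) < @sizeOf(Tracked(true)));
}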
@@ -528,13 +529,9 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
             if (config.retain_metadata and entry.value_ptr.freed) {
                 if (config.safety) {
                     reportDoubleFree(ret_addr, entry.value_ptr.getStackTrace(.alloc), entry.value_ptr.getStackTrace(.free));
-                    if (new_size == 0) {
-                        // Recoverable. Restore self.total_requested_bytes if needed.
-                        if (config.enable_memory_limit) {
-                            self.total_requested_bytes += old_mem.len;
-                        }
+                    // Recoverable if this is a free.
+                    if (new_size == 0)
                         return @as(usize, 0);
-                    }
                     @panic("Unrecoverable double free");
                 } else {
                     unreachable;

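In this branch the allocation is already marked freed, so only metadata survives: a second free is recoverable (report it and return 0), while a resize of freed memory cannot be honored, hence the panic. The old in-branch restore of `total_requested_bytes` disappears because, after this commit, the charge happens later, once the pointer is known to be live (see the hunks below). A hedged configuration sketch for catching such double frees, assuming the Config options referenced in this diff (`DebugGpa` is an illustrative name):

const std = @import("std");

// `.retain_metadata` keeps bookkeeping for freed large allocations, which is
// what lets the branch above recognize a double free instead of seeing an
// unknown pointer; `.never_unmap` additionally keeps the pages mapped.
const DebugGpa = std.heap.GeneralPurposeAllocator(.{
    .safety = true,
    .retain_metadata = true,
    .never_unmap = true,
});

test "hardened config compiles and allocates" {
    var gpa = DebugGpa{};
    const slice = try gpa.allocator.alloc(u8, 64);
    gpa.allocator.free(slice);
    // A second free(slice) here would be reported as a double free and,
    // since a free maps to new_size == 0, recovered from without panicking.
}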
@@ -556,7 +553,29 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
                 });
             }
 
-            const result_len = if (config.never_unmap and new_size == 0) 0 else try self.backing_allocator.resizeFn(self.backing_allocator, old_mem, old_align, new_size, len_align, ret_addr);
+            // Do memory limit accounting with requested sizes rather than what backing_allocator returns
+            // because if we want to return error.OutOfMemory, we have to leave allocation untouched, and
+            // that is impossible to guarantee after calling backing_allocator.resizeFn.
+            const prev_req_bytes = self.total_requested_bytes;
+            if (config.enable_memory_limit) {
+                const new_req_bytes = prev_req_bytes + new_size - entry.value_ptr.requested_size;
+                if (new_req_bytes > prev_req_bytes and new_req_bytes > self.requested_memory_limit) {
+                    return error.OutOfMemory;
+                }
+                self.total_requested_bytes = new_req_bytes;
+            }
+            errdefer if (config.enable_memory_limit) {
+                self.total_requested_bytes = prev_req_bytes;
+            };
+
+            const result_len = if (config.never_unmap and new_size == 0)
+                0
+            else
+                try self.backing_allocator.resizeFn(self.backing_allocator, old_mem, old_align, new_size, len_align, ret_addr);
+
+            if (config.enable_memory_limit) {
+                entry.value_ptr.requested_size = new_size;
+            }
+
             if (result_len == 0) {
                 if (config.verbose_log) {

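The charge-then-roll-back shape above is worth calling out: the total is updated before the fallible `resizeFn` call, and the `errdefer` restores the previous value if an error escapes, so the accounting cannot drift even though (per the new comment) the limit check must happen before the backing call. Note also the `new_req_bytes > prev_req_bytes` guard: the limit is only enforced when the total grows, so a shrink always succeeds. A self-contained sketch of the same shape, with hypothetical names (`chargedGrow` is not from the commit):

const std = @import("std");

// Commit the bookkeeping before the fallible work, and let errdefer restore
// the previous total if anything below returns an error.
fn chargedGrow(total: *usize, limit: usize, old_size: usize, new_size: usize) !void {
    const prev = total.*;
    const new_total = prev + new_size - old_size;
    // Only enforce the limit when the total grows, mirroring the diff.
    if (new_total > prev and new_total > limit) return error.OutOfMemory;
    total.* = new_total;
    errdefer total.* = prev;
    // ... fallible work would go here; on error the errdefer above restores
    // `total` so the accounting never drifts from reality.
}

test "accounting rolls forward on success, stays put on failure" {
    var total: usize = 100;
    try chargedGrow(&total, 1000, 100, 350);
    try std.testing.expect(total == 350);
    try std.testing.expectError(error.OutOfMemory, chargedGrow(&total, 1000, 350, 2000));
    try std.testing.expect(total == 350);
}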
@@ -599,18 +618,6 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
             const held = self.mutex.acquire();
             defer held.release();
 
-            const prev_req_bytes = self.total_requested_bytes;
-            if (config.enable_memory_limit) {
-                const new_req_bytes = prev_req_bytes + new_size - old_mem.len;
-                if (new_req_bytes > prev_req_bytes and new_req_bytes > self.requested_memory_limit) {
-                    return error.OutOfMemory;
-                }
-                self.total_requested_bytes = new_req_bytes;
-            }
-            errdefer if (config.enable_memory_limit) {
-                self.total_requested_bytes = prev_req_bytes;
-            };
-
             assert(old_mem.len != 0);
 
             const aligned_size = math.max(old_mem.len, old_align);

@@ -651,19 +658,28 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
             if (!is_used) {
                 if (config.safety) {
                     reportDoubleFree(ret_addr, bucketStackTrace(bucket, size_class, slot_index, .alloc), bucketStackTrace(bucket, size_class, slot_index, .free));
-                    if (new_size == 0) {
-                        // Recoverable. Restore self.total_requested_bytes if needed, as we
-                        // don't return an error value so the errdefer above does not run.
-                        if (config.enable_memory_limit) {
-                            self.total_requested_bytes = prev_req_bytes;
-                        }
+                    // Recoverable if this is a free.
+                    if (new_size == 0)
                         return @as(usize, 0);
-                    }
                     @panic("Unrecoverable double free");
                 } else {
                     unreachable;
                 }
             }
+
+            // Definitely an in-use small alloc now.
+            const prev_req_bytes = self.total_requested_bytes;
+            if (config.enable_memory_limit) {
+                const new_req_bytes = prev_req_bytes + new_size - old_mem.len;
+                if (new_req_bytes > prev_req_bytes and new_req_bytes > self.requested_memory_limit) {
+                    return error.OutOfMemory;
+                }
+                self.total_requested_bytes = new_req_bytes;
+            }
+            errdefer if (config.enable_memory_limit) {
+                self.total_requested_bytes = prev_req_bytes;
+            };
+
             if (new_size == 0) {
                 // Capture stack trace to be the "first free", in case a double free happens.
                 bucket.captureStackTrace(ret_addr, size_class, slot_index, .free);

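Moving the charge below the double-free check also removes the old restore hack, and the reason is a subtlety of `errdefer`: a recoverable double free returns 0 as a *success*, and `errdefer` only fires on error returns, so the old code (which charged first) had to undo the charge by hand, as its removed comment explains. A small demonstration, with hypothetical names:

const std = @import("std");

fn demo(charged: *bool, fail: bool) !u32 {
    charged.* = true;
    errdefer charged.* = false; // runs only if we return an error below
    if (fail) return error.Oops;
    return 0; // a "recoverable" success path: errdefer does NOT run
}

test "errdefer skips normal returns" {
    var charged = false;
    try std.testing.expectError(error.Oops, demo(&charged, true));
    try std.testing.expect(!charged); // rolled back on the error path
    _ = try demo(&charged, false);
    try std.testing.expect(charged); // a normal return keeps the charge
}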
@@ -745,21 +761,15 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
             const held = self.mutex.acquire();
             defer held.release();
 
+            if (!self.isAllocationAllowed(len)) {
+                return error.OutOfMemory;
+            }
+
             const new_aligned_size = math.max(len, ptr_align);
             if (new_aligned_size > largest_bucket_object_size) {
                 try self.large_allocations.ensureUnusedCapacity(self.backing_allocator, 1);
 
                 const slice = try self.backing_allocator.allocFn(self.backing_allocator, len, ptr_align, len_align, ret_addr);
 
-                // The backing allocator may return a memory block bigger than
-                // `len`, use the effective size for bookkeeping purposes
-                if (!self.isAllocationAllowed(slice.len)) {
-                    // Free the block so no memory is leaked
-                    const new_len = try self.backing_allocator.resizeFn(self.backing_allocator, slice, ptr_align, 0, 0, ret_addr);
-                    assert(new_len == 0);
-                    return error.OutOfMemory;
-                }
-
                 const gop = self.large_allocations.getOrPutAssumeCapacity(@ptrToInt(slice.ptr));
                 if (config.retain_metadata and !config.never_unmap) {
                     // Backing allocator may be reusing memory that we're retaining metadata for

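Checking `isAllocationAllowed(len)` up front, against the caller's requested length, is the heart of the fix: the old code charged `slice.len`, and a page-granular backing allocator can legally return far more than was asked for. It also drops the allocate-then-immediately-free dance on failure. A sketch of the rounding behavior this guards against, assuming the era's `allocAdvanced` API as used by the test at the bottom of this diff:

const std = @import("std");

test "backing allocator may round up with .at_least" {
    const n = std.mem.page_size + 1;
    // The page allocator hands back whole pages, so an `.at_least` request
    // for page_size + 1 bytes typically yields 2 * page_size bytes.
    const slice = try std.heap.page_allocator.allocAdvanced(u8, null, n, .at_least);
    defer std.heap.page_allocator.free(slice);
    try std.testing.expect(slice.len >= n);
}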
@@ -768,6 +778,8 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
                 assert(!gop.found_existing); // This would mean the kernel double-mapped pages.
             }
             gop.value_ptr.bytes = slice;
+            if (config.enable_memory_limit)
+                gop.value_ptr.requested_size = len;
             gop.value_ptr.captureStackTrace(ret_addr, .alloc);
             if (config.retain_metadata) {
                 gop.value_ptr.freed = false;

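With `requested_size` populated at allocation time, the large-allocation resize path (the hunk at old line 556 above) can compute its delta as `new_size - entry.value_ptr.requested_size`, entirely in requested bytes, and refresh the field to `new_size` afterwards; `bytes.len`, the backing size, no longer enters the accounting at all.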
@@ -782,10 +794,6 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
                 return slice;
             }
 
-            if (!self.isAllocationAllowed(len)) {
-                return error.OutOfMemory;
-            }
-
             const new_size_class = math.ceilPowerOfTwoAssert(usize, new_aligned_size);
             const ptr = try self.allocSlot(new_size_class, ret_addr);
             if (config.verbose_log) {

@@ -1183,3 +1191,14 @@ test "double frees" {
     try std.testing.expect(gpa.large_allocations.contains(@ptrToInt(normal_large.ptr)));
     try std.testing.expect(!gpa.large_allocations.contains(@ptrToInt(large.ptr)));
 }
+
+test "bug 9995 fix, large allocs count requested size not backing size" {
+    // with AtLeast, buffer likely to be larger than requested, especially when shrinking
+    var gpa = GeneralPurposeAllocator(.{ .enable_memory_limit = true }){};
+    var buf = try gpa.allocator.allocAdvanced(u8, 1, page_size + 1, .at_least);
+    try std.testing.expect(gpa.total_requested_bytes == page_size + 1);
+    buf = try gpa.allocator.reallocAtLeast(buf, 1);
+    try std.testing.expect(gpa.total_requested_bytes == 1);
+    buf = try gpa.allocator.reallocAtLeast(buf, 2);
+    try std.testing.expect(gpa.total_requested_bytes == 2);
+}

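The three expects pin down each accounting path: the initial `.at_least` allocation of page_size + 1 bytes must be charged at exactly page_size + 1 (not at the page-rounded length the backing allocator actually returns), and the two `reallocAtLeast` calls pin the shrink and grow paths, where the pre-fix code mixed requested and backing lengths. Assuming the file's location in this era of the tree, the test can be run with `zig test lib/std/heap/general_purpose_allocator.zig`.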