gpa: Don't leak memory when the upper bound is hit

This commit is contained in:
LemonBoy 2020-08-31 12:35:25 +02:00
parent f20305d249
commit 29de809a92

View File

@@ -561,6 +561,19 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
return error.OutOfMemory;
}
// Returns true if an allocation of `size` bytes fits under the configured
// `requested_memory_limit`. On success (and only then) the bookkeeping
// counter `total_requested_bytes` is advanced by `size`, so a `true` result
// also *records* the allocation. When `config.enable_memory_limit` is false
// this is a no-op that always returns true.
fn isAllocationAllowed(self: *Self, size: usize) bool {
if (config.enable_memory_limit) {
const new_req_bytes = self.total_requested_bytes + size;
// Reject before mutating state, so a failed check leaves the
// accounting untouched.
if (new_req_bytes > self.requested_memory_limit)
return false;
self.total_requested_bytes = new_req_bytes;
}
return true;
}
// Allocator vtable entry point: recovers the owning GeneralPurposeAllocator
// instance from its embedded `allocator` field via @fieldParentPtr.
// NOTE(review): this is a rendered diff fragment — the rest of the function
// body continues in the following hunk.
fn alloc(allocator: *Allocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8 {
const self = @fieldParentPtr(Self, "allocator", allocator);
@@ -568,39 +581,38 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
// NOTE(review): this span is a web-rendered diff hunk — the removed (old)
// and added (new) lines are interleaved with their +/- markers and original
// indentation stripped. The repeated lines below are the old/new pair of the
// same code, not genuine duplication. The annotations mark which side each
// run appears to belong to — confirm against commit 29de809a92 before
// treating this as compilable code.
defer held.release();
const new_aligned_size = math.max(len, ptr_align);
// (old) removed: the result used to be computed into `mem_slice` through a
// labeled block, with the memory-limit check applied only afterwards.
const mem_slice = blk: {
if (new_aligned_size > largest_bucket_object_size) {
try self.large_allocations.ensureCapacity(
self.backing_allocator,
self.large_allocations.entries.items.len + 1,
);
// (new) the same large-allocation path re-emitted by the diff — presumably
// only its indentation/placement changed; verify against the commit.
if (new_aligned_size > largest_bucket_object_size) {
try self.large_allocations.ensureCapacity(
self.backing_allocator,
self.large_allocations.entries.items.len + 1,
);
const slice = try self.backing_allocator.allocFn(self.backing_allocator, len, ptr_align, len_align, ret_addr);
const slice = try self.backing_allocator.allocFn(self.backing_allocator, len, ptr_align, len_align, ret_addr);
const gop = self.large_allocations.getOrPutAssumeCapacity(@ptrToInt(slice.ptr));
assert(!gop.found_existing); // This would mean the kernel double-mapped pages.
gop.entry.value.bytes = slice;
collectStackTrace(ret_addr, &gop.entry.value.stack_addresses);
break :blk slice;
} else {
const new_size_class = math.ceilPowerOfTwoAssert(usize, new_aligned_size);
const ptr = try self.allocSlot(new_size_class, ret_addr);
break :blk ptr[0..len];
}
};
// (old) removed: the limit was checked AFTER the backing allocation had
// already succeeded, and on failure the function returned error.OutOfMemory
// without freeing the block — the leak this commit's title refers to.
if (config.enable_memory_limit) {
// The backing allocator may return a memory block bigger than
// `len`, use the effective size for bookkeeping purposes
const new_req_bytes = self.total_requested_bytes + mem_slice.len;
if (new_req_bytes > self.requested_memory_limit) {
// (new) added: the limit check is now funneled through isAllocationAllowed,
// and on rejection the freshly obtained backing block is released (resized
// to 0) before returning, so nothing leaks.
if (!self.isAllocationAllowed(slice.len)) {
// Free the block so no memory is leaked
const new_len = try self.backing_allocator.resizeFn(self.backing_allocator, slice, ptr_align, 0, 0, ret_addr);
assert(new_len == 0);
return error.OutOfMemory;
}
self.total_requested_bytes = new_req_bytes;
const gop = self.large_allocations.getOrPutAssumeCapacity(@ptrToInt(slice.ptr));
assert(!gop.found_existing); // This would mean the kernel double-mapped pages.
gop.entry.value.bytes = slice;
collectStackTrace(ret_addr, &gop.entry.value.stack_addresses);
return slice;
}
return mem_slice;
// (new) added: the bucketed (small) path checks the limit up front, before a
// slot is reserved, so its failure path needs no cleanup.
if (!self.isAllocationAllowed(len)) {
return error.OutOfMemory;
}
const new_size_class = math.ceilPowerOfTwoAssert(usize, new_aligned_size);
const ptr = try self.allocSlot(new_size_class, ret_addr);
return ptr[0..len];
}
fn createBucket(self: *Self, size_class: usize, bucket_index: usize) Error!*BucketHeader {