GPA: Catch invalid frees

* GPA: Catch invalid frees

Fix #14791: Catch cases where an invalid slice is passed to free().
This was silently ignored before but now logs an error. The change
uses an AutoHashMap to keep track of the sizes, which may be overkill
but is the simplest way to catch these errors.
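
For example (a minimal sketch, not part of this commit, with config.safety
enabled explicitly), freeing a slice whose length does not match the original
allocation is now reported instead of going unnoticed:

    const std = @import("std");

    pub fn main() !void {
        var gpa = std.heap.GeneralPurposeAllocator(.{ .safety = true }){};
        defer _ = gpa.deinit();
        const allocator = gpa.allocator();

        const buf = try allocator.alloc(u8, 16);

        // The pointer is valid, but the length is half of what was allocated.
        // With the small_allocations table the GPA logs something like
        // "Allocation size 16 bytes does not match free size 8" and includes
        // the allocation and free stack traces.
        allocator.free(buf[0 .. buf.len / 2]);
    }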

* GPA: Add wrong alignment checks to free/resize

Implement @Inkryption's suggestion to catch free/resize calls with the
wrong alignment. I also changed the naming to match the large-allocation
code.
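
A sketch of the alignment check (again hypothetical usage, not part of this
commit): freeing through a slice type that has lost the allocation's alignment
now triggers the new check as well:

    const std = @import("std");

    pub fn main() !void {
        var gpa = std.heap.GeneralPurposeAllocator(.{ .safety = true }){};
        defer _ = gpa.deinit();
        const allocator = gpa.allocator();

        // 32 bytes with 16-byte alignment; the GPA records log2_ptr_align = 4.
        const buf = try allocator.alignedAlloc(u8, 16, 32);

        // Coercing to a plain []u8 drops the alignment, so free() is issued
        // with alignment 1 and the GPA logs something like
        // "Allocation alignment 16 does not match free alignment 1".
        const plain: []u8 = buf;
        allocator.free(plain);
    }
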
Ganesan Rajagopal 2023-04-04 15:41:25 +05:30 committed by GitHub
parent 771d07268f
commit 49b56f88b9


@@ -160,6 +160,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
backing_allocator: Allocator = std.heap.page_allocator,
buckets: [small_bucket_count]?*BucketHeader = [1]?*BucketHeader{null} ** small_bucket_count,
large_allocations: LargeAllocTable = .{},
small_allocations: if (config.safety) SmallAllocTable else void = if (config.safety) .{} else {},
empty_buckets: if (config.retain_metadata) ?*BucketHeader else void =
if (config.retain_metadata) null else {},
@@ -194,6 +195,11 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
const small_bucket_count = math.log2(page_size);
const largest_bucket_object_size = 1 << (small_bucket_count - 1);
const SmallAlloc = struct {
requested_size: usize,
log2_ptr_align: u8,
};
const LargeAlloc = struct {
bytes: []u8,
requested_size: if (config.enable_memory_limit) usize else void,
@@ -227,6 +233,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
}
};
const LargeAllocTable = std.AutoHashMapUnmanaged(usize, LargeAlloc);
const SmallAllocTable = std.AutoHashMapUnmanaged(usize, SmallAlloc);
// Bucket: In memory, in order:
// * BucketHeader
@@ -430,6 +437,9 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
self.freeRetainedMetadata();
}
self.large_allocations.deinit(self.backing_allocator);
if (config.safety) {
self.small_allocations.deinit(self.backing_allocator);
}
self.* = undefined;
return leaks;
}
@@ -706,6 +716,34 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
}
// Definitely an in-use small alloc now.
if (config.safety) {
const entry = self.small_allocations.getEntry(@ptrToInt(old_mem.ptr)) orelse
@panic("Invalid free");
if (old_mem.len != entry.value_ptr.requested_size or log2_old_align != entry.value_ptr.log2_ptr_align) {
var addresses: [stack_n]usize = [1]usize{0} ** stack_n;
var free_stack_trace = StackTrace{
.instruction_addresses = &addresses,
.index = 0,
};
std.debug.captureStackTrace(ret_addr, &free_stack_trace);
if (old_mem.len != entry.value_ptr.requested_size) {
log.err("Allocation size {d} bytes does not match resize size {d}. Allocation: {} Resize: {}", .{
entry.value_ptr.requested_size,
old_mem.len,
bucketStackTrace(bucket, size_class, slot_index, .alloc),
free_stack_trace,
});
}
if (log2_old_align != entry.value_ptr.log2_ptr_align) {
log.err("Allocation alignment {d} does not match resize alignment {d}. Allocation: {} Resize: {}", .{
@as(usize, 1) << @intCast(math.Log2Int(usize), entry.value_ptr.log2_ptr_align),
@as(usize, 1) << @intCast(math.Log2Int(usize), log2_old_align),
bucketStackTrace(bucket, size_class, slot_index, .alloc),
free_stack_trace,
});
}
}
}
const prev_req_bytes = self.total_requested_bytes;
if (config.enable_memory_limit) {
const new_req_bytes = prev_req_bytes + new_size - old_mem.len;
@@ -726,6 +764,10 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
old_mem.len, old_mem.ptr, new_size,
});
}
if (config.safety) {
const entry = self.small_allocations.getEntry(@ptrToInt(old_mem.ptr)).?;
entry.value_ptr.requested_size = new_size;
}
return true;
}
@@ -796,6 +838,35 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
}
// Definitely an in-use small alloc now.
if (config.safety) {
const entry = self.small_allocations.getEntry(@ptrToInt(old_mem.ptr)) orelse
@panic("Invalid free");
if (old_mem.len != entry.value_ptr.requested_size or log2_old_align != entry.value_ptr.log2_ptr_align) {
var addresses: [stack_n]usize = [1]usize{0} ** stack_n;
var free_stack_trace = StackTrace{
.instruction_addresses = &addresses,
.index = 0,
};
std.debug.captureStackTrace(ret_addr, &free_stack_trace);
if (old_mem.len != entry.value_ptr.requested_size) {
log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {} Free: {}", .{
entry.value_ptr.requested_size,
old_mem.len,
bucketStackTrace(bucket, size_class, slot_index, .alloc),
free_stack_trace,
});
}
if (log2_old_align != entry.value_ptr.log2_ptr_align) {
log.err("Allocation alignment {d} does not match free alignment {d}. Allocation: {} Free: {}", .{
@as(usize, 1) << @intCast(math.Log2Int(usize), entry.value_ptr.log2_ptr_align),
@as(usize, 1) << @intCast(math.Log2Int(usize), log2_old_align),
bucketStackTrace(bucket, size_class, slot_index, .alloc),
free_stack_trace,
});
}
}
}
if (config.enable_memory_limit) {
self.total_requested_bytes -= old_mem.len;
}
@@ -840,6 +911,9 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
} else {
@memset(old_mem.ptr, undefined, old_mem.len);
}
if (config.safety) {
assert(self.small_allocations.remove(@ptrToInt(old_mem.ptr)));
}
if (config.verbose_log) {
log.info("small free {d} bytes at {*}", .{ old_mem.len, old_mem.ptr });
}
@@ -903,8 +977,16 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
return slice.ptr;
}
if (config.safety) {
try self.small_allocations.ensureUnusedCapacity(self.backing_allocator, 1);
}
const new_size_class = math.ceilPowerOfTwoAssert(usize, new_aligned_size);
const ptr = try self.allocSlot(new_size_class, ret_addr);
if (config.safety) {
const gop = self.small_allocations.getOrPutAssumeCapacity(@ptrToInt(ptr));
gop.value_ptr.requested_size = len;
gop.value_ptr.log2_ptr_align = log2_ptr_align;
}
if (config.verbose_log) {
log.info("small alloc {d} bytes at {*}", .{ len, ptr });
}