Mirror of https://github.com/ziglang/zig.git (synced 2026-01-03 20:13:21 +00:00)
allocgate: split free out from resize
This commit is contained in:
  parent 23866b1f81
  commit f68cda738a

141  lib/std/heap.zig
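The change in brief: allocator implementations no longer express a free as a resize to zero. `Allocator.VTable` gains a dedicated `free` entry, and implementations now register three functions via `Allocator.init(self, alloc, resize, free)`. Below is a minimal sketch of a wrapper allocator under the new interface; it is an illustration only, not part of this commit, `CountingAllocator` is a hypothetical name, and it assumes the `rawAlloc`/`rawResize`/`rawFree` helpers introduced in this diff.

const std = @import("std");
const Allocator = std.mem.Allocator;

const CountingAllocator = struct {
    parent: Allocator,
    frees: usize = 0,

    pub fn allocator(self: *CountingAllocator) Allocator {
        // New in this commit: a third, dedicated free function.
        return Allocator.init(self, alloc, resize, free);
    }

    fn alloc(self: *CountingAllocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Allocator.Error![]u8 {
        return self.parent.rawAlloc(len, ptr_align, len_align, ret_addr);
    }

    fn resize(self: *CountingAllocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Allocator.Error!usize {
        // resize no longer handles new_len == 0; that case now goes through free.
        return self.parent.rawResize(buf, buf_align, new_len, len_align, ret_addr);
    }

    fn free(self: *CountingAllocator, buf: []u8, buf_align: u29, ret_addr: usize) void {
        self.frees += 1;
        self.parent.rawFree(buf, buf_align, ret_addr);
    }
};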
@@ -132,10 +132,6 @@ const CAllocator = struct {
    ) Allocator.Error!usize {
        _ = buf_align;
        _ = return_address;
        if (new_len == 0) {
            alignedFree(buf.ptr);
            return 0;
        }
        if (new_len <= buf.len) {
            return mem.alignAllocLen(buf.len, new_len, len_align);
        }
@@ -147,6 +143,17 @@ const CAllocator = struct {
        }
        return error.OutOfMemory;
    }

    fn free(
        _: *c_void,
        buf: []u8,
        buf_align: u29,
        return_address: usize,
    ) void {
        _ = buf_align;
        _ = return_address;
        alignedFree(buf.ptr);
    }
};

/// Supports the full Allocator interface, including alignment, and exploiting
@@ -159,6 +166,7 @@ pub const c_allocator = Allocator{
const c_allocator_vtable = Allocator.VTable{
    .alloc = CAllocator.alloc,
    .resize = CAllocator.resize,
    .free = CAllocator.free,
};

/// Asserts allocations are within `@alignOf(std.c.max_align_t)` and directly calls
@@ -173,6 +181,7 @@ pub const raw_c_allocator = Allocator{
const raw_c_allocator_vtable = Allocator.VTable{
    .alloc = rawCAlloc,
    .resize = rawCResize,
    .free = rawCFree,
};

fn rawCAlloc(
@@ -199,16 +208,23 @@ fn rawCResize(
) Allocator.Error!usize {
    _ = old_align;
    _ = ret_addr;
    if (new_len == 0) {
        c.free(buf.ptr);
        return 0;
    }
    if (new_len <= buf.len) {
        return mem.alignAllocLen(buf.len, new_len, len_align);
    }
    return error.OutOfMemory;
}

fn rawCFree(
    _: *c_void,
    buf: []u8,
    old_align: u29,
    ret_addr: usize,
) void {
    _ = old_align;
    _ = ret_addr;
    c.free(buf.ptr);
}

/// This allocator makes a syscall directly for every allocation and free.
/// Thread-safe and lock-free.
pub const page_allocator = if (builtin.target.isWasm())
@@ -238,6 +254,7 @@ const PageAllocator = struct {
    const vtable = Allocator.VTable{
        .alloc = alloc,
        .resize = resize,
        .free = free,
    };

    fn alloc(_: *c_void, n: usize, alignment: u29, len_align: u29, ra: usize) error{OutOfMemory}![]u8 {
@@ -351,16 +368,6 @@ const PageAllocator = struct {

        if (builtin.os.tag == .windows) {
            const w = os.windows;
            if (new_size == 0) {
                // From the docs:
                // "If the dwFreeType parameter is MEM_RELEASE, this parameter
                // must be 0 (zero). The function frees the entire region that
                // is reserved in the initial allocation call to VirtualAlloc."
                // So we can only use MEM_RELEASE when actually releasing the
                // whole allocation.
                w.VirtualFree(buf_unaligned.ptr, 0, w.MEM_RELEASE);
                return 0;
            }
            if (new_size <= buf_unaligned.len) {
                const base_addr = @ptrToInt(buf_unaligned.ptr);
                const old_addr_end = base_addr + buf_unaligned.len;
@@ -391,8 +398,6 @@ const PageAllocator = struct {
            const ptr = @alignCast(mem.page_size, buf_unaligned.ptr + new_size_aligned);
            // TODO: if the next_mmap_addr_hint is within the unmapped range, update it
            os.munmap(ptr[0 .. buf_aligned_len - new_size_aligned]);
            if (new_size_aligned == 0)
                return 0;
            return alignPageAllocLen(new_size_aligned, new_size, len_align);
        }

@@ -400,6 +405,19 @@ const PageAllocator = struct {
        // TODO: if the next_mmap_addr_hint is within the remapped range, update it
        return error.OutOfMemory;
    }

    fn free(_: *c_void, buf_unaligned: []u8, buf_align: u29, return_address: usize) void {
        _ = buf_align;
        _ = return_address;

        if (builtin.os.tag == .windows) {
            os.windows.VirtualFree(buf_unaligned.ptr, 0, os.windows.MEM_RELEASE);
        } else {
            const buf_aligned_len = mem.alignForward(buf_unaligned.len, mem.page_size);
            const ptr = @alignCast(mem.page_size, buf_unaligned.ptr);
            os.munmap(ptr[0..buf_aligned_len]);
        }
    }
};

const WasmPageAllocator = struct {
@@ -412,6 +430,7 @@ const WasmPageAllocator = struct {
    const vtable = Allocator.VTable{
        .alloc = alloc,
        .resize = resize,
        .free = free,
    };

    const PageStatus = enum(u1) {
@@ -571,7 +590,21 @@ const WasmPageAllocator = struct {
            const base = nPages(@ptrToInt(buf.ptr));
            freePages(base + new_n, base + current_n);
        }
        return if (new_len == 0) 0 else alignPageAllocLen(new_n * mem.page_size, new_len, len_align);
        return alignPageAllocLen(new_n * mem.page_size, new_len, len_align);
    }

    fn free(
        _: *c_void,
        buf: []u8,
        buf_align: u29,
        return_address: usize,
    ) void {
        _ = buf_align;
        _ = return_address;
        const aligned_len = mem.alignForward(buf.len, mem.page_size);
        const current_n = nPages(aligned_len);
        const base = nPages(@ptrToInt(buf.ptr));
        freePages(base, base + current_n);
    }
};

@@ -588,7 +621,7 @@ pub const HeapAllocator = switch (builtin.os.tag) {
        }

        pub fn allocator(self: *HeapAllocator) Allocator {
            return Allocator.init(self, alloc, resize);
            return Allocator.init(self, alloc, resize, free);
        }

        pub fn deinit(self: *HeapAllocator) void {
@@ -644,10 +677,6 @@ pub const HeapAllocator = switch (builtin.os.tag) {
        ) error{OutOfMemory}!usize {
            _ = buf_align;
            _ = return_address;
            if (new_size == 0) {
                os.windows.HeapFree(self.heap_handle.?, 0, @intToPtr(*c_void, getRecordPtr(buf).*));
                return 0;
            }

            const root_addr = getRecordPtr(buf).*;
            const align_offset = @ptrToInt(buf.ptr) - root_addr;
@@ -669,6 +698,17 @@ pub const HeapAllocator = switch (builtin.os.tag) {
            getRecordPtr(buf.ptr[0..return_len]).* = root_addr;
            return return_len;
        }

        fn free(
            self: *HeapAllocator,
            buf: []u8,
            buf_align: u29,
            return_address: usize,
        ) void {
            _ = buf_align;
            _ = return_address;
            os.windows.HeapFree(self.heap_handle.?, 0, @intToPtr(*c_void, getRecordPtr(buf).*));
        }
    },
    else => @compileError("Unsupported OS"),
};
@@ -696,13 +736,18 @@ pub const FixedBufferAllocator = struct {

    /// *WARNING* using this at the same time as the interface returned by `threadSafeAllocator` is not thread safe
    pub fn allocator(self: *FixedBufferAllocator) Allocator {
        return Allocator.init(self, alloc, resize);
        return Allocator.init(self, alloc, resize, free);
    }

    /// Provides a lock free thread safe `Allocator` interface to the underlying `FixedBufferAllocator`
    /// *WARNING* using this at the same time as the interface returned by `getAllocator` is not thread safe
    pub fn threadSafeAllocator(self: *FixedBufferAllocator) Allocator {
        return Allocator.init(self, threadSafeAlloc, Allocator.NoResize(FixedBufferAllocator).noResize);
        return Allocator.init(
            self,
            threadSafeAlloc,
            Allocator.NoResize(FixedBufferAllocator).noResize,
            Allocator.NoOpFree(FixedBufferAllocator).noOpFree,
        );
    }

    pub fn ownsPtr(self: *FixedBufferAllocator, ptr: [*]u8) bool {
@@ -715,7 +760,7 @@ pub const FixedBufferAllocator = struct {

    /// NOTE: this will not work in all cases, if the last allocation had an adjusted_index
    /// then we won't be able to determine what the last allocation was. This is because
    /// the alignForward operation done in alloc is not reverisible.
    /// the alignForward operation done in alloc is not reversible.
    pub fn isLastAllocation(self: *FixedBufferAllocator, buf: []u8) bool {
        return buf.ptr + buf.len == self.buffer.ptr + self.end_index;
    }
@@ -751,13 +796,13 @@ pub const FixedBufferAllocator = struct {
        if (!self.isLastAllocation(buf)) {
            if (new_size > buf.len)
                return error.OutOfMemory;
            return if (new_size == 0) 0 else mem.alignAllocLen(buf.len, new_size, len_align);
            return mem.alignAllocLen(buf.len, new_size, len_align);
        }

        if (new_size <= buf.len) {
            const sub = buf.len - new_size;
            self.end_index -= sub;
            return if (new_size == 0) 0 else mem.alignAllocLen(buf.len - sub, new_size, len_align);
            return mem.alignAllocLen(buf.len - sub, new_size, len_align);
        }

        const add = new_size - buf.len;
@@ -768,6 +813,21 @@ pub const FixedBufferAllocator = struct {
        return new_size;
    }

    fn free(
        self: *FixedBufferAllocator,
        buf: []u8,
        buf_align: u29,
        return_address: usize,
    ) void {
        _ = buf_align;
        _ = return_address;
        assert(self.ownsSlice(buf)); // sanity check

        if (self.isLastAllocation(buf)) {
            self.end_index -= buf.len;
        }
    }

    fn threadSafeAlloc(self: *FixedBufferAllocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 {
        _ = len_align;
        _ = ra;
@@ -810,7 +870,7 @@ pub fn StackFallbackAllocator(comptime size: usize) type {
        /// WARNING: This functions both fetches a `std.mem.Allocator` interface to this allocator *and* resets the internal buffer allocator
        pub fn get(self: *Self) Allocator {
            self.fixed_buffer_allocator = FixedBufferAllocator.init(self.buffer[0..]);
            return Allocator.init(self, alloc, resize);
            return Allocator.init(self, alloc, resize, free);
        }

        fn alloc(
@@ -821,7 +881,7 @@ pub fn StackFallbackAllocator(comptime size: usize) type {
            return_address: usize,
        ) error{OutOfMemory}![]u8 {
            return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator, len, ptr_align, len_align, return_address) catch
                return self.fallback_allocator.vtable.alloc(self.fallback_allocator.ptr, len, ptr_align, len_align, return_address);
                return self.fallback_allocator.rawAlloc(len, ptr_align, len_align, return_address);
        }

        fn resize(
@@ -835,7 +895,20 @@ pub fn StackFallbackAllocator(comptime size: usize) type {
            if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) {
                return FixedBufferAllocator.resize(&self.fixed_buffer_allocator, buf, buf_align, new_len, len_align, return_address);
            } else {
                return self.fallback_allocator.vtable.resize(self.fallback_allocator.ptr, buf, buf_align, new_len, len_align, return_address);
                return self.fallback_allocator.rawResize(buf, buf_align, new_len, len_align, return_address);
            }
        }

        fn free(
            self: *Self,
            buf: []u8,
            buf_align: u29,
            return_address: usize,
        ) void {
            if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) {
                return FixedBufferAllocator.free(&self.fixed_buffer_allocator, buf, buf_align, return_address);
            } else {
                return self.fallback_allocator.rawFree(buf, buf_align, return_address);
            }
        }
    };

@@ -24,7 +24,7 @@ pub const ArenaAllocator = struct {
    };

    pub fn allocator(self: *ArenaAllocator) Allocator {
        return Allocator.init(self, alloc, resize);
        return Allocator.init(self, alloc, resize, free);
    }

    const BufNode = std.SinglyLinkedList([]u8).Node;
@@ -47,7 +47,7 @@ pub const ArenaAllocator = struct {
        const actual_min_size = minimum_size + (@sizeOf(BufNode) + 16);
        const big_enough_len = prev_len + actual_min_size;
        const len = big_enough_len + big_enough_len / 2;
        const buf = try self.child_allocator.vtable.alloc(self.child_allocator.ptr, len, @alignOf(BufNode), 1, @returnAddress());
        const buf = try self.child_allocator.rawAlloc(len, @alignOf(BufNode), 1, @returnAddress());
        const buf_node = @ptrCast(*BufNode, @alignCast(@alignOf(BufNode), buf.ptr));
        buf_node.* = BufNode{
            .data = buf,
@@ -111,4 +111,16 @@ pub const ArenaAllocator = struct {
            return error.OutOfMemory;
        }
    }

    fn free(self: *ArenaAllocator, buf: []u8, buf_align: u29, ret_addr: usize) void {
        _ = buf_align;
        _ = ret_addr;

        const cur_node = self.state.buffer_list.first orelse return;
        const cur_buf = cur_node.data[@sizeOf(BufNode)..];

        if (@ptrToInt(cur_buf.ptr) + self.state.end_index == @ptrToInt(buf.ptr) + buf.len) {
            self.state.end_index -= buf.len;
        }
    }
};

@@ -281,7 +281,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
        };

        pub fn allocator(self: *Self) Allocator {
            return Allocator.init(self, alloc, resize);
            return Allocator.init(self, alloc, resize, free);
        }

        fn bucketStackTrace(
@@ -388,7 +388,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
            var it = self.large_allocations.iterator();
            while (it.next()) |large| {
                if (large.value_ptr.freed) {
                    _ = self.backing_allocator.vtable.resize(self.backing_allocator.ptr, large.value_ptr.bytes, large.value_ptr.ptr_align, 0, 0, @returnAddress()) catch unreachable;
                    self.backing_allocator.rawFree(large.value_ptr.bytes, large.value_ptr.ptr_align, @returnAddress());
                }
            }
        }
@@ -529,9 +529,6 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
            if (config.retain_metadata and entry.value_ptr.freed) {
                if (config.safety) {
                    reportDoubleFree(ret_addr, entry.value_ptr.getStackTrace(.alloc), entry.value_ptr.getStackTrace(.free));
                    // Recoverable if this is a free.
                    if (new_size == 0)
                        return @as(usize, 0);
                    @panic("Unrecoverable double free");
                } else {
                    unreachable;
@@ -555,7 +552,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {

            // Do memory limit accounting with requested sizes rather than what backing_allocator returns
            // because if we want to return error.OutOfMemory, we have to leave allocation untouched, and
            // that is impossible to guarantee after calling backing_allocator.vtable.resize.
            // that is impossible to guarantee after calling backing_allocator.rawResize.
            const prev_req_bytes = self.total_requested_bytes;
            if (config.enable_memory_limit) {
                const new_req_bytes = prev_req_bytes + new_size - entry.value_ptr.requested_size;
@@ -568,29 +565,12 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
                self.total_requested_bytes = prev_req_bytes;
            };

            const result_len = if (config.never_unmap and new_size == 0)
                0
            else
                try self.backing_allocator.vtable.resize(self.backing_allocator.ptr, old_mem, old_align, new_size, len_align, ret_addr);
            const result_len = try self.backing_allocator.rawResize(old_mem, old_align, new_size, len_align, ret_addr);

            if (config.enable_memory_limit) {
                entry.value_ptr.requested_size = new_size;
            }

            if (result_len == 0) {
                if (config.verbose_log) {
                    log.info("large free {d} bytes at {*}", .{ old_mem.len, old_mem.ptr });
                }

                if (!config.retain_metadata) {
                    assert(self.large_allocations.remove(@ptrToInt(old_mem.ptr)));
                } else {
                    entry.value_ptr.freed = true;
                    entry.value_ptr.captureStackTrace(ret_addr, .free);
                }
                return 0;
            }

            if (config.verbose_log) {
                log.info("large resize {d} bytes at {*} to {d}", .{
                    old_mem.len, old_mem.ptr, new_size,
@@ -601,6 +581,64 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
            return result_len;
        }

        /// This function assumes the object is in the large object storage regardless
        /// of the parameters.
        fn freeLarge(
            self: *Self,
            old_mem: []u8,
            old_align: u29,
            ret_addr: usize,
        ) void {
            _ = old_align;

            const entry = self.large_allocations.getEntry(@ptrToInt(old_mem.ptr)) orelse {
                if (config.safety) {
                    @panic("Invalid free");
                } else {
                    unreachable;
                }
            };

            if (config.retain_metadata and entry.value_ptr.freed) {
                if (config.safety) {
                    reportDoubleFree(ret_addr, entry.value_ptr.getStackTrace(.alloc), entry.value_ptr.getStackTrace(.free));
                    return;
                } else {
                    unreachable;
                }
            }

            if (config.safety and old_mem.len != entry.value_ptr.bytes.len) {
                var addresses: [stack_n]usize = [1]usize{0} ** stack_n;
                var free_stack_trace = StackTrace{
                    .instruction_addresses = &addresses,
                    .index = 0,
                };
                std.debug.captureStackTrace(ret_addr, &free_stack_trace);
                log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {s} Free: {s}", .{
                    entry.value_ptr.bytes.len,
                    old_mem.len,
                    entry.value_ptr.getStackTrace(.alloc),
                    free_stack_trace,
                });
            }

            if (config.enable_memory_limit) {
                self.total_requested_bytes -= entry.value_ptr.requested_size;
            }

            if (config.verbose_log) {
                log.info("large free {d} bytes at {*}", .{ old_mem.len, old_mem.ptr });
            }

            if (!config.retain_metadata) {
                assert(self.large_allocations.remove(@ptrToInt(old_mem.ptr)));
            } else {
                entry.value_ptr.freed = true;
                entry.value_ptr.captureStackTrace(ret_addr, .free);
            }
        }

        pub fn setRequestedMemoryLimit(self: *Self, limit: usize) void {
            self.requested_memory_limit = limit;
        }
@@ -656,9 +694,6 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
            if (!is_used) {
                if (config.safety) {
                    reportDoubleFree(ret_addr, bucketStackTrace(bucket, size_class, slot_index, .alloc), bucketStackTrace(bucket, size_class, slot_index, .free));
                    // Recoverable if this is a free.
                    if (new_size == 0)
                        return @as(usize, 0);
                    @panic("Unrecoverable double free");
                } else {
                    unreachable;
@@ -678,52 +713,6 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
                self.total_requested_bytes = prev_req_bytes;
            };

            if (new_size == 0) {
                // Capture stack trace to be the "first free", in case a double free happens.
                bucket.captureStackTrace(ret_addr, size_class, slot_index, .free);

                used_byte.* &= ~(@as(u8, 1) << used_bit_index);
                bucket.used_count -= 1;
                if (bucket.used_count == 0) {
                    if (bucket.next == bucket) {
                        // it's the only bucket and therefore the current one
                        self.buckets[bucket_index] = null;
                    } else {
                        bucket.next.prev = bucket.prev;
                        bucket.prev.next = bucket.next;
                        self.buckets[bucket_index] = bucket.prev;
                    }
                    if (!config.never_unmap) {
                        self.backing_allocator.free(bucket.page[0..page_size]);
                    }
                    if (!config.retain_metadata) {
                        self.freeBucket(bucket, size_class);
                    } else {
                        // move alloc_cursor to end so we can tell size_class later
                        const slot_count = @divExact(page_size, size_class);
                        bucket.alloc_cursor = @truncate(SlotIndex, slot_count);
                        if (self.empty_buckets) |prev_bucket| {
                            // empty_buckets is ordered newest to oldest through prev so that if
                            // config.never_unmap is false and backing_allocator reuses freed memory
                            // then searchBuckets will always return the newer, relevant bucket
                            bucket.prev = prev_bucket;
                            bucket.next = prev_bucket.next;
                            prev_bucket.next = bucket;
                            bucket.next.prev = bucket;
                        } else {
                            bucket.prev = bucket;
                            bucket.next = bucket;
                        }
                        self.empty_buckets = bucket;
                    }
                } else {
                    @memset(old_mem.ptr, undefined, old_mem.len);
                }
                if (config.verbose_log) {
                    log.info("small free {d} bytes at {*}", .{ old_mem.len, old_mem.ptr });
                }
                return @as(usize, 0);
            }
            const new_aligned_size = math.max(new_size, old_align);
            const new_size_class = math.ceilPowerOfTwoAssert(usize, new_aligned_size);
            if (new_size_class <= size_class) {
@@ -740,6 +729,114 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
            return error.OutOfMemory;
        }

        fn free(
            self: *Self,
            old_mem: []u8,
            old_align: u29,
            ret_addr: usize,
        ) void {
            const held = self.mutex.acquire();
            defer held.release();

            assert(old_mem.len != 0);

            const aligned_size = math.max(old_mem.len, old_align);
            if (aligned_size > largest_bucket_object_size) {
                self.freeLarge(old_mem, old_align, ret_addr);
                return;
            }
            const size_class_hint = math.ceilPowerOfTwoAssert(usize, aligned_size);

            var bucket_index = math.log2(size_class_hint);
            var size_class: usize = size_class_hint;
            const bucket = while (bucket_index < small_bucket_count) : (bucket_index += 1) {
                if (searchBucket(self.buckets[bucket_index], @ptrToInt(old_mem.ptr))) |bucket| {
                    // move bucket to head of list to optimize search for nearby allocations
                    self.buckets[bucket_index] = bucket;
                    break bucket;
                }
                size_class *= 2;
            } else blk: {
                if (config.retain_metadata) {
                    if (!self.large_allocations.contains(@ptrToInt(old_mem.ptr))) {
                        // object not in active buckets or a large allocation, so search empty buckets
                        if (searchBucket(self.empty_buckets, @ptrToInt(old_mem.ptr))) |bucket| {
                            // bucket is empty so is_used below will always be false and we exit there
                            break :blk bucket;
                        } else {
                            @panic("Invalid free");
                        }
                    }
                }
                self.freeLarge(old_mem, old_align, ret_addr);
                return;
            };
            const byte_offset = @ptrToInt(old_mem.ptr) - @ptrToInt(bucket.page);
            const slot_index = @intCast(SlotIndex, byte_offset / size_class);
            const used_byte_index = slot_index / 8;
            const used_bit_index = @intCast(u3, slot_index % 8);
            const used_byte = bucket.usedBits(used_byte_index);
            const is_used = @truncate(u1, used_byte.* >> used_bit_index) != 0;
            if (!is_used) {
                if (config.safety) {
                    reportDoubleFree(ret_addr, bucketStackTrace(bucket, size_class, slot_index, .alloc), bucketStackTrace(bucket, size_class, slot_index, .free));
                    // Recoverable if this is a free.
                    return;
                } else {
                    unreachable;
                }
            }

            // Definitely an in-use small alloc now.
            if (config.enable_memory_limit) {
                self.total_requested_bytes -= old_mem.len;
            }

            // Capture stack trace to be the "first free", in case a double free happens.
            bucket.captureStackTrace(ret_addr, size_class, slot_index, .free);

            used_byte.* &= ~(@as(u8, 1) << used_bit_index);
            bucket.used_count -= 1;
            if (bucket.used_count == 0) {
                if (bucket.next == bucket) {
                    // it's the only bucket and therefore the current one
                    self.buckets[bucket_index] = null;
                } else {
                    bucket.next.prev = bucket.prev;
                    bucket.prev.next = bucket.next;
                    self.buckets[bucket_index] = bucket.prev;
                }
                if (!config.never_unmap) {
                    self.backing_allocator.free(bucket.page[0..page_size]);
                }
                if (!config.retain_metadata) {
                    self.freeBucket(bucket, size_class);
                } else {
                    // move alloc_cursor to end so we can tell size_class later
                    const slot_count = @divExact(page_size, size_class);
                    bucket.alloc_cursor = @truncate(SlotIndex, slot_count);
                    if (self.empty_buckets) |prev_bucket| {
                        // empty_buckets is ordered newest to oldest through prev so that if
                        // config.never_unmap is false and backing_allocator reuses freed memory
                        // then searchBuckets will always return the newer, relevant bucket
                        bucket.prev = prev_bucket;
                        bucket.next = prev_bucket.next;
                        prev_bucket.next = bucket;
                        bucket.next.prev = bucket;
                    } else {
                        bucket.prev = bucket;
                        bucket.next = bucket;
                    }
                    self.empty_buckets = bucket;
                }
            } else {
                @memset(old_mem.ptr, undefined, old_mem.len);
            }
            if (config.verbose_log) {
                log.info("small free {d} bytes at {*}", .{ old_mem.len, old_mem.ptr });
            }
        }

        // Returns true if an allocation of `size` bytes is within the specified
        // limits if enable_memory_limit is true
        fn isAllocationAllowed(self: *Self, size: usize) bool {
@@ -764,7 +861,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
            const new_aligned_size = math.max(len, ptr_align);
            if (new_aligned_size > largest_bucket_object_size) {
                try self.large_allocations.ensureUnusedCapacity(self.backing_allocator, 1);
                const slice = try self.backing_allocator.vtable.alloc(self.backing_allocator.ptr, len, ptr_align, len_align, ret_addr);
                const slice = try self.backing_allocator.rawAlloc(len, ptr_align, len_align, ret_addr);

                const gop = self.large_allocations.getOrPutAssumeCapacity(@ptrToInt(slice.ptr));
                if (config.retain_metadata and !config.never_unmap) {

@@ -18,7 +18,7 @@ pub fn LogToWriterAllocator(comptime Writer: type) type {
        }

        pub fn allocator(self: *Self) Allocator {
            return Allocator.init(self, alloc, resize);
            return Allocator.init(self, alloc, resize, free);
        }

        fn alloc(
@@ -29,7 +29,7 @@ pub fn LogToWriterAllocator(comptime Writer: type) type {
            ra: usize,
        ) error{OutOfMemory}![]u8 {
            self.writer.print("alloc : {}", .{len}) catch {};
            const result = self.parent_allocator.vtable.alloc(self.parent_allocator.ptr, len, ptr_align, len_align, ra);
            const result = self.parent_allocator.rawAlloc(len, ptr_align, len_align, ra);
            if (result) |_| {
                self.writer.print(" success!\n", .{}) catch {};
            } else |_| {
@@ -46,14 +46,12 @@ pub fn LogToWriterAllocator(comptime Writer: type) type {
            len_align: u29,
            ra: usize,
        ) error{OutOfMemory}!usize {
            if (new_len == 0) {
                self.writer.print("free : {}\n", .{buf.len}) catch {};
            } else if (new_len <= buf.len) {
            if (new_len <= buf.len) {
                self.writer.print("shrink: {} to {}\n", .{ buf.len, new_len }) catch {};
            } else {
                self.writer.print("expand: {} to {}", .{ buf.len, new_len }) catch {};
            }
            if (self.parent_allocator.vtable.resize(self.parent_allocator.ptr, buf, buf_align, new_len, len_align, ra)) |resized_len| {
            if (self.parent_allocator.rawResize(buf, buf_align, new_len, len_align, ra)) |resized_len| {
                if (new_len > buf.len) {
                    self.writer.print(" success!\n", .{}) catch {};
                }
@@ -64,6 +62,16 @@ pub fn LogToWriterAllocator(comptime Writer: type) type {
                return e;
            }
        }

        fn free(
            self: *Self,
            buf: []u8,
            buf_align: u29,
            ra: usize,
        ) void {
            self.writer.print("free : {}\n", .{buf.len}) catch {};
            self.parent_allocator.rawFree(buf, buf_align, ra);
        }
    };
}


@@ -33,7 +33,7 @@ pub fn ScopedLoggingAllocator(
        }

        pub fn allocator(self: *Self) Allocator {
            return Allocator.init(self, alloc, resize);
            return Allocator.init(self, alloc, resize, free);
        }

        // This function is required as the `std.log.log` function is not public
@@ -53,7 +53,7 @@ pub fn ScopedLoggingAllocator(
            len_align: u29,
            ra: usize,
        ) error{OutOfMemory}![]u8 {
            const result = self.parent_allocator.vtable.alloc(self.parent_allocator.ptr, len, ptr_align, len_align, ra);
            const result = self.parent_allocator.rawAlloc(len, ptr_align, len_align, ra);
            if (result) |_| {
                logHelper(
                    success_log_level,
@@ -78,10 +78,8 @@ pub fn ScopedLoggingAllocator(
            len_align: u29,
            ra: usize,
        ) error{OutOfMemory}!usize {
            if (self.parent_allocator.vtable.resize(self.parent_allocator.ptr, buf, buf_align, new_len, len_align, ra)) |resized_len| {
                if (new_len == 0) {
                    logHelper(success_log_level, "free - success - len: {}", .{buf.len});
                } else if (new_len <= buf.len) {
            if (self.parent_allocator.rawResize(buf, buf_align, new_len, len_align, ra)) |resized_len| {
                if (new_len <= buf.len) {
                    logHelper(
                        success_log_level,
                        "shrink - success - {} to {}, len_align: {}, buf_align: {}",
@@ -106,6 +104,16 @@ pub fn ScopedLoggingAllocator(
                return err;
            }
        }

        fn free(
            self: *Self,
            buf: []u8,
            buf_align: u29,
            ra: usize,
        ) void {
            self.parent_allocator.rawFree(buf, buf_align, ra);
            logHelper(success_log_level, "free - len: {}", .{buf.len});
        }
    };
}


@@ -47,7 +47,7 @@ pub fn ValidationAllocator(comptime T: type) type {
        }

        pub fn allocator(self: *Self) Allocator {
            return Allocator.init(self, alloc, resize);
            return Allocator.init(self, alloc, resize, free);
        }

        fn getUnderlyingAllocatorPtr(self: *Self) Allocator {
@@ -70,7 +70,7 @@ pub fn ValidationAllocator(comptime T: type) type {
            }

            const underlying = self.getUnderlyingAllocatorPtr();
            const result = try underlying.vtable.alloc(underlying.ptr, n, ptr_align, len_align, ret_addr);
            const result = try underlying.rawAlloc(n, ptr_align, len_align, ret_addr);
            assert(mem.isAligned(@ptrToInt(result.ptr), ptr_align));
            if (len_align == 0) {
                assert(result.len == n);
@@ -95,7 +95,7 @@ pub fn ValidationAllocator(comptime T: type) type {
                assert(new_len >= len_align);
            }
            const underlying = self.getUnderlyingAllocatorPtr();
            const result = try underlying.vtable.resize(underlying.ptr, buf, buf_align, new_len, len_align, ret_addr);
            const result = try underlying.rawResize(buf, buf_align, new_len, len_align, ret_addr);
            if (len_align == 0) {
                assert(result == new_len);
            } else {
@@ -104,6 +104,19 @@ pub fn ValidationAllocator(comptime T: type) type {
            }
            return result;
        }

        pub fn free(
            self: *Self,
            buf: []u8,
            buf_align: u29,
            ret_addr: usize,
        ) void {
            _ = self;
            _ = buf_align;
            _ = ret_addr;
            assert(buf.len > 0);
        }

        pub usingnamespace if (T == Allocator or !@hasDecl(T, "reset")) struct {} else struct {
            pub fn reset(self: *Self) void {
                self.underlying_allocator.reset();
@@ -139,6 +152,7 @@ const fail_allocator = Allocator{
const failAllocator_vtable = Allocator.VTable{
    .alloc = failAllocatorAlloc,
    .resize = Allocator.NoResize(c_void).noResize,
    .free = Allocator.NoOpFree(c_void).noOpFree,
};

fn failAllocatorAlloc(_: *c_void, n: usize, alignment: u29, len_align: u29, ra: usize) Allocator.Error![]u8 {

@@ -5,6 +5,7 @@ const assert = std.debug.assert;
const math = std.math;
const mem = std.mem;
const Allocator = @This();
const builtin = @import("builtin");

pub const Error = error{OutOfMemory};

@@ -28,9 +29,6 @@ pub const VTable = struct {
    /// length returned by `alloc` or `resize`. `buf_align` must equal the same value
    /// that was passed as the `ptr_align` parameter to the original `alloc` call.
    ///
    /// Passing a `new_len` of 0 frees and invalidates the buffer such that it can no
    /// longer be passed to `resize`.
    ///
    /// error.OutOfMemory can only be returned if `new_len` is greater than `buf.len`.
    /// If `buf` cannot be expanded to accomodate `new_len`, then the allocation MUST be
    /// unmodified and error.OutOfMemory MUST be returned.
@@ -40,36 +38,54 @@ pub const VTable = struct {
    /// provide a way to modify the alignment of a pointer. Rather it provides an API for
    /// accepting more bytes of memory from the allocator than requested.
    ///
    /// `new_len` must be greater than or equal to `len_align` and must be aligned by `len_align`.
    /// `new_len` must be greater than zero, greater than or equal to `len_align` and must be aligned by `len_align`.
    ///
    /// `ret_addr` is optionally provided as the first return address of the allocation call stack.
    /// If the value is `0` it means no return address has been provided.
    resize: fn (ptr: *c_void, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Error!usize,

    /// Free and invalidate a buffer. `buf.len` must equal the most recent length returned by `alloc` or `resize`.
    /// `buf_align` must equal the same value that was passed as the `ptr_align` parameter to the original `alloc` call.
    ///
    /// `ret_addr` is optionally provided as the first return address of the allocation call stack.
    /// If the value is `0` it means no return address has been provided.
    free: fn (ptr: *c_void, buf: []u8, buf_align: u29, ret_addr: usize) void,
};

pub fn init(
    pointer: anytype,
    comptime allocFn: fn (ptr: @TypeOf(pointer), len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8,
    comptime resizeFn: fn (ptr: @TypeOf(pointer), buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Error!usize,
    comptime freeFn: fn (ptr: @TypeOf(pointer), buf: []u8, buf_align: u29, ret_addr: usize) void,
) Allocator {
    const Ptr = @TypeOf(pointer);
    assert(@typeInfo(Ptr) == .Pointer); // Must be a pointer
    assert(@typeInfo(Ptr).Pointer.size == .One); // Must be a single-item pointer
    const ptr_info = @typeInfo(Ptr);

    assert(ptr_info == .Pointer); // Must be a pointer
    assert(ptr_info.Pointer.size == .One); // Must be a single-item pointer

    const alignment = ptr_info.Pointer.alignment;

    const gen = struct {
        fn alloc(ptr: *c_void, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8 {
            const alignment = @typeInfo(Ptr).Pointer.alignment;
            const self = @ptrCast(Ptr, @alignCast(alignment, ptr));
            return allocFn(self, len, ptr_align, len_align, ret_addr);
            return @call(.{ .modifier = .always_inline }, allocFn, .{ self, len, ptr_align, len_align, ret_addr });
        }
        fn resize(ptr: *c_void, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Error!usize {
            const alignment = @typeInfo(Ptr).Pointer.alignment;
            assert(new_len != 0);
            const self = @ptrCast(Ptr, @alignCast(alignment, ptr));
            return resizeFn(self, buf, buf_align, new_len, len_align, ret_addr);
            return @call(.{ .modifier = .always_inline }, resizeFn, .{ self, buf, buf_align, new_len, len_align, ret_addr });
        }
        fn free(ptr: *c_void, buf: []u8, buf_align: u29, ret_addr: usize) void {
            const self = @ptrCast(Ptr, @alignCast(alignment, ptr));
            @call(.{ .modifier = .always_inline }, freeFn, .{ self, buf, buf_align, ret_addr });
        }
    };

    const vtable = VTable{
        .alloc = gen.alloc,
        .resize = gen.resize,
        .free = gen.free,
    };

    return .{
@@ -100,6 +116,56 @@ pub fn NoResize(comptime AllocatorType: type) type {
    };
}

/// Set freeFn to `NoOpFree(AllocatorType).noOpFree` if free is a no-op.
pub fn NoOpFree(comptime AllocatorType: type) type {
    return struct {
        pub fn noOpFree(
            self: *AllocatorType,
            buf: []u8,
            buf_align: u29,
            ret_addr: usize,
        ) void {
            _ = self;
            _ = buf;
            _ = buf_align;
            _ = ret_addr;
        }
    };
}

/// Set freeFn to `PanicFree(AllocatorType).noOpFree` if free is not a supported operation.
pub fn PanicFree(comptime AllocatorType: type) type {
    return struct {
        pub fn noOpFree(
            self: *AllocatorType,
            buf: []u8,
            buf_align: u29,
            ret_addr: usize,
        ) void {
            _ = self;
            _ = buf;
            _ = buf_align;
            _ = ret_addr;
            @panic("free is not a supported operation for the allocator: " ++ @typeName(AllocatorType));
        }
    };
}

/// This function is not intended to be called except from within the implementation of an Allocator
pub inline fn rawAlloc(self: Allocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8 {
    return self.vtable.alloc(self.ptr, len, ptr_align, len_align, ret_addr);
}

/// This function is not intended to be called except from within the implementation of an Allocator
pub inline fn rawResize(self: Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Error!usize {
    return self.vtable.resize(self.ptr, buf, buf_align, new_len, len_align, ret_addr);
}

/// This function is not intended to be called except from within the implementation of an Allocator
pub inline fn rawFree(self: Allocator, buf: []u8, buf_align: u29, ret_addr: usize) void {
    return self.vtable.free(self.ptr, buf, buf_align, ret_addr);
}

/// Realloc is used to modify the size or alignment of an existing allocation,
/// as well as to provide the allocator with an opportunity to move an allocation
/// to a better location.
@@ -133,8 +199,7 @@ fn reallocBytes(
    /// Guaranteed to be >= 1.
    /// Guaranteed to be a power of 2.
    old_alignment: u29,
    /// If `new_byte_count` is 0 then this is a free and it is guaranteed that
    /// `old_mem.len != 0`.
    /// `new_byte_count` must be greater than zero
    new_byte_count: usize,
    /// Guaranteed to be >= 1.
    /// Guaranteed to be a power of 2.
@@ -147,18 +212,20 @@ fn reallocBytes(
    return_address: usize,
) Error![]u8 {
    if (old_mem.len == 0) {
        const new_mem = try self.vtable.alloc(self.ptr, new_byte_count, new_alignment, len_align, return_address);
        const new_mem = try self.rawAlloc(new_byte_count, new_alignment, len_align, return_address);
        // TODO: https://github.com/ziglang/zig/issues/4298
        @memset(new_mem.ptr, undefined, new_byte_count);
        return new_mem;
    }

    assert(new_byte_count > 0); // `new_byte_count` must greater than zero, this is a resize not a free

    if (mem.isAligned(@ptrToInt(old_mem.ptr), new_alignment)) {
        if (new_byte_count <= old_mem.len) {
            const shrunk_len = self.shrinkBytes(old_mem, old_alignment, new_byte_count, len_align, return_address);
            return old_mem.ptr[0..shrunk_len];
        }
        if (self.vtable.resize(self.ptr, old_mem, old_alignment, new_byte_count, len_align, return_address)) |resized_len| {
        if (self.rawResize(old_mem, old_alignment, new_byte_count, len_align, return_address)) |resized_len| {
            assert(resized_len >= new_byte_count);
            // TODO: https://github.com/ziglang/zig/issues/4298
            @memset(old_mem.ptr + new_byte_count, undefined, resized_len - new_byte_count);
@@ -184,11 +251,11 @@ fn moveBytes(
) Error![]u8 {
    assert(old_mem.len > 0);
    assert(new_len > 0);
    const new_mem = try self.vtable.alloc(self.ptr, new_len, new_alignment, len_align, return_address);
    const new_mem = try self.rawAlloc(new_len, new_alignment, len_align, return_address);
    @memcpy(new_mem.ptr, old_mem.ptr, math.min(new_len, old_mem.len));
    // TODO https://github.com/ziglang/zig/issues/4298
    @memset(old_mem.ptr, undefined, old_mem.len);
    _ = self.shrinkBytes(old_mem, old_align, 0, 0, return_address);
    self.rawFree(old_mem, old_align, return_address);
    return new_mem;
}

@@ -207,7 +274,7 @@ pub fn destroy(self: Allocator, ptr: anytype) void {
    const T = info.child;
    if (@sizeOf(T) == 0) return;
    const non_const_ptr = @intToPtr([*]u8, @ptrToInt(ptr));
    _ = self.shrinkBytes(non_const_ptr[0..@sizeOf(T)], info.alignment, 0, 0, @returnAddress());
    self.rawFree(non_const_ptr[0..@sizeOf(T)], info.alignment, @returnAddress());
}

/// Allocates an array of `n` items of type `T` and sets all the
@@ -326,7 +393,7 @@ pub fn allocAdvancedWithRetAddr(
        .exact => 0,
        .at_least => size_of_T,
    };
    const byte_slice = try self.vtable.alloc(self.ptr, byte_count, a, len_align, return_address);
    const byte_slice = try self.rawAlloc(byte_count, a, len_align, return_address);
    switch (exact) {
        .exact => assert(byte_slice.len == byte_count),
        .at_least => assert(byte_slice.len >= byte_count),
@@ -351,7 +418,7 @@ pub fn resize(self: Allocator, old_mem: anytype, new_n: usize) Error!@TypeOf(old
    }
    const old_byte_slice = mem.sliceAsBytes(old_mem);
    const new_byte_count = math.mul(usize, @sizeOf(T), new_n) catch return Error.OutOfMemory;
    const rc = try self.vtable.resize(self.ptr, old_byte_slice, Slice.alignment, new_byte_count, 0, @returnAddress());
    const rc = try self.rawResize(old_byte_slice, Slice.alignment, new_byte_count, 0, @returnAddress());
    assert(rc == new_byte_count);
    const new_byte_slice = old_byte_slice.ptr[0..new_byte_count];
    return mem.bytesAsSlice(T, new_byte_slice);
@@ -465,6 +532,11 @@ pub fn alignedShrinkWithRetAddr(

    if (new_n == old_mem.len)
        return old_mem;
    if (new_n == 0) {
        self.free(old_mem);
        return @as([*]align(new_alignment) T, undefined)[0..0];
    }

    assert(new_n < old_mem.len);
    assert(new_alignment <= Slice.alignment);

@@ -489,7 +561,7 @@ pub fn free(self: Allocator, memory: anytype) void {
    const non_const_ptr = @intToPtr([*]u8, @ptrToInt(bytes.ptr));
    // TODO: https://github.com/ziglang/zig/issues/4298
    @memset(non_const_ptr, undefined, bytes_len);
    _ = self.shrinkBytes(non_const_ptr[0..bytes_len], Slice.alignment, 0, 0, @returnAddress());
    self.rawFree(non_const_ptr[0..bytes_len], Slice.alignment, @returnAddress());
}

/// Copies `m` to newly allocated memory. Caller owns the memory.
@@ -520,5 +592,5 @@ pub fn shrinkBytes(
    return_address: usize,
) usize {
    assert(new_len <= buf.len);
    return self.vtable.resize(self.ptr, buf, buf_align, new_len, len_align, return_address) catch unreachable;
    return self.rawResize(buf, buf_align, new_len, len_align, return_address) catch unreachable;
}

@@ -41,7 +41,7 @@ pub const FailingAllocator = struct {
    }

    pub fn allocator(self: *FailingAllocator) mem.Allocator {
        return mem.Allocator.init(self, alloc, resize);
        return mem.Allocator.init(self, alloc, resize, free);
    }

    fn alloc(
@@ -54,7 +54,7 @@ pub const FailingAllocator = struct {
        if (self.index == self.fail_index) {
            return error.OutOfMemory;
        }
        const result = try self.internal_allocator.vtable.alloc(self.internal_allocator.ptr, len, ptr_align, len_align, return_address);
        const result = try self.internal_allocator.rawAlloc(len, ptr_align, len_align, return_address);
        self.allocated_bytes += result.len;
        self.allocations += 1;
        self.index += 1;
@@ -69,18 +69,26 @@ pub const FailingAllocator = struct {
        len_align: u29,
        ra: usize,
    ) error{OutOfMemory}!usize {
        const r = self.internal_allocator.vtable.resize(self.internal_allocator.ptr, old_mem, old_align, new_len, len_align, ra) catch |e| {
        const r = self.internal_allocator.rawResize(old_mem, old_align, new_len, len_align, ra) catch |e| {
            std.debug.assert(new_len > old_mem.len);
            return e;
        };
        if (new_len == 0) {
            self.deallocations += 1;
            self.freed_bytes += old_mem.len;
        } else if (r < old_mem.len) {
        if (r < old_mem.len) {
            self.freed_bytes += old_mem.len - r;
        } else {
            self.allocated_bytes += r - old_mem.len;
        }
        return r;
    }

    fn free(
        self: *FailingAllocator,
        old_mem: []u8,
        old_align: u29,
        ra: usize,
    ) void {
        self.internal_allocator.rawFree(old_mem, old_align, ra);
        self.deallocations += 1;
        self.freed_bytes += old_mem.len;
    }
};

@@ -155,13 +155,10 @@ pub fn TracyAllocator(comptime name: ?[:0]const u8) type {
                }
            }

            if (resized_len != 0) {
                // this was a shrink or a resize
                if (name) |n| {
                    allocNamed(buf.ptr, resized_len, n);
                } else {
                    alloc(buf.ptr, resized_len);
                }
            if (name) |n| {
                allocNamed(buf.ptr, resized_len, n);
            } else {
                alloc(buf.ptr, resized_len);
            }

            return resized_len;
@@ -172,6 +169,15 @@ pub fn TracyAllocator(comptime name: ?[:0]const u8) type {
                return err;
            }
        }

        fn freeFn(self: *Self, buf: []u8, buf_align: u29, ret_addr: usize) void {
            self.parent_allocator.rawFree(buf, buf_align, ret_addr);
            if (name) |n| {
                freeNamed(buf.ptr, n);
            } else {
                free(buf.ptr);
            }
        }
    };
}

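For allocators where individual frees carry no meaning, the commit also adds the `Allocator.NoOpFree` and `Allocator.PanicFree` helper generators alongside the existing `NoResize`. A hedged usage sketch follows; `Scratch` is a hypothetical type shown only to illustrate wiring the helpers into `Allocator.init`, and it assumes the `FixedBufferAllocator.allocator()` and `rawAlloc` shapes shown in this diff.

const std = @import("std");
const Allocator = std.mem.Allocator;

const Scratch = struct {
    fba: std.heap.FixedBufferAllocator,

    pub fn allocator(self: *Scratch) Allocator {
        // Growing in place is unsupported and freeing is a no-op for this sketch.
        return Allocator.init(
            self,
            alloc,
            Allocator.NoResize(Scratch).noResize,
            Allocator.NoOpFree(Scratch).noOpFree,
        );
    }

    fn alloc(self: *Scratch, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Allocator.Error![]u8 {
        // Delegate to the wrapped fixed buffer allocator.
        return self.fba.allocator().rawAlloc(len, ptr_align, len_align, ret_addr);
    }
};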