Mirror of https://github.com/ziglang/zig.git (synced 2026-01-20 22:35:24 +00:00)
std.mem.Allocator: introduce remap function to the interface
This one changes the size of an allocation, allowing it to be relocated. However, the implementation will still return `null` if it would be equivalent to:

    new = alloc
    memcpy(new, old)
    free(old)

Mainly this prepares for taking advantage of `mremap`, which I thought would be a bigger deal but apparently is only available on Linux. Still, we should use it on Linux.
This commit is contained in:
parent dd2fa4f75d
commit 7eeef5fb2b
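For orientation, here is a minimal sketch (not part of the commit) of the calling pattern the new interface function enables: the caller first asks the allocator to `remap` the allocation, and only performs the explicit allocate-copy-free sequence itself when `remap` returns `null`. The `growBuffer` helper name is hypothetical and used only for illustration.

```zig
const std = @import("std");

/// Hypothetical helper: resize `buf` to `new_len` bytes, letting the allocator
/// relocate the allocation when it can do so more cheaply than the caller.
fn growBuffer(gpa: std.mem.Allocator, buf: []u8, new_len: usize) ![]u8 {
    // `remap` may move the allocation; a non-null result is the resized slice.
    if (gpa.remap(buf, new_len)) |resized| return resized;

    // `null` means the allocator has nothing smarter to offer than the
    // equivalent new = alloc; memcpy(new, old); free(old) sequence, so the
    // caller performs it directly.
    const new_buf = try gpa.alloc(u8, new_len);
    const copy_len = @min(buf.len, new_len);
    @memcpy(new_buf[0..copy_len], buf[0..copy_len]);
    gpa.free(buf);
    return new_buf;
}

test "growBuffer" {
    const gpa = std.testing.allocator;
    var buf = try gpa.alloc(u8, 16);
    defer gpa.free(buf);
    buf = try growBuffer(gpa, buf, 32);
    try std.testing.expect(buf.len == 32);
}
```

This mirrors the change to `reallocAdvanced` in the diff below, which now tries `rawRemap` before falling back to `rawAlloc` + `@memcpy` + `rawFree`.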
@@ -105,21 +105,19 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
return result;
}

/// The caller owns the returned memory. Empties this ArrayList,
/// Its capacity is cleared, making deinit() safe but unnecessary to call.
/// The caller owns the returned memory. Empties this ArrayList.
/// Its capacity is cleared, making `deinit` safe but unnecessary to call.
pub fn toOwnedSlice(self: *Self) Allocator.Error!Slice {
const allocator = self.allocator;

const old_memory = self.allocatedSlice();
if (allocator.resize(old_memory, self.items.len)) {
const result = self.items;
if (allocator.remap(old_memory, self.items.len)) |new_items| {
self.* = init(allocator);
return result;
return new_items;
}

const new_memory = try allocator.alignedAlloc(T, alignment, self.items.len);
@memcpy(new_memory, self.items);
@memset(self.items, undefined);
self.clearAndFree();
return new_memory;
}
@@ -185,8 +183,9 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
// extra capacity.
const new_capacity = growCapacity(self.capacity, new_len);
const old_memory = self.allocatedSlice();
if (self.allocator.resize(old_memory, new_capacity)) {
self.capacity = new_capacity;
if (self.allocator.remap(old_memory, new_capacity)) |new_memory| {
self.items.ptr = new_memory.ptr;
self.capacity = new_memory.len;
return addManyAtAssumeCapacity(self, index, count);
}

@@ -468,8 +467,9 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
// the allocator implementation would pointlessly copy our
// extra capacity.
const old_memory = self.allocatedSlice();
if (self.allocator.resize(old_memory, new_capacity)) {
self.capacity = new_capacity;
if (self.allocator.remap(old_memory, new_capacity)) |new_memory| {
self.items.ptr = new_memory.ptr;
self.capacity = new_memory.len;
} else {
const new_memory = try self.allocator.alignedAlloc(T, alignment, new_capacity);
@memcpy(new_memory[0..self.items.len], self.items);
@@ -707,15 +707,13 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
/// Its capacity is cleared, making deinit() safe but unnecessary to call.
pub fn toOwnedSlice(self: *Self, allocator: Allocator) Allocator.Error!Slice {
const old_memory = self.allocatedSlice();
if (allocator.resize(old_memory, self.items.len)) {
const result = self.items;
if (allocator.remap(old_memory, self.items.len)) |new_items| {
self.* = .empty;
return result;
return new_items;
}

const new_memory = try allocator.alignedAlloc(T, alignment, self.items.len);
@memcpy(new_memory, self.items);
@memset(self.items, undefined);
self.clearAndFree(allocator);
return new_memory;
}
@@ -1031,9 +1029,9 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
}

const old_memory = self.allocatedSlice();
if (allocator.resize(old_memory, new_len)) {
self.capacity = new_len;
self.items.len = new_len;
if (allocator.remap(old_memory, new_len)) |new_items| {
self.capacity = new_items.len;
self.items = new_items;
return;
}

@@ -1099,8 +1097,9 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
// the allocator implementation would pointlessly copy our
// extra capacity.
const old_memory = self.allocatedSlice();
if (allocator.resize(old_memory, new_capacity)) {
self.capacity = new_capacity;
if (allocator.remap(old_memory, new_capacity)) |new_memory| {
self.items.ptr = new_memory.ptr;
self.capacity = new_memory.len;
} else {
const new_memory = try allocator.alignedAlloc(T, alignment, new_capacity);
@memcpy(new_memory[0..self.items.len], self.items);

@@ -9,7 +9,7 @@ end_index: usize,
buffer: []u8,

pub fn init(buffer: []u8) FixedBufferAllocator {
return FixedBufferAllocator{
return .{
.buffer = buffer,
.end_index = 0,
};
@@ -22,6 +22,7 @@ pub fn allocator(self: *FixedBufferAllocator) Allocator {
.vtable = &.{
.alloc = alloc,
.resize = resize,
.remap = remap,
.free = free,
},
};
@@ -36,6 +37,7 @@ pub fn threadSafeAllocator(self: *FixedBufferAllocator) Allocator {
.vtable = &.{
.alloc = threadSafeAlloc,
.resize = Allocator.noResize,
.remap = Allocator.noRemap,
.free = Allocator.noFree,
},
};
@@ -57,10 +59,10 @@ pub fn isLastAllocation(self: *FixedBufferAllocator, buf: []u8) bool {
return buf.ptr + buf.len == self.buffer.ptr + self.end_index;
}

pub fn alloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 {
pub fn alloc(ctx: *anyopaque, n: usize, alignment: mem.Alignment, ra: usize) ?[*]u8 {
const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx));
_ = ra;
const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align));
const ptr_align = alignment.toByteUnits();
const adjust_off = mem.alignPointerOffset(self.buffer.ptr + self.end_index, ptr_align) orelse return null;
const adjusted_index = self.end_index + adjust_off;
const new_end_index = adjusted_index + n;
@@ -72,12 +74,12 @@ pub fn alloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 {
pub fn resize(
ctx: *anyopaque,
buf: []u8,
log2_buf_align: u8,
alignment: mem.Alignment,
new_size: usize,
return_address: usize,
) bool {
const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx));
_ = log2_buf_align;
_ = alignment;
_ = return_address;
assert(@inComptime() or self.ownsSlice(buf));

@@ -99,14 +101,24 @@ pub fn resize(
return true;
}

pub fn remap(
context: *anyopaque,
memory: []u8,
alignment: mem.Alignment,
new_len: usize,
return_address: usize,
) ?[*]u8 {
return if (resize(context, memory, alignment, new_len, return_address)) memory.ptr else null;
}

pub fn free(
ctx: *anyopaque,
buf: []u8,
log2_buf_align: u8,
alignment: mem.Alignment,
return_address: usize,
) void {
const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx));
_ = log2_buf_align;
_ = alignment;
_ = return_address;
assert(@inComptime() or self.ownsSlice(buf));

@@ -115,10 +127,10 @@ pub fn free(
}
}

fn threadSafeAlloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 {
fn threadSafeAlloc(ctx: *anyopaque, n: usize, alignment: mem.Alignment, ra: usize) ?[*]u8 {
const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx));
_ = ra;
const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align));
const ptr_align = alignment.toByteUnits();
var end_index = @atomicLoad(usize, &self.end_index, .seq_cst);
while (true) {
const adjust_off = mem.alignPointerOffset(self.buffer.ptr + end_index, ptr_align) orelse return null;

@@ -12,18 +12,18 @@ const page_size_min = std.heap.page_size_min;
pub const vtable: Allocator.VTable = .{
.alloc = alloc,
.resize = resize,
.remap = remap,
.free = free,
};

fn alloc(context: *anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 {
const requested_alignment: mem.Alignment = @enumFromInt(log2_align);
fn alloc(context: *anyopaque, n: usize, alignment: mem.Alignment, ra: usize) ?[*]u8 {
_ = context;
_ = ra;
assert(n > 0);

const page_size = std.heap.pageSize();
if (n >= maxInt(usize) - page_size) return null;
const alignment_bytes = requested_alignment.toByteUnits();
const alignment_bytes = alignment.toByteUnits();

if (native_os == .windows) {
// According to official documentation, VirtualAlloc aligns to page
@@ -103,59 +103,33 @@ fn alloc(context: *anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 {

fn resize(
context: *anyopaque,
buf_unaligned: []u8,
log2_buf_align: u8,
new_size: usize,
memory: []u8,
alignment: mem.Alignment,
new_len: usize,
return_address: usize,
) bool {
_ = context;
_ = log2_buf_align;
_ = alignment;
_ = return_address;
const page_size = std.heap.pageSize();
const new_size_aligned = mem.alignForward(usize, new_size, page_size);

if (native_os == .windows) {
if (new_size <= buf_unaligned.len) {
const base_addr = @intFromPtr(buf_unaligned.ptr);
const old_addr_end = base_addr + buf_unaligned.len;
const new_addr_end = mem.alignForward(usize, base_addr + new_size, page_size);
if (old_addr_end > new_addr_end) {
// For shrinking that is not releasing, we will only decommit
// the pages not needed anymore.
windows.VirtualFree(
@as(*anyopaque, @ptrFromInt(new_addr_end)),
old_addr_end - new_addr_end,
windows.MEM_DECOMMIT,
);
}
return true;
}
const old_size_aligned = mem.alignForward(usize, buf_unaligned.len, page_size);
if (new_size_aligned <= old_size_aligned) {
return true;
}
return false;
}

const buf_aligned_len = mem.alignForward(usize, buf_unaligned.len, page_size);
if (new_size_aligned == buf_aligned_len)
return true;

if (new_size_aligned < buf_aligned_len) {
const ptr = buf_unaligned.ptr + new_size_aligned;
// TODO: if the next_mmap_addr_hint is within the unmapped range, update it
posix.munmap(@alignCast(ptr[0 .. buf_aligned_len - new_size_aligned]));
return true;
}

// TODO: call mremap
// TODO: if the next_mmap_addr_hint is within the remapped range, update it
return false;
return realloc(memory, new_len, false) != null;
}

fn free(context: *anyopaque, slice: []u8, log2_buf_align: u8, return_address: usize) void {
pub fn remap(
context: *anyopaque,
memory: []u8,
alignment: mem.Alignment,
new_len: usize,
return_address: usize,
) ?[*]u8 {
_ = context;
_ = log2_buf_align;
_ = alignment;
_ = return_address;
return realloc(memory, new_len, true);
}

fn free(context: *anyopaque, slice: []u8, alignment: mem.Alignment, return_address: usize) void {
_ = context;
_ = alignment;
_ = return_address;

if (native_os == .windows) {
@@ -165,3 +139,50 @@ fn free(context: *anyopaque, slice: []u8, log2_buf_align: u8, return_address: us
posix.munmap(@alignCast(slice.ptr[0..buf_aligned_len]));
}
}

fn realloc(memory: []u8, new_len: usize, may_move: bool) ?[*]u8 {
const page_size = std.heap.pageSize();
const new_size_aligned = mem.alignForward(usize, new_len, page_size);

if (native_os == .windows) {
if (new_len <= memory.len) {
const base_addr = @intFromPtr(memory.ptr);
const old_addr_end = base_addr + memory.len;
const new_addr_end = mem.alignForward(usize, base_addr + new_len, page_size);
if (old_addr_end > new_addr_end) {
// For shrinking that is not releasing, we will only decommit
// the pages not needed anymore.
windows.VirtualFree(
@as(*anyopaque, @ptrFromInt(new_addr_end)),
old_addr_end - new_addr_end,
windows.MEM_DECOMMIT,
);
}
return memory.ptr;
}
const old_size_aligned = mem.alignForward(usize, memory.len, page_size);
if (new_size_aligned <= old_size_aligned) {
return memory.ptr;
}
return null;
}

const page_aligned_len = mem.alignForward(usize, memory.len, page_size);
if (new_size_aligned == page_aligned_len)
return memory.ptr;

const mremap_available = false; // native_os == .linux;
if (mremap_available) {
// TODO: if the next_mmap_addr_hint is within the remapped range, update it
return posix.mremap(memory, new_len, .{ .MAYMOVE = may_move }, null) catch return null;
}

if (new_size_aligned < page_aligned_len) {
const ptr = memory.ptr + new_size_aligned;
// TODO: if the next_mmap_addr_hint is within the unmapped range, update it
posix.munmap(@alignCast(ptr[0 .. page_aligned_len - new_size_aligned]));
return memory.ptr;
}

return null;
}

@@ -29,12 +29,14 @@ pub const ArenaAllocator = struct {
.vtable = &.{
.alloc = alloc,
.resize = resize,
.remap = remap,
.free = free,
},
};
}

const BufNode = std.SinglyLinkedList(usize).Node;
const BufNode_alignment: mem.Alignment = .fromByteUnits(@alignOf(BufNode));

pub fn init(child_allocator: Allocator) ArenaAllocator {
return (State{}).promote(child_allocator);
@@ -47,9 +49,8 @@ pub const ArenaAllocator = struct {
while (it) |node| {
// this has to occur before the free because the free frees node
const next_it = node.next;
const align_bits = std.math.log2_int(usize, @alignOf(BufNode));
const alloc_buf = @as([*]u8, @ptrCast(node))[0..node.data];
self.child_allocator.rawFree(alloc_buf, align_bits, @returnAddress());
self.child_allocator.rawFree(alloc_buf, BufNode_alignment, @returnAddress());
it = next_it;
}
}
@@ -120,7 +121,6 @@ pub const ArenaAllocator = struct {
return true;
}
const total_size = requested_capacity + @sizeOf(BufNode);
const align_bits = std.math.log2_int(usize, @alignOf(BufNode));
// Free all nodes except for the last one
var it = self.state.buffer_list.first;
const maybe_first_node = while (it) |node| {
@@ -129,7 +129,7 @@ pub const ArenaAllocator = struct {
if (next_it == null)
break node;
const alloc_buf = @as([*]u8, @ptrCast(node))[0..node.data];
self.child_allocator.rawFree(alloc_buf, align_bits, @returnAddress());
self.child_allocator.rawFree(alloc_buf, BufNode_alignment, @returnAddress());
it = next_it;
} else null;
std.debug.assert(maybe_first_node == null or maybe_first_node.?.next == null);
@@ -141,16 +141,16 @@ pub const ArenaAllocator = struct {
if (first_node.data == total_size)
return true;
const first_alloc_buf = @as([*]u8, @ptrCast(first_node))[0..first_node.data];
if (self.child_allocator.rawResize(first_alloc_buf, align_bits, total_size, @returnAddress())) {
if (self.child_allocator.rawResize(first_alloc_buf, BufNode_alignment, total_size, @returnAddress())) {
// successful resize
first_node.data = total_size;
} else {
// manual realloc
const new_ptr = self.child_allocator.rawAlloc(total_size, align_bits, @returnAddress()) orelse {
const new_ptr = self.child_allocator.rawAlloc(total_size, BufNode_alignment, @returnAddress()) orelse {
// we failed to preheat the arena properly, signal this to the user.
return false;
};
self.child_allocator.rawFree(first_alloc_buf, align_bits, @returnAddress());
self.child_allocator.rawFree(first_alloc_buf, BufNode_alignment, @returnAddress());
const node: *BufNode = @ptrCast(@alignCast(new_ptr));
node.* = .{ .data = total_size };
self.state.buffer_list.first = node;
@@ -163,8 +163,7 @@ pub const ArenaAllocator = struct {
const actual_min_size = minimum_size + (@sizeOf(BufNode) + 16);
const big_enough_len = prev_len + actual_min_size;
const len = big_enough_len + big_enough_len / 2;
const log2_align = comptime std.math.log2_int(usize, @alignOf(BufNode));
const ptr = self.child_allocator.rawAlloc(len, log2_align, @returnAddress()) orelse
const ptr = self.child_allocator.rawAlloc(len, BufNode_alignment, @returnAddress()) orelse
return null;
const buf_node: *BufNode = @ptrCast(@alignCast(ptr));
buf_node.* = .{ .data = len };
@@ -173,11 +172,11 @@ pub const ArenaAllocator = struct {
return buf_node;
}

fn alloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 {
fn alloc(ctx: *anyopaque, n: usize, alignment: mem.Alignment, ra: usize) ?[*]u8 {
const self: *ArenaAllocator = @ptrCast(@alignCast(ctx));
_ = ra;

const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align));
const ptr_align = alignment.toByteUnits();
var cur_node = if (self.state.buffer_list.first) |first_node|
first_node
else
@@ -197,8 +196,7 @@ pub const ArenaAllocator = struct {
}

const bigger_buf_size = @sizeOf(BufNode) + new_end_index;
const log2_align = comptime std.math.log2_int(usize, @alignOf(BufNode));
if (self.child_allocator.rawResize(cur_alloc_buf, log2_align, bigger_buf_size, @returnAddress())) {
if (self.child_allocator.rawResize(cur_alloc_buf, BufNode_alignment, bigger_buf_size, @returnAddress())) {
cur_node.data = bigger_buf_size;
} else {
// Allocate a new node if that's not possible
@@ -207,9 +205,9 @@ pub const ArenaAllocator = struct {
}
}

fn resize(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, new_len: usize, ret_addr: usize) bool {
fn resize(ctx: *anyopaque, buf: []u8, alignment: mem.Alignment, new_len: usize, ret_addr: usize) bool {
const self: *ArenaAllocator = @ptrCast(@alignCast(ctx));
_ = log2_buf_align;
_ = alignment;
_ = ret_addr;

const cur_node = self.state.buffer_list.first orelse return false;
@@ -231,8 +229,18 @@ pub const ArenaAllocator = struct {
}
}

fn free(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, ret_addr: usize) void {
_ = log2_buf_align;
fn remap(
context: *anyopaque,
memory: []u8,
alignment: mem.Alignment,
new_len: usize,
return_address: usize,
) ?[*]u8 {
return if (resize(context, memory, alignment, new_len, return_address)) memory.ptr else null;
}

fn free(ctx: *anyopaque, buf: []u8, alignment: mem.Alignment, ret_addr: usize) void {
_ = alignment;
_ = ret_addr;

const self: *ArenaAllocator = @ptrCast(@alignCast(ctx));

@@ -226,7 +226,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
requested_size: if (config.enable_memory_limit) usize else void,
stack_addresses: [trace_n][stack_n]usize,
freed: if (config.retain_metadata) bool else void,
log2_ptr_align: if (config.never_unmap and config.retain_metadata) u8 else void,
alignment: if (config.never_unmap and config.retain_metadata) mem.Alignment else void,

const trace_n = if (config.retain_metadata) traces_per_slot else 1;

@@ -281,11 +281,11 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
return sizes[0..slot_count];
}

fn log2PtrAligns(bucket: *BucketHeader, size_class: usize) []u8 {
fn log2PtrAligns(bucket: *BucketHeader, size_class: usize) []mem.Alignment {
if (!config.safety) @compileError("requested size is only stored when safety is enabled");
const aligns_ptr = @as([*]u8, @ptrCast(bucket)) + bucketAlignsStart(size_class);
const slot_count = @divExact(page_size, size_class);
return aligns_ptr[0..slot_count];
return @ptrCast(aligns_ptr[0..slot_count]);
}

fn stackTracePtr(
@@ -326,6 +326,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
.vtable = &.{
.alloc = alloc,
.resize = resize,
.remap = remap,
.free = free,
},
};
@@ -455,7 +456,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
var it = self.large_allocations.iterator();
while (it.next()) |large| {
if (large.value_ptr.freed) {
self.backing_allocator.rawFree(large.value_ptr.bytes, large.value_ptr.log2_ptr_align, @returnAddress());
self.backing_allocator.rawFree(large.value_ptr.bytes, large.value_ptr.alignment, @returnAddress());
}
}
}
@@ -583,10 +584,11 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
fn resizeLarge(
self: *Self,
old_mem: []u8,
log2_old_align: u8,
alignment: mem.Alignment,
new_size: usize,
ret_addr: usize,
) bool {
may_move: bool,
) ?[*]u8 {
const entry = self.large_allocations.getEntry(@intFromPtr(old_mem.ptr)) orelse {
if (config.safety) {
@panic("Invalid free");
@@ -628,30 +630,37 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
if (config.enable_memory_limit) {
const new_req_bytes = prev_req_bytes + new_size - entry.value_ptr.requested_size;
if (new_req_bytes > prev_req_bytes and new_req_bytes > self.requested_memory_limit) {
return false;
return null;
}
self.total_requested_bytes = new_req_bytes;
}

if (!self.backing_allocator.rawResize(old_mem, log2_old_align, new_size, ret_addr)) {
const opt_resized_ptr = if (may_move)
self.backing_allocator.rawRemap(old_mem, alignment, new_size, ret_addr)
else if (self.backing_allocator.rawResize(old_mem, alignment, new_size, ret_addr))
old_mem.ptr
else
null;

const resized_ptr = opt_resized_ptr orelse {
if (config.enable_memory_limit) {
self.total_requested_bytes = prev_req_bytes;
}
return false;
}
return null;
};

if (config.enable_memory_limit) {
entry.value_ptr.requested_size = new_size;
}

if (config.verbose_log) {
log.info("large resize {d} bytes at {*} to {d}", .{
old_mem.len, old_mem.ptr, new_size,
log.info("large resize {d} bytes at {*} to {d} at {*}", .{
old_mem.len, old_mem.ptr, new_size, resized_ptr,
});
}
entry.value_ptr.bytes = old_mem.ptr[0..new_size];
entry.value_ptr.bytes = resized_ptr[0..new_size];
entry.value_ptr.captureStackTrace(ret_addr, .alloc);
return true;
return resized_ptr;
}

/// This function assumes the object is in the large object storage regardless
@@ -659,7 +668,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
fn freeLarge(
self: *Self,
old_mem: []u8,
log2_old_align: u8,
alignment: mem.Alignment,
ret_addr: usize,
) void {
const entry = self.large_allocations.getEntry(@intFromPtr(old_mem.ptr)) orelse {
@@ -695,7 +704,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
}

if (!config.never_unmap) {
self.backing_allocator.rawFree(old_mem, log2_old_align, ret_addr);
self.backing_allocator.rawFree(old_mem, alignment, ret_addr);
}

if (config.enable_memory_limit) {
@@ -719,22 +728,42 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
}

fn resize(
ctx: *anyopaque,
old_mem: []u8,
log2_old_align_u8: u8,
new_size: usize,
ret_addr: usize,
context: *anyopaque,
memory: []u8,
alignment: mem.Alignment,
new_len: usize,
return_address: usize,
) bool {
const self: *Self = @ptrCast(@alignCast(ctx));
const log2_old_align = @as(Allocator.Log2Align, @intCast(log2_old_align_u8));
return realloc(context, memory, alignment, new_len, return_address, false) != null;
}

fn remap(
context: *anyopaque,
memory: []u8,
alignment: mem.Alignment,
new_len: usize,
return_address: usize,
) ?[*]u8 {
return realloc(context, memory, alignment, new_len, return_address, true);
}

fn realloc(
context: *anyopaque,
old_mem: []u8,
alignment: mem.Alignment,
new_len: usize,
ret_addr: usize,
may_move: bool,
) ?[*]u8 {
const self: *Self = @ptrCast(@alignCast(context));
self.mutex.lock();
defer self.mutex.unlock();

assert(old_mem.len != 0);

const aligned_size = @max(old_mem.len, @as(usize, 1) << log2_old_align);
const aligned_size = @max(old_mem.len, alignment.toByteUnits());
if (aligned_size > largest_bucket_object_size) {
return self.resizeLarge(old_mem, log2_old_align, new_size, ret_addr);
return self.resizeLarge(old_mem, alignment, new_len, ret_addr, may_move);
}
const size_class_hint = math.ceilPowerOfTwoAssert(usize, aligned_size);

@@ -758,7 +787,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
}
}
}
return self.resizeLarge(old_mem, log2_old_align, new_size, ret_addr);
return self.resizeLarge(old_mem, alignment, new_len, ret_addr, may_move);
};
const byte_offset = @intFromPtr(old_mem.ptr) - @intFromPtr(bucket.page);
const slot_index = @as(SlotIndex, @intCast(byte_offset / size_class));
@@ -779,8 +808,8 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
if (config.safety) {
const requested_size = bucket.requestedSizes(size_class)[slot_index];
if (requested_size == 0) @panic("Invalid free");
const log2_ptr_align = bucket.log2PtrAligns(size_class)[slot_index];
if (old_mem.len != requested_size or log2_old_align != log2_ptr_align) {
const slot_alignment = bucket.log2PtrAligns(size_class)[slot_index];
if (old_mem.len != requested_size or alignment != slot_alignment) {
var addresses: [stack_n]usize = [1]usize{0} ** stack_n;
var free_stack_trace = StackTrace{
.instruction_addresses = &addresses,
@@ -795,10 +824,10 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
free_stack_trace,
});
}
if (log2_old_align != log2_ptr_align) {
if (alignment != slot_alignment) {
log.err("Allocation alignment {d} does not match resize alignment {d}. Allocation: {} Resize: {}", .{
@as(usize, 1) << @as(math.Log2Int(usize), @intCast(log2_ptr_align)),
@as(usize, 1) << @as(math.Log2Int(usize), @intCast(log2_old_align)),
slot_alignment.toByteUnits(),
alignment.toByteUnits(),
bucketStackTrace(bucket, size_class, slot_index, .alloc),
free_stack_trace,
});
@@ -807,52 +836,51 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
}
const prev_req_bytes = self.total_requested_bytes;
if (config.enable_memory_limit) {
const new_req_bytes = prev_req_bytes + new_size - old_mem.len;
const new_req_bytes = prev_req_bytes + new_len - old_mem.len;
if (new_req_bytes > prev_req_bytes and new_req_bytes > self.requested_memory_limit) {
return false;
return null;
}
self.total_requested_bytes = new_req_bytes;
}

const new_aligned_size = @max(new_size, @as(usize, 1) << log2_old_align);
const new_aligned_size = @max(new_len, alignment.toByteUnits());
const new_size_class = math.ceilPowerOfTwoAssert(usize, new_aligned_size);
if (new_size_class <= size_class) {
if (old_mem.len > new_size) {
@memset(old_mem[new_size..], undefined);
if (old_mem.len > new_len) {
@memset(old_mem[new_len..], undefined);
}
if (config.verbose_log) {
log.info("small resize {d} bytes at {*} to {d}", .{
old_mem.len, old_mem.ptr, new_size,
old_mem.len, old_mem.ptr, new_len,
});
}
if (config.safety) {
bucket.requestedSizes(size_class)[slot_index] = @intCast(new_size);
bucket.requestedSizes(size_class)[slot_index] = @intCast(new_len);
}
return true;
return old_mem.ptr;
}

if (config.enable_memory_limit) {
self.total_requested_bytes = prev_req_bytes;
}
return false;
return null;
}

fn free(
ctx: *anyopaque,
old_mem: []u8,
log2_old_align_u8: u8,
alignment: mem.Alignment,
ret_addr: usize,
) void {
const self: *Self = @ptrCast(@alignCast(ctx));
const log2_old_align = @as(Allocator.Log2Align, @intCast(log2_old_align_u8));
self.mutex.lock();
defer self.mutex.unlock();

assert(old_mem.len != 0);

const aligned_size = @max(old_mem.len, @as(usize, 1) << log2_old_align);
const aligned_size = @max(old_mem.len, alignment.toByteUnits());
if (aligned_size > largest_bucket_object_size) {
self.freeLarge(old_mem, log2_old_align, ret_addr);
self.freeLarge(old_mem, alignment, ret_addr);
return;
}
const size_class_hint = math.ceilPowerOfTwoAssert(usize, aligned_size);
@@ -877,7 +905,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
}
}
}
self.freeLarge(old_mem, log2_old_align, ret_addr);
self.freeLarge(old_mem, alignment, ret_addr);
return;
};
const byte_offset = @intFromPtr(old_mem.ptr) - @intFromPtr(bucket.page);
@@ -900,8 +928,8 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
if (config.safety) {
const requested_size = bucket.requestedSizes(size_class)[slot_index];
if (requested_size == 0) @panic("Invalid free");
const log2_ptr_align = bucket.log2PtrAligns(size_class)[slot_index];
if (old_mem.len != requested_size or log2_old_align != log2_ptr_align) {
const slot_alignment = bucket.log2PtrAligns(size_class)[slot_index];
if (old_mem.len != requested_size or alignment != slot_alignment) {
var addresses: [stack_n]usize = [1]usize{0} ** stack_n;
var free_stack_trace = StackTrace{
.instruction_addresses = &addresses,
@@ -916,10 +944,10 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
free_stack_trace,
});
}
if (log2_old_align != log2_ptr_align) {
if (alignment != slot_alignment) {
log.err("Allocation alignment {d} does not match free alignment {d}. Allocation: {} Free: {}", .{
@as(usize, 1) << @as(math.Log2Int(usize), @intCast(log2_ptr_align)),
@as(usize, 1) << @as(math.Log2Int(usize), @intCast(log2_old_align)),
slot_alignment.toByteUnits(),
alignment.toByteUnits(),
bucketStackTrace(bucket, size_class, slot_index, .alloc),
free_stack_trace,
});
@@ -981,24 +1009,24 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
return true;
}

fn alloc(ctx: *anyopaque, len: usize, log2_ptr_align: u8, ret_addr: usize) ?[*]u8 {
fn alloc(ctx: *anyopaque, len: usize, alignment: mem.Alignment, ret_addr: usize) ?[*]u8 {
const self: *Self = @ptrCast(@alignCast(ctx));
self.mutex.lock();
defer self.mutex.unlock();
if (!self.isAllocationAllowed(len)) return null;
return allocInner(self, len, @as(Allocator.Log2Align, @intCast(log2_ptr_align)), ret_addr) catch return null;
return allocInner(self, len, alignment, ret_addr) catch return null;
}

fn allocInner(
self: *Self,
len: usize,
log2_ptr_align: Allocator.Log2Align,
alignment: mem.Alignment,
ret_addr: usize,
) Allocator.Error![*]u8 {
const new_aligned_size = @max(len, @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align)));
const new_aligned_size = @max(len, alignment.toByteUnits());
if (new_aligned_size > largest_bucket_object_size) {
try self.large_allocations.ensureUnusedCapacity(self.backing_allocator, 1);
const ptr = self.backing_allocator.rawAlloc(len, log2_ptr_align, ret_addr) orelse
const ptr = self.backing_allocator.rawAlloc(len, alignment, ret_addr) orelse
return error.OutOfMemory;
const slice = ptr[0..len];

@@ -1016,7 +1044,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
if (config.retain_metadata) {
gop.value_ptr.freed = false;
if (config.never_unmap) {
gop.value_ptr.log2_ptr_align = log2_ptr_align;
gop.value_ptr.alignment = alignment;
}
}

@@ -1030,7 +1058,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
const slot = try self.allocSlot(new_size_class, ret_addr);
if (config.safety) {
slot.bucket.requestedSizes(new_size_class)[slot.slot_index] = @intCast(len);
slot.bucket.log2PtrAligns(new_size_class)[slot.slot_index] = log2_ptr_align;
slot.bucket.log2PtrAligns(new_size_class)[slot.slot_index] = alignment;
}
if (config.verbose_log) {
log.info("small alloc {d} bytes at {*}", .{ len, slot.ptr });
@@ -1150,7 +1178,7 @@ test "realloc" {
}

test "shrink" {
var gpa = GeneralPurposeAllocator(test_config){};
var gpa: GeneralPurposeAllocator(test_config) = .{};
defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
const allocator = gpa.allocator();

@@ -1214,7 +1242,7 @@ test "realloc small object to large object" {
}

test "shrink large object to large object" {
var gpa = GeneralPurposeAllocator(test_config){};
var gpa: GeneralPurposeAllocator(test_config) = .{};
defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
const allocator = gpa.allocator();

@@ -6,19 +6,21 @@ const math = std.math;
const mem = std.mem;
const Allocator = @This();
const builtin = @import("builtin");
const Alignment = std.mem.Alignment;

pub const Error = error{OutOfMemory};
pub const Log2Align = math.Log2Int(usize);

/// The type erased pointer to the allocator implementation.
/// Any comparison of this field may result in illegal behavior, since it may be set to
/// `undefined` in cases where the allocator implementation does not have any associated
/// state.
///
/// Any comparison of this field may result in illegal behavior, since it may
/// be set to `undefined` in cases where the allocator implementation does not
/// have any associated state.
ptr: *anyopaque,
vtable: *const VTable,

pub const VTable = struct {
/// Allocate exactly `len` bytes aligned to `1 << ptr_align`, or return `null`
/// Allocate exactly `len` bytes aligned to `alignment`, or return `null`
/// indicating the allocation failed.
///
/// `ret_addr` is optionally provided as the first return address of the
@@ -27,12 +29,14 @@ pub const VTable = struct {
///
/// The returned slice of memory must have been `@memset` to `undefined`
/// by the allocator implementation.
alloc: *const fn (ctx: *anyopaque, len: usize, ptr_align: u8, ret_addr: usize) ?[*]u8,
alloc: *const fn (*anyopaque, len: usize, alignment: Alignment, ret_addr: usize) ?[*]u8,

/// Attempt to expand or shrink memory in place. `buf.len` must equal the
/// length requested from the most recent successful call to `alloc` or
/// `resize`. `buf_align` must equal the same value that was passed as the
/// `ptr_align` parameter to the original `alloc` call.
/// Attempt to expand or shrink memory in place.
///
/// `memory.len` must equal the length requested from the most recent
/// successful call to `alloc` or `resize`. `alignment` must equal the same
/// value that was passed as the `alignment` parameter to the original
/// `alloc` call.
///
/// A result of `true` indicates the resize was successful and the
/// allocation now has the same address but a size of `new_len`. `false`
@@ -44,72 +48,114 @@ pub const VTable = struct {
/// `ret_addr` is optionally provided as the first return address of the
/// allocation call stack. If the value is `0` it means no return address
/// has been provided.
resize: *const fn (ctx: *anyopaque, buf: []u8, buf_align: u8, new_len: usize, ret_addr: usize) bool,
resize: *const fn (*anyopaque, memory: []u8, alignment: Alignment, new_len: usize, ret_addr: usize) bool,

/// Free and invalidate a buffer.
/// Attempt to expand or shrink memory, allowing relocation.
///
/// `buf.len` must equal the most recent length returned by `alloc` or
/// given to a successful `resize` call.
/// `memory.len` must equal the length requested from the most recent
/// successful call to `alloc` or `resize`. `alignment` must equal the same
/// value that was passed as the `alignment` parameter to the original
/// `alloc` call.
///
/// `buf_align` must equal the same value that was passed as the
/// `ptr_align` parameter to the original `alloc` call.
/// A non-`null` return value indicates the resize was successful. The
/// allocation may have same address, or may have been relocated. In either
/// case, the allocation now has size of `new_len`. A `null` return value
/// indicates that the resize would be equivalent to allocating new memory,
/// copying the bytes from the old memory, and then freeing the old memory.
/// In such case, it is more efficient for the caller to perform the copy.
///
/// `new_len` must be greater than zero.
///
/// `ret_addr` is optionally provided as the first return address of the
/// allocation call stack. If the value is `0` it means no return address
/// has been provided.
free: *const fn (ctx: *anyopaque, buf: []u8, buf_align: u8, ret_addr: usize) void,
remap: *const fn (*anyopaque, memory: []u8, alignment: Alignment, new_len: usize, ret_addr: usize) ?[*]u8,

/// Free and invalidate a region of memory.
///
/// `memory.len` must equal the most recent length returned by `alloc` or
/// given to a successful `resize` call.
///
/// `alignment` must equal the same value that was passed as the
/// `alignment` parameter to the original `alloc` call.
///
/// `ret_addr` is optionally provided as the first return address of the
/// allocation call stack. If the value is `0` it means no return address
/// has been provided.
free: *const fn (*anyopaque, memory: []u8, alignment: Alignment, ret_addr: usize) void,
};

pub fn noResize(
self: *anyopaque,
buf: []u8,
log2_buf_align: u8,
memory: []u8,
alignment: Alignment,
new_len: usize,
ret_addr: usize,
) bool {
_ = self;
_ = buf;
_ = log2_buf_align;
_ = memory;
_ = alignment;
_ = new_len;
_ = ret_addr;
return false;
}

pub fn noRemap(
self: *anyopaque,
memory: []u8,
alignment: Alignment,
new_len: usize,
ret_addr: usize,
) ?[*]u8 {
_ = self;
_ = memory;
_ = alignment;
_ = new_len;
_ = ret_addr;
return null;
}

pub fn noFree(
self: *anyopaque,
buf: []u8,
log2_buf_align: u8,
memory: []u8,
alignment: Alignment,
ret_addr: usize,
) void {
_ = self;
_ = buf;
_ = log2_buf_align;
_ = memory;
_ = alignment;
_ = ret_addr;
}

/// This function is not intended to be called except from within the
/// implementation of an Allocator
pub inline fn rawAlloc(self: Allocator, len: usize, ptr_align: u8, ret_addr: usize) ?[*]u8 {
return self.vtable.alloc(self.ptr, len, ptr_align, ret_addr);
pub inline fn rawAlloc(a: Allocator, len: usize, alignment: Alignment, ret_addr: usize) ?[*]u8 {
return a.vtable.alloc(a.ptr, len, alignment, ret_addr);
}

/// This function is not intended to be called except from within the
/// implementation of an Allocator.
pub inline fn rawResize(a: Allocator, memory: []u8, alignment: Alignment, new_len: usize, ret_addr: usize) bool {
return a.vtable.resize(a.ptr, memory, alignment, new_len, ret_addr);
}

/// This function is not intended to be called except from within the
/// implementation of an Allocator.
pub inline fn rawRemap(a: Allocator, memory: []u8, alignment: Alignment, new_len: usize, ret_addr: usize) ?[*]u8 {
return a.vtable.remap(a.ptr, memory, alignment, new_len, ret_addr);
}

/// This function is not intended to be called except from within the
/// implementation of an Allocator
pub inline fn rawResize(self: Allocator, buf: []u8, log2_buf_align: u8, new_len: usize, ret_addr: usize) bool {
return self.vtable.resize(self.ptr, buf, log2_buf_align, new_len, ret_addr);
}

/// This function is not intended to be called except from within the
/// implementation of an Allocator
pub inline fn rawFree(self: Allocator, buf: []u8, log2_buf_align: u8, ret_addr: usize) void {
return self.vtable.free(self.ptr, buf, log2_buf_align, ret_addr);
pub inline fn rawFree(a: Allocator, memory: []u8, alignment: Alignment, ret_addr: usize) void {
return a.vtable.free(a.ptr, memory, alignment, ret_addr);
}

/// Returns a pointer to undefined memory.
/// Call `destroy` with the result to free the memory.
pub fn create(self: Allocator, comptime T: type) Error!*T {
pub fn create(a: Allocator, comptime T: type) Error!*T {
if (@sizeOf(T) == 0) return @as(*T, @ptrFromInt(math.maxInt(usize)));
const ptr: *T = @ptrCast(try self.allocBytesWithAlignment(@alignOf(T), @sizeOf(T), @returnAddress()));
const ptr: *T = @ptrCast(try a.allocBytesWithAlignment(@alignOf(T), @sizeOf(T), @returnAddress()));
return ptr;
}

@@ -121,7 +167,7 @@ pub fn destroy(self: Allocator, ptr: anytype) void {
const T = info.child;
if (@sizeOf(T) == 0) return;
const non_const_ptr = @as([*]u8, @ptrCast(@constCast(ptr)));
self.rawFree(non_const_ptr[0..@sizeOf(T)], log2a(info.alignment), @returnAddress());
self.rawFree(non_const_ptr[0..@sizeOf(T)], .fromByteUnits(info.alignment), @returnAddress());
}

/// Allocates an array of `n` items of type `T` and sets all the
@@ -224,36 +270,88 @@ fn allocBytesWithAlignment(self: Allocator, comptime alignment: u29, byte_count:
return @as([*]align(alignment) u8, @ptrFromInt(ptr));
}

const byte_ptr = self.rawAlloc(byte_count, log2a(alignment), return_address) orelse return Error.OutOfMemory;
const byte_ptr = self.rawAlloc(byte_count, .fromByteUnits(alignment), return_address) orelse return Error.OutOfMemory;
// TODO: https://github.com/ziglang/zig/issues/4298
@memset(byte_ptr[0..byte_count], undefined);
return @alignCast(byte_ptr);
}

/// Requests to modify the size of an allocation. It is guaranteed to not move
/// the pointer, however the allocator implementation may refuse the resize
/// request by returning `false`.
pub fn resize(self: Allocator, old_mem: anytype, new_n: usize) bool {
const Slice = @typeInfo(@TypeOf(old_mem)).pointer;
/// Request to modify the size of an allocation.
///
/// It is guaranteed to not move the pointer, however the allocator
/// implementation may refuse the resize request by returning `false`.
///
/// `allocation` may be an empty slice, in which case a new allocation is made.
///
/// `new_len` may be zero, in which case the allocation is freed.
pub fn resize(self: Allocator, allocation: anytype, new_len: usize) bool {
const Slice = @typeInfo(@TypeOf(allocation)).pointer;
const T = Slice.child;
if (new_n == 0) {
self.free(old_mem);
const alignment = Slice.alignment;
if (new_len == 0) {
self.free(allocation);
return true;
}
if (old_mem.len == 0) {
if (allocation.len == 0) {
return false;
}
const old_byte_slice = mem.sliceAsBytes(old_mem);
const old_memory = mem.sliceAsBytes(allocation);
// I would like to use saturating multiplication here, but LLVM cannot lower it
// on WebAssembly: https://github.com/ziglang/zig/issues/9660
//const new_byte_count = new_n *| @sizeOf(T);
const new_byte_count = math.mul(usize, @sizeOf(T), new_n) catch return false;
return self.rawResize(old_byte_slice, log2a(Slice.alignment), new_byte_count, @returnAddress());
//const new_len_bytes = new_len *| @sizeOf(T);
const new_len_bytes = math.mul(usize, @sizeOf(T), new_len) catch return false;
return self.rawResize(old_memory, .fromByteUnits(alignment), new_len_bytes, @returnAddress());
}

/// Request to modify the size of an allocation, allowing relocation.
///
/// A non-`null` return value indicates the resize was successful. The
/// allocation may have same address, or may have been relocated. In either
/// case, the allocation now has size of `new_len`. A `null` return value
/// indicates that the resize would be equivalent to allocating new memory,
/// copying the bytes from the old memory, and then freeing the old memory.
/// In such case, it is more efficient for the caller to perform those
/// operations.
///
/// `allocation` may be an empty slice, in which case a new allocation is made.
///
/// `new_len` may be zero, in which case the allocation is freed.
pub fn remap(self: Allocator, allocation: anytype, new_len: usize) t: {
const Slice = @typeInfo(@TypeOf(allocation)).pointer;
break :t ?[]align(Slice.alignment) Slice.child;
} {
const Slice = @typeInfo(@TypeOf(allocation)).pointer;
const T = Slice.child;
const alignment = Slice.alignment;
if (new_len == 0) {
self.free(allocation);
return allocation[0..0];
}
if (allocation.len == 0) {
return null;
}
const old_memory = mem.sliceAsBytes(allocation);
// I would like to use saturating multiplication here, but LLVM cannot lower it
// on WebAssembly: https://github.com/ziglang/zig/issues/9660
//const new_len_bytes = new_len *| @sizeOf(T);
const new_len_bytes = math.mul(usize, @sizeOf(T), new_len) catch return null;
const new_ptr = self.rawRemap(old_memory, .fromByteUnits(alignment), new_len_bytes, @returnAddress()) orelse return null;
const new_memory: []align(alignment) u8 = @alignCast(new_ptr[0..new_len_bytes]);
return mem.bytesAsSlice(T, new_memory);
}

/// This function requests a new byte size for an existing allocation, which
/// can be larger, smaller, or the same size as the old memory allocation.
///
/// If `new_n` is 0, this is the same as `free` and it always succeeds.
///
/// `old_mem` may have length zero, which makes a new allocation.
///
/// This function only fails on out-of-memory conditions, unlike:
/// * `remap` which returns `null` when the `Allocator` implementation cannot
/// do the realloc more efficiently than the caller
/// * `resize` which returns `false` when the `Allocator` implementation cannot
/// change the size without relocating the allocation.
pub fn realloc(self: Allocator, old_mem: anytype, new_n: usize) t: {
const Slice = @typeInfo(@TypeOf(old_mem)).pointer;
break :t Error![]align(Slice.alignment) Slice.child;
@@ -284,18 +382,18 @@ pub fn reallocAdvanced(
const old_byte_slice = mem.sliceAsBytes(old_mem);
const byte_count = math.mul(usize, @sizeOf(T), new_n) catch return Error.OutOfMemory;
// Note: can't set shrunk memory to undefined as memory shouldn't be modified on realloc failure
if (self.rawResize(old_byte_slice, log2a(Slice.alignment), byte_count, return_address)) {
const new_bytes: []align(Slice.alignment) u8 = @alignCast(old_byte_slice.ptr[0..byte_count]);
if (self.rawRemap(old_byte_slice, .fromByteUnits(Slice.alignment), byte_count, return_address)) |p| {
const new_bytes: []align(Slice.alignment) u8 = @alignCast(p[0..byte_count]);
return mem.bytesAsSlice(T, new_bytes);
}

const new_mem = self.rawAlloc(byte_count, log2a(Slice.alignment), return_address) orelse
const new_mem = self.rawAlloc(byte_count, .fromByteUnits(Slice.alignment), return_address) orelse
return error.OutOfMemory;
const copy_len = @min(byte_count, old_byte_slice.len);
@memcpy(new_mem[0..copy_len], old_byte_slice[0..copy_len]);
// TODO https://github.com/ziglang/zig/issues/4298
@memset(old_byte_slice, undefined);
self.rawFree(old_byte_slice, log2a(Slice.alignment), return_address);
self.rawFree(old_byte_slice, .fromByteUnits(Slice.alignment), return_address);

const new_bytes: []align(Slice.alignment) u8 = @alignCast(new_mem[0..byte_count]);
return mem.bytesAsSlice(T, new_bytes);
@@ -312,7 +410,7 @@ pub fn free(self: Allocator, memory: anytype) void {
const non_const_ptr = @constCast(bytes.ptr);
// TODO: https://github.com/ziglang/zig/issues/4298
@memset(non_const_ptr[0..bytes_len], undefined);
self.rawFree(non_const_ptr[0..bytes_len], log2a(Slice.alignment), @returnAddress());
self.rawFree(non_const_ptr[0..bytes_len], .fromByteUnits(Slice.alignment), @returnAddress());
}

/// Copies `m` to newly allocated memory. Caller owns the memory.
@@ -329,17 +427,3 @@ pub fn dupeZ(allocator: Allocator, comptime T: type, m: []const T) Error![:0]T {
new_buf[m.len] = 0;
return new_buf[0..m.len :0];
}

/// TODO replace callsites with `@log2` after this proposal is implemented:
/// https://github.com/ziglang/zig/issues/13642
inline fn log2a(x: anytype) switch (@typeInfo(@TypeOf(x))) {
.int => math.Log2Int(@TypeOf(x)),
.comptime_int => comptime_int,
else => @compileError("int please"),
} {
switch (@typeInfo(@TypeOf(x))) {
.int => return math.log2_int(@TypeOf(x), x),
.comptime_int => return math.log2(x),
else => @compileError("bad"),
}
}

@@ -62,6 +62,7 @@ pub const FailingAllocator = struct {
.vtable = &.{
.alloc = alloc,
.resize = resize,
.remap = remap,
.free = free,
},
};
@@ -70,7 +71,7 @@ pub const FailingAllocator = struct {
fn alloc(
ctx: *anyopaque,
len: usize,
log2_ptr_align: u8,
alignment: mem.Alignment,
return_address: usize,
) ?[*]u8 {
const self: *FailingAllocator = @ptrCast(@alignCast(ctx));
@@ -86,7 +87,7 @@ pub const FailingAllocator = struct {
}
return null;
}
const result = self.internal_allocator.rawAlloc(len, log2_ptr_align, return_address) orelse
const result = self.internal_allocator.rawAlloc(len, alignment, return_address) orelse
return null;
self.allocated_bytes += len;
self.allocations += 1;
@@ -96,33 +97,52 @@ pub const FailingAllocator = struct {

fn resize(
ctx: *anyopaque,
old_mem: []u8,
log2_old_align: u8,
memory: []u8,
alignment: mem.Alignment,
new_len: usize,
ra: usize,
) bool {
const self: *FailingAllocator = @ptrCast(@alignCast(ctx));
if (self.resize_index == self.resize_fail_index)
return false;
if (!self.internal_allocator.rawResize(old_mem, log2_old_align, new_len, ra))
if (!self.internal_allocator.rawResize(memory, alignment, new_len, ra))
return false;
if (new_len < old_mem.len) {
self.freed_bytes += old_mem.len - new_len;
if (new_len < memory.len) {
self.freed_bytes += memory.len - new_len;
} else {
self.allocated_bytes += new_len - old_mem.len;
self.allocated_bytes += new_len - memory.len;
}
self.resize_index += 1;
return true;
}

fn remap(
ctx: *anyopaque,
memory: []u8,
alignment: mem.Alignment,
new_len: usize,
ra: usize,
) ?[*]u8 {
const self: *FailingAllocator = @ptrCast(@alignCast(ctx));
if (self.resize_index == self.resize_fail_index) return null;
const new_ptr = self.internal_allocator.rawRemap(memory, alignment, new_len, ra) orelse return null;
if (new_len < memory.len) {
self.freed_bytes += memory.len - new_len;
} else {
self.allocated_bytes += new_len - memory.len;
}
self.resize_index += 1;
return new_ptr;
}

fn free(
ctx: *anyopaque,
old_mem: []u8,
log2_old_align: u8,
alignment: mem.Alignment,
ra: usize,
) void {
const self: *FailingAllocator = @ptrCast(@alignCast(ctx));
self.internal_allocator.rawFree(old_mem, log2_old_align, ra);
self.internal_allocator.rawFree(old_mem, alignment, ra);
self.deallocations += 1;
self.freed_bytes += old_mem.len;
}