new allocator interface

parent 129a4fb251
commit dc9648f868
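For orientation, the heart of this change is the shape of the `Allocator` vtable in `lib/std/mem.zig`: the old `reallocFn`/`shrinkFn` pair is replaced by `allocFn`/`resizeFn`, and a new `len_align` parameter lets an allocator return more memory than was requested. A before/after summary of the two field sets, copied from the diff below (this is a reading aid, not part of the commit):

// Before: a single realloc-style entry point plus an infallible shrink.
reallocFn: fn (self: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) Error![]u8,
shrinkFn: fn (self: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8,

// After: allocation and in-place resize are separate operations; `len_align`
// allows the allocator to return a longer, aligned length (0 means "exactly len").
allocFn: fn (self: *Allocator, len: usize, ptr_align: u29, len_align: u29) Error![]u8,
resizeFn: fn (self: *Allocator, buf: []u8, new_len: usize, len_align: u29) Error!usize,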
lib/std/array_list.zig

@@ -219,7 +219,8 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
                if (better_capacity >= new_capacity) break;
            }

            const new_memory = try self.allocator.realloc(self.allocatedSlice(), better_capacity);
            const new_memory = try self.allocator.reallocAtLeast(self.allocatedSlice(), better_capacity);
            assert(new_memory.len >= better_capacity);
            self.items.ptr = new_memory.ptr;
            self.capacity = new_memory.len;
        }

@@ -441,7 +442,8 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
                if (better_capacity >= new_capacity) break;
            }

            const new_memory = try allocator.realloc(self.allocatedSlice(), better_capacity);
            const new_memory = try allocator.reallocAtLeast(self.allocatedSlice(), better_capacity);
            assert(new_memory.len >= better_capacity);
            self.items.ptr = new_memory.ptr;
            self.capacity = new_memory.len;
        }

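The switch from `realloc` to `reallocAtLeast` means the returned slice may be longer than the requested capacity; the new `assert(new_memory.len >= better_capacity)` and the existing `self.capacity = new_memory.len` assignment let the list keep that surplus instead of discarding it. A hypothetical caller-side sketch (not part of the commit) of the observable effect:

const std = @import("std");

test "ArrayList keeps surplus capacity from reallocAtLeast" {
    // Ask for at least 100 elements of capacity; the backing allocator may
    // round the request up (e.g. to a page or malloc bucket), and the list
    // records whatever length actually came back.
    var list = std.ArrayList(u8).init(std.heap.page_allocator);
    defer list.deinit();
    try list.ensureCapacity(100);
    std.debug.assert(list.capacity >= 100); // may be larger than 100
}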
lib/std/c.zig

@@ -232,6 +232,17 @@ pub extern "c" fn setuid(uid: c_uint) c_int;

pub extern "c" fn aligned_alloc(alignment: usize, size: usize) ?*c_void;
pub extern "c" fn malloc(usize) ?*c_void;

pub usingnamespace switch (builtin.os.tag) {
    .linux, .freebsd, .kfreebsd, .netbsd, .openbsd => struct {
        pub extern "c" fn malloc_usable_size(?*const c_void) usize;
    },
    .macosx, .ios, .watchos, .tvos => struct {
        pub extern "c" fn malloc_size(?*const c_void) usize;
    },
    else => struct {},
};

pub extern "c" fn realloc(?*c_void, usize) ?*c_void;
pub extern "c" fn free(*c_void) void;
pub extern "c" fn posix_memalign(memptr: **c_void, alignment: usize, size: usize) c_int;
602 lib/std/heap.zig
@ -15,48 +15,88 @@ pub const ArenaAllocator = @import("heap/arena_allocator.zig").ArenaAllocator;
|
||||
|
||||
const Allocator = mem.Allocator;
|
||||
|
||||
pub const c_allocator = &c_allocator_state;
|
||||
var c_allocator_state = Allocator{
|
||||
.reallocFn = cRealloc,
|
||||
.shrinkFn = cShrink,
|
||||
usingnamespace if (comptime @hasDecl(c, "malloc_size")) struct {
|
||||
pub const supports_malloc_size = true;
|
||||
pub const malloc_size = c.malloc_size;
|
||||
} else if (comptime @hasDecl(c, "malloc_usable_size")) struct {
|
||||
pub const supports_malloc_size = true;
|
||||
pub const malloc_size = c.malloc_usable_size;
|
||||
} else struct {
|
||||
pub const supports_malloc_size = false;
|
||||
};
|
||||
|
||||
fn cRealloc(self: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
|
||||
assert(new_align <= @alignOf(c_longdouble));
|
||||
const old_ptr = if (old_mem.len == 0) null else @ptrCast(*c_void, old_mem.ptr);
|
||||
const buf = c.realloc(old_ptr, new_size) orelse return error.OutOfMemory;
|
||||
return @ptrCast([*]u8, buf)[0..new_size];
|
||||
pub const c_allocator = mem.getAllocatorPtr(&c_allocator_state);
|
||||
var c_allocator_state = Allocator{
|
||||
.allocFn = cAlloc,
|
||||
.resizeFn = cResize,
|
||||
};
|
||||
|
||||
fn cAlloc(self: *Allocator, len: usize, ptr_align: u29, len_align: u29) Allocator.Error![]u8 {
|
||||
assert(ptr_align <= @alignOf(c_longdouble));
|
||||
const ptr = @ptrCast([*]u8, c.malloc(len) orelse return error.OutOfMemory);
|
||||
if (len_align == 0) {
|
||||
return ptr[0..len];
|
||||
}
|
||||
const full_len = init: {
|
||||
if (comptime supports_malloc_size) {
|
||||
const s = malloc_size(ptr);
|
||||
assert(s >= len);
|
||||
break :init s;
|
||||
}
|
||||
break :init len;
|
||||
};
|
||||
return ptr[0..mem.alignBackwardAnyAlign(full_len, len_align)];
|
||||
}
|
||||
|
||||
fn cShrink(self: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
|
||||
const old_ptr = @ptrCast(*c_void, old_mem.ptr);
|
||||
const buf = c.realloc(old_ptr, new_size) orelse return old_mem[0..new_size];
|
||||
return @ptrCast([*]u8, buf)[0..new_size];
|
||||
fn cResize(self: *Allocator, buf: []u8, new_len: usize, len_align: u29) Allocator.Error!usize {
|
||||
if (new_len == 0) {
|
||||
c.free(buf.ptr);
|
||||
return 0;
|
||||
}
|
||||
if (new_len <= buf.len) {
|
||||
return mem.alignAllocLen(buf.len, new_len, len_align);
|
||||
}
|
||||
if (comptime supports_malloc_size) {
|
||||
const full_len = malloc_size(buf.ptr);
|
||||
if (new_len <= full_len) {
|
||||
return mem.alignAllocLen(full_len, new_len, len_align);
|
||||
}
|
||||
}
|
||||
// TODO: could we still use realloc? are there any cases where we can guarantee that realloc won't move memory?
|
||||
return error.OutOfMemory;
|
||||
}
|
||||
|
||||
/// This allocator makes a syscall directly for every allocation and free.
|
||||
/// Thread-safe and lock-free.
|
||||
pub const page_allocator = if (std.Target.current.isWasm())
|
||||
&wasm_page_allocator_state
|
||||
mem.getAllocatorPtr(&wasm_page_allocator_state)
|
||||
else if (std.Target.current.os.tag == .freestanding)
|
||||
root.os.heap.page_allocator
|
||||
else
|
||||
&page_allocator_state;
|
||||
mem.getAllocatorPtr(&page_allocator_state);
|
||||
|
||||
var page_allocator_state = Allocator{
|
||||
.reallocFn = PageAllocator.realloc,
|
||||
.shrinkFn = PageAllocator.shrink,
|
||||
.allocFn = PageAllocator.alloc,
|
||||
.resizeFn = PageAllocator.resize,
|
||||
};
|
||||
var wasm_page_allocator_state = Allocator{
|
||||
.reallocFn = WasmPageAllocator.realloc,
|
||||
.shrinkFn = WasmPageAllocator.shrink,
|
||||
.allocFn = WasmPageAllocator.alloc,
|
||||
.resizeFn = WasmPageAllocator.resize,
|
||||
};
|
||||
|
||||
pub const direct_allocator = @compileError("deprecated; use std.heap.page_allocator");
|
||||
|
||||
/// Verifies that the adjusted length will still map to the full length
|
||||
pub fn alignPageAllocLen(full_len: usize, len: usize, len_align: u29) usize {
|
||||
const aligned_len = mem.alignAllocLen(full_len, len, len_align);
|
||||
assert(mem.alignForward(aligned_len, mem.page_size) == full_len);
|
||||
return aligned_len;
|
||||
}
|
||||
|
||||
const PageAllocator = struct {
|
||||
fn alloc(allocator: *Allocator, n: usize, alignment: u29) error{OutOfMemory}![]u8 {
|
||||
if (n == 0) return &[0]u8{};
|
||||
fn alloc(allocator: *Allocator, n: usize, alignment: u29, len_align: u29) error{OutOfMemory}![]u8 {
|
||||
assert(n > 0);
|
||||
const alignedLen = mem.alignForward(n, mem.page_size);
|
||||
|
||||
if (builtin.os.tag == .windows) {
|
||||
const w = os.windows;
|
||||
@ -68,21 +108,21 @@ const PageAllocator = struct {
|
||||
// see https://devblogs.microsoft.com/oldnewthing/?p=42223
|
||||
const addr = w.VirtualAlloc(
|
||||
null,
|
||||
n,
|
||||
alignedLen,
|
||||
w.MEM_COMMIT | w.MEM_RESERVE,
|
||||
w.PAGE_READWRITE,
|
||||
) catch return error.OutOfMemory;
|
||||
|
||||
// If the allocation is sufficiently aligned, use it.
|
||||
if (@ptrToInt(addr) & (alignment - 1) == 0) {
|
||||
return @ptrCast([*]u8, addr)[0..n];
|
||||
return @ptrCast([*]u8, addr)[0..alignPageAllocLen(alignedLen, n, len_align)];
|
||||
}
|
||||
|
||||
// If it wasn't, actually do an explicitely aligned allocation.
|
||||
w.VirtualFree(addr, 0, w.MEM_RELEASE);
|
||||
const alloc_size = n + alignment;
|
||||
const alloc_size = n + alignment - mem.page_size;
|
||||
|
||||
const final_addr = while (true) {
|
||||
while (true) {
|
||||
// Reserve a range of memory large enough to find a sufficiently
|
||||
// aligned address.
|
||||
const reserved_addr = w.VirtualAlloc(
|
||||
@ -102,48 +142,50 @@ const PageAllocator = struct {
|
||||
// until it succeeds.
|
||||
const ptr = w.VirtualAlloc(
|
||||
@intToPtr(*c_void, aligned_addr),
|
||||
n,
|
||||
alignedLen,
|
||||
w.MEM_COMMIT | w.MEM_RESERVE,
|
||||
w.PAGE_READWRITE,
|
||||
) catch continue;
|
||||
|
||||
return @ptrCast([*]u8, ptr)[0..n];
|
||||
};
|
||||
|
||||
return @ptrCast([*]u8, final_addr)[0..n];
|
||||
return @ptrCast([*]u8, ptr)[0..alignPageAllocLen(alignedLen, n, len_align)];
|
||||
}
|
||||
}
|
||||
|
||||
const alloc_size = if (alignment <= mem.page_size) n else n + alignment;
|
||||
const maxDropLen = alignment - std.math.min(alignment, mem.page_size);
|
||||
const allocLen = if (maxDropLen <= alignedLen - n) alignedLen
|
||||
else mem.alignForward(alignedLen + maxDropLen, mem.page_size);
|
||||
const slice = os.mmap(
|
||||
null,
|
||||
mem.alignForward(alloc_size, mem.page_size),
|
||||
allocLen,
|
||||
os.PROT_READ | os.PROT_WRITE,
|
||||
os.MAP_PRIVATE | os.MAP_ANONYMOUS,
|
||||
-1,
|
||||
0,
|
||||
) catch return error.OutOfMemory;
|
||||
if (alloc_size == n) return slice[0..n];
|
||||
assert(mem.isAligned(@ptrToInt(slice.ptr), mem.page_size));
|
||||
|
||||
const aligned_addr = mem.alignForward(@ptrToInt(slice.ptr), alignment);
|
||||
|
||||
// Unmap the extra bytes that were only requested in order to guarantee
|
||||
// that the range of memory we were provided had a proper alignment in
|
||||
// it somewhere. The extra bytes could be at the beginning, or end, or both.
|
||||
const unused_start_len = aligned_addr - @ptrToInt(slice.ptr);
|
||||
if (unused_start_len != 0) {
|
||||
os.munmap(slice[0..unused_start_len]);
|
||||
}
|
||||
const aligned_end_addr = mem.alignForward(aligned_addr + n, mem.page_size);
|
||||
const unused_end_len = @ptrToInt(slice.ptr) + slice.len - aligned_end_addr;
|
||||
if (unused_end_len != 0) {
|
||||
os.munmap(@intToPtr([*]align(mem.page_size) u8, aligned_end_addr)[0..unused_end_len]);
|
||||
const dropLen = aligned_addr - @ptrToInt(slice.ptr);
|
||||
if (dropLen != 0) {
|
||||
os.munmap(slice[0..dropLen]);
|
||||
}
|
||||
|
||||
return @intToPtr([*]u8, aligned_addr)[0..n];
|
||||
// Unmap extra pages
|
||||
const alignedBufferLen = allocLen - dropLen;
|
||||
if (alignedBufferLen > alignedLen) {
|
||||
os.munmap(@alignCast(mem.page_size, @intToPtr([*]u8, aligned_addr))[alignedLen..alignedBufferLen]);
|
||||
}
|
||||
|
||||
return @intToPtr([*]u8, aligned_addr)[0..alignPageAllocLen(alignedLen, n, len_align)];
|
||||
}
|
||||
|
||||
fn shrink(allocator: *Allocator, old_mem_unaligned: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
|
||||
const old_mem = @alignCast(mem.page_size, old_mem_unaligned);
|
||||
fn resize(allocator: *Allocator, buf_unaligned: []u8, new_size: usize, len_align: u29) Allocator.Error!usize {
|
||||
const new_size_aligned = mem.alignForward(new_size, mem.page_size);
|
||||
|
||||
if (builtin.os.tag == .windows) {
|
||||
const w = os.windows;
|
||||
if (new_size == 0) {
|
||||
@ -153,100 +195,45 @@ const PageAllocator = struct {
|
||||
// is reserved in the initial allocation call to VirtualAlloc."
|
||||
// So we can only use MEM_RELEASE when actually releasing the
|
||||
// whole allocation.
|
||||
w.VirtualFree(old_mem.ptr, 0, w.MEM_RELEASE);
|
||||
} else {
|
||||
const base_addr = @ptrToInt(old_mem.ptr);
|
||||
const old_addr_end = base_addr + old_mem.len;
|
||||
const new_addr_end = base_addr + new_size;
|
||||
const new_addr_end_rounded = mem.alignForward(new_addr_end, mem.page_size);
|
||||
if (old_addr_end > new_addr_end_rounded) {
|
||||
w.VirtualFree(buf_unaligned.ptr, 0, w.MEM_RELEASE);
|
||||
return 0;
|
||||
}
|
||||
if (new_size < buf_unaligned.len) {
|
||||
const base_addr = @ptrToInt(buf_unaligned.ptr);
|
||||
const old_addr_end = base_addr + buf_unaligned.len;
|
||||
const new_addr_end = mem.alignForward(base_addr + new_size, mem.page_size);
|
||||
if (old_addr_end > new_addr_end) {
|
||||
// For shrinking that is not releasing, we will only
|
||||
// decommit the pages not needed anymore.
|
||||
w.VirtualFree(
|
||||
@intToPtr(*c_void, new_addr_end_rounded),
|
||||
old_addr_end - new_addr_end_rounded,
|
||||
@intToPtr(*c_void, new_addr_end),
|
||||
old_addr_end - new_addr_end,
|
||||
w.MEM_DECOMMIT,
|
||||
);
|
||||
}
|
||||
return alignPageAllocLen(new_size_aligned, new_size, len_align);
|
||||
}
|
||||
return old_mem[0..new_size];
|
||||
}
|
||||
const base_addr = @ptrToInt(old_mem.ptr);
|
||||
const old_addr_end = base_addr + old_mem.len;
|
||||
const new_addr_end = base_addr + new_size;
|
||||
const new_addr_end_rounded = mem.alignForward(new_addr_end, mem.page_size);
|
||||
if (old_addr_end > new_addr_end_rounded) {
|
||||
const ptr = @intToPtr([*]align(mem.page_size) u8, new_addr_end_rounded);
|
||||
os.munmap(ptr[0 .. old_addr_end - new_addr_end_rounded]);
|
||||
}
|
||||
return old_mem[0..new_size];
|
||||
}
|
||||
|
||||
fn realloc(allocator: *Allocator, old_mem_unaligned: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
|
||||
const old_mem = @alignCast(mem.page_size, old_mem_unaligned);
|
||||
if (builtin.os.tag == .windows) {
|
||||
if (old_mem.len == 0) {
|
||||
return alloc(allocator, new_size, new_align);
|
||||
if (new_size == buf_unaligned.len) {
|
||||
return alignPageAllocLen(new_size_aligned, new_size, len_align);
|
||||
}
|
||||
|
||||
if (new_size <= old_mem.len and new_align <= old_align) {
|
||||
return shrink(allocator, old_mem, old_align, new_size, new_align);
|
||||
}
|
||||
|
||||
const w = os.windows;
|
||||
const base_addr = @ptrToInt(old_mem.ptr);
|
||||
|
||||
if (new_align > old_align and base_addr & (new_align - 1) != 0) {
|
||||
// Current allocation doesn't satisfy the new alignment.
|
||||
// For now we'll do a new one no matter what, but maybe
|
||||
// there is something smarter to do instead.
|
||||
const result = try alloc(allocator, new_size, new_align);
|
||||
assert(old_mem.len != 0);
|
||||
@memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len));
|
||||
w.VirtualFree(old_mem.ptr, 0, w.MEM_RELEASE);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
const old_addr_end = base_addr + old_mem.len;
|
||||
const old_addr_end_rounded = mem.alignForward(old_addr_end, mem.page_size);
|
||||
const new_addr_end = base_addr + new_size;
|
||||
const new_addr_end_rounded = mem.alignForward(new_addr_end, mem.page_size);
|
||||
if (new_addr_end_rounded == old_addr_end_rounded) {
|
||||
// The reallocation fits in the already allocated pages.
|
||||
return @ptrCast([*]u8, old_mem.ptr)[0..new_size];
|
||||
}
|
||||
assert(new_addr_end_rounded > old_addr_end_rounded);
|
||||
|
||||
// We need to commit new pages.
|
||||
const additional_size = new_addr_end - old_addr_end_rounded;
|
||||
const realloc_addr = w.kernel32.VirtualAlloc(
|
||||
@intToPtr(*c_void, old_addr_end_rounded),
|
||||
additional_size,
|
||||
w.MEM_COMMIT | w.MEM_RESERVE,
|
||||
w.PAGE_READWRITE,
|
||||
) orelse {
|
||||
// Committing new pages at the end of the existing allocation
|
||||
// failed, we need to try a new one.
|
||||
const new_alloc_mem = try alloc(allocator, new_size, new_align);
|
||||
@memcpy(new_alloc_mem.ptr, old_mem.ptr, old_mem.len);
|
||||
w.VirtualFree(old_mem.ptr, 0, w.MEM_RELEASE);
|
||||
|
||||
return new_alloc_mem;
|
||||
};
|
||||
|
||||
assert(@ptrToInt(realloc_addr) == old_addr_end_rounded);
|
||||
return @ptrCast([*]u8, old_mem.ptr)[0..new_size];
|
||||
// new_size > buf_unaligned.len not implemented
|
||||
return error.OutOfMemory;
|
||||
}
|
||||
if (new_size <= old_mem.len and new_align <= old_align) {
|
||||
return shrink(allocator, old_mem, old_align, new_size, new_align);
|
||||
|
||||
const buf_aligned_len = mem.alignForward(buf_unaligned.len, mem.page_size);
|
||||
if (new_size_aligned == buf_aligned_len)
|
||||
return alignPageAllocLen(new_size_aligned, new_size, len_align);
|
||||
|
||||
if (new_size_aligned < buf_aligned_len) {
|
||||
const ptr = @intToPtr([*]align(mem.page_size) u8, @ptrToInt(buf_unaligned.ptr) + new_size_aligned);
|
||||
os.munmap(ptr[0 .. buf_aligned_len - new_size_aligned]);
|
||||
if (new_size_aligned == 0)
|
||||
return 0;
|
||||
return alignPageAllocLen(new_size_aligned, new_size, len_align);
|
||||
}
|
||||
const result = try alloc(allocator, new_size, new_align);
|
||||
if (old_mem.len != 0) {
|
||||
@memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len));
|
||||
os.munmap(old_mem);
|
||||
}
|
||||
return result;
|
||||
|
||||
// TODO: call mremap
|
||||
return error.OutOfMemory;
|
||||
}
|
||||
};
|
||||
|
||||
@ -338,16 +325,24 @@ const WasmPageAllocator = struct {
|
||||
}
|
||||
|
||||
fn nPages(memsize: usize) usize {
|
||||
return std.mem.alignForward(memsize, std.mem.page_size) / std.mem.page_size;
|
||||
return mem.alignForward(memsize, mem.page_size) / mem.page_size;
|
||||
}
|
||||
|
||||
fn alloc(allocator: *Allocator, page_count: usize, alignment: u29) error{OutOfMemory}!usize {
|
||||
var idx = conventional.useRecycled(page_count);
|
||||
if (idx != FreeBlock.not_found) {
|
||||
return idx;
|
||||
fn alloc(allocator: *Allocator, len: usize, alignment: u29, len_align: u29) error{OutOfMemory}![]u8 {
|
||||
const page_count = nPages(len);
|
||||
const page_idx = try allocPages(page_count);
|
||||
return @intToPtr([*]u8, page_idx * mem.page_size)
|
||||
[0..alignPageAllocLen(page_count * mem.page_size, len, len_align)];
|
||||
}
|
||||
fn allocPages(page_count: usize) !usize {
|
||||
{
|
||||
const idx = conventional.useRecycled(page_count);
|
||||
if (idx != FreeBlock.not_found) {
|
||||
return idx;
|
||||
}
|
||||
}
|
||||
|
||||
idx = extended.useRecycled(page_count);
|
||||
const idx = extended.useRecycled(page_count);
|
||||
if (idx != FreeBlock.not_found) {
|
||||
return idx + extendedOffset();
|
||||
}
|
||||
@ -360,51 +355,36 @@ const WasmPageAllocator = struct {
|
||||
return @intCast(usize, prev_page_count);
|
||||
}
|
||||
|
||||
pub fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) Allocator.Error![]u8 {
|
||||
if (new_align > std.mem.page_size) {
|
||||
return error.OutOfMemory;
|
||||
fn freePages(start: usize, end: usize) void {
|
||||
if (start < extendedOffset()) {
|
||||
conventional.recycle(start, std.math.min(extendedOffset(), end) - start);
|
||||
}
|
||||
if (end > extendedOffset()) {
|
||||
var new_end = end;
|
||||
if (!extended.isInitialized()) {
|
||||
// Steal the last page from the memory currently being recycled
|
||||
// TODO: would it be better if we use the first page instead?
|
||||
new_end -= 1;
|
||||
|
||||
if (nPages(new_size) == nPages(old_mem.len)) {
|
||||
return old_mem.ptr[0..new_size];
|
||||
} else if (new_size < old_mem.len) {
|
||||
return shrink(allocator, old_mem, old_align, new_size, new_align);
|
||||
} else {
|
||||
const page_idx = try alloc(allocator, nPages(new_size), new_align);
|
||||
const new_mem = @intToPtr([*]u8, page_idx * std.mem.page_size)[0..new_size];
|
||||
std.mem.copy(u8, new_mem, old_mem);
|
||||
_ = shrink(allocator, old_mem, old_align, 0, 0);
|
||||
return new_mem;
|
||||
extended.data = @intToPtr([*]u128, new_end * mem.page_size)[0 .. mem.page_size / @sizeOf(u128)];
|
||||
// Since this is the first page being freed and we consume it, assume *nothing* is free.
|
||||
mem.set(u128, extended.data, PageStatus.none_free);
|
||||
}
|
||||
const clamped_start = std.math.max(extendedOffset(), start);
|
||||
extended.recycle(clamped_start - extendedOffset(), new_end - clamped_start);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
|
||||
@setCold(true);
|
||||
const free_start = nPages(@ptrToInt(old_mem.ptr) + new_size);
|
||||
var free_end = nPages(@ptrToInt(old_mem.ptr) + old_mem.len);
|
||||
|
||||
if (free_end > free_start) {
|
||||
if (free_start < extendedOffset()) {
|
||||
const clamped_end = std.math.min(extendedOffset(), free_end);
|
||||
conventional.recycle(free_start, clamped_end - free_start);
|
||||
}
|
||||
|
||||
if (free_end > extendedOffset()) {
|
||||
if (!extended.isInitialized()) {
|
||||
// Steal the last page from the memory currently being recycled
|
||||
// TODO: would it be better if we use the first page instead?
|
||||
free_end -= 1;
|
||||
|
||||
extended.data = @intToPtr([*]u128, free_end * std.mem.page_size)[0 .. std.mem.page_size / @sizeOf(u128)];
|
||||
// Since this is the first page being freed and we consume it, assume *nothing* is free.
|
||||
std.mem.set(u128, extended.data, PageStatus.none_free);
|
||||
}
|
||||
const clamped_start = std.math.max(extendedOffset(), free_start);
|
||||
extended.recycle(clamped_start - extendedOffset(), free_end - clamped_start);
|
||||
}
|
||||
fn resize(allocator: *Allocator, buf: []u8, new_len: usize, len_align: u29) error{OutOfMemory}!usize {
|
||||
const aligned_len = mem.alignForward(buf.len, mem.page_size);
|
||||
if (new_len > aligned_len) return error.OutOfMemory;
|
||||
const current_n = nPages(aligned_len);
|
||||
const new_n = nPages(new_len);
|
||||
if (new_n != current_n) {
|
||||
const base = nPages(@ptrToInt(buf.ptr));
|
||||
freePages(base + new_n, base + current_n);
|
||||
}
|
||||
|
||||
return old_mem[0..new_size];
|
||||
return if (new_len == 0) 0 else alignPageAllocLen(new_n * mem.page_size, new_len, len_align);
|
||||
}
|
||||
};
|
||||
|
||||
@ -418,8 +398,8 @@ pub const HeapAllocator = switch (builtin.os.tag) {
|
||||
pub fn init() HeapAllocator {
|
||||
return HeapAllocator{
|
||||
.allocator = Allocator{
|
||||
.reallocFn = realloc,
|
||||
.shrinkFn = shrink,
|
||||
.allocFn = alloc,
|
||||
.resizeFn = resize,
|
||||
},
|
||||
.heap_handle = null,
|
||||
};
|
||||
@ -431,11 +411,14 @@ pub const HeapAllocator = switch (builtin.os.tag) {
|
||||
}
|
||||
}
|
||||
|
||||
fn alloc(allocator: *Allocator, n: usize, alignment: u29) error{OutOfMemory}![]u8 {
|
||||
const self = @fieldParentPtr(HeapAllocator, "allocator", allocator);
|
||||
if (n == 0) return &[0]u8{};
|
||||
fn getRecordPtr(buf: []u8) *align(1) usize {
|
||||
return @intToPtr(*align(1) usize, @ptrToInt(buf.ptr) + buf.len);
|
||||
}
|
||||
|
||||
const amt = n + alignment + @sizeOf(usize);
|
||||
fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29) error{OutOfMemory}![]u8 {
|
||||
const self = @fieldParentPtr(HeapAllocator, "allocator", allocator);
|
||||
|
||||
const amt = n + ptr_align - 1 + @sizeOf(usize);
|
||||
const optional_heap_handle = @atomicLoad(?HeapHandle, &self.heap_handle, builtin.AtomicOrder.SeqCst);
|
||||
const heap_handle = optional_heap_handle orelse blk: {
|
||||
const options = if (builtin.single_threaded) os.windows.HEAP_NO_SERIALIZE else 0;
|
||||
@ -446,66 +429,60 @@ pub const HeapAllocator = switch (builtin.os.tag) {
|
||||
};
|
||||
const ptr = os.windows.kernel32.HeapAlloc(heap_handle, 0, amt) orelse return error.OutOfMemory;
|
||||
const root_addr = @ptrToInt(ptr);
|
||||
const adjusted_addr = mem.alignForward(root_addr, alignment);
|
||||
const record_addr = adjusted_addr + n;
|
||||
@intToPtr(*align(1) usize, record_addr).* = root_addr;
|
||||
return @intToPtr([*]u8, adjusted_addr)[0..n];
|
||||
}
|
||||
|
||||
fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
|
||||
return realloc(allocator, old_mem, old_align, new_size, new_align) catch {
|
||||
const old_adjusted_addr = @ptrToInt(old_mem.ptr);
|
||||
const old_record_addr = old_adjusted_addr + old_mem.len;
|
||||
const root_addr = @intToPtr(*align(1) usize, old_record_addr).*;
|
||||
const old_ptr = @intToPtr(*c_void, root_addr);
|
||||
const new_record_addr = old_record_addr - new_size + old_mem.len;
|
||||
@intToPtr(*align(1) usize, new_record_addr).* = root_addr;
|
||||
return old_mem[0..new_size];
|
||||
const aligned_addr = mem.alignForward(root_addr, ptr_align);
|
||||
const return_len = init: {
|
||||
if (len_align == 0) break :init n;
|
||||
const full_len = os.windows.kernel32.HeapSize(heap_handle, 0, ptr);
|
||||
assert(full_len != std.math.maxInt(usize));
|
||||
assert(full_len >= amt);
|
||||
break :init mem.alignBackwardAnyAlign(full_len - (aligned_addr - root_addr), len_align);
|
||||
};
|
||||
const buf = @intToPtr([*]u8, aligned_addr)[0..return_len];
|
||||
getRecordPtr(buf).* = root_addr;
|
||||
return buf;
|
||||
}
|
||||
|
||||
fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
|
||||
if (old_mem.len == 0) return alloc(allocator, new_size, new_align);
|
||||
|
||||
fn resize(allocator: *Allocator, buf: []u8, new_size: usize, len_align: u29) error{OutOfMemory}!usize {
|
||||
const self = @fieldParentPtr(HeapAllocator, "allocator", allocator);
|
||||
const old_adjusted_addr = @ptrToInt(old_mem.ptr);
|
||||
const old_record_addr = old_adjusted_addr + old_mem.len;
|
||||
const root_addr = @intToPtr(*align(1) usize, old_record_addr).*;
|
||||
const old_ptr = @intToPtr(*c_void, root_addr);
|
||||
|
||||
if (new_size == 0) {
|
||||
os.windows.HeapFree(self.heap_handle.?, 0, old_ptr);
|
||||
return old_mem[0..0];
|
||||
os.windows.HeapFree(self.heap_handle.?, 0, @intToPtr(*c_void ,getRecordPtr(buf).*));
|
||||
return 0;
|
||||
}
|
||||
|
||||
const amt = new_size + new_align + @sizeOf(usize);
|
||||
const root_addr = getRecordPtr(buf).*;
|
||||
const align_offset = @ptrToInt(buf.ptr) - root_addr;
|
||||
const amt = align_offset + new_size + @sizeOf(usize);
|
||||
const new_ptr = os.windows.kernel32.HeapReAlloc(
|
||||
self.heap_handle.?,
|
||||
0,
|
||||
old_ptr,
|
||||
os.windows.HEAP_REALLOC_IN_PLACE_ONLY,
|
||||
@intToPtr(*c_void, root_addr),
|
||||
amt,
|
||||
) orelse return error.OutOfMemory;
|
||||
const offset = old_adjusted_addr - root_addr;
|
||||
const new_root_addr = @ptrToInt(new_ptr);
|
||||
var new_adjusted_addr = new_root_addr + offset;
|
||||
const offset_is_valid = new_adjusted_addr + new_size + @sizeOf(usize) <= new_root_addr + amt;
|
||||
const offset_is_aligned = new_adjusted_addr % new_align == 0;
|
||||
if (!offset_is_valid or !offset_is_aligned) {
|
||||
// If HeapReAlloc didn't happen to move the memory to the new alignment,
|
||||
// or the memory starting at the old offset would be outside of the new allocation,
|
||||
// then we need to copy the memory to a valid aligned address and use that
|
||||
const new_aligned_addr = mem.alignForward(new_root_addr, new_align);
|
||||
@memcpy(@intToPtr([*]u8, new_aligned_addr), @intToPtr([*]u8, new_adjusted_addr), std.math.min(old_mem.len, new_size));
|
||||
new_adjusted_addr = new_aligned_addr;
|
||||
}
|
||||
const new_record_addr = new_adjusted_addr + new_size;
|
||||
@intToPtr(*align(1) usize, new_record_addr).* = new_root_addr;
|
||||
return @intToPtr([*]u8, new_adjusted_addr)[0..new_size];
|
||||
assert(new_ptr == @intToPtr(*c_void, root_addr));
|
||||
const return_len = init: {
|
||||
if (len_align == 0) break :init new_size;
|
||||
const full_len = os.windows.kernel32.HeapSize(self.heap_handle.?, 0, new_ptr);
|
||||
assert(full_len != std.math.maxInt(usize));
|
||||
assert(full_len >= amt);
|
||||
break :init mem.alignBackwardAnyAlign(full_len - align_offset, len_align);
|
||||
};
|
||||
getRecordPtr(buf.ptr[0..return_len]).* = root_addr;
|
||||
return return_len;
|
||||
}
|
||||
},
|
||||
else => @compileError("Unsupported OS"),
|
||||
};
|
||||
|
||||
fn sliceContainsPtr(container: []u8, ptr: [*]u8) bool {
|
||||
return @ptrToInt(ptr) >= @ptrToInt(container.ptr) and
|
||||
@ptrToInt(ptr) < (@ptrToInt(container.ptr) + container.len);
|
||||
}
|
||||
|
||||
fn sliceContainsSlice(container: []u8, slice: []u8) bool {
|
||||
return @ptrToInt(slice.ptr) >= @ptrToInt(container.ptr) and
|
||||
(@ptrToInt(slice.ptr) + slice.len) <= (@ptrToInt(container.ptr) + container.len);
|
||||
}
|
||||
|
||||
pub const FixedBufferAllocator = struct {
|
||||
allocator: Allocator,
|
||||
end_index: usize,
|
||||
@ -514,19 +491,33 @@ pub const FixedBufferAllocator = struct {
|
||||
pub fn init(buffer: []u8) FixedBufferAllocator {
|
||||
return FixedBufferAllocator{
|
||||
.allocator = Allocator{
|
||||
.reallocFn = realloc,
|
||||
.shrinkFn = shrink,
|
||||
.allocFn = alloc,
|
||||
.resizeFn = resize,
|
||||
},
|
||||
.buffer = buffer,
|
||||
.end_index = 0,
|
||||
};
|
||||
}
|
||||
|
||||
fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
|
||||
pub fn ownsPtr(self: *FixedBufferAllocator, ptr: [*]u8) bool {
|
||||
return sliceContainsPtr(self.buffer, ptr);
|
||||
}
|
||||
|
||||
pub fn ownsSlice(self: *FixedBufferAllocator, slice: []u8) bool {
|
||||
return sliceContainsSlice(self.buffer, slice);
|
||||
}
|
||||
|
||||
// NOTE: this will not work in all cases, if the last allocation had an adjusted_index
|
||||
// then we won't be able to determine what the last allocation was. This is because
|
||||
// the alignForward operation done in alloc is not reverisible.
|
||||
pub fn isLastAllocation(self: *FixedBufferAllocator, buf: []u8) bool {
|
||||
return buf.ptr + buf.len == self.buffer.ptr + self.end_index;
|
||||
}
|
||||
|
||||
fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29) ![]u8 {
|
||||
const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
|
||||
const addr = @ptrToInt(self.buffer.ptr) + self.end_index;
|
||||
const adjusted_addr = mem.alignForward(addr, alignment);
|
||||
const adjusted_index = self.end_index + (adjusted_addr - addr);
|
||||
const aligned_addr = mem.alignForward(@ptrToInt(self.buffer.ptr) + self.end_index, ptr_align);
|
||||
const adjusted_index = aligned_addr - @ptrToInt(self.buffer.ptr);
|
||||
const new_end_index = adjusted_index + n;
|
||||
if (new_end_index > self.buffer.len) {
|
||||
return error.OutOfMemory;
|
||||
@ -534,33 +525,32 @@ pub const FixedBufferAllocator = struct {
|
||||
const result = self.buffer[adjusted_index..new_end_index];
|
||||
self.end_index = new_end_index;
|
||||
|
||||
return result;
|
||||
return result[0..mem.alignAllocLen(result.len, n, len_align)];
|
||||
}
|
||||
|
||||
fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
|
||||
fn resize(allocator: *Allocator, buf: []u8, new_size: usize, len_align: u29) Allocator.Error!usize {
|
||||
const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
|
||||
assert(old_mem.len <= self.end_index);
|
||||
if (old_mem.ptr == self.buffer.ptr + self.end_index - old_mem.len and
|
||||
mem.alignForward(@ptrToInt(old_mem.ptr), new_align) == @ptrToInt(old_mem.ptr))
|
||||
{
|
||||
const start_index = self.end_index - old_mem.len;
|
||||
const new_end_index = start_index + new_size;
|
||||
if (new_end_index > self.buffer.len) return error.OutOfMemory;
|
||||
const result = self.buffer[start_index..new_end_index];
|
||||
self.end_index = new_end_index;
|
||||
return result;
|
||||
} else if (new_size <= old_mem.len and new_align <= old_align) {
|
||||
// We can't do anything with the memory, so tell the client to keep it.
|
||||
return error.OutOfMemory;
|
||||
} else {
|
||||
const result = try alloc(allocator, new_size, new_align);
|
||||
@memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len));
|
||||
return result;
|
||||
}
|
||||
}
|
||||
assert(self.ownsSlice(buf)); // sanity check
|
||||
|
||||
fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
|
||||
return old_mem[0..new_size];
|
||||
if (!self.isLastAllocation(buf)) {
|
||||
if (new_size > buf.len)
|
||||
return error.OutOfMemory;
|
||||
return if (new_size == 0) 0 else mem.alignAllocLen(buf.len, new_size, len_align);
|
||||
}
|
||||
|
||||
if (new_size <= buf.len) {
|
||||
const sub = buf.len - new_size;
|
||||
self.end_index -= sub;
|
||||
return if (new_size == 0) 0 else mem.alignAllocLen(buf.len - sub, new_size, len_align);
|
||||
}
|
||||
|
||||
var add = new_size - buf.len;
|
||||
if (add + self.end_index > self.buffer.len) {
|
||||
//add = self.buffer.len - self.end_index;
|
||||
return error.OutOfMemory;
|
||||
}
|
||||
self.end_index += add;
|
||||
return mem.alignAllocLen(buf.len + add, new_size, len_align);
|
||||
}
|
||||
|
||||
pub fn reset(self: *FixedBufferAllocator) void {
|
||||
@ -581,20 +571,20 @@ pub const ThreadSafeFixedBufferAllocator = blk: {
|
||||
pub fn init(buffer: []u8) ThreadSafeFixedBufferAllocator {
|
||||
return ThreadSafeFixedBufferAllocator{
|
||||
.allocator = Allocator{
|
||||
.reallocFn = realloc,
|
||||
.shrinkFn = shrink,
|
||||
.allocFn = alloc,
|
||||
.resizeFn = Allocator.noResize,
|
||||
},
|
||||
.buffer = buffer,
|
||||
.end_index = 0,
|
||||
};
|
||||
}
|
||||
|
||||
fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
|
||||
fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29) ![]u8 {
|
||||
const self = @fieldParentPtr(ThreadSafeFixedBufferAllocator, "allocator", allocator);
|
||||
var end_index = @atomicLoad(usize, &self.end_index, builtin.AtomicOrder.SeqCst);
|
||||
while (true) {
|
||||
const addr = @ptrToInt(self.buffer.ptr) + end_index;
|
||||
const adjusted_addr = mem.alignForward(addr, alignment);
|
||||
const adjusted_addr = mem.alignForward(addr, ptr_align);
|
||||
const adjusted_index = end_index + (adjusted_addr - addr);
|
||||
const new_end_index = adjusted_index + n;
|
||||
if (new_end_index > self.buffer.len) {
|
||||
@ -604,21 +594,6 @@ pub const ThreadSafeFixedBufferAllocator = blk: {
|
||||
}
|
||||
}
|
||||
|
||||
fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
|
||||
if (new_size <= old_mem.len and new_align <= old_align) {
|
||||
// We can't do anything useful with the memory, tell the client to keep it.
|
||||
return error.OutOfMemory;
|
||||
} else {
|
||||
const result = try alloc(allocator, new_size, new_align);
|
||||
@memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len));
|
||||
return result;
|
||||
}
|
||||
}
|
||||
|
||||
fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
|
||||
return old_mem[0..new_size];
|
||||
}
|
||||
|
||||
pub fn reset(self: *ThreadSafeFixedBufferAllocator) void {
|
||||
self.end_index = 0;
|
||||
}
|
||||
@ -632,8 +607,8 @@ pub fn stackFallback(comptime size: usize, fallback_allocator: *Allocator) Stack
|
||||
.fallback_allocator = fallback_allocator,
|
||||
.fixed_buffer_allocator = undefined,
|
||||
.allocator = Allocator{
|
||||
.reallocFn = StackFallbackAllocator(size).realloc,
|
||||
.shrinkFn = StackFallbackAllocator(size).shrink,
|
||||
.allocFn = StackFallbackAllocator(size).realloc,
|
||||
.resizeFn = StackFallbackAllocator(size).resize,
|
||||
},
|
||||
};
|
||||
}
|
||||
@ -652,58 +627,19 @@ pub fn StackFallbackAllocator(comptime size: usize) type {
|
||||
return &self.allocator;
|
||||
}
|
||||
|
||||
fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
|
||||
fn alloc(allocator: *Allocator, len: usize, ptr_align: u29, len_align: u29) error{OutOfMemory}![*]u8 {
|
||||
const self = @fieldParentPtr(Self, "allocator", allocator);
|
||||
const in_buffer = @ptrToInt(old_mem.ptr) >= @ptrToInt(&self.buffer) and
|
||||
@ptrToInt(old_mem.ptr) < @ptrToInt(&self.buffer) + self.buffer.len;
|
||||
if (in_buffer) {
|
||||
return FixedBufferAllocator.realloc(
|
||||
&self.fixed_buffer_allocator.allocator,
|
||||
old_mem,
|
||||
old_align,
|
||||
new_size,
|
||||
new_align,
|
||||
) catch {
|
||||
const result = try self.fallback_allocator.reallocFn(
|
||||
self.fallback_allocator,
|
||||
&[0]u8{},
|
||||
undefined,
|
||||
new_size,
|
||||
new_align,
|
||||
);
|
||||
mem.copy(u8, result, old_mem);
|
||||
return result;
|
||||
};
|
||||
}
|
||||
return self.fallback_allocator.reallocFn(
|
||||
self.fallback_allocator,
|
||||
old_mem,
|
||||
old_align,
|
||||
new_size,
|
||||
new_align,
|
||||
);
|
||||
return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator, len, ptr_align) catch
|
||||
return fallback_allocator.alloc(len, ptr_align);
|
||||
}
|
||||
|
||||
fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
|
||||
fn resize(self: *Allocator, buf: []u8, new_len: usize, len_align: u29) error{OutOfMemory}!void {
|
||||
const self = @fieldParentPtr(Self, "allocator", allocator);
|
||||
const in_buffer = @ptrToInt(old_mem.ptr) >= @ptrToInt(&self.buffer) and
|
||||
@ptrToInt(old_mem.ptr) < @ptrToInt(&self.buffer) + self.buffer.len;
|
||||
if (in_buffer) {
|
||||
return FixedBufferAllocator.shrink(
|
||||
&self.fixed_buffer_allocator.allocator,
|
||||
old_mem,
|
||||
old_align,
|
||||
new_size,
|
||||
new_align,
|
||||
);
|
||||
if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) {
|
||||
try self.fixed_buffer_allocator.callResizeFn(buf, new_len);
|
||||
} else {
|
||||
try self.fallback_allocator.callResizeFn(buf, new_len);
|
||||
}
|
||||
return self.fallback_allocator.shrinkFn(
|
||||
self.fallback_allocator,
|
||||
old_mem,
|
||||
old_align,
|
||||
new_size,
|
||||
new_align,
|
||||
);
|
||||
}
|
||||
};
|
||||
}
|
||||
@ -718,8 +654,8 @@ test "c_allocator" {
|
||||
|
||||
test "WasmPageAllocator internals" {
|
||||
if (comptime std.Target.current.isWasm()) {
|
||||
const conventional_memsize = WasmPageAllocator.conventional.totalPages() * std.mem.page_size;
|
||||
const initial = try page_allocator.alloc(u8, std.mem.page_size);
|
||||
const conventional_memsize = WasmPageAllocator.conventional.totalPages() * mem.page_size;
|
||||
const initial = try page_allocator.alloc(u8, mem.page_size);
|
||||
std.debug.assert(@ptrToInt(initial.ptr) < conventional_memsize); // If this isn't conventional, the rest of these tests don't make sense. Also we have a serious memory leak in the test suite.
|
||||
|
||||
var inplace = try page_allocator.realloc(initial, 1);
|
||||
@ -799,7 +735,7 @@ test "ArenaAllocator" {
|
||||
|
||||
var test_fixed_buffer_allocator_memory: [800000 * @sizeOf(u64)]u8 = undefined;
|
||||
test "FixedBufferAllocator" {
|
||||
var fixed_buffer_allocator = FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]);
|
||||
var fixed_buffer_allocator = mem.sanityWrap(FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]));
|
||||
|
||||
try testAllocator(&fixed_buffer_allocator.allocator);
|
||||
try testAllocatorAligned(&fixed_buffer_allocator.allocator, 16);
|
||||
@ -865,7 +801,10 @@ test "ThreadSafeFixedBufferAllocator" {
|
||||
try testAllocatorAlignedShrink(&fixed_buffer_allocator.allocator);
|
||||
}
|
||||
|
||||
fn testAllocator(allocator: *mem.Allocator) !void {
|
||||
fn testAllocator(base_allocator: *mem.Allocator) !void {
|
||||
var sanityAllocator = mem.sanityWrap(base_allocator);
|
||||
const allocator = &sanityAllocator.allocator;
|
||||
|
||||
var slice = try allocator.alloc(*i32, 100);
|
||||
testing.expect(slice.len == 100);
|
||||
for (slice) |*item, i| {
|
||||
@ -893,7 +832,10 @@ fn testAllocator(allocator: *mem.Allocator) !void {
|
||||
allocator.free(slice);
|
||||
}
|
||||
|
||||
fn testAllocatorAligned(allocator: *mem.Allocator, comptime alignment: u29) !void {
|
||||
fn testAllocatorAligned(base_allocator: *mem.Allocator, comptime alignment: u29) !void {
|
||||
var sanityAllocator = mem.sanityWrap(base_allocator);
|
||||
const allocator = &sanityAllocator.allocator;
|
||||
|
||||
// initial
|
||||
var slice = try allocator.alignedAlloc(u8, alignment, 10);
|
||||
testing.expect(slice.len == 10);
|
||||
@ -917,7 +859,10 @@ fn testAllocatorAligned(allocator: *mem.Allocator, comptime alignment: u29) !voi
|
||||
testing.expect(slice.len == 0);
|
||||
}
|
||||
|
||||
fn testAllocatorLargeAlignment(allocator: *mem.Allocator) mem.Allocator.Error!void {
|
||||
fn testAllocatorLargeAlignment(base_allocator: *mem.Allocator) mem.Allocator.Error!void {
|
||||
var sanityAllocator = mem.sanityWrap(base_allocator);
|
||||
const allocator = &sanityAllocator.allocator;
|
||||
|
||||
//Maybe a platform's page_size is actually the same as or
|
||||
// very near usize?
|
||||
if (mem.page_size << 2 > maxInt(usize)) return;
|
||||
@ -946,7 +891,10 @@ fn testAllocatorLargeAlignment(allocator: *mem.Allocator) mem.Allocator.Error!vo
|
||||
allocator.free(slice);
|
||||
}
|
||||
|
||||
fn testAllocatorAlignedShrink(allocator: *mem.Allocator) mem.Allocator.Error!void {
|
||||
fn testAllocatorAlignedShrink(base_allocator: *mem.Allocator) mem.Allocator.Error!void {
|
||||
var sanityAllocator = mem.sanityWrap(base_allocator);
|
||||
const allocator = &sanityAllocator.allocator;
|
||||
|
||||
var debug_buffer: [1000]u8 = undefined;
|
||||
const debug_allocator = &FixedBufferAllocator.init(&debug_buffer).allocator;
|
||||
|
||||
|
||||
@ -20,8 +20,8 @@ pub const ArenaAllocator = struct {
|
||||
pub fn promote(self: State, child_allocator: *Allocator) ArenaAllocator {
|
||||
return .{
|
||||
.allocator = Allocator{
|
||||
.reallocFn = realloc,
|
||||
.shrinkFn = shrink,
|
||||
.allocFn = alloc,
|
||||
.resizeFn = Allocator.noResize,
|
||||
},
|
||||
.child_allocator = child_allocator,
|
||||
.state = self,
|
||||
@ -61,38 +61,23 @@ pub const ArenaAllocator = struct {
|
||||
return buf_node;
|
||||
}
|
||||
|
||||
fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
|
||||
fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29) ![]u8 {
|
||||
const self = @fieldParentPtr(ArenaAllocator, "allocator", allocator);
|
||||
|
||||
var cur_node = if (self.state.buffer_list.first) |first_node| first_node else try self.createNode(0, n + alignment);
|
||||
var cur_node = if (self.state.buffer_list.first) |first_node| first_node else try self.createNode(0, n + ptr_align);
|
||||
while (true) {
|
||||
const cur_buf = cur_node.data[@sizeOf(BufNode)..];
|
||||
const addr = @ptrToInt(cur_buf.ptr) + self.state.end_index;
|
||||
const adjusted_addr = mem.alignForward(addr, alignment);
|
||||
const adjusted_addr = mem.alignForward(addr, ptr_align);
|
||||
const adjusted_index = self.state.end_index + (adjusted_addr - addr);
|
||||
const new_end_index = adjusted_index + n;
|
||||
if (new_end_index > cur_buf.len) {
|
||||
cur_node = try self.createNode(cur_buf.len, n + alignment);
|
||||
cur_node = try self.createNode(cur_buf.len, n + ptr_align);
|
||||
continue;
|
||||
}
|
||||
const result = cur_buf[adjusted_index..new_end_index];
|
||||
self.state.end_index = new_end_index;
|
||||
return result;
|
||||
return result[0..mem.alignAllocLen(result.len, n, len_align)];
|
||||
}
|
||||
}
|
||||
|
||||
fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
|
||||
if (new_size <= old_mem.len and new_align <= new_size) {
|
||||
// We can't do anything with the memory, so tell the client to keep it.
|
||||
return error.OutOfMemory;
|
||||
} else {
|
||||
const result = try alloc(allocator, new_size, new_align);
|
||||
@memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len));
|
||||
return result;
|
||||
}
|
||||
}
|
||||
|
||||
fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
|
||||
return old_mem[0..new_size];
|
||||
}
|
||||
};
|
||||
|
||||
@ -15,39 +15,45 @@ pub fn LoggingAllocator(comptime OutStreamType: type) type {
|
||||
pub fn init(parent_allocator: *Allocator, out_stream: OutStreamType) Self {
|
||||
return Self{
|
||||
.allocator = Allocator{
|
||||
.reallocFn = realloc,
|
||||
.shrinkFn = shrink,
|
||||
.allocFn = alloc,
|
||||
.resizeFn = resize,
|
||||
},
|
||||
.parent_allocator = parent_allocator,
|
||||
.out_stream = out_stream,
|
||||
};
|
||||
}
|
||||
|
||||
fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
|
||||
fn alloc(allocator: *Allocator, len: usize, ptr_align: u29, len_align: u29) error{OutOfMemory}![]u8 {
|
||||
const self = @fieldParentPtr(Self, "allocator", allocator);
|
||||
if (old_mem.len == 0) {
|
||||
self.out_stream.print("allocation of {} ", .{new_size}) catch {};
|
||||
} else {
|
||||
self.out_stream.print("resize from {} to {} ", .{ old_mem.len, new_size }) catch {};
|
||||
}
|
||||
const result = self.parent_allocator.reallocFn(self.parent_allocator, old_mem, old_align, new_size, new_align);
|
||||
self.out_stream.print("alloc : {}", .{len}) catch {};
|
||||
const result = self.parent_allocator.callAllocFn(len, ptr_align, len_align);
|
||||
if (result) |buff| {
|
||||
self.out_stream.print("success!\n", .{}) catch {};
|
||||
self.out_stream.print(" success!\n", .{}) catch {};
|
||||
} else |err| {
|
||||
self.out_stream.print("failure!\n", .{}) catch {};
|
||||
self.out_stream.print(" failure!\n", .{}) catch {};
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
|
||||
fn resize(allocator: *Allocator, buf: []u8, new_len: usize, len_align: u29) error{OutOfMemory}!usize {
|
||||
const self = @fieldParentPtr(Self, "allocator", allocator);
|
||||
const result = self.parent_allocator.shrinkFn(self.parent_allocator, old_mem, old_align, new_size, new_align);
|
||||
if (new_size == 0) {
|
||||
self.out_stream.print("free of {} bytes success!\n", .{old_mem.len}) catch {};
|
||||
if (new_len == 0) {
|
||||
self.out_stream.print("free : {}\n", .{buf.len}) catch {};
|
||||
} else if (new_len <= buf.len) {
|
||||
self.out_stream.print("shrink: {} to {}\n", .{buf.len, new_len}) catch {};
|
||||
} else {
|
||||
self.out_stream.print("shrink from {} bytes to {} bytes success!\n", .{ old_mem.len, new_size }) catch {};
|
||||
self.out_stream.print("expand: {} to {}", .{ buf.len, new_len }) catch {};
|
||||
}
|
||||
if (self.parent_allocator.callResizeFn(buf, new_len, len_align)) |resized_len| {
|
||||
if (new_len > buf.len) {
|
||||
self.out_stream.print(" success!\n", .{}) catch {};
|
||||
}
|
||||
return resized_len;
|
||||
} else |e| {
|
||||
std.debug.assert(new_len > buf.len);
|
||||
self.out_stream.print(" failure!\n", .{}) catch {};
|
||||
return e;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
};
|
||||
}
|
||||
@ -60,17 +66,24 @@ pub fn loggingAllocator(
|
||||
}
|
||||
|
||||
test "LoggingAllocator" {
|
||||
var buf: [255]u8 = undefined;
|
||||
var fbs = std.io.fixedBufferStream(&buf);
|
||||
var log_buf: [255]u8 = undefined;
|
||||
var fbs = std.io.fixedBufferStream(&log_buf);
|
||||
|
||||
const allocator = &loggingAllocator(std.testing.allocator, fbs.outStream()).allocator;
|
||||
var allocator_buf: [10]u8 = undefined;
|
||||
var fixedBufferAllocator = std.mem.sanityWrap(std.heap.FixedBufferAllocator.init(&allocator_buf));
|
||||
const allocator = &loggingAllocator(&fixedBufferAllocator.allocator, fbs.outStream()).allocator;
|
||||
|
||||
const ptr = try allocator.alloc(u8, 10);
|
||||
allocator.free(ptr);
|
||||
var a = try allocator.alloc(u8, 10);
|
||||
a.len = allocator.shrinkBytes(a, 5, 0);
|
||||
std.debug.assert(a.len == 5);
|
||||
std.testing.expectError(error.OutOfMemory, allocator.callResizeFn(a, 20, 0));
|
||||
allocator.free(a);
|
||||
|
||||
std.testing.expectEqualSlices(u8,
|
||||
\\allocation of 10 success!
|
||||
\\free of 10 bytes success!
|
||||
\\alloc : 10 success!
|
||||
\\shrink: 10 to 5
|
||||
\\expand: 5 to 20 failure!
|
||||
\\free : 5
|
||||
\\
|
||||
, fbs.getWritten());
|
||||
}
|
||||
|
||||
323 lib/std/mem.zig
@@ -16,6 +16,52 @@ pub const page_size = switch (builtin.arch) {
pub const Allocator = struct {
    pub const Error = error{OutOfMemory};

    /// Attempt to allocate at least `len` bytes aligned to `ptr_align`.
    ///
    /// If `len_align` is `0`, then the length returned MUST be exactly `len` bytes,
    /// otherwise, the length must be aligned to `len_align`.
    ///
    /// `len` must be greater than or equal to `len_align` and must be aligned by `len_align`.
    allocFn: fn (self: *Allocator, len: usize, ptr_align: u29, len_align: u29) Error![]u8,

    /// Attempt to expand or shrink memory in place. `buf.len` must equal the most recent
    /// length returned by `allocFn` or `resizeFn`.
    ///
    /// Passing a `new_len` of 0 frees and invalidates the buffer such that it can no
    /// longer be passed to `resizeFn`.
    ///
    /// error.OutOfMemory can only be returned if `new_len` is greater than `buf.len`.
    /// If `buf` cannot be expanded to accomodate `new_len`, then the allocation MUST be
    /// unmodified and error.OutOfMemory MUST be returned.
    ///
    /// If `len_align` is `0`, then the length returned MUST be exactly `len` bytes,
    /// otherwise, the length must be aligned to `len_align`.
    ///
    /// `new_len` must be greater than or equal to `len_align` and must be aligned by `len_align`.
    resizeFn: fn (self: *Allocator, buf: []u8, new_len: usize, len_align: u29) Error!usize,

    pub fn callAllocFn(self: *Allocator, new_len: usize, alignment: u29, len_align: u29) Error![]u8 {
        return self.allocFn(self, new_len, alignment, len_align);
    }

    pub fn callResizeFn(self: *Allocator, buf: []u8, new_len: usize, len_align: u29) Error!usize {
        return self.resizeFn(self, buf, new_len, len_align);
    }

    /// Set to resizeFn if in-place resize is not supported.
    pub fn noResize(self: *Allocator, buf: []u8, new_len: usize, len_align: u29) Error!usize {
        if (new_len > buf.len)
            return error.OutOfMemory;
        return new_len;
    }

    /// Call `resizeFn`, but caller guarantees that `new_len` <= `buf.len` meaning
    /// error.OutOfMemory should be impossible.
    pub fn shrinkBytes(self: *Allocator, buf: []u8, new_len: usize, len_align: u29) usize {
        assert(new_len <= buf.len);
        return self.callResizeFn(buf, new_len, len_align) catch unreachable;
    }

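Taken together, `allocFn`, `resizeFn`, and the `callAllocFn`/`callResizeFn`/`shrinkBytes` helpers above are the whole implementor-facing surface of the new interface. A minimal hypothetical implementation sketch (not part of the commit) that backs the vtable with a static buffer and declines in-place resizing via `Allocator.noResize`:

const std = @import("std");
const Allocator = std.mem.Allocator;

// Hypothetical bump allocator over a fixed buffer, shown only to illustrate
// the new interface shape; names and sizes here are illustrative.
var buffer: [64 * 1024]u8 = undefined;
var end_index: usize = 0;

var bump_allocator = Allocator{
    .allocFn = bumpAlloc,
    .resizeFn = Allocator.noResize, // in-place growth not supported
};

fn bumpAlloc(self: *Allocator, len: usize, ptr_align: u29, len_align: u29) Allocator.Error![]u8 {
    // Align the bump pointer to ptr_align, then carve off exactly `len` bytes.
    const addr = @ptrToInt(&buffer) + end_index;
    const aligned_addr = std.mem.alignForward(addr, ptr_align);
    const start = aligned_addr - @ptrToInt(&buffer);
    if (start + len > buffer.len) return error.OutOfMemory;
    end_index = start + len;
    // With len_align == 0 the caller requires exactly `len` bytes; a nonzero
    // len_align would permit returning a longer length aligned to it.
    return buffer[start..end_index];
}

test "hypothetical bump allocator satisfies the new interface" {
    const slice = try bump_allocator.callAllocFn(16, 8, 0);
    std.debug.assert(slice.len == 16);
}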
/// Realloc is used to modify the size or alignment of an existing allocation,
|
||||
/// as well as to provide the allocator with an opportunity to move an allocation
|
||||
/// to a better location.
|
||||
@ -24,7 +70,7 @@ pub const Allocator = struct {
|
||||
/// When the size/alignment is less than or equal to the previous allocation,
|
||||
/// this function returns `error.OutOfMemory` when the allocator decides the client
|
||||
/// would be better off keeping the extra alignment/size. Clients will call
|
||||
/// `shrinkFn` when they require the allocator to track a new alignment/size,
|
||||
/// `callResizeFn` when they require the allocator to track a new alignment/size,
|
||||
/// and so this function should only return success when the allocator considers
|
||||
/// the reallocation desirable from the allocator's perspective.
|
||||
/// As an example, `std.ArrayList` tracks a "capacity", and therefore can handle
|
||||
@ -37,16 +83,15 @@ pub const Allocator = struct {
|
||||
/// as `old_mem` was when `reallocFn` is called. The bytes of
|
||||
/// `return_value[old_mem.len..]` have undefined values.
|
||||
/// The returned slice must have its pointer aligned at least to `new_alignment` bytes.
|
||||
reallocFn: fn (
|
||||
fn reallocBytes(
|
||||
self: *Allocator,
|
||||
/// Guaranteed to be the same as what was returned from most recent call to
|
||||
/// `reallocFn` or `shrinkFn`.
|
||||
/// `allocFn` or `resizeFn`.
|
||||
/// If `old_mem.len == 0` then this is a new allocation and `new_byte_count`
|
||||
/// is guaranteed to be >= 1.
|
||||
old_mem: []u8,
|
||||
/// If `old_mem.len == 0` then this is `undefined`, otherwise:
|
||||
/// Guaranteed to be the same as what was returned from most recent call to
|
||||
/// `reallocFn` or `shrinkFn`.
|
||||
/// Guaranteed to be the same as what was passed to `allocFn`.
|
||||
/// Guaranteed to be >= 1.
|
||||
/// Guaranteed to be a power of 2.
|
||||
old_alignment: u29,
|
||||
@ -57,23 +102,49 @@ pub const Allocator = struct {
|
||||
/// Guaranteed to be a power of 2.
|
||||
/// Returned slice's pointer must have this alignment.
|
||||
new_alignment: u29,
|
||||
) Error![]u8,
|
||||
/// 0 indicates the length of the slice returned MUST match `new_byte_count` exactly
|
||||
/// non-zero means the length of the returned slice must be aligned by `len_align`
|
||||
/// `new_len` must be aligned by `len_align`
|
||||
len_align: u29,
|
||||
) Error![]u8 {
|
||||
if (old_mem.len == 0) {
|
||||
const new_mem = try self.callAllocFn(new_byte_count, new_alignment, len_align);
|
||||
@memset(new_mem.ptr, undefined, new_byte_count);
|
||||
return new_mem;
|
||||
}
|
||||
|
||||
/// This function deallocates memory. It must succeed.
|
||||
shrinkFn: fn (
|
||||
self: *Allocator,
|
||||
/// Guaranteed to be the same as what was returned from most recent call to
|
||||
/// `reallocFn` or `shrinkFn`.
|
||||
old_mem: []u8,
|
||||
/// Guaranteed to be the same as what was returned from most recent call to
|
||||
/// `reallocFn` or `shrinkFn`.
|
||||
old_alignment: u29,
|
||||
/// Guaranteed to be less than or equal to `old_mem.len`.
|
||||
new_byte_count: usize,
|
||||
/// If `new_byte_count == 0` then this is `undefined`, otherwise:
|
||||
/// Guaranteed to be less than or equal to `old_alignment`.
|
||||
new_alignment: u29,
|
||||
) []u8,
|
||||
if (isAligned(@ptrToInt(old_mem.ptr), new_alignment)) {
|
||||
if (new_byte_count <= old_mem.len) {
|
||||
const shrunk_len = self.shrinkBytes(old_mem, new_byte_count, len_align);
|
||||
if (shrunk_len < old_mem.len) {
|
||||
@memset(old_mem.ptr + shrunk_len, undefined, old_mem.len - shrunk_len);
|
||||
}
|
||||
return old_mem.ptr[0..shrunk_len];
|
||||
}
|
||||
if (self.callResizeFn(old_mem, new_byte_count, len_align)) |resized_len| {
|
||||
assert(resized_len >= new_byte_count);
|
||||
@memset(old_mem.ptr + new_byte_count, undefined, resized_len - new_byte_count);
|
||||
return old_mem.ptr[0..resized_len];
|
||||
} else |_| { }
|
||||
}
|
||||
if (new_byte_count <= old_mem.len and new_alignment <= old_alignment) {
|
||||
return error.OutOfMemory;
|
||||
}
|
||||
return self.moveBytes(old_mem, new_byte_count, new_alignment, len_align);
|
||||
}
|
||||
|
||||
/// Move the given memory to a new location in the given allocator to accomodate a new
|
||||
/// size and alignment.
|
||||
fn moveBytes(self: *Allocator, old_mem: []u8, new_len: usize, new_alignment: u29, len_align: u29) Error![]u8 {
|
||||
assert(old_mem.len > 0);
|
||||
assert(new_len > 0);
|
||||
const new_mem = try self.callAllocFn(new_len, new_alignment, len_align);
|
||||
@memcpy(new_mem.ptr, old_mem.ptr, std.math.min(new_len, old_mem.len));
|
||||
// DISABLED TO AVOID BUGS IN TRANSLATE C
|
||||
//@memset(old_mem.ptr, undefined, old_mem.len);
|
||||
_ = self.shrinkBytes(old_mem, 0, 0);
|
||||
return new_mem;
|
||||
}
|
||||
|
||||
/// Returns a pointer to undefined memory.
/// Call `destroy` with the result to free the memory.
@ -89,8 +160,7 @@ pub const Allocator = struct {
const T = @TypeOf(ptr).Child;
if (@sizeOf(T) == 0) return;
const non_const_ptr = @intToPtr([*]u8, @ptrToInt(ptr));
const shrink_result = self.shrinkFn(self, non_const_ptr[0..@sizeOf(T)], @alignOf(T), 0, 1);
assert(shrink_result.len == 0);
_ = self.shrinkBytes(non_const_ptr[0..@sizeOf(T)], 0, 0);
}

/// Allocates an array of `n` items of type `T` and sets all the
@ -150,9 +220,21 @@ pub const Allocator = struct {
/// null means naturally aligned
comptime alignment: ?u29,
n: usize,
) Error![]align(alignment orelse @alignOf(T)) T {
return self.alignedAlloc2(T, alignment, n, .exact);
}

const Exact = enum {exact,atLeast};
pub fn alignedAlloc2(
self: *Allocator,
comptime T: type,
/// null means naturally aligned
comptime alignment: ?u29,
n: usize,
exact: Exact,
) Error![]align(alignment orelse @alignOf(T)) T {
const a = if (alignment) |a| blk: {
if (a == @alignOf(T)) return alignedAlloc(self, T, null, n);
if (a == @alignOf(T)) return alignedAlloc2(self, T, null, n, exact);
break :blk a;
} else @alignOf(T);

@ -161,15 +243,16 @@ pub const Allocator = struct {
}

const byte_count = math.mul(usize, @sizeOf(T), n) catch return Error.OutOfMemory;
const byte_slice = try self.reallocFn(self, &[0]u8{}, undefined, byte_count, a);
assert(byte_slice.len == byte_count);
// TODO The `if (alignment == null)` blocks are workarounds for zig not being able to
// access certain type information about T without creating a circular dependency in async
// functions that heap-allocate their own frame with @Frame(func).
const sizeOfT = if (alignment == null) @intCast(u29, @divExact(byte_count, n)) else @sizeOf(T);
const byte_slice = try self.callAllocFn(byte_count, a, if (exact == .exact) @as(u29, 0) else sizeOfT);
assert(if (exact == .exact) byte_slice.len == byte_count else byte_slice.len >= byte_count);
@memset(byte_slice.ptr, undefined, byte_slice.len);
if (alignment == null) {
// TODO This is a workaround for zig not being able to successfully do
// @bytesToSlice(T, @alignCast(a, byte_slice)) without resolving alignment of T,
// which causes a circular dependency in async functions which try to heap-allocate
// their own frame with @Frame(func).
return @intToPtr([*]T, @ptrToInt(byte_slice.ptr))[0..n];
// This if block is a workaround (see comment above)
return @intToPtr([*]T, @ptrToInt(byte_slice.ptr))[0..@divExact(byte_slice.len, @sizeOf(T))];
} else {
return mem.bytesAsSlice(T, @alignCast(a, byte_slice));
}
@ -190,7 +273,15 @@ pub const Allocator = struct {
break :t Error![]align(Slice.alignment) Slice.child;
} {
const old_alignment = @typeInfo(@TypeOf(old_mem)).Pointer.alignment;
return self.alignedRealloc(old_mem, old_alignment, new_n);
return self.alignedRealloc2(old_mem, old_alignment, new_n, .exact);
}

pub fn reallocAtLeast(self: *Allocator, old_mem: var, new_n: usize) t: {
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
break :t Error![]align(Slice.alignment) Slice.child;
} {
const old_alignment = @typeInfo(@TypeOf(old_mem)).Pointer.alignment;
return self.alignedRealloc2(old_mem, old_alignment, new_n, .atLeast);
}

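// Editor's note (not part of this commit): a sketch of how the two variants above differ
// for a caller. Both may move the allocation; only the guarantee on the returned length
// changes. Assumes `allocator: *Allocator` and `buf: []u8` are in scope.
//
//     buf = try allocator.realloc(buf, 1000); // exact: buf.len == 1000
//     buf = try allocator.reallocAtLeast(buf, 1000); // buf.len >= 1000; the extra
//     // bytes, if any, are usable capacity for the caller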
/// This is the same as `realloc`, except caller may additionally request
@ -201,11 +292,24 @@ pub const Allocator = struct {
old_mem: var,
comptime new_alignment: u29,
new_n: usize,
) Error![]align(new_alignment) @typeInfo(@TypeOf(old_mem)).Pointer.child {
return self.alignedRealloc2(old_mem, new_alignment, new_n, .exact);
}

/// This is the same as `realloc`, except caller may additionally request
/// a new alignment, which can be larger, smaller, or the same as the old
/// allocation.
pub fn alignedRealloc2(
self: *Allocator,
old_mem: var,
comptime new_alignment: u29,
new_n: usize,
exact: Exact,
) Error![]align(new_alignment) @typeInfo(@TypeOf(old_mem)).Pointer.child {
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
const T = Slice.child;
if (old_mem.len == 0) {
return self.alignedAlloc(T, new_alignment, new_n);
return self.alignedAlloc2(T, new_alignment, new_n, exact);
}
if (new_n == 0) {
self.free(old_mem);
@ -215,12 +319,9 @@ pub const Allocator = struct {
const old_byte_slice = mem.sliceAsBytes(old_mem);
const byte_count = math.mul(usize, @sizeOf(T), new_n) catch return Error.OutOfMemory;
// Note: can't set shrunk memory to undefined as memory shouldn't be modified on realloc failure
const byte_slice = try self.reallocFn(self, old_byte_slice, Slice.alignment, byte_count, new_alignment);
assert(byte_slice.len == byte_count);
if (new_n > old_mem.len) {
@memset(byte_slice.ptr + old_byte_slice.len, undefined, byte_slice.len - old_byte_slice.len);
}
return mem.bytesAsSlice(T, @alignCast(new_alignment, byte_slice));
const new_byte_slice = try self.reallocBytes(old_byte_slice, Slice.alignment, byte_count, new_alignment,
if (exact == .exact) @as(u29, 0) else @sizeOf(T));
return mem.bytesAsSlice(T, @alignCast(new_alignment, new_byte_slice));
}

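// Editor's note (not part of this commit): sketch of the `Exact` parameter threaded through
// `alignedAlloc2`/`alignedRealloc2` above. `.exact` keeps the historical "length == requested"
// contract, `.atLeast` forwards a nonzero `len_align` so the returned slice may be longer.
// Assumes `allocator: *Allocator` is in scope.
//
//     const exact_slice = try allocator.alignedAlloc2(u64, 16, 10, .exact); // len == 10
//     const loose_slice = try allocator.alignedAlloc2(u64, 16, 10, .atLeast); // len >= 10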
/// Prefer calling realloc to shrink if you can tolerate failure, such as
@ -248,12 +349,9 @@ pub const Allocator = struct {
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
const T = Slice.child;

if (new_n == 0) {
self.free(old_mem);
return old_mem[0..0];
}

assert(new_n <= old_mem.len);
if (new_n == old_mem.len)
return old_mem;
assert(new_n < old_mem.len);
assert(new_alignment <= Slice.alignment);

// Here we skip the overflow checking on the multiplication because
@ -262,9 +360,8 @@ pub const Allocator = struct {

const old_byte_slice = mem.sliceAsBytes(old_mem);
@memset(old_byte_slice.ptr + byte_count, undefined, old_byte_slice.len - byte_count);
const byte_slice = self.shrinkFn(self, old_byte_slice, Slice.alignment, byte_count, new_alignment);
assert(byte_slice.len == byte_count);
return mem.bytesAsSlice(T, @alignCast(new_alignment, byte_slice));
_ = self.shrinkBytes(old_byte_slice, byte_count, 0);
return old_mem[0..new_n];
}

/// Free an array allocated with `alloc`. To free a single item,
@ -276,8 +373,7 @@ pub const Allocator = struct {
if (bytes_len == 0) return;
const non_const_ptr = @intToPtr([*]u8, @ptrToInt(bytes.ptr));
@memset(non_const_ptr, undefined, bytes_len);
const shrink_result = self.shrinkFn(self, non_const_ptr[0..bytes_len], Slice.alignment, 0, 1);
assert(shrink_result.len == 0);
_ = self.shrinkBytes(non_const_ptr[0..bytes_len], 0, 0);
}

/// Copies `m` to newly allocated memory. Caller owns the memory.
@ -296,15 +392,106 @@ pub const Allocator = struct {
}
};

var failAllocator = Allocator{
.reallocFn = failAllocatorRealloc,
.shrinkFn = failAllocatorShrink,
};
fn failAllocatorRealloc(self: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
return error.OutOfMemory;
/// Given a pointer to an allocator, return the *Allocator for it. `allocatorStatePtr` can
/// either be a `*Allocator`, in which case it is returned as-is, otherwise, the address of
/// the `allocator` field is returned.
pub fn getAllocatorPtr(allocatorStatePtr: var) *Allocator {
// allocator must be a pointer or else this function will return a copy of the allocator which
// is not what this is for
const T = @TypeOf(allocatorStatePtr);
switch (@typeInfo(T)) {
.Pointer => {},
else => @compileError("getAllocatorPtr expects a pointer to an allocator but got: " ++ @typeName(T)),
}
if (T == *Allocator)
return allocatorStatePtr;
return &allocatorStatePtr.allocator;
}
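// Editor's note (not part of this commit): illustrative use of `getAllocatorPtr`. It accepts
// either a `*Allocator` directly or a pointer to any state struct that exposes an `allocator`
// field, which keeps wrapper types interchangeable. FixedBufferAllocator is used here only as
// an example of such a state struct.
test "getAllocatorPtr sketch" {
    var buf: [64]u8 = undefined;
    var fba = std.heap.FixedBufferAllocator.init(&buf);
    const a: *Allocator = getAllocatorPtr(&fba); // pointer to the `allocator` field
    const b: *Allocator = getAllocatorPtr(a); // a *Allocator passes through unchanged
    assert(a == b);
}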
fn failAllocatorShrink(self: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
@panic("failAllocatorShrink should never be called because it cannot allocate");

/// Detects and asserts if the std.mem.Allocator interface is violated
pub fn SanityAllocator(comptime T: type) type { return struct {
const Self = @This();
allocator: Allocator,
underlying_allocator: T,
pub fn init(allocator: T) @This() {
return .{
.allocator = .{
.allocFn = alloc,
.resizeFn = resize,
},
.underlying_allocator = allocator,
};
}
fn getUnderlyingAllocatorPtr(self: *@This()) *Allocator {
if (T == *Allocator) return self.underlying_allocator;
return getAllocatorPtr(&self.underlying_allocator);
}
pub fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29) Allocator.Error![]u8 {
assert(n > 0);
assert(mem.isValidAlign(ptr_align));
if (len_align != 0) {
assert(mem.isAlignedAnyAlign(n, len_align));
assert(n >= len_align);
}

const self = @fieldParentPtr(@This(), "allocator", allocator);
const result = try self.getUnderlyingAllocatorPtr().callAllocFn(n, ptr_align, len_align);
if (len_align == 0) {
assert(result.len == n);
} else {
assert(result.len >= n);
assert(mem.isAlignedAnyAlign(result.len, len_align));
}
return result;
}
pub fn resize(allocator: *Allocator, buf: []u8, new_len: usize, len_align: u29) Allocator.Error!usize {
assert(buf.len > 0);
if (len_align != 0) {
assert(mem.isAlignedAnyAlign(new_len, len_align));
assert(new_len >= len_align);
}
const self = @fieldParentPtr(@This(), "allocator", allocator);
const result = try self.getUnderlyingAllocatorPtr().callResizeFn(buf, new_len, len_align);
if (len_align == 0) {
assert(result == new_len);
} else {
assert(result >= new_len);
assert(mem.isAlignedAnyAlign(result, len_align));
}
return result;
}
pub usingnamespace if (T == *Allocator or !@hasDecl(T, "reset")) struct {} else struct {
pub fn reset(self: *Self) void {
self.underlying_allocator.reset();
}
};
};}

pub fn sanityWrap(allocator: var) SanityAllocator(@TypeOf(allocator)) {
return SanityAllocator(@TypeOf(allocator)).init(allocator);
}
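// Editor's note (not part of this commit): sketch of wrapping an allocator with the sanity
// checker defined above. `sanityWrap` stores the wrapped allocator by value, so the wrapper
// must live in a variable; the std.testing change later in this diff uses the same pattern.
// FixedBufferAllocator is assumed here purely as an example backing allocator.
test "sanityWrap sketch" {
    var buf: [256]u8 = undefined;
    var checked = sanityWrap(std.heap.FixedBufferAllocator.init(&buf));
    const allocator = &checked.allocator;
    const slice = try allocator.alloc(u8, 16);
    allocator.free(slice);
}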

/// An allocator helper function. Adjusts an allocation length to satisfy `len_align`.
/// `full_len` should be the full capacity of the allocation, which may be greater
/// than the `len` that was requested. This function should only be used by allocators
/// that are unaffected by `len_align`.
pub fn alignAllocLen(full_len: usize, alloc_len: usize, len_align: u29) usize {
assert(alloc_len > 0);
assert(alloc_len >= len_align);
assert(full_len >= alloc_len);
if (len_align == 0)
return alloc_len;
const adjusted = alignBackwardAnyAlign(full_len, len_align);
assert(adjusted >= alloc_len);
return adjusted;
}
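// Editor's note (not part of this commit): a worked example of `alignAllocLen`. With a full
// capacity of 70 bytes, a requested length of 50 and `len_align == 16`, the largest multiple
// of 16 that still fits is 64, so that is the length reported back to the caller.
test "alignAllocLen sketch" {
    testing.expect(alignAllocLen(70, 50, 16) == 64);
    testing.expect(alignAllocLen(70, 50, 0) == 50); // len_align == 0 keeps the exact length
}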

var failAllocator = Allocator{
.allocFn = failAllocatorAlloc,
.resizeFn = Allocator.noResize,
};
fn failAllocatorAlloc(self: *Allocator, n: usize, alignment: u29, len_align: u29) Allocator.Error![]u8 {
return error.OutOfMemory;
}

test "mem.Allocator basics" {
@ -2190,6 +2377,13 @@ test "alignForward" {
testing.expect(alignForward(17, 8) == 24);
}

pub fn alignBackwardAnyAlign(i: usize, alignment: usize) usize {
if (@popCount(usize, alignment) == 1)
return alignBackward(i, alignment);
assert(alignment != 0);
return i - @mod(i, alignment);
}
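// Editor's note (not part of this commit): `alignBackwardAnyAlign` falls back to plain modulo
// arithmetic when the alignment is not a power of two, which the power-of-two-only
// `alignBackward` below cannot handle.
test "alignBackwardAnyAlign sketch" {
    testing.expect(alignBackwardAnyAlign(70, 16) == 64); // power of two: same as alignBackward
    testing.expect(alignBackwardAnyAlign(70, 24) == 48); // non power of two: 70 - (70 mod 24)
}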

/// Round an address down to the previous aligned address
/// The alignment must be a power of 2 and greater than 0.
pub fn alignBackward(addr: usize, alignment: usize) usize {
@ -2206,6 +2400,19 @@ pub fn alignBackwardGeneric(comptime T: type, addr: T, alignment: T) T {
return addr & ~(alignment - 1);
}

/// Returns whether `alignment` is a valid alignment, meaning it is
/// a positive power of 2.
pub fn isValidAlign(alignment: u29) bool {
return @popCount(u29, alignment) == 1;
}

pub fn isAlignedAnyAlign(i: usize, alignment: usize) bool {
if (@popCount(usize, alignment) == 1)
return isAligned(i, alignment);
assert(alignment != 0);
return 0 == @mod(i, alignment);
}

/// Given an address and an alignment, return true if the address is a multiple of the alignment
/// The alignment must be a power of 2 and greater than 0.
pub fn isAligned(addr: usize, alignment: usize) bool {

@ -593,6 +593,7 @@ pub const FILE_CURRENT = 1;
pub const FILE_END = 2;

pub const HEAP_CREATE_ENABLE_EXECUTE = 0x00040000;
pub const HEAP_REALLOC_IN_PLACE_ONLY = 0x00000010;
pub const HEAP_GENERATE_EXCEPTIONS = 0x00000004;
pub const HEAP_NO_SERIALIZE = 0x00000001;


@ -11,7 +11,7 @@ pub var allocator_instance = LeakCountAllocator.init(&base_allocator_instance.al
pub const failing_allocator = &failing_allocator_instance.allocator;
pub var failing_allocator_instance = FailingAllocator.init(&base_allocator_instance.allocator, 0);

pub var base_allocator_instance = std.heap.ThreadSafeFixedBufferAllocator.init(allocator_mem[0..]);
pub var base_allocator_instance = std.mem.sanityWrap(std.heap.ThreadSafeFixedBufferAllocator.init(allocator_mem[0..]));
var allocator_mem: [2 * 1024 * 1024]u8 = undefined;

/// This function is intended to be used only in tests. It prints diagnostics to stderr

@ -39,43 +39,38 @@ pub const FailingAllocator = struct {
.allocations = 0,
.deallocations = 0,
.allocator = mem.Allocator{
.reallocFn = realloc,
.shrinkFn = shrink,
.allocFn = alloc,
.resizeFn = resize,
},
};
}

fn realloc(allocator: *mem.Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
fn alloc(allocator: *std.mem.Allocator, len: usize, ptr_align: u29, len_align: u29) error{OutOfMemory}![]u8 {
const self = @fieldParentPtr(FailingAllocator, "allocator", allocator);
if (self.index == self.fail_index) {
return error.OutOfMemory;
}
const result = try self.internal_allocator.reallocFn(
self.internal_allocator,
old_mem,
old_align,
new_size,
new_align,
);
if (new_size < old_mem.len) {
self.freed_bytes += old_mem.len - new_size;
if (new_size == 0)
self.deallocations += 1;
} else if (new_size > old_mem.len) {
self.allocated_bytes += new_size - old_mem.len;
if (old_mem.len == 0)
self.allocations += 1;
}
const result = try self.internal_allocator.callAllocFn(len, ptr_align, len_align);
self.allocated_bytes += result.len;
self.allocations += 1;
self.index += 1;
return result;
}

fn shrink(allocator: *mem.Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
fn resize(allocator: *std.mem.Allocator, old_mem: []u8, new_len: usize, len_align: u29) error{OutOfMemory}!usize {
const self = @fieldParentPtr(FailingAllocator, "allocator", allocator);
const r = self.internal_allocator.shrinkFn(self.internal_allocator, old_mem, old_align, new_size, new_align);
self.freed_bytes += old_mem.len - r.len;
if (new_size == 0)
const r = self.internal_allocator.callResizeFn(old_mem, new_len, len_align) catch |e| {
std.debug.assert(new_len > old_mem.len);
return e;
};
if (new_len == 0) {
self.deallocations += 1;
self.freed_bytes += old_mem.len;
} else if (r < old_mem.len) {
self.freed_bytes += old_mem.len - r;
} else {
self.allocated_bytes += r - old_mem.len;
}
return r;
}
};
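// Editor's note (not part of this commit): sketch of how the ported FailingAllocator is
// typically driven: fail the Nth allocation to exercise error paths. The page_allocator
// backing is an assumption made only for this example.
test "FailingAllocator sketch" {
    var failing = FailingAllocator.init(std.heap.page_allocator, 1);
    const first = try failing.allocator.alloc(u8, 8); // allocation index 0 succeeds
    defer failing.allocator.free(first);
    std.testing.expectError(error.OutOfMemory, failing.allocator.alloc(u8, 8)); // index 1 fails
}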

@ -14,23 +14,21 @@ pub const LeakCountAllocator = struct {
return .{
.count = 0,
.allocator = .{
.reallocFn = realloc,
.shrinkFn = shrink,
.allocFn = alloc,
.resizeFn = resize,
},
.internal_allocator = allocator,
};
}

fn realloc(allocator: *std.mem.Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
fn alloc(allocator: *std.mem.Allocator, len: usize, ptr_align: u29, len_align: u29) error{OutOfMemory}![]u8 {
const self = @fieldParentPtr(LeakCountAllocator, "allocator", allocator);
var data = try self.internal_allocator.reallocFn(self.internal_allocator, old_mem, old_align, new_size, new_align);
if (old_mem.len == 0) {
self.count += 1;
}
return data;
const ptr = try self.internal_allocator.callAllocFn(len, ptr_align, len_align);
self.count += 1;
return ptr;
}

fn shrink(allocator: *std.mem.Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
fn resize(allocator: *std.mem.Allocator, old_mem: []u8, new_size: usize, len_align: u29) error{OutOfMemory}!usize {
const self = @fieldParentPtr(LeakCountAllocator, "allocator", allocator);
if (new_size == 0) {
if (self.count == 0) {
@ -38,7 +36,10 @@ pub const LeakCountAllocator = struct {
}
self.count -= 1;
}
return self.internal_allocator.shrinkFn(self.internal_allocator, old_mem, old_align, new_size, new_align);
return self.internal_allocator.callResizeFn(old_mem, new_size, len_align) catch |e| {
std.debug.assert(new_size > old_mem.len);
return e;
};
}

pub fn validate(self: LeakCountAllocator) !void {