// zig/lib/std/heap.zig (2022-11-29)
const std = @import("std.zig");
const builtin = @import("builtin");
const root = @import("root");
const debug = std.debug;
const assert = debug.assert;
const testing = std.testing;
const mem = std.mem;
const os = std.os;
const c = std.c;
const maxInt = std.math.maxInt;
pub const LoggingAllocator = @import("heap/logging_allocator.zig").LoggingAllocator;
pub const loggingAllocator = @import("heap/logging_allocator.zig").loggingAllocator;
pub const ScopedLoggingAllocator = @import("heap/logging_allocator.zig").ScopedLoggingAllocator;
pub const LogToWriterAllocator = @import("heap/log_to_writer_allocator.zig").LogToWriterAllocator;
pub const logToWriterAllocator = @import("heap/log_to_writer_allocator.zig").logToWriterAllocator;
pub const ArenaAllocator = @import("heap/arena_allocator.zig").ArenaAllocator;
pub const GeneralPurposeAllocator = @import("heap/general_purpose_allocator.zig").GeneralPurposeAllocator;
const Allocator = mem.Allocator;
const CAllocator = struct {
comptime {
if (!builtin.link_libc) {
@compileError("C allocator is only available when linking against libc");
}
}
usingnamespace if (@hasDecl(c, "malloc_size"))
struct {
pub const supports_malloc_size = true;
pub const malloc_size = c.malloc_size;
}
else if (@hasDecl(c, "malloc_usable_size"))
struct {
pub const supports_malloc_size = true;
pub const malloc_size = c.malloc_usable_size;
}
else if (@hasDecl(c, "_msize"))
struct {
pub const supports_malloc_size = true;
pub const malloc_size = c._msize;
}
else
struct {
pub const supports_malloc_size = false;
};
pub const supports_posix_memalign = @hasDecl(c, "posix_memalign");
fn getHeader(ptr: [*]u8) *[*]u8 {
return @intToPtr(*[*]u8, @ptrToInt(ptr) - @sizeOf(usize));
}
fn alignedAlloc(len: usize, log2_align: u8) ?[*]u8 {
const alignment = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_align);
if (supports_posix_memalign) {
// posix_memalign only accepts alignment values that are a
// multiple of the pointer size
const eff_alignment = @max(alignment, @sizeOf(usize));
var aligned_ptr: ?*anyopaque = undefined;
if (c.posix_memalign(&aligned_ptr, eff_alignment, len) != 0)
return null;
return @ptrCast([*]u8, aligned_ptr);
}
// Thin wrapper around regular malloc, overallocate to account for
// alignment padding and store the original malloc()'ed pointer before
// the aligned address.
var unaligned_ptr = @ptrCast([*]u8, c.malloc(len + alignment - 1 + @sizeOf(usize)) orelse return null);
const unaligned_addr = @ptrToInt(unaligned_ptr);
const aligned_addr = mem.alignForward(unaligned_addr + @sizeOf(usize), alignment);
var aligned_ptr = unaligned_ptr + (aligned_addr - unaligned_addr);
getHeader(aligned_ptr).* = unaligned_ptr;
return aligned_ptr;
}
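// Resulting layout of the malloc fallback path above, sketched for
// illustration:
//
//   unaligned_ptr           getHeader(aligned_ptr)
//   v                       v
//   +-----------------------+----------------+------------------------+
//   | padding               | header: [*]u8  | user data (len bytes)  |
//   +-----------------------+----------------+------------------------+
//                                            ^
//                                            aligned_ptr
//
// The header stores the original malloc()'ed pointer so that alignedFree
// and alignedAllocSize can recover it.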
fn alignedFree(ptr: [*]u8) void {
if (supports_posix_memalign) {
return c.free(ptr);
}
const unaligned_ptr = getHeader(ptr).*;
c.free(unaligned_ptr);
}
fn alignedAllocSize(ptr: [*]u8) usize {
if (supports_posix_memalign) {
return CAllocator.malloc_size(ptr);
}
const unaligned_ptr = getHeader(ptr).*;
const delta = @ptrToInt(ptr) - @ptrToInt(unaligned_ptr);
return CAllocator.malloc_size(unaligned_ptr) - delta;
}
fn alloc(
_: *anyopaque,
len: usize,
log2_align: u8,
return_address: usize,
) ?[*]u8 {
_ = return_address;
assert(len > 0);
return alignedAlloc(len, log2_align);
}
fn resize(
_: *anyopaque,
buf: []u8,
log2_buf_align: u8,
new_len: usize,
return_address: usize,
) bool {
_ = log2_buf_align;
_ = return_address;
if (new_len <= buf.len) {
return true;
}
if (CAllocator.supports_malloc_size) {
const full_len = alignedAllocSize(buf.ptr);
if (new_len <= full_len) {
return true;
}
}
return false;
}
fn free(
_: *anyopaque,
buf: []u8,
log2_buf_align: u8,
return_address: usize,
) void {
_ = log2_buf_align;
_ = return_address;
alignedFree(buf.ptr);
}
};
/// Supports the full Allocator interface, including alignment, and exploits
/// `malloc_usable_size` if available. For an allocator that directly calls
/// `malloc`/`free`, see `raw_c_allocator`.
pub const c_allocator = Allocator{
.ptr = undefined,
.vtable = &c_allocator_vtable,
};
const c_allocator_vtable = Allocator.VTable{
.alloc = CAllocator.alloc,
.resize = CAllocator.resize,
.free = CAllocator.free,
};
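// A minimal usage sketch for `c_allocator`, guarded on libc like the tests
// near the end of this file:
test "c_allocator usage sketch" {
    if (builtin.link_libc) {
        const msg = try c_allocator.dupe(u8, "hello");
        defer c_allocator.free(msg);
        try testing.expectEqualStrings("hello", msg);
    }
}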
/// Asserts allocations are within `@alignOf(std.c.max_align_t)` and directly calls
/// `malloc`/`free`. Does not attempt to utilize `malloc_usable_size`.
/// This allocator is safe to use as the backing allocator with
/// `ArenaAllocator`, for example, and is more efficient in such a case
/// than `c_allocator`.
pub const raw_c_allocator = Allocator{
.ptr = undefined,
.vtable = &raw_c_allocator_vtable,
};
const raw_c_allocator_vtable = Allocator.VTable{
.alloc = rawCAlloc,
.resize = rawCResize,
.free = rawCFree,
};
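// A minimal sketch of the pairing the doc comment above recommends:
// `raw_c_allocator` backing an `ArenaAllocator`, so the arena handles
// alignment bookkeeping and `free` maps directly onto libc:
test "raw_c_allocator backing an arena" {
    if (builtin.link_libc) {
        var arena = ArenaAllocator.init(raw_c_allocator);
        defer arena.deinit();
        const a = arena.allocator();
        const nums = try a.alloc(u64, 8);
        try testing.expect(nums.len == 8);
    }
}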
fn rawCAlloc(
_: *anyopaque,
len: usize,
log2_ptr_align: u8,
ret_addr: usize,
) ?[*]u8 {
_ = ret_addr;
assert(log2_ptr_align <= comptime std.math.log2_int(usize, @alignOf(std.c.max_align_t)));
// TODO: change the language to make @ptrCast also do alignment cast
const ptr = @alignCast(@alignOf(std.c.max_align_t), c.malloc(len));
return @ptrCast(?[*]align(@alignOf(std.c.max_align_t)) u8, ptr);
}
fn rawCResize(
_: *anyopaque,
buf: []u8,
log2_old_align: u8,
new_len: usize,
ret_addr: usize,
) bool {
_ = log2_old_align;
_ = ret_addr;
return new_len <= buf.len;
}
fn rawCFree(
_: *anyopaque,
buf: []u8,
log2_old_align: u8,
ret_addr: usize,
) void {
_ = log2_old_align;
_ = ret_addr;
c.free(buf.ptr);
}
/// This allocator makes a syscall directly for every allocation and free.
/// Thread-safe and lock-free.
pub const page_allocator = if (builtin.target.isWasm())
Allocator{
.ptr = undefined,
.vtable = &WasmPageAllocator.vtable,
}
else if (builtin.target.os.tag == .freestanding)
root.os.heap.page_allocator
else
Allocator{
.ptr = undefined,
.vtable = &PageAllocator.vtable,
};
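// A minimal usage sketch: since every allocation and free is a syscall,
// `page_allocator` is typically used for large allocations or as the
// backing allocator for another allocator such as `ArenaAllocator` or
// `GeneralPurposeAllocator`:
test "page_allocator usage sketch" {
    const buf = try page_allocator.alloc(u8, 16);
    defer page_allocator.free(buf);
    // Internally the mapping is rounded up to a whole page, but the slice
    // has the requested length.
    try testing.expect(buf.len == 16);
}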
/// Verifies that the adjusted length will still map to the full length
pub fn alignPageAllocLen(full_len: usize, len: usize) usize {
const aligned_len = mem.alignAllocLen(full_len, len);
assert(mem.alignForward(aligned_len, mem.page_size) == full_len);
return aligned_len;
}
/// TODO Utilize this on Windows.
pub var next_mmap_addr_hint: ?[*]align(mem.page_size) u8 = null;
const PageAllocator = struct {
const vtable = Allocator.VTable{
.alloc = alloc,
.resize = resize,
.free = free,
};
fn alloc(_: *anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 {
_ = ra;
_ = log2_align;
assert(n > 0);
if (n > maxInt(usize) - (mem.page_size - 1)) return null;
const aligned_len = mem.alignForward(n, mem.page_size);
if (builtin.os.tag == .windows) {
const w = os.windows;
const addr = w.VirtualAlloc(
null,
aligned_len,
w.MEM_COMMIT | w.MEM_RESERVE,
w.PAGE_READWRITE,
) catch return null;
return @ptrCast([*]align(mem.page_size) u8, @alignCast(mem.page_size, addr));
}
const hint = @atomicLoad(@TypeOf(next_mmap_addr_hint), &next_mmap_addr_hint, .Unordered);
const slice = os.mmap(
hint,
aligned_len,
os.PROT.READ | os.PROT.WRITE,
os.MAP.PRIVATE | os.MAP.ANONYMOUS,
-1,
0,
) catch return null;
assert(mem.isAligned(@ptrToInt(slice.ptr), mem.page_size));
const new_hint = @alignCast(mem.page_size, slice.ptr + aligned_len);
_ = @cmpxchgStrong(@TypeOf(next_mmap_addr_hint), &next_mmap_addr_hint, hint, new_hint, .Monotonic, .Monotonic);
return slice.ptr;
}
fn resize(
_: *anyopaque,
buf_unaligned: []u8,
log2_buf_align: u8,
new_size: usize,
return_address: usize,
) bool {
_ = log2_buf_align;
_ = return_address;
const new_size_aligned = mem.alignForward(new_size, mem.page_size);
if (builtin.os.tag == .windows) {
const w = os.windows;
if (new_size <= buf_unaligned.len) {
const base_addr = @ptrToInt(buf_unaligned.ptr);
const old_addr_end = base_addr + buf_unaligned.len;
const new_addr_end = mem.alignForward(base_addr + new_size, mem.page_size);
if (old_addr_end > new_addr_end) {
// For shrinking that is not releasing, we will only
// decommit the pages not needed anymore.
w.VirtualFree(
@intToPtr(*anyopaque, new_addr_end),
old_addr_end - new_addr_end,
w.MEM_DECOMMIT,
);
}
return true;
}
const old_size_aligned = mem.alignForward(buf_unaligned.len, mem.page_size);
if (new_size_aligned <= old_size_aligned) {
return true;
}
return false;
}
const buf_aligned_len = mem.alignForward(buf_unaligned.len, mem.page_size);
if (new_size_aligned == buf_aligned_len)
return true;
if (new_size_aligned < buf_aligned_len) {
const ptr = @alignCast(mem.page_size, buf_unaligned.ptr + new_size_aligned);
// TODO: if the next_mmap_addr_hint is within the unmapped range, update it
os.munmap(ptr[0 .. buf_aligned_len - new_size_aligned]);
return true;
}
// TODO: call mremap
// TODO: if the next_mmap_addr_hint is within the remapped range, update it
return false;
}
fn free(_: *anyopaque, slice: []u8, log2_buf_align: u8, return_address: usize) void {
_ = log2_buf_align;
_ = return_address;
if (builtin.os.tag == .windows) {
os.windows.VirtualFree(slice.ptr, 0, os.windows.MEM_RELEASE);
} else {
const buf_aligned_len = mem.alignForward(slice.len, mem.page_size);
const ptr = @alignCast(mem.page_size, slice.ptr);
os.munmap(ptr[0..buf_aligned_len]);
}
}
};
const WasmPageAllocator = struct {
comptime {
if (!builtin.target.isWasm()) {
@compileError("WasmPageAllocator is only available for wasm32 arch");
}
}
const vtable = Allocator.VTable{
.alloc = alloc,
.resize = resize,
.free = free,
};
const PageStatus = enum(u1) {
used = 0,
free = 1,
pub const none_free: u8 = 0;
};
const FreeBlock = struct {
data: []u128,
const Io = std.packed_int_array.PackedIntIo(u1, .Little);
fn totalPages(self: FreeBlock) usize {
return self.data.len * 128;
}
fn isInitialized(self: FreeBlock) bool {
return self.data.len > 0;
}
fn getBit(self: FreeBlock, idx: usize) PageStatus {
const bit_offset = 0;
return @intToEnum(PageStatus, Io.get(mem.sliceAsBytes(self.data), idx, bit_offset));
}
fn setBits(self: FreeBlock, start_idx: usize, len: usize, val: PageStatus) void {
const bit_offset = 0;
var i: usize = 0;
while (i < len) : (i += 1) {
Io.set(mem.sliceAsBytes(self.data), start_idx + i, bit_offset, @enumToInt(val));
}
}
// Use '0xFFFFFFFF' as a _missing_ sentinel
// This saves ~50 bytes compared to returning a nullable
// We can guarantee that conventional memory never gets this big,
// and wasm32 would not be able to address this memory (32 GB > usize).
// Revisit if this is settled: https://github.com/ziglang/zig/issues/3806
const not_found = std.math.maxInt(usize);
fn useRecycled(self: FreeBlock, num_pages: usize, log2_align: u8) usize {
@setCold(true);
for (self.data) |segment, i| {
const spills_into_next = @bitCast(i128, segment) < 0;
const has_enough_bits = @popCount(segment) >= num_pages;
if (!spills_into_next and !has_enough_bits) continue;
var j: usize = i * 128;
while (j < (i + 1) * 128) : (j += 1) {
var count: usize = 0;
while (j + count < self.totalPages() and self.getBit(j + count) == .free) {
count += 1;
const addr = j * mem.page_size;
if (count >= num_pages and mem.isAlignedLog2(addr, log2_align)) {
self.setBits(j, num_pages, .used);
return j;
}
}
j += count;
}
}
return not_found;
}
fn recycle(self: FreeBlock, start_idx: usize, len: usize) void {
self.setBits(start_idx, len, .free);
}
};
var _conventional_data = [_]u128{0} ** 16;
// Marking `conventional` as const saves ~40 bytes
const conventional = FreeBlock{ .data = &_conventional_data };
var extended = FreeBlock{ .data = &[_]u128{} };
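// Bookkeeping sketch: `conventional` tracks the first 16 * 128 = 2048 pages
// (128 MiB at wasm's 64 KiB page size) in a statically allocated bitset;
// pages beyond that are tracked by `extended`, whose bitset is stored in a
// page taken from the first batch of freed extended pages (see `freePages`
// below).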
fn extendedOffset() usize {
return conventional.totalPages();
}
fn nPages(memsize: usize) usize {
return mem.alignForward(memsize, mem.page_size) / mem.page_size;
}
fn alloc(_: *anyopaque, len: usize, log2_align: u8, ra: usize) ?[*]u8 {
_ = ra;
if (len > maxInt(usize) - (mem.page_size - 1)) return null;
const page_count = nPages(len);
const page_idx = allocPages(page_count, log2_align) catch return null;
return @intToPtr([*]u8, page_idx * mem.page_size);
}
fn allocPages(page_count: usize, log2_align: u8) !usize {
{
const idx = conventional.useRecycled(page_count, log2_align);
if (idx != FreeBlock.not_found) {
return idx;
}
}
const idx = extended.useRecycled(page_count, log2_align);
if (idx != FreeBlock.not_found) {
return idx + extendedOffset();
}
const next_page_idx = @wasmMemorySize(0);
const next_page_addr = next_page_idx * mem.page_size;
const aligned_addr = mem.alignForwardLog2(next_page_addr, log2_align);
const drop_page_count = @divExact(aligned_addr - next_page_addr, mem.page_size);
const result = @wasmMemoryGrow(0, @intCast(u32, drop_page_count + page_count));
if (result <= 0)
return error.OutOfMemory;
assert(result == next_page_idx);
const aligned_page_idx = next_page_idx + drop_page_count;
if (drop_page_count > 0) {
freePages(next_page_idx, aligned_page_idx);
}
return @intCast(usize, aligned_page_idx);
}
fn freePages(start: usize, end: usize) void {
if (start < extendedOffset()) {
conventional.recycle(start, @min(extendedOffset(), end) - start);
}
if (end > extendedOffset()) {
var new_end = end;
if (!extended.isInitialized()) {
// Steal the last page from the memory currently being recycled
// TODO: would it be better if we use the first page instead?
new_end -= 1;
extended.data = @intToPtr([*]u128, new_end * mem.page_size)[0 .. mem.page_size / @sizeOf(u128)];
// Since this is the first page being freed and we consume it, assume *nothing* is free.
mem.set(u128, extended.data, PageStatus.none_free);
}
const clamped_start = @max(extendedOffset(), start);
extended.recycle(clamped_start - extendedOffset(), new_end - clamped_start);
}
}
fn resize(
_: *anyopaque,
buf: []u8,
log2_buf_align: u8,
new_len: usize,
return_address: usize,
) bool {
_ = log2_buf_align;
_ = return_address;
const aligned_len = mem.alignForward(buf.len, mem.page_size);
if (new_len > aligned_len) return false;
const current_n = nPages(aligned_len);
const new_n = nPages(new_len);
if (new_n != current_n) {
const base = nPages(@ptrToInt(buf.ptr));
freePages(base + new_n, base + current_n);
}
return true;
}
fn free(
_: *anyopaque,
buf: []u8,
log2_buf_align: u8,
return_address: usize,
) void {
_ = log2_buf_align;
_ = return_address;
const aligned_len = mem.alignForward(buf.len, mem.page_size);
const current_n = nPages(aligned_len);
const base = nPages(@ptrToInt(buf.ptr));
freePages(base, base + current_n);
}
};
pub const HeapAllocator = switch (builtin.os.tag) {
.windows => struct {
heap_handle: ?HeapHandle,
const HeapHandle = os.windows.HANDLE;
pub fn init() HeapAllocator {
return HeapAllocator{
.heap_handle = null,
};
}
pub fn allocator(self: *HeapAllocator) Allocator {
return .{
.ptr = self,
.vtable = &.{
.alloc = alloc,
.resize = resize,
.free = free,
},
};
}
pub fn deinit(self: *HeapAllocator) void {
if (self.heap_handle) |heap_handle| {
os.windows.HeapDestroy(heap_handle);
}
}
fn getRecordPtr(buf: []u8) *align(1) usize {
return @intToPtr(*align(1) usize, @ptrToInt(buf.ptr) + buf.len);
}
fn alloc(
ctx: *anyopaque,
n: usize,
log2_ptr_align: u8,
return_address: usize,
) ?[*]u8 {
_ = return_address;
const self = @ptrCast(*HeapAllocator, @alignCast(@alignOf(HeapAllocator), ctx));
const ptr_align = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_ptr_align);
const amt = n + ptr_align - 1 + @sizeOf(usize);
const optional_heap_handle = @atomicLoad(?HeapHandle, &self.heap_handle, .SeqCst);
const heap_handle = optional_heap_handle orelse blk: {
const options = if (builtin.single_threaded) os.windows.HEAP_NO_SERIALIZE else 0;
const hh = os.windows.kernel32.HeapCreate(options, amt, 0) orelse return null;
const other_hh = @cmpxchgStrong(?HeapHandle, &self.heap_handle, null, hh, .SeqCst, .SeqCst) orelse break :blk hh;
os.windows.HeapDestroy(hh);
break :blk other_hh.?; // can't be null because of the cmpxchg
};
const ptr = os.windows.kernel32.HeapAlloc(heap_handle, 0, amt) orelse return null;
const root_addr = @ptrToInt(ptr);
const aligned_addr = mem.alignForward(root_addr, ptr_align);
const buf = @intToPtr([*]u8, aligned_addr)[0..n];
getRecordPtr(buf).* = root_addr;
return buf.ptr;
}
fn resize(
ctx: *anyopaque,
buf: []u8,
log2_buf_align: u8,
new_size: usize,
return_address: usize,
) bool {
_ = log2_buf_align;
_ = return_address;
const self = @ptrCast(*HeapAllocator, @alignCast(@alignOf(HeapAllocator), ctx));
const root_addr = getRecordPtr(buf).*;
const align_offset = @ptrToInt(buf.ptr) - root_addr;
const amt = align_offset + new_size + @sizeOf(usize);
const new_ptr = os.windows.kernel32.HeapReAlloc(
self.heap_handle.?,
os.windows.HEAP_REALLOC_IN_PLACE_ONLY,
@intToPtr(*anyopaque, root_addr),
amt,
) orelse return false;
assert(new_ptr == @intToPtr(*anyopaque, root_addr));
getRecordPtr(buf.ptr[0..new_size]).* = root_addr;
return true;
}
fn free(
ctx: *anyopaque,
buf: []u8,
log2_buf_align: u8,
return_address: usize,
) void {
_ = log2_buf_align;
_ = return_address;
const self = @ptrCast(*HeapAllocator, @alignCast(@alignOf(HeapAllocator), ctx));
os.windows.HeapFree(self.heap_handle.?, 0, @intToPtr(*anyopaque, getRecordPtr(buf).*));
}
},
else => @compileError("Unsupported OS"),
};
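// A minimal usage sketch, wrapped in a comptime-known `if` like the test at
// the end of this file so non-Windows targets never analyze the
// Windows-only declarations. It also shows that the Win32 heap is created
// lazily, on the first allocation rather than in `init`:
test "HeapAllocator creates its heap lazily" {
    if (builtin.os.tag == .windows) {
        var heap = HeapAllocator.init();
        defer heap.deinit();
        try testing.expect(heap.heap_handle == null);
        const a = heap.allocator();
        const buf = try a.alloc(u8, 8);
        defer a.free(buf);
        try testing.expect(heap.heap_handle != null);
    }
}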
fn sliceContainsPtr(container: []u8, ptr: [*]u8) bool {
return @ptrToInt(ptr) >= @ptrToInt(container.ptr) and
@ptrToInt(ptr) < (@ptrToInt(container.ptr) + container.len);
}
fn sliceContainsSlice(container: []u8, slice: []u8) bool {
return @ptrToInt(slice.ptr) >= @ptrToInt(container.ptr) and
(@ptrToInt(slice.ptr) + slice.len) <= (@ptrToInt(container.ptr) + container.len);
}
pub const FixedBufferAllocator = struct {
end_index: usize,
buffer: []u8,
pub fn init(buffer: []u8) FixedBufferAllocator {
return FixedBufferAllocator{
.buffer = buffer,
.end_index = 0,
};
}
/// *WARNING* using this at the same time as the interface returned by `threadSafeAllocator` is not thread safe
pub fn allocator(self: *FixedBufferAllocator) Allocator {
return .{
.ptr = self,
.vtable = &.{
.alloc = alloc,
.resize = resize,
.free = free,
},
};
}
/// Provides a lock-free, thread-safe `Allocator` interface to the underlying `FixedBufferAllocator`
/// *WARNING* using this at the same time as the interface returned by `allocator` is not thread safe
pub fn threadSafeAllocator(self: *FixedBufferAllocator) Allocator {
return .{
.ptr = self,
.vtable = &.{
.alloc = threadSafeAlloc,
.resize = Allocator.noResize,
.free = Allocator.noFree,
},
};
}
pub fn ownsPtr(self: *FixedBufferAllocator, ptr: [*]u8) bool {
return sliceContainsPtr(self.buffer, ptr);
}
pub fn ownsSlice(self: *FixedBufferAllocator, slice: []u8) bool {
return sliceContainsSlice(self.buffer, slice);
}
/// NOTE: this will not work in all cases: if the last allocation had an
/// adjusted_index, then we won't be able to determine what the last
/// allocation was. This is because the alignForward operation done in
/// alloc is not reversible.
pub fn isLastAllocation(self: *FixedBufferAllocator, buf: []u8) bool {
return buf.ptr + buf.len == self.buffer.ptr + self.end_index;
}
fn alloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 {
const self = @ptrCast(*FixedBufferAllocator, @alignCast(@alignOf(FixedBufferAllocator), ctx));
_ = ra;
const ptr_align = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_ptr_align);
const adjust_off = mem.alignPointerOffset(self.buffer.ptr + self.end_index, ptr_align) orelse return null;
const adjusted_index = self.end_index + adjust_off;
const new_end_index = adjusted_index + n;
if (new_end_index > self.buffer.len) return null;
self.end_index = new_end_index;
return self.buffer.ptr + adjusted_index;
}
fn resize(
ctx: *anyopaque,
buf: []u8,
log2_buf_align: u8,
new_size: usize,
return_address: usize,
) bool {
const self = @ptrCast(*FixedBufferAllocator, @alignCast(@alignOf(FixedBufferAllocator), ctx));
_ = log2_buf_align;
_ = return_address;
assert(self.ownsSlice(buf)); // sanity check
if (!self.isLastAllocation(buf)) {
if (new_size > buf.len) return false;
return true;
}
if (new_size <= buf.len) {
const sub = buf.len - new_size;
self.end_index -= sub;
return true;
}
const add = new_size - buf.len;
if (add + self.end_index > self.buffer.len) return false;
self.end_index += add;
return true;
}
fn free(
ctx: *anyopaque,
buf: []u8,
log2_buf_align: u8,
return_address: usize,
) void {
const self = @ptrCast(*FixedBufferAllocator, @alignCast(@alignOf(FixedBufferAllocator), ctx));
_ = log2_buf_align;
_ = return_address;
assert(self.ownsSlice(buf)); // sanity check
if (self.isLastAllocation(buf)) {
self.end_index -= buf.len;
}
}
fn threadSafeAlloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 {
const self = @ptrCast(*FixedBufferAllocator, @alignCast(@alignOf(FixedBufferAllocator), ctx));
_ = ra;
const ptr_align = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_ptr_align);
var end_index = @atomicLoad(usize, &self.end_index, .SeqCst);
while (true) {
const adjust_off = mem.alignPointerOffset(self.buffer.ptr + end_index, ptr_align) orelse return null;
const adjusted_index = end_index + adjust_off;
const new_end_index = adjusted_index + n;
if (new_end_index > self.buffer.len) return null;
end_index = @cmpxchgWeak(usize, &self.end_index, end_index, new_end_index, .SeqCst, .SeqCst) orelse
return self.buffer[adjusted_index..new_end_index].ptr;
}
}
pub fn reset(self: *FixedBufferAllocator) void {
self.end_index = 0;
}
};
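// A minimal sketch of `ownsPtr`/`ownsSlice`: ownership is decided purely by
// whether the address falls inside the caller-provided buffer:
test "FixedBufferAllocator ownership sketch" {
    var buffer: [64]u8 = undefined;
    var fba = FixedBufferAllocator.init(buffer[0..]);
    const a = fba.allocator();
    const inside = try a.alloc(u8, 8);
    try testing.expect(fba.ownsSlice(inside));
    var outside: [8]u8 = undefined;
    try testing.expect(!fba.ownsSlice(outside[0..]));
}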
pub const ThreadSafeFixedBufferAllocator = @compileError("ThreadSafeFixedBufferAllocator has been replaced with `threadSafeAllocator` on FixedBufferAllocator");
/// Returns a `StackFallbackAllocator` that allocates from a
/// `FixedBufferAllocator` backed by an array of size `size`, falling back
/// to `fallback_allocator` if that fails.
pub fn stackFallback(comptime size: usize, fallback_allocator: Allocator) StackFallbackAllocator(size) {
return StackFallbackAllocator(size){
.buffer = undefined,
.fallback_allocator = fallback_allocator,
.fixed_buffer_allocator = undefined,
};
}
/// An allocator that attempts to allocate from a `FixedBufferAllocator`
/// backed by an array of size `size`. If the allocation fails, it falls
/// back to `fallback_allocator`. Easily created with `stackFallback`.
pub fn StackFallbackAllocator(comptime size: usize) type {
return struct {
const Self = @This();
buffer: [size]u8,
fallback_allocator: Allocator,
fixed_buffer_allocator: FixedBufferAllocator,
/// This function both fetches an `Allocator` interface to this
/// allocator *and* resets the internal buffer allocator.
pub fn get(self: *Self) Allocator {
self.fixed_buffer_allocator = FixedBufferAllocator.init(self.buffer[0..]);
return .{
.ptr = self,
.vtable = &.{
.alloc = alloc,
.resize = resize,
.free = free,
},
};
}
fn alloc(
ctx: *anyopaque,
len: usize,
log2_ptr_align: u8,
ra: usize,
) ?[*]u8 {
const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator, len, log2_ptr_align, ra) orelse
return self.fallback_allocator.rawAlloc(len, log2_ptr_align, ra);
}
fn resize(
ctx: *anyopaque,
buf: []u8,
log2_buf_align: u8,
new_len: usize,
ra: usize,
) bool {
const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) {
return FixedBufferAllocator.resize(&self.fixed_buffer_allocator, buf, log2_buf_align, new_len, ra);
} else {
return self.fallback_allocator.rawResize(buf, log2_buf_align, new_len, ra);
}
}
fn free(
ctx: *anyopaque,
buf: []u8,
log2_buf_align: u8,
ra: usize,
) void {
const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) {
return FixedBufferAllocator.free(&self.fixed_buffer_allocator, buf, log2_buf_align, ra);
} else {
return self.fallback_allocator.rawFree(buf, log2_buf_align, ra);
}
}
};
}
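// A minimal usage sketch: allocations that fit in the stack buffer are
// served by the internal `FixedBufferAllocator`, and larger ones spill to
// the fallback allocator:
test "stackFallback usage sketch" {
    var sfa = stackFallback(256, page_allocator);
    const a = sfa.get();
    const small = try a.alloc(u8, 16); // fits in the 256-byte buffer
    const big = try a.alloc(u8, 1024); // too big; served by page_allocator
    defer a.free(big);
    defer a.free(small);
    try testing.expect(small.len == 16 and big.len == 1024);
}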
test "c_allocator" {
if (builtin.link_libc) {
try testAllocator(c_allocator);
try testAllocatorAligned(c_allocator);
try testAllocatorLargeAlignment(c_allocator);
try testAllocatorAlignedShrink(c_allocator);
}
}
test "raw_c_allocator" {
if (builtin.link_libc) {
try testAllocator(raw_c_allocator);
}
}
test "WasmPageAllocator internals" {
if (comptime builtin.target.isWasm()) {
const conventional_memsize = WasmPageAllocator.conventional.totalPages() * mem.page_size;
const initial = try page_allocator.alloc(u8, mem.page_size);
try testing.expect(@ptrToInt(initial.ptr) < conventional_memsize); // If this isn't conventional, the rest of these tests don't make sense. Also we have a serious memory leak in the test suite.
var inplace = try page_allocator.realloc(initial, 1);
try testing.expectEqual(initial.ptr, inplace.ptr);
inplace = try page_allocator.realloc(inplace, 4);
try testing.expectEqual(initial.ptr, inplace.ptr);
page_allocator.free(inplace);
const reuse = try page_allocator.alloc(u8, 1);
try testing.expectEqual(initial.ptr, reuse.ptr);
page_allocator.free(reuse);
// This segment may span conventional and extended memory, which involves really complex rules, so we're just ignoring it for now.
const padding = try page_allocator.alloc(u8, conventional_memsize);
page_allocator.free(padding);
const extended = try page_allocator.alloc(u8, conventional_memsize);
try testing.expect(@ptrToInt(extended.ptr) >= conventional_memsize);
const use_small = try page_allocator.alloc(u8, 1);
try testing.expectEqual(initial.ptr, use_small.ptr);
page_allocator.free(use_small);
inplace = try page_allocator.realloc(extended, 1);
try testing.expectEqual(extended.ptr, inplace.ptr);
page_allocator.free(inplace);
const reuse_extended = try page_allocator.alloc(u8, conventional_memsize);
try testing.expectEqual(extended.ptr, reuse_extended.ptr);
page_allocator.free(reuse_extended);
}
}
test "PageAllocator" {
const allocator = page_allocator;
try testAllocator(allocator);
try testAllocatorAligned(allocator);
if (!builtin.target.isWasm()) {
try testAllocatorLargeAlignment(allocator);
try testAllocatorAlignedShrink(allocator);
}
if (builtin.os.tag == .windows) {
const slice = try allocator.alignedAlloc(u8, mem.page_size, 128);
slice[0] = 0x12;
slice[127] = 0x34;
allocator.free(slice);
}
{
var buf = try allocator.alloc(u8, mem.page_size + 1);
defer allocator.free(buf);
buf = try allocator.realloc(buf, 1); // shrink past the page boundary
}
}
test "HeapAllocator" {
if (builtin.os.tag == .windows) {
// https://github.com/ziglang/zig/issues/13702
if (builtin.cpu.arch == .aarch64) return error.SkipZigTest;
var heap_allocator = HeapAllocator.init();
defer heap_allocator.deinit();
const allocator = heap_allocator.allocator();
try testAllocator(allocator);
try testAllocatorAligned(allocator);
try testAllocatorLargeAlignment(allocator);
try testAllocatorAlignedShrink(allocator);
}
}
test "ArenaAllocator" {
var arena_allocator = ArenaAllocator.init(page_allocator);
defer arena_allocator.deinit();
const allocator = arena_allocator.allocator();
try testAllocator(allocator);
try testAllocatorAligned(allocator);
try testAllocatorLargeAlignment(allocator);
try testAllocatorAlignedShrink(allocator);
}
var test_fixed_buffer_allocator_memory: [800000 * @sizeOf(u64)]u8 = undefined;
test "FixedBufferAllocator" {
var fixed_buffer_allocator = mem.validationWrap(FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]));
const allocator = fixed_buffer_allocator.allocator();
try testAllocator(allocator);
try testAllocatorAligned(allocator);
try testAllocatorLargeAlignment(allocator);
try testAllocatorAlignedShrink(allocator);
}
test "FixedBufferAllocator.reset" {
var buf: [8]u8 align(@alignOf(u64)) = undefined;
var fba = FixedBufferAllocator.init(buf[0..]);
const allocator = fba.allocator();
const X = 0xeeeeeeeeeeeeeeee;
const Y = 0xffffffffffffffff;
var x = try allocator.create(u64);
x.* = X;
try testing.expectError(error.OutOfMemory, allocator.create(u64));
fba.reset();
var y = try allocator.create(u64);
y.* = Y;
// we expect Y to have overwritten X.
try testing.expect(x.* == y.*);
try testing.expect(y.* == Y);
}
test "StackFallbackAllocator" {
const fallback_allocator = page_allocator;
var stack_allocator = stackFallback(4096, fallback_allocator);
try testAllocator(stack_allocator.get());
try testAllocatorAligned(stack_allocator.get());
try testAllocatorLargeAlignment(stack_allocator.get());
try testAllocatorAlignedShrink(stack_allocator.get());
}
test "FixedBufferAllocator Reuse memory on realloc" {
var small_fixed_buffer: [10]u8 = undefined;
// check that we re-use the memory
{
var fixed_buffer_allocator = FixedBufferAllocator.init(small_fixed_buffer[0..]);
const allocator = fixed_buffer_allocator.allocator();
var slice0 = try allocator.alloc(u8, 5);
try testing.expect(slice0.len == 5);
var slice1 = try allocator.realloc(slice0, 10);
try testing.expect(slice1.ptr == slice0.ptr);
try testing.expect(slice1.len == 10);
try testing.expectError(error.OutOfMemory, allocator.realloc(slice1, 11));
}
// check that we don't re-use the memory if it's not the most recent block
{
var fixed_buffer_allocator = FixedBufferAllocator.init(small_fixed_buffer[0..]);
const allocator = fixed_buffer_allocator.allocator();
var slice0 = try allocator.alloc(u8, 2);
slice0[0] = 1;
slice0[1] = 2;
var slice1 = try allocator.alloc(u8, 2);
var slice2 = try allocator.realloc(slice0, 4);
try testing.expect(slice0.ptr != slice2.ptr);
try testing.expect(slice1.ptr != slice2.ptr);
try testing.expect(slice2[0] == 1);
try testing.expect(slice2[1] == 2);
}
}
test "Thread safe FixedBufferAllocator" {
var fixed_buffer_allocator = FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]);
try testAllocator(fixed_buffer_allocator.threadSafeAllocator());
try testAllocatorAligned(fixed_buffer_allocator.threadSafeAllocator());
try testAllocatorLargeAlignment(fixed_buffer_allocator.threadSafeAllocator());
try testAllocatorAlignedShrink(fixed_buffer_allocator.threadSafeAllocator());
}
/// This one should not try alignments that exceed what C malloc can handle.
pub fn testAllocator(base_allocator: mem.Allocator) !void {
var validationAllocator = mem.validationWrap(base_allocator);
const allocator = validationAllocator.allocator();
var slice = try allocator.alloc(*i32, 100);
try testing.expect(slice.len == 100);
for (slice) |*item, i| {
item.* = try allocator.create(i32);
item.*.* = @intCast(i32, i);
}
slice = try allocator.realloc(slice, 20000);
try testing.expect(slice.len == 20000);
for (slice[0..100]) |item, i| {
try testing.expect(item.* == @intCast(i32, i));
allocator.destroy(item);
}
if (allocator.resize(slice, 50)) {
slice = slice[0..50];
if (allocator.resize(slice, 25)) {
slice = slice[0..25];
try testing.expect(allocator.resize(slice, 0));
slice = slice[0..0];
slice = try allocator.realloc(slice, 10);
try testing.expect(slice.len == 10);
}
}
allocator.free(slice);
// Zero-length allocation
var empty = try allocator.alloc(u8, 0);
allocator.free(empty);
// Allocation with zero-sized types
const zero_bit_ptr = try allocator.create(u0);
zero_bit_ptr.* = 0;
allocator.destroy(zero_bit_ptr);
const oversize = try allocator.alignedAlloc(u32, null, 5);
try testing.expect(oversize.len >= 5);
for (oversize) |*item| {
item.* = 0xDEADBEEF;
}
allocator.free(oversize);
}
pub fn testAllocatorAligned(base_allocator: mem.Allocator) !void {
var validationAllocator = mem.validationWrap(base_allocator);
const allocator = validationAllocator.allocator();
// Test a few alignment values, both smaller and larger than the type's alignment
inline for ([_]u29{ 1, 2, 4, 8, 16, 32, 64 }) |alignment| {
// initial
var slice = try allocator.alignedAlloc(u8, alignment, 10);
try testing.expect(slice.len == 10);
// grow
slice = try allocator.realloc(slice, 100);
try testing.expect(slice.len == 100);
if (allocator.resize(slice, 10)) {
slice = slice[0..10];
}
try testing.expect(allocator.resize(slice, 0));
slice = slice[0..0];
// realloc from zero
slice = try allocator.realloc(slice, 100);
try testing.expect(slice.len == 100);
if (allocator.resize(slice, 10)) {
slice = slice[0..10];
}
try testing.expect(allocator.resize(slice, 0));
}
}
pub fn testAllocatorLargeAlignment(base_allocator: mem.Allocator) !void {
var validationAllocator = mem.validationWrap(base_allocator);
const allocator = validationAllocator.allocator();
const large_align: usize = mem.page_size / 2;
var align_mask: usize = undefined;
_ = @shlWithOverflow(usize, ~@as(usize, 0), @as(Allocator.Log2Align, @ctz(large_align)), &align_mask);
var slice = try allocator.alignedAlloc(u8, large_align, 500);
try testing.expect(@ptrToInt(slice.ptr) & align_mask == @ptrToInt(slice.ptr));
if (allocator.resize(slice, 100)) {
slice = slice[0..100];
}
slice = try allocator.realloc(slice, 5000);
try testing.expect(@ptrToInt(slice.ptr) & align_mask == @ptrToInt(slice.ptr));
if (allocator.resize(slice, 10)) {
slice = slice[0..10];
}
slice = try allocator.realloc(slice, 20000);
try testing.expect(@ptrToInt(slice.ptr) & align_mask == @ptrToInt(slice.ptr));
allocator.free(slice);
}
pub fn testAllocatorAlignedShrink(base_allocator: mem.Allocator) !void {
var validationAllocator = mem.validationWrap(base_allocator);
const allocator = validationAllocator.allocator();
var debug_buffer: [1000]u8 = undefined;
var fib = FixedBufferAllocator.init(&debug_buffer);
const debug_allocator = fib.allocator();
const alloc_size = mem.page_size * 2 + 50;
var slice = try allocator.alignedAlloc(u8, 16, alloc_size);
defer allocator.free(slice);
var stuff_to_free = std.ArrayList([]align(16) u8).init(debug_allocator);
// On Windows, VirtualAlloc returns addresses aligned to a 64K boundary,
// which is 16 pages, hence the 32. This test may require increasing
// the size of the allocations feeding the `allocator` parameter if they
// fail, because of the high over-alignment we want to have.
while (@ptrToInt(slice.ptr) == mem.alignForward(@ptrToInt(slice.ptr), mem.page_size * 32)) {
try stuff_to_free.append(slice);
slice = try allocator.alignedAlloc(u8, 16, alloc_size);
}
while (stuff_to_free.popOrNull()) |item| {
allocator.free(item);
}
slice[0] = 0x12;
slice[60] = 0x34;
slice = try allocator.reallocAdvanced(slice, alloc_size / 2, 0);
try testing.expect(slice[0] == 0x12);
try testing.expect(slice[60] == 0x34);
}
test "heap" {
_ = @import("heap/logging_allocator.zig");
_ = @import("heap/log_to_writer_allocator.zig");
}