mirror of
https://github.com/ziglang/zig.git
Merge pull request #5998 from ziglang/general-purpose-allocator
std: introduce GeneralPurposeAllocator
This commit is contained in: commit 069a6f2432
@@ -9357,9 +9357,17 @@ pub fn main() !void {
       is handled correctly? In this case, use {#syntax#}std.testing.FailingAllocator{#endsyntax#}.
     </li>
     <li>
-      Finally, if none of the above apply, you need a general purpose allocator. Zig does not
-      yet have a general purpose allocator in the standard library,
-      <a href="https://github.com/andrewrk/zig-general-purpose-allocator/">but one is being actively developed</a>.
+      Are you writing a test? In this case, use {#syntax#}std.testing.allocator{#endsyntax#}.
+    </li>
+    <li>
+      Finally, if none of the above apply, you need a general purpose allocator.
+      Zig's general purpose allocator is available as a function that takes a {#link|comptime#}
+      {#link|struct#} of configuration options and returns a type.
+      Generally, you will set up one {#syntax#}std.heap.GeneralPurposeAllocator{#endsyntax#} in
+      your main function, and then pass it or sub-allocators around to various parts of your
+      application.
+    </li>
+    <li>
       You can also consider {#link|Implementing an Allocator#}.
     </li>
   </ol>
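A minimal sketch of the setup described in the added documentation text, based on the GeneralPurposeAllocator API and tests introduced later in this commit (the `.{}` configuration literal and the boolean return of `deinit` come from `lib/std/heap/general_purpose_allocator.zig` below):

```zig
const std = @import("std");

pub fn main() !void {
    // One general purpose allocator for the whole program, configured at comptime.
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    // deinit() returns true if any leaks were detected.
    defer _ = gpa.deinit();
    const allocator = &gpa.allocator;

    // Pass `allocator` (or sub-allocators built on top of it) to the rest of
    // the application.
    const buf = try allocator.alloc(u8, 100);
    defer allocator.free(buf);
}
```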
@@ -263,6 +263,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
|
||||
if (better_capacity >= new_capacity) break;
|
||||
}
|
||||
|
||||
// TODO This can be optimized to avoid needlessly copying undefined memory.
|
||||
const new_memory = try self.allocator.reallocAtLeast(self.allocatedSlice(), better_capacity);
|
||||
self.items.ptr = new_memory.ptr;
|
||||
self.capacity = new_memory.len;
|
||||
|
||||
@ -22,7 +22,7 @@ pub fn Queue(comptime T: type) type {
|
||||
return Self{
|
||||
.head = null,
|
||||
.tail = null,
|
||||
.mutex = std.Mutex.init(),
|
||||
.mutex = std.Mutex{},
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
@ -19,9 +19,6 @@ const windows = std.os.windows;
|
||||
|
||||
pub const leb = @import("debug/leb128.zig");
|
||||
|
||||
pub const global_allocator = @compileError("Please switch to std.testing.allocator.");
|
||||
pub const failing_allocator = @compileError("Please switch to std.testing.failing_allocator.");
|
||||
|
||||
pub const runtime_safety = switch (builtin.mode) {
|
||||
.Debug, .ReleaseSafe => true,
|
||||
.ReleaseFast, .ReleaseSmall => false,
|
||||
@ -50,7 +47,7 @@ pub const LineInfo = struct {
|
||||
}
|
||||
};
|
||||
|
||||
var stderr_mutex = std.Mutex.init();
|
||||
var stderr_mutex = std.Mutex{};
|
||||
|
||||
/// Deprecated. Use `std.log` functions for logging or `std.debug.print` for
|
||||
/// "printf debugging".
|
||||
@ -235,7 +232,7 @@ pub fn panic(comptime format: []const u8, args: anytype) noreturn {
|
||||
var panicking: u8 = 0;
|
||||
|
||||
// Locked to avoid interleaving panic messages from multiple threads.
|
||||
var panic_mutex = std.Mutex.init();
|
||||
var panic_mutex = std.Mutex{};
|
||||
|
||||
/// Counts how many times the panic handler is invoked by this thread.
|
||||
/// This is used to catch and handle panics triggered by the panic handler.
|
||||
|
||||
140 lib/std/heap.zig
@@ -12,6 +12,7 @@ const maxInt = std.math.maxInt;
|
||||
pub const LoggingAllocator = @import("heap/logging_allocator.zig").LoggingAllocator;
|
||||
pub const loggingAllocator = @import("heap/logging_allocator.zig").loggingAllocator;
|
||||
pub const ArenaAllocator = @import("heap/arena_allocator.zig").ArenaAllocator;
|
||||
pub const GeneralPurposeAllocator = @import("heap/general_purpose_allocator.zig").GeneralPurposeAllocator;
|
||||
|
||||
const Allocator = mem.Allocator;
|
||||
|
||||
@ -36,7 +37,7 @@ var c_allocator_state = Allocator{
|
||||
.resizeFn = cResize,
|
||||
};
|
||||
|
||||
fn cAlloc(self: *Allocator, len: usize, ptr_align: u29, len_align: u29) Allocator.Error![]u8 {
|
||||
fn cAlloc(self: *Allocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Allocator.Error![]u8 {
|
||||
assert(ptr_align <= @alignOf(c_longdouble));
|
||||
const ptr = @ptrCast([*]u8, c.malloc(len) orelse return error.OutOfMemory);
|
||||
if (len_align == 0) {
|
||||
@ -53,7 +54,14 @@ fn cAlloc(self: *Allocator, len: usize, ptr_align: u29, len_align: u29) Allocato
|
||||
return ptr[0..mem.alignBackwardAnyAlign(full_len, len_align)];
|
||||
}
|
||||
|
||||
fn cResize(self: *Allocator, buf: []u8, new_len: usize, len_align: u29) Allocator.Error!usize {
|
||||
fn cResize(
|
||||
self: *Allocator,
|
||||
buf: []u8,
|
||||
old_align: u29,
|
||||
new_len: usize,
|
||||
len_align: u29,
|
||||
ret_addr: usize,
|
||||
) Allocator.Error!usize {
|
||||
if (new_len == 0) {
|
||||
c.free(buf.ptr);
|
||||
return 0;
|
||||
@ -88,8 +96,6 @@ var wasm_page_allocator_state = Allocator{
|
||||
.resizeFn = WasmPageAllocator.resize,
|
||||
};
|
||||
|
||||
pub const direct_allocator = @compileError("deprecated; use std.heap.page_allocator");
|
||||
|
||||
/// Verifies that the adjusted length will still map to the full length
|
||||
pub fn alignPageAllocLen(full_len: usize, len: usize, len_align: u29) usize {
|
||||
const aligned_len = mem.alignAllocLen(full_len, len, len_align);
|
||||
@ -97,10 +103,13 @@ pub fn alignPageAllocLen(full_len: usize, len: usize, len_align: u29) usize {
|
||||
return aligned_len;
|
||||
}
|
||||
|
||||
/// TODO Utilize this on Windows.
|
||||
pub var next_mmap_addr_hint: ?[*]align(mem.page_size) u8 = null;
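// Note: PageAllocator.alloc below passes this hint to os.mmap and advances it
// past each successful allocation (via the @cmpxchgStrong further down), so
// fresh mappings tend to come from previously unused address ranges rather
// than re-using freed ones.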
|
||||
|
||||
const PageAllocator = struct {
|
||||
fn alloc(allocator: *Allocator, n: usize, alignment: u29, len_align: u29) error{OutOfMemory}![]u8 {
|
||||
fn alloc(allocator: *Allocator, n: usize, alignment: u29, len_align: u29, ra: usize) error{OutOfMemory}![]u8 {
|
||||
assert(n > 0);
|
||||
const alignedLen = mem.alignForward(n, mem.page_size);
|
||||
const aligned_len = mem.alignForward(n, mem.page_size);
|
||||
|
||||
if (builtin.os.tag == .windows) {
|
||||
const w = os.windows;
|
||||
@ -112,14 +121,14 @@ const PageAllocator = struct {
|
||||
// see https://devblogs.microsoft.com/oldnewthing/?p=42223
|
||||
const addr = w.VirtualAlloc(
|
||||
null,
|
||||
alignedLen,
|
||||
aligned_len,
|
||||
w.MEM_COMMIT | w.MEM_RESERVE,
|
||||
w.PAGE_READWRITE,
|
||||
) catch return error.OutOfMemory;
|
||||
|
||||
// If the allocation is sufficiently aligned, use it.
|
||||
if (@ptrToInt(addr) & (alignment - 1) == 0) {
|
||||
return @ptrCast([*]u8, addr)[0..alignPageAllocLen(alignedLen, n, len_align)];
|
||||
return @ptrCast([*]u8, addr)[0..alignPageAllocLen(aligned_len, n, len_align)];
|
||||
}
|
||||
|
||||
// If it wasn't, actually do an explicitly aligned allocation.
|
||||
@ -146,20 +155,24 @@ const PageAllocator = struct {
|
||||
// until it succeeds.
|
||||
const ptr = w.VirtualAlloc(
|
||||
@intToPtr(*c_void, aligned_addr),
|
||||
alignedLen,
|
||||
aligned_len,
|
||||
w.MEM_COMMIT | w.MEM_RESERVE,
|
||||
w.PAGE_READWRITE,
|
||||
) catch continue;
|
||||
|
||||
return @ptrCast([*]u8, ptr)[0..alignPageAllocLen(alignedLen, n, len_align)];
|
||||
return @ptrCast([*]u8, ptr)[0..alignPageAllocLen(aligned_len, n, len_align)];
|
||||
}
|
||||
}
|
||||
|
||||
const maxDropLen = alignment - std.math.min(alignment, mem.page_size);
|
||||
const allocLen = if (maxDropLen <= alignedLen - n) alignedLen else mem.alignForward(alignedLen + maxDropLen, mem.page_size);
|
||||
const max_drop_len = alignment - std.math.min(alignment, mem.page_size);
|
||||
const alloc_len = if (max_drop_len <= aligned_len - n)
|
||||
aligned_len
|
||||
else
|
||||
mem.alignForward(aligned_len + max_drop_len, mem.page_size);
|
||||
const hint = @atomicLoad(@TypeOf(next_mmap_addr_hint), &next_mmap_addr_hint, .Unordered);
|
||||
const slice = os.mmap(
|
||||
null,
|
||||
allocLen,
|
||||
hint,
|
||||
alloc_len,
|
||||
os.PROT_READ | os.PROT_WRITE,
|
||||
os.MAP_PRIVATE | os.MAP_ANONYMOUS,
|
||||
-1,
|
||||
@ -168,25 +181,36 @@ const PageAllocator = struct {
|
||||
assert(mem.isAligned(@ptrToInt(slice.ptr), mem.page_size));
|
||||
|
||||
const aligned_addr = mem.alignForward(@ptrToInt(slice.ptr), alignment);
|
||||
const result_ptr = @alignCast(mem.page_size, @intToPtr([*]u8, aligned_addr));
|
||||
|
||||
// Unmap the extra bytes that were only requested in order to guarantee
|
||||
// that the range of memory we were provided had a proper alignment in
|
||||
// it somewhere. The extra bytes could be at the beginning, or end, or both.
|
||||
const dropLen = aligned_addr - @ptrToInt(slice.ptr);
|
||||
if (dropLen != 0) {
|
||||
os.munmap(slice[0..dropLen]);
|
||||
const drop_len = aligned_addr - @ptrToInt(slice.ptr);
|
||||
if (drop_len != 0) {
|
||||
os.munmap(slice[0..drop_len]);
|
||||
}
|
||||
|
||||
// Unmap extra pages
|
||||
const alignedBufferLen = allocLen - dropLen;
|
||||
if (alignedBufferLen > alignedLen) {
|
||||
os.munmap(@alignCast(mem.page_size, @intToPtr([*]u8, aligned_addr))[alignedLen..alignedBufferLen]);
|
||||
const aligned_buffer_len = alloc_len - drop_len;
|
||||
if (aligned_buffer_len > aligned_len) {
|
||||
os.munmap(result_ptr[aligned_len..aligned_buffer_len]);
|
||||
}
|
||||
|
||||
return @intToPtr([*]u8, aligned_addr)[0..alignPageAllocLen(alignedLen, n, len_align)];
|
||||
const new_hint = @alignCast(mem.page_size, result_ptr + aligned_len);
|
||||
_ = @cmpxchgStrong(@TypeOf(next_mmap_addr_hint), &next_mmap_addr_hint, hint, new_hint, .Monotonic, .Monotonic);
|
||||
|
||||
return result_ptr[0..alignPageAllocLen(aligned_len, n, len_align)];
|
||||
}
|
||||
|
||||
fn resize(allocator: *Allocator, buf_unaligned: []u8, new_size: usize, len_align: u29) Allocator.Error!usize {
|
||||
fn resize(
|
||||
allocator: *Allocator,
|
||||
buf_unaligned: []u8,
|
||||
buf_align: u29,
|
||||
new_size: usize,
|
||||
len_align: u29,
|
||||
return_address: usize,
|
||||
) Allocator.Error!usize {
|
||||
const new_size_aligned = mem.alignForward(new_size, mem.page_size);
|
||||
|
||||
if (builtin.os.tag == .windows) {
|
||||
@ -201,7 +225,7 @@ const PageAllocator = struct {
|
||||
w.VirtualFree(buf_unaligned.ptr, 0, w.MEM_RELEASE);
|
||||
return 0;
|
||||
}
|
||||
if (new_size < buf_unaligned.len) {
|
||||
if (new_size <= buf_unaligned.len) {
|
||||
const base_addr = @ptrToInt(buf_unaligned.ptr);
|
||||
const old_addr_end = base_addr + buf_unaligned.len;
|
||||
const new_addr_end = mem.alignForward(base_addr + new_size, mem.page_size);
|
||||
@ -216,10 +240,10 @@ const PageAllocator = struct {
|
||||
}
|
||||
return alignPageAllocLen(new_size_aligned, new_size, len_align);
|
||||
}
|
||||
if (new_size == buf_unaligned.len) {
|
||||
const old_size_aligned = mem.alignForward(buf_unaligned.len, mem.page_size);
|
||||
if (new_size_aligned <= old_size_aligned) {
|
||||
return alignPageAllocLen(new_size_aligned, new_size, len_align);
|
||||
}
|
||||
// new_size > buf_unaligned.len not implemented
|
||||
return error.OutOfMemory;
|
||||
}
|
||||
|
||||
@ -229,6 +253,7 @@ const PageAllocator = struct {
|
||||
|
||||
if (new_size_aligned < buf_aligned_len) {
|
||||
const ptr = @intToPtr([*]align(mem.page_size) u8, @ptrToInt(buf_unaligned.ptr) + new_size_aligned);
|
||||
// TODO: if the next_mmap_addr_hint is within the unmapped range, update it
|
||||
os.munmap(ptr[0 .. buf_aligned_len - new_size_aligned]);
|
||||
if (new_size_aligned == 0)
|
||||
return 0;
|
||||
@ -236,6 +261,7 @@ const PageAllocator = struct {
|
||||
}
|
||||
|
||||
// TODO: call mremap
|
||||
// TODO: if the next_mmap_addr_hint is within the remapped range, update it
|
||||
return error.OutOfMemory;
|
||||
}
|
||||
};
|
||||
@ -332,7 +358,7 @@ const WasmPageAllocator = struct {
|
||||
return mem.alignForward(memsize, mem.page_size) / mem.page_size;
|
||||
}
|
||||
|
||||
fn alloc(allocator: *Allocator, len: usize, alignment: u29, len_align: u29) error{OutOfMemory}![]u8 {
|
||||
fn alloc(allocator: *Allocator, len: usize, alignment: u29, len_align: u29, ra: usize) error{OutOfMemory}![]u8 {
|
||||
const page_count = nPages(len);
|
||||
const page_idx = try allocPages(page_count, alignment);
|
||||
return @intToPtr([*]u8, page_idx * mem.page_size)[0..alignPageAllocLen(page_count * mem.page_size, len, len_align)];
|
||||
@ -385,7 +411,14 @@ const WasmPageAllocator = struct {
|
||||
}
|
||||
}
|
||||
|
||||
fn resize(allocator: *Allocator, buf: []u8, new_len: usize, len_align: u29) error{OutOfMemory}!usize {
|
||||
fn resize(
|
||||
allocator: *Allocator,
|
||||
buf: []u8,
|
||||
buf_align: u29,
|
||||
new_len: usize,
|
||||
len_align: u29,
|
||||
return_address: usize,
|
||||
) error{OutOfMemory}!usize {
|
||||
const aligned_len = mem.alignForward(buf.len, mem.page_size);
|
||||
if (new_len > aligned_len) return error.OutOfMemory;
|
||||
const current_n = nPages(aligned_len);
|
||||
@ -425,7 +458,13 @@ pub const HeapAllocator = switch (builtin.os.tag) {
|
||||
return @intToPtr(*align(1) usize, @ptrToInt(buf.ptr) + buf.len);
|
||||
}
|
||||
|
||||
fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29) error{OutOfMemory}![]u8 {
|
||||
fn alloc(
|
||||
allocator: *Allocator,
|
||||
n: usize,
|
||||
ptr_align: u29,
|
||||
len_align: u29,
|
||||
return_address: usize,
|
||||
) error{OutOfMemory}![]u8 {
|
||||
const self = @fieldParentPtr(HeapAllocator, "allocator", allocator);
|
||||
|
||||
const amt = n + ptr_align - 1 + @sizeOf(usize);
|
||||
@ -452,7 +491,14 @@ pub const HeapAllocator = switch (builtin.os.tag) {
|
||||
return buf;
|
||||
}
|
||||
|
||||
fn resize(allocator: *Allocator, buf: []u8, new_size: usize, len_align: u29) error{OutOfMemory}!usize {
|
||||
fn resize(
|
||||
allocator: *Allocator,
|
||||
buf: []u8,
|
||||
buf_align: u29,
|
||||
new_size: usize,
|
||||
len_align: u29,
|
||||
return_address: usize,
|
||||
) error{OutOfMemory}!usize {
|
||||
const self = @fieldParentPtr(HeapAllocator, "allocator", allocator);
|
||||
if (new_size == 0) {
|
||||
os.windows.HeapFree(self.heap_handle.?, 0, @intToPtr(*c_void, getRecordPtr(buf).*));
|
||||
@ -524,7 +570,7 @@ pub const FixedBufferAllocator = struct {
|
||||
return buf.ptr + buf.len == self.buffer.ptr + self.end_index;
|
||||
}
|
||||
|
||||
fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29) ![]u8 {
|
||||
fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 {
|
||||
const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
|
||||
const aligned_addr = mem.alignForward(@ptrToInt(self.buffer.ptr) + self.end_index, ptr_align);
|
||||
const adjusted_index = aligned_addr - @ptrToInt(self.buffer.ptr);
|
||||
@ -538,7 +584,14 @@ pub const FixedBufferAllocator = struct {
|
||||
return result;
|
||||
}
|
||||
|
||||
fn resize(allocator: *Allocator, buf: []u8, new_size: usize, len_align: u29) Allocator.Error!usize {
|
||||
fn resize(
|
||||
allocator: *Allocator,
|
||||
buf: []u8,
|
||||
buf_align: u29,
|
||||
new_size: usize,
|
||||
len_align: u29,
|
||||
return_address: usize,
|
||||
) Allocator.Error!usize {
|
||||
const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
|
||||
assert(self.ownsSlice(buf)); // sanity check
|
||||
|
||||
@ -588,7 +641,7 @@ pub const ThreadSafeFixedBufferAllocator = blk: {
|
||||
};
|
||||
}
|
||||
|
||||
fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29) ![]u8 {
|
||||
fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 {
|
||||
const self = @fieldParentPtr(ThreadSafeFixedBufferAllocator, "allocator", allocator);
|
||||
var end_index = @atomicLoad(usize, &self.end_index, builtin.AtomicOrder.SeqCst);
|
||||
while (true) {
|
||||
@ -636,18 +689,31 @@ pub fn StackFallbackAllocator(comptime size: usize) type {
|
||||
return &self.allocator;
|
||||
}
|
||||
|
||||
fn alloc(allocator: *Allocator, len: usize, ptr_align: u29, len_align: u29) error{OutOfMemory}![*]u8 {
|
||||
fn alloc(
|
||||
allocator: *Allocator,
|
||||
len: usize,
|
||||
ptr_align: u29,
|
||||
len_align: u29,
|
||||
return_address: usize,
|
||||
) error{OutOfMemory}![*]u8 {
|
||||
const self = @fieldParentPtr(Self, "allocator", allocator);
|
||||
return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator, len, ptr_align) catch
|
||||
return fallback_allocator.alloc(len, ptr_align);
|
||||
}
|
||||
|
||||
fn resize(self: *Allocator, buf: []u8, new_len: usize, len_align: u29) error{OutOfMemory}!void {
|
||||
fn resize(
|
||||
self: *Allocator,
|
||||
buf: []u8,
|
||||
buf_align: u29,
|
||||
new_len: usize,
|
||||
len_align: u29,
|
||||
return_address: usize,
|
||||
) error{OutOfMemory}!void {
|
||||
const self = @fieldParentPtr(Self, "allocator", allocator);
|
||||
if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) {
|
||||
try self.fixed_buffer_allocator.callResizeFn(buf, new_len);
|
||||
try self.fixed_buffer_allocator.resize(buf, new_len);
|
||||
} else {
|
||||
try self.fallback_allocator.callResizeFn(buf, new_len);
|
||||
try self.fallback_allocator.resize(buf, new_len);
|
||||
}
|
||||
}
|
||||
};
|
||||
@ -932,7 +998,7 @@ pub fn testAllocatorAlignedShrink(base_allocator: *mem.Allocator) mem.Allocator.
|
||||
slice[60] = 0x34;
|
||||
|
||||
// realloc to a smaller size but with a larger alignment
|
||||
slice = try allocator.alignedRealloc(slice, mem.page_size * 32, alloc_size / 2);
|
||||
slice = try allocator.reallocAdvanced(slice, mem.page_size * 32, alloc_size / 2, .exact);
|
||||
testing.expect(slice[0] == 0x12);
|
||||
testing.expect(slice[60] == 0x34);
|
||||
}
|
||||
|
||||
@ -49,7 +49,7 @@ pub const ArenaAllocator = struct {
|
||||
const actual_min_size = minimum_size + (@sizeOf(BufNode) + 16);
|
||||
const big_enough_len = prev_len + actual_min_size;
|
||||
const len = big_enough_len + big_enough_len / 2;
|
||||
const buf = try self.child_allocator.callAllocFn(len, @alignOf(BufNode), 1);
|
||||
const buf = try self.child_allocator.allocFn(self.child_allocator, len, @alignOf(BufNode), 1, @returnAddress());
|
||||
const buf_node = @ptrCast(*BufNode, @alignCast(@alignOf(BufNode), buf.ptr));
|
||||
buf_node.* = BufNode{
|
||||
.data = buf,
|
||||
@ -60,7 +60,7 @@ pub const ArenaAllocator = struct {
|
||||
return buf_node;
|
||||
}
|
||||
|
||||
fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29) ![]u8 {
|
||||
fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 {
|
||||
const self = @fieldParentPtr(ArenaAllocator, "allocator", allocator);
|
||||
|
||||
var cur_node = if (self.state.buffer_list.first) |first_node| first_node else try self.createNode(0, n + ptr_align);
|
||||
|
||||
921 lib/std/heap/general_purpose_allocator.zig Normal file
@@ -0,0 +1,921 @@
|
||||
//! # General Purpose Allocator
|
||||
//!
|
||||
//! ## Design Priorities
|
||||
//!
|
||||
//! ### `OptimizationMode.debug` and `OptimizationMode.release_safe`:
|
||||
//!
|
||||
//! * Detect double free, and print stack trace of:
|
||||
//! - Where it was first allocated
|
||||
//! - Where it was freed the first time
|
||||
//! - Where it was freed the second time
|
||||
//!
|
||||
//! * Detect leaks and print stack trace of:
|
||||
//! - Where it was allocated
|
||||
//!
|
||||
//! * When a page of memory is no longer needed, return it to the OS (remove it
//!   from resident memory) as soon as possible, so that accessing it afterwards
//!   causes page faults.
|
||||
//!
|
||||
//! * Do not re-use memory slots, so that memory safety is upheld. For small
|
||||
//! allocations, this is handled here; for larger ones it is handled in the
|
||||
//! backing allocator (by default `std.heap.page_allocator`).
|
||||
//!
|
||||
//! * Make pointer math errors unlikely to harm memory from
|
||||
//! unrelated allocations.
|
||||
//!
|
||||
//! * It's OK for these mechanisms to cost some extra overhead bytes.
|
||||
//!
|
||||
//! * It's OK for these mechanisms to have a performance cost.
|
||||
//!
|
||||
//! * Rogue memory writes should not harm the allocator's state.
|
||||
//!
|
||||
//! * Cross platform. Operates based on a backing allocator which makes it work
|
||||
//! everywhere, even freestanding.
|
||||
//!
|
||||
//! * Compile-time configuration.
|
||||
//!
|
||||
//! ### `OptimizationMode.release_fast` (note: not much work has gone into this use case yet):
|
||||
//!
|
||||
//! * Low fragmentation is the primary concern.
//! * Performance of worst-case latency is the secondary concern.
//! * Performance of average-case latency comes next.
//! * Finally, having freed memory unmapped, and making pointer math errors
//!   unlikely to harm memory from unrelated allocations, are nice-to-haves.
|
||||
//!
|
||||
//! ### `OptimizationMode.release_small` (note: not much work has gone into this use case yet):
|
||||
//!
|
||||
//! * Small binary code size of the executable is the primary concern.
|
||||
//! * Next, defer to the `.release_fast` priority list.
|
||||
//!
|
||||
//! ## Basic Design:
|
||||
//!
|
||||
//! Small allocations are divided into buckets:
|
||||
//!
|
||||
//! ```
|
||||
//! index obj_size
|
||||
//! 0 1
|
||||
//! 1 2
|
||||
//! 2 4
|
||||
//! 3 8
|
||||
//! 4 16
|
||||
//! 5 32
|
||||
//! 6 64
|
||||
//! 7 128
|
||||
//! 8 256
|
||||
//! 9 512
|
||||
//! 10 1024
|
||||
//! 11 2048
|
||||
//! ```
|
||||
//!
|
||||
//! The main allocator state has an array of all the "current" buckets for each
|
||||
//! size class. Each slot in the array can be null, meaning the bucket for that
|
||||
//! size class is not allocated. When the first object is allocated for a given
|
||||
//! size class, it allocates 1 page of memory from the OS. This page is
|
||||
//! divided into "slots" - one per allocated object. Along with the page of memory
|
||||
//! for object slots, as many pages as necessary are allocated to store the
|
||||
//! BucketHeader, followed by "used bits", and two stack traces for each slot
|
||||
//! (allocation trace and free trace).
|
||||
//!
|
||||
//! The "used bits" are 1 bit per slot representing whether the slot is used.
|
||||
//! Allocations use the data to iterate to find a free slot. Frees assert that the
|
||||
//! corresponding bit is 1 and set it to 0.
|
||||
//!
|
||||
//! Buckets have prev and next pointers. When there is only one bucket for a given
|
||||
//! size class, both prev and next point to itself. When all slots of a bucket are
|
||||
//! used, a new bucket is allocated, and enters the doubly linked list. The main
|
||||
//! allocator state tracks the "current" bucket for each size class. Leak detection
|
||||
//! currently only checks the current bucket.
|
||||
//!
|
||||
//! Resizing detects if the size class is unchanged or smaller, in which case the same
|
||||
//! pointer is returned unmodified. If a larger size class is required,
|
||||
//! `error.OutOfMemory` is returned.
|
||||
//!
|
||||
//! Large objects are allocated directly using the backing allocator and their metadata is stored
|
||||
//! in a `std.HashMap` using the backing allocator.
|
||||
|
||||
const std = @import("std");
|
||||
const math = std.math;
|
||||
const assert = std.debug.assert;
|
||||
const mem = std.mem;
|
||||
const Allocator = std.mem.Allocator;
|
||||
const page_size = std.mem.page_size;
|
||||
const StackTrace = std.builtin.StackTrace;
|
||||
|
||||
/// Integer type for pointing to slots in a small allocation
|
||||
const SlotIndex = std.meta.Int(false, math.log2(page_size) + 1);
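// With 4 KiB pages this is a 13-bit unsigned integer (log2(4096) + 1 bits):
// wide enough to index any of the up to 4096 one-byte slots in a page and to
// hold the one-past-the-end value that `alloc_cursor` can reach.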
|
||||
|
||||
const sys_can_stack_trace = switch (std.Target.current.cpu.arch) {
|
||||
// Observed to go into an infinite loop.
|
||||
// TODO: Make this work.
|
||||
.mips,
|
||||
.mipsel,
|
||||
=> false,
|
||||
|
||||
// `@returnAddress()` in LLVM 10 gives
|
||||
// "Non-Emscripten WebAssembly hasn't implemented __builtin_return_address".
|
||||
.wasm32,
|
||||
.wasm64,
|
||||
=> std.Target.current.os.tag == .emscripten,
|
||||
|
||||
else => true,
|
||||
};
|
||||
const default_sys_stack_trace_frames: usize = if (sys_can_stack_trace) 4 else 0;
|
||||
const default_stack_trace_frames: usize = switch (std.builtin.mode) {
|
||||
.Debug => default_sys_stack_trace_frames,
|
||||
else => 0,
|
||||
};
|
||||
|
||||
pub const Config = struct {
|
||||
/// Number of stack frames to capture.
|
||||
stack_trace_frames: usize = default_stack_trace_frames,
|
||||
|
||||
/// If true, the allocator will have two fields:
|
||||
/// * `total_requested_bytes` which tracks the total allocated bytes of memory requested.
|
||||
/// * `requested_memory_limit` which causes allocations to return `error.OutOfMemory`
|
||||
/// when the `total_requested_bytes` exceeds this limit.
|
||||
/// If false, these fields will be `void`.
|
||||
enable_memory_limit: bool = false,
|
||||
|
||||
/// Whether to enable safety checks.
|
||||
safety: bool = std.debug.runtime_safety,
|
||||
|
||||
/// Whether the allocator may be used simultaneously from multiple threads.
|
||||
thread_safe: bool = !std.builtin.single_threaded,
|
||||
};
|
||||
|
||||
pub fn GeneralPurposeAllocator(comptime config: Config) type {
|
||||
return struct {
|
||||
allocator: Allocator = Allocator{
|
||||
.allocFn = alloc,
|
||||
.resizeFn = resize,
|
||||
},
|
||||
backing_allocator: *Allocator = std.heap.page_allocator,
|
||||
buckets: [small_bucket_count]?*BucketHeader = [1]?*BucketHeader{null} ** small_bucket_count,
|
||||
large_allocations: LargeAllocTable = .{},
|
||||
|
||||
total_requested_bytes: @TypeOf(total_requested_bytes_init) = total_requested_bytes_init,
|
||||
requested_memory_limit: @TypeOf(requested_memory_limit_init) = requested_memory_limit_init,
|
||||
|
||||
mutex: @TypeOf(mutex_init) = mutex_init,
|
||||
|
||||
const Self = @This();
|
||||
|
||||
const total_requested_bytes_init = if (config.enable_memory_limit) @as(usize, 0) else {};
|
||||
const requested_memory_limit_init = if (config.enable_memory_limit) @as(usize, math.maxInt(usize)) else {};
|
||||
|
||||
const mutex_init = if (config.thread_safe) std.Mutex{} else std.mutex.Dummy{};
|
||||
|
||||
const stack_n = config.stack_trace_frames;
|
||||
const one_trace_size = @sizeOf(usize) * stack_n;
|
||||
const traces_per_slot = 2;
|
||||
|
||||
pub const Error = mem.Allocator.Error;
|
||||
|
||||
const small_bucket_count = math.log2(page_size);
|
||||
const largest_bucket_object_size = 1 << (small_bucket_count - 1);
|
||||
|
||||
const LargeAlloc = struct {
|
||||
bytes: []u8,
|
||||
stack_addresses: [stack_n]usize,
|
||||
|
||||
fn dumpStackTrace(self: *LargeAlloc) void {
|
||||
var len: usize = 0;
|
||||
while (len < stack_n and self.stack_addresses[len] != 0) {
|
||||
len += 1;
|
||||
}
|
||||
const stack_trace = StackTrace{
|
||||
.instruction_addresses = &self.stack_addresses,
|
||||
.index = len,
|
||||
};
|
||||
std.debug.dumpStackTrace(stack_trace);
|
||||
}
|
||||
};
|
||||
const LargeAllocTable = std.AutoHashMapUnmanaged(usize, LargeAlloc);
|
||||
|
||||
// Bucket: In memory, in order:
|
||||
// * BucketHeader
|
||||
// * bucket_used_bits: [N]u8, // 1 bit for every slot; 1 byte for every 8 slots
|
||||
// * stack_trace_addresses: [N]usize, // traces_per_slot for every allocation
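// For example (illustrative, with 4 KiB pages and the Debug default of
// stack_trace_frames = 4): size_class = 64 gives slot_count = 4096 / 64 = 64
// slots, so the header is followed by 64 / 8 = 8 bytes of used bits and then
// 64 slots * 2 traces * 4 frames * @sizeOf(usize) bytes of stack trace
// addresses, which is exactly what bucketSize() computes below.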
|
||||
|
||||
const BucketHeader = struct {
|
||||
prev: *BucketHeader,
|
||||
next: *BucketHeader,
|
||||
page: [*]align(page_size) u8,
|
||||
alloc_cursor: SlotIndex,
|
||||
used_count: SlotIndex,
|
||||
|
||||
fn usedBits(bucket: *BucketHeader, index: usize) *u8 {
|
||||
return @intToPtr(*u8, @ptrToInt(bucket) + @sizeOf(BucketHeader) + index);
|
||||
}
|
||||
|
||||
fn stackTracePtr(
|
||||
bucket: *BucketHeader,
|
||||
size_class: usize,
|
||||
slot_index: SlotIndex,
|
||||
trace_kind: TraceKind,
|
||||
) *[stack_n]usize {
|
||||
const start_ptr = @ptrCast([*]u8, bucket) + bucketStackFramesStart(size_class);
|
||||
const addr = start_ptr + one_trace_size * traces_per_slot * slot_index +
|
||||
@enumToInt(trace_kind) * @as(usize, one_trace_size);
|
||||
return @ptrCast(*[stack_n]usize, @alignCast(@alignOf(usize), addr));
|
||||
}
|
||||
|
||||
fn captureStackTrace(
|
||||
bucket: *BucketHeader,
|
||||
ret_addr: usize,
|
||||
size_class: usize,
|
||||
slot_index: SlotIndex,
|
||||
trace_kind: TraceKind,
|
||||
) void {
|
||||
// Initialize them to 0. When determining the count we must look
|
||||
// for non-zero addresses.
|
||||
const stack_addresses = bucket.stackTracePtr(size_class, slot_index, trace_kind);
|
||||
collectStackTrace(ret_addr, stack_addresses);
|
||||
}
|
||||
};
|
||||
|
||||
fn bucketStackTrace(
|
||||
bucket: *BucketHeader,
|
||||
size_class: usize,
|
||||
slot_index: SlotIndex,
|
||||
trace_kind: TraceKind,
|
||||
) StackTrace {
|
||||
const stack_addresses = bucket.stackTracePtr(size_class, slot_index, trace_kind);
|
||||
var len: usize = 0;
|
||||
while (len < stack_n and stack_addresses[len] != 0) {
|
||||
len += 1;
|
||||
}
|
||||
return StackTrace{
|
||||
.instruction_addresses = stack_addresses,
|
||||
.index = len,
|
||||
};
|
||||
}
|
||||
|
||||
fn bucketStackFramesStart(size_class: usize) usize {
|
||||
return mem.alignForward(
|
||||
@sizeOf(BucketHeader) + usedBitsCount(size_class),
|
||||
@alignOf(usize),
|
||||
);
|
||||
}
|
||||
|
||||
fn bucketSize(size_class: usize) usize {
|
||||
const slot_count = @divExact(page_size, size_class);
|
||||
return bucketStackFramesStart(size_class) + one_trace_size * traces_per_slot * slot_count;
|
||||
}
|
||||
|
||||
fn usedBitsCount(size_class: usize) usize {
|
||||
const slot_count = @divExact(page_size, size_class);
|
||||
if (slot_count < 8) return 1;
|
||||
return @divExact(slot_count, 8);
|
||||
}
|
||||
|
||||
fn detectLeaksInBucket(
|
||||
bucket: *BucketHeader,
|
||||
size_class: usize,
|
||||
used_bits_count: usize,
|
||||
) bool {
|
||||
var leaks = false;
|
||||
var used_bits_byte: usize = 0;
|
||||
while (used_bits_byte < used_bits_count) : (used_bits_byte += 1) {
|
||||
const used_byte = bucket.usedBits(used_bits_byte).*;
|
||||
if (used_byte != 0) {
|
||||
var bit_index: u3 = 0;
|
||||
while (true) : (bit_index += 1) {
|
||||
const is_used = @truncate(u1, used_byte >> bit_index) != 0;
|
||||
if (is_used) {
|
||||
std.debug.print("\nMemory leak detected:\n", .{});
|
||||
const slot_index = @intCast(SlotIndex, used_bits_byte * 8 + bit_index);
|
||||
const stack_trace = bucketStackTrace(
|
||||
bucket,
|
||||
size_class,
|
||||
slot_index,
|
||||
.alloc,
|
||||
);
|
||||
std.debug.dumpStackTrace(stack_trace);
|
||||
leaks = true;
|
||||
}
|
||||
if (bit_index == math.maxInt(u3))
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
return leaks;
|
||||
}
|
||||
|
||||
/// Returns whether there were leaks.
|
||||
pub fn deinit(self: *Self) bool {
|
||||
var leaks = false;
|
||||
for (self.buckets) |optional_bucket, bucket_i| {
|
||||
const first_bucket = optional_bucket orelse continue;
|
||||
const size_class = @as(usize, 1) << @intCast(math.Log2Int(usize), bucket_i);
|
||||
const used_bits_count = usedBitsCount(size_class);
|
||||
var bucket = first_bucket;
|
||||
while (true) {
|
||||
leaks = detectLeaksInBucket(bucket, size_class, used_bits_count) or leaks;
|
||||
bucket = bucket.next;
|
||||
if (bucket == first_bucket)
|
||||
break;
|
||||
}
|
||||
}
|
||||
for (self.large_allocations.items()) |*large_alloc| {
|
||||
std.debug.print("\nMemory leak detected (0x{x}):\n", .{@ptrToInt(large_alloc.value.bytes.ptr)});
|
||||
large_alloc.value.dumpStackTrace();
|
||||
leaks = true;
|
||||
}
|
||||
self.large_allocations.deinit(self.backing_allocator);
|
||||
self.* = undefined;
|
||||
return leaks;
|
||||
}
|
||||
|
||||
fn collectStackTrace(first_trace_addr: usize, addresses: *[stack_n]usize) void {
|
||||
if (stack_n == 0) return;
|
||||
mem.set(usize, addresses, 0);
|
||||
var stack_trace = StackTrace{
|
||||
.instruction_addresses = addresses,
|
||||
.index = 0,
|
||||
};
|
||||
std.debug.captureStackTrace(first_trace_addr, &stack_trace);
|
||||
}
|
||||
|
||||
fn allocSlot(self: *Self, size_class: usize, trace_addr: usize) Error![*]u8 {
|
||||
const bucket_index = math.log2(size_class);
|
||||
const first_bucket = self.buckets[bucket_index] orelse try self.createBucket(
|
||||
size_class,
|
||||
bucket_index,
|
||||
);
|
||||
var bucket = first_bucket;
|
||||
const slot_count = @divExact(page_size, size_class);
|
||||
while (bucket.alloc_cursor == slot_count) {
|
||||
const prev_bucket = bucket;
|
||||
bucket = prev_bucket.next;
|
||||
if (bucket == first_bucket) {
|
||||
// make a new one
|
||||
bucket = try self.createBucket(size_class, bucket_index);
|
||||
bucket.prev = prev_bucket;
|
||||
bucket.next = prev_bucket.next;
|
||||
prev_bucket.next = bucket;
|
||||
bucket.next.prev = bucket;
|
||||
}
|
||||
}
|
||||
// change the allocator's current bucket to be this one
|
||||
self.buckets[bucket_index] = bucket;
|
||||
|
||||
const slot_index = bucket.alloc_cursor;
|
||||
bucket.alloc_cursor += 1;
|
||||
|
||||
var used_bits_byte = bucket.usedBits(slot_index / 8);
|
||||
const used_bit_index: u3 = @intCast(u3, slot_index % 8); // TODO cast should be unnecessary
|
||||
used_bits_byte.* |= (@as(u8, 1) << used_bit_index);
|
||||
bucket.used_count += 1;
|
||||
bucket.captureStackTrace(trace_addr, size_class, slot_index, .alloc);
|
||||
return bucket.page + slot_index * size_class;
|
||||
}
|
||||
|
||||
fn searchBucket(
|
||||
self: *Self,
|
||||
bucket_index: usize,
|
||||
addr: usize,
|
||||
) ?*BucketHeader {
|
||||
const first_bucket = self.buckets[bucket_index] orelse return null;
|
||||
var bucket = first_bucket;
|
||||
while (true) {
|
||||
const in_bucket_range = (addr >= @ptrToInt(bucket.page) and
|
||||
addr < @ptrToInt(bucket.page) + page_size);
|
||||
if (in_bucket_range) return bucket;
|
||||
bucket = bucket.prev;
|
||||
if (bucket == first_bucket) {
|
||||
return null;
|
||||
}
|
||||
self.buckets[bucket_index] = bucket;
|
||||
}
|
||||
}
|
||||
|
||||
fn freeSlot(
|
||||
self: *Self,
|
||||
bucket: *BucketHeader,
|
||||
bucket_index: usize,
|
||||
size_class: usize,
|
||||
slot_index: SlotIndex,
|
||||
used_byte: *u8,
|
||||
used_bit_index: u3,
|
||||
trace_addr: usize,
|
||||
) void {
|
||||
// Capture stack trace to be the "first free", in case a double free happens.
|
||||
bucket.captureStackTrace(trace_addr, size_class, slot_index, .free);
|
||||
|
||||
used_byte.* &= ~(@as(u8, 1) << used_bit_index);
|
||||
bucket.used_count -= 1;
|
||||
if (bucket.used_count == 0) {
|
||||
if (bucket.next == bucket) {
|
||||
// it's the only bucket and therefore the current one
|
||||
self.buckets[bucket_index] = null;
|
||||
} else {
|
||||
bucket.next.prev = bucket.prev;
|
||||
bucket.prev.next = bucket.next;
|
||||
self.buckets[bucket_index] = bucket.prev;
|
||||
}
|
||||
self.backing_allocator.free(bucket.page[0..page_size]);
|
||||
const bucket_size = bucketSize(size_class);
|
||||
const bucket_slice = @ptrCast([*]align(@alignOf(BucketHeader)) u8, bucket)[0..bucket_size];
|
||||
self.backing_allocator.free(bucket_slice);
|
||||
} else {
|
||||
// TODO Set the slot data to undefined.
|
||||
// Related: https://github.com/ziglang/zig/issues/4298
|
||||
}
|
||||
}
|
||||
|
||||
/// This function assumes the object is in the large object storage regardless
|
||||
/// of the parameters.
|
||||
fn resizeLarge(
|
||||
self: *Self,
|
||||
old_mem: []u8,
|
||||
old_align: u29,
|
||||
new_size: usize,
|
||||
len_align: u29,
|
||||
ret_addr: usize,
|
||||
) Error!usize {
|
||||
const entry = self.large_allocations.getEntry(@ptrToInt(old_mem.ptr)) orelse {
|
||||
if (config.safety) {
|
||||
@panic("Invalid free");
|
||||
} else {
|
||||
unreachable;
|
||||
}
|
||||
};
|
||||
|
||||
if (config.safety and old_mem.len != entry.value.bytes.len) {
|
||||
std.debug.print("\nAllocation size {} bytes does not match free size {}. Allocated here:\n", .{
|
||||
entry.value.bytes.len,
|
||||
old_mem.len,
|
||||
});
|
||||
entry.value.dumpStackTrace();
|
||||
|
||||
@panic("\nFree here:");
|
||||
}
|
||||
|
||||
const result_len = try self.backing_allocator.resizeFn(self.backing_allocator, old_mem, old_align, new_size, len_align, ret_addr);
|
||||
|
||||
if (result_len == 0) {
|
||||
self.large_allocations.removeAssertDiscard(@ptrToInt(old_mem.ptr));
|
||||
return 0;
|
||||
}
|
||||
|
||||
entry.value.bytes = old_mem.ptr[0..result_len];
|
||||
collectStackTrace(ret_addr, &entry.value.stack_addresses);
|
||||
return result_len;
|
||||
}
|
||||
|
||||
pub fn setRequestedMemoryLimit(self: *Self, limit: usize) void {
|
||||
self.requested_memory_limit = limit;
|
||||
}
|
||||
|
||||
fn resize(
|
||||
allocator: *Allocator,
|
||||
old_mem: []u8,
|
||||
old_align: u29,
|
||||
new_size: usize,
|
||||
len_align: u29,
|
||||
ret_addr: usize,
|
||||
) Error!usize {
|
||||
const self = @fieldParentPtr(Self, "allocator", allocator);
|
||||
|
||||
const held = self.mutex.acquire();
|
||||
defer held.release();
|
||||
|
||||
const prev_req_bytes = self.total_requested_bytes;
|
||||
if (config.enable_memory_limit) {
|
||||
const new_req_bytes = prev_req_bytes + new_size - old_mem.len;
|
||||
if (new_req_bytes > prev_req_bytes and new_req_bytes > self.requested_memory_limit) {
|
||||
return error.OutOfMemory;
|
||||
}
|
||||
self.total_requested_bytes = new_req_bytes;
|
||||
}
|
||||
errdefer if (config.enable_memory_limit) {
|
||||
self.total_requested_bytes = prev_req_bytes;
|
||||
};
|
||||
|
||||
assert(old_mem.len != 0);
|
||||
|
||||
const aligned_size = math.max(old_mem.len, old_align);
|
||||
if (aligned_size > largest_bucket_object_size) {
|
||||
return self.resizeLarge(old_mem, old_align, new_size, len_align, ret_addr);
|
||||
}
|
||||
const size_class_hint = math.ceilPowerOfTwoAssert(usize, aligned_size);
|
||||
|
||||
var bucket_index = math.log2(size_class_hint);
|
||||
var size_class: usize = size_class_hint;
|
||||
const bucket = while (bucket_index < small_bucket_count) : (bucket_index += 1) {
|
||||
if (self.searchBucket(bucket_index, @ptrToInt(old_mem.ptr))) |bucket| {
|
||||
break bucket;
|
||||
}
|
||||
size_class *= 2;
|
||||
} else {
|
||||
return self.resizeLarge(old_mem, old_align, new_size, len_align, ret_addr);
|
||||
};
|
||||
const byte_offset = @ptrToInt(old_mem.ptr) - @ptrToInt(bucket.page);
|
||||
const slot_index = @intCast(SlotIndex, byte_offset / size_class);
|
||||
const used_byte_index = slot_index / 8;
|
||||
const used_bit_index = @intCast(u3, slot_index % 8);
|
||||
const used_byte = bucket.usedBits(used_byte_index);
|
||||
const is_used = @truncate(u1, used_byte.* >> used_bit_index) != 0;
|
||||
if (!is_used) {
|
||||
if (config.safety) {
|
||||
// print allocation stack trace
|
||||
std.debug.print("\nDouble free detected, allocated here:\n", .{});
|
||||
const alloc_stack_trace = bucketStackTrace(bucket, size_class, slot_index, .alloc);
|
||||
std.debug.dumpStackTrace(alloc_stack_trace);
|
||||
std.debug.print("\nFirst free here:\n", .{});
|
||||
const free_stack_trace = bucketStackTrace(bucket, size_class, slot_index, .free);
|
||||
std.debug.dumpStackTrace(free_stack_trace);
|
||||
@panic("\nSecond free here:");
|
||||
} else {
|
||||
unreachable;
|
||||
}
|
||||
}
|
||||
if (new_size == 0) {
|
||||
self.freeSlot(bucket, bucket_index, size_class, slot_index, used_byte, used_bit_index, ret_addr);
|
||||
return @as(usize, 0);
|
||||
}
|
||||
const new_aligned_size = math.max(new_size, old_align);
|
||||
const new_size_class = math.ceilPowerOfTwoAssert(usize, new_aligned_size);
|
||||
if (new_size_class <= size_class) {
|
||||
return new_size;
|
||||
}
|
||||
return error.OutOfMemory;
|
||||
}
|
||||
|
||||
fn alloc(allocator: *Allocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8 {
|
||||
const self = @fieldParentPtr(Self, "allocator", allocator);
|
||||
|
||||
const held = self.mutex.acquire();
|
||||
defer held.release();
|
||||
|
||||
const prev_req_bytes = self.total_requested_bytes;
|
||||
if (config.enable_memory_limit) {
|
||||
const new_req_bytes = prev_req_bytes + len;
|
||||
if (new_req_bytes > self.requested_memory_limit) {
|
||||
return error.OutOfMemory;
|
||||
}
|
||||
self.total_requested_bytes = new_req_bytes;
|
||||
}
|
||||
errdefer if (config.enable_memory_limit) {
|
||||
self.total_requested_bytes = prev_req_bytes;
|
||||
};
|
||||
|
||||
const new_aligned_size = math.max(len, ptr_align);
|
||||
if (new_aligned_size > largest_bucket_object_size) {
|
||||
try self.large_allocations.ensureCapacity(
|
||||
self.backing_allocator,
|
||||
self.large_allocations.entries.items.len + 1,
|
||||
);
|
||||
|
||||
const slice = try self.backing_allocator.allocFn(self.backing_allocator, len, ptr_align, len_align, ret_addr);
|
||||
|
||||
const gop = self.large_allocations.getOrPutAssumeCapacity(@ptrToInt(slice.ptr));
|
||||
assert(!gop.found_existing); // This would mean the kernel double-mapped pages.
|
||||
gop.entry.value.bytes = slice;
|
||||
collectStackTrace(ret_addr, &gop.entry.value.stack_addresses);
|
||||
|
||||
return slice;
|
||||
} else {
|
||||
const new_size_class = math.ceilPowerOfTwoAssert(usize, new_aligned_size);
|
||||
const ptr = try self.allocSlot(new_size_class, ret_addr);
|
||||
return ptr[0..len];
|
||||
}
|
||||
}
|
||||
|
||||
fn createBucket(self: *Self, size_class: usize, bucket_index: usize) Error!*BucketHeader {
|
||||
const page = try self.backing_allocator.allocAdvanced(u8, page_size, page_size, .exact);
|
||||
errdefer self.backing_allocator.free(page);
|
||||
|
||||
const bucket_size = bucketSize(size_class);
|
||||
const bucket_bytes = try self.backing_allocator.allocAdvanced(u8, @alignOf(BucketHeader), bucket_size, .exact);
|
||||
const ptr = @ptrCast(*BucketHeader, bucket_bytes.ptr);
|
||||
ptr.* = BucketHeader{
|
||||
.prev = ptr,
|
||||
.next = ptr,
|
||||
.page = page.ptr,
|
||||
.alloc_cursor = 0,
|
||||
.used_count = 0,
|
||||
};
|
||||
self.buckets[bucket_index] = ptr;
|
||||
// Set the used bits to all zeroes
|
||||
@memset(@as(*[1]u8, ptr.usedBits(0)), 0, usedBitsCount(size_class));
|
||||
return ptr;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
const TraceKind = enum {
|
||||
alloc,
|
||||
free,
|
||||
};
|
||||
|
||||
const test_config = Config{};
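// The tests below all use the default configuration. A custom configuration is
// passed the same way; for example (illustrative, mirroring the
// "setting a memory cap" test near the end of this file):
//     var gpa = GeneralPurposeAllocator(.{ .enable_memory_limit = true }){};
//     defer _ = gpa.deinit();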
|
||||
|
||||
test "small allocations - free in same order" {
|
||||
var gpa = GeneralPurposeAllocator(test_config){};
|
||||
defer std.testing.expect(!gpa.deinit());
|
||||
const allocator = &gpa.allocator;
|
||||
|
||||
var list = std.ArrayList(*u64).init(std.testing.allocator);
|
||||
defer list.deinit();
|
||||
|
||||
var i: usize = 0;
|
||||
while (i < 513) : (i += 1) {
|
||||
const ptr = try allocator.create(u64);
|
||||
try list.append(ptr);
|
||||
}
|
||||
|
||||
for (list.items) |ptr| {
|
||||
allocator.destroy(ptr);
|
||||
}
|
||||
}
|
||||
|
||||
test "small allocations - free in reverse order" {
|
||||
var gpa = GeneralPurposeAllocator(test_config){};
|
||||
defer std.testing.expect(!gpa.deinit());
|
||||
const allocator = &gpa.allocator;
|
||||
|
||||
var list = std.ArrayList(*u64).init(std.testing.allocator);
|
||||
defer list.deinit();
|
||||
|
||||
var i: usize = 0;
|
||||
while (i < 513) : (i += 1) {
|
||||
const ptr = try allocator.create(u64);
|
||||
try list.append(ptr);
|
||||
}
|
||||
|
||||
while (list.popOrNull()) |ptr| {
|
||||
allocator.destroy(ptr);
|
||||
}
|
||||
}
|
||||
|
||||
test "large allocations" {
|
||||
var gpa = GeneralPurposeAllocator(test_config){};
|
||||
defer std.testing.expect(!gpa.deinit());
|
||||
const allocator = &gpa.allocator;
|
||||
|
||||
const ptr1 = try allocator.alloc(u64, 42768);
|
||||
const ptr2 = try allocator.alloc(u64, 52768);
|
||||
allocator.free(ptr1);
|
||||
const ptr3 = try allocator.alloc(u64, 62768);
|
||||
allocator.free(ptr3);
|
||||
allocator.free(ptr2);
|
||||
}
|
||||
|
||||
test "realloc" {
|
||||
var gpa = GeneralPurposeAllocator(test_config){};
|
||||
defer std.testing.expect(!gpa.deinit());
|
||||
const allocator = &gpa.allocator;
|
||||
|
||||
var slice = try allocator.alignedAlloc(u8, @alignOf(u32), 1);
|
||||
defer allocator.free(slice);
|
||||
slice[0] = 0x12;
|
||||
|
||||
// This reallocation should keep its pointer address.
|
||||
const old_slice = slice;
|
||||
slice = try allocator.realloc(slice, 2);
|
||||
std.testing.expect(old_slice.ptr == slice.ptr);
|
||||
std.testing.expect(slice[0] == 0x12);
|
||||
slice[1] = 0x34;
|
||||
|
||||
// This requires upgrading to a larger size class
|
||||
slice = try allocator.realloc(slice, 17);
|
||||
std.testing.expect(slice[0] == 0x12);
|
||||
std.testing.expect(slice[1] == 0x34);
|
||||
}
|
||||
|
||||
test "shrink" {
|
||||
var gpa = GeneralPurposeAllocator(test_config){};
|
||||
defer std.testing.expect(!gpa.deinit());
|
||||
const allocator = &gpa.allocator;
|
||||
|
||||
var slice = try allocator.alloc(u8, 20);
|
||||
defer allocator.free(slice);
|
||||
|
||||
mem.set(u8, slice, 0x11);
|
||||
|
||||
slice = allocator.shrink(slice, 17);
|
||||
|
||||
for (slice) |b| {
|
||||
std.testing.expect(b == 0x11);
|
||||
}
|
||||
|
||||
slice = allocator.shrink(slice, 16);
|
||||
|
||||
for (slice) |b| {
|
||||
std.testing.expect(b == 0x11);
|
||||
}
|
||||
}
|
||||
|
||||
test "large object - grow" {
|
||||
var gpa = GeneralPurposeAllocator(test_config){};
|
||||
defer std.testing.expect(!gpa.deinit());
|
||||
const allocator = &gpa.allocator;
|
||||
|
||||
var slice1 = try allocator.alloc(u8, page_size * 2 - 20);
|
||||
defer allocator.free(slice1);
|
||||
|
||||
const old = slice1;
|
||||
slice1 = try allocator.realloc(slice1, page_size * 2 - 10);
|
||||
std.testing.expect(slice1.ptr == old.ptr);
|
||||
|
||||
slice1 = try allocator.realloc(slice1, page_size * 2);
|
||||
std.testing.expect(slice1.ptr == old.ptr);
|
||||
|
||||
slice1 = try allocator.realloc(slice1, page_size * 2 + 1);
|
||||
}
|
||||
|
||||
test "realloc small object to large object" {
|
||||
var gpa = GeneralPurposeAllocator(test_config){};
|
||||
defer std.testing.expect(!gpa.deinit());
|
||||
const allocator = &gpa.allocator;
|
||||
|
||||
var slice = try allocator.alloc(u8, 70);
|
||||
defer allocator.free(slice);
|
||||
slice[0] = 0x12;
|
||||
slice[60] = 0x34;
|
||||
|
||||
// This requires upgrading to a large object
|
||||
const large_object_size = page_size * 2 + 50;
|
||||
slice = try allocator.realloc(slice, large_object_size);
|
||||
std.testing.expect(slice[0] == 0x12);
|
||||
std.testing.expect(slice[60] == 0x34);
|
||||
}
|
||||
|
||||
test "shrink large object to large object" {
|
||||
var gpa = GeneralPurposeAllocator(test_config){};
|
||||
defer std.testing.expect(!gpa.deinit());
|
||||
const allocator = &gpa.allocator;
|
||||
|
||||
var slice = try allocator.alloc(u8, page_size * 2 + 50);
|
||||
defer allocator.free(slice);
|
||||
slice[0] = 0x12;
|
||||
slice[60] = 0x34;
|
||||
|
||||
slice = try allocator.resize(slice, page_size * 2 + 1);
|
||||
std.testing.expect(slice[0] == 0x12);
|
||||
std.testing.expect(slice[60] == 0x34);
|
||||
|
||||
slice = allocator.shrink(slice, page_size * 2 + 1);
|
||||
std.testing.expect(slice[0] == 0x12);
|
||||
std.testing.expect(slice[60] == 0x34);
|
||||
|
||||
slice = try allocator.realloc(slice, page_size * 2);
|
||||
std.testing.expect(slice[0] == 0x12);
|
||||
std.testing.expect(slice[60] == 0x34);
|
||||
}
|
||||
|
||||
test "shrink large object to large object with larger alignment" {
|
||||
var gpa = GeneralPurposeAllocator(test_config){};
|
||||
defer std.testing.expect(!gpa.deinit());
|
||||
const allocator = &gpa.allocator;
|
||||
|
||||
var debug_buffer: [1000]u8 = undefined;
|
||||
const debug_allocator = &std.heap.FixedBufferAllocator.init(&debug_buffer).allocator;
|
||||
|
||||
const alloc_size = page_size * 2 + 50;
|
||||
var slice = try allocator.alignedAlloc(u8, 16, alloc_size);
|
||||
defer allocator.free(slice);
|
||||
|
||||
const big_alignment: usize = switch (std.Target.current.os.tag) {
|
||||
.windows => page_size * 32, // Windows aligns to 64K.
|
||||
else => page_size * 2,
|
||||
};
|
||||
// This loop allocates until we find a page that is not aligned to the big
|
||||
// alignment. Then we shrink the allocation after the loop, but increase the
|
||||
// alignment to the higher one, that we know will force it to realloc.
|
||||
var stuff_to_free = std.ArrayList([]align(16) u8).init(debug_allocator);
|
||||
while (mem.isAligned(@ptrToInt(slice.ptr), big_alignment)) {
|
||||
try stuff_to_free.append(slice);
|
||||
slice = try allocator.alignedAlloc(u8, 16, alloc_size);
|
||||
}
|
||||
while (stuff_to_free.popOrNull()) |item| {
|
||||
allocator.free(item);
|
||||
}
|
||||
slice[0] = 0x12;
|
||||
slice[60] = 0x34;
|
||||
|
||||
slice = try allocator.reallocAdvanced(slice, big_alignment, alloc_size / 2, .exact);
|
||||
std.testing.expect(slice[0] == 0x12);
|
||||
std.testing.expect(slice[60] == 0x34);
|
||||
}
|
||||
|
||||
test "realloc large object to small object" {
|
||||
var gpa = GeneralPurposeAllocator(test_config){};
|
||||
defer std.testing.expect(!gpa.deinit());
|
||||
const allocator = &gpa.allocator;
|
||||
|
||||
var slice = try allocator.alloc(u8, page_size * 2 + 50);
|
||||
defer allocator.free(slice);
|
||||
slice[0] = 0x12;
|
||||
slice[16] = 0x34;
|
||||
|
||||
slice = try allocator.realloc(slice, 19);
|
||||
std.testing.expect(slice[0] == 0x12);
|
||||
std.testing.expect(slice[16] == 0x34);
|
||||
}
|
||||
|
||||
test "non-page-allocator backing allocator" {
|
||||
var gpa = GeneralPurposeAllocator(.{}){ .backing_allocator = std.testing.allocator };
|
||||
defer std.testing.expect(!gpa.deinit());
|
||||
const allocator = &gpa.allocator;
|
||||
|
||||
const ptr = try allocator.create(i32);
|
||||
defer allocator.destroy(ptr);
|
||||
}
|
||||
|
||||
test "realloc large object to larger alignment" {
|
||||
var gpa = GeneralPurposeAllocator(test_config){};
|
||||
defer std.testing.expect(!gpa.deinit());
|
||||
const allocator = &gpa.allocator;
|
||||
|
||||
var debug_buffer: [1000]u8 = undefined;
|
||||
const debug_allocator = &std.heap.FixedBufferAllocator.init(&debug_buffer).allocator;
|
||||
|
||||
var slice = try allocator.alignedAlloc(u8, 16, page_size * 2 + 50);
|
||||
defer allocator.free(slice);
|
||||
|
||||
const big_alignment: usize = switch (std.Target.current.os.tag) {
|
||||
.windows => page_size * 32, // Windows aligns to 64K.
|
||||
else => page_size * 2,
|
||||
};
|
||||
// This loop allocates until we find a page that is not aligned to the big alignment.
|
||||
var stuff_to_free = std.ArrayList([]align(16) u8).init(debug_allocator);
|
||||
while (mem.isAligned(@ptrToInt(slice.ptr), big_alignment)) {
|
||||
try stuff_to_free.append(slice);
|
||||
slice = try allocator.alignedAlloc(u8, 16, page_size * 2 + 50);
|
||||
}
|
||||
while (stuff_to_free.popOrNull()) |item| {
|
||||
allocator.free(item);
|
||||
}
|
||||
slice[0] = 0x12;
|
||||
slice[16] = 0x34;
|
||||
|
||||
slice = try allocator.reallocAdvanced(slice, 32, page_size * 2 + 100, .exact);
|
||||
std.testing.expect(slice[0] == 0x12);
|
||||
std.testing.expect(slice[16] == 0x34);
|
||||
|
||||
slice = try allocator.reallocAdvanced(slice, 32, page_size * 2 + 25, .exact);
|
||||
std.testing.expect(slice[0] == 0x12);
|
||||
std.testing.expect(slice[16] == 0x34);
|
||||
|
||||
slice = try allocator.reallocAdvanced(slice, big_alignment, page_size * 2 + 100, .exact);
|
||||
std.testing.expect(slice[0] == 0x12);
|
||||
std.testing.expect(slice[16] == 0x34);
|
||||
}
|
||||
|
||||
test "large object shrinks to small but allocation fails during shrink" {
|
||||
var failing_allocator = std.testing.FailingAllocator.init(std.heap.page_allocator, 3);
|
||||
var gpa = GeneralPurposeAllocator(.{}){ .backing_allocator = &failing_allocator.allocator };
|
||||
defer std.testing.expect(!gpa.deinit());
|
||||
const allocator = &gpa.allocator;
|
||||
|
||||
var slice = try allocator.alloc(u8, page_size * 2 + 50);
|
||||
defer allocator.free(slice);
|
||||
slice[0] = 0x12;
|
||||
slice[3] = 0x34;
|
||||
|
||||
// Next allocation will fail in the backing allocator of the GeneralPurposeAllocator
|
||||
|
||||
slice = allocator.shrink(slice, 4);
|
||||
std.testing.expect(slice[0] == 0x12);
|
||||
std.testing.expect(slice[3] == 0x34);
|
||||
}
|
||||
|
||||
test "objects of size 1024 and 2048" {
|
||||
var gpa = GeneralPurposeAllocator(test_config){};
|
||||
defer std.testing.expect(!gpa.deinit());
|
||||
const allocator = &gpa.allocator;
|
||||
|
||||
const slice = try allocator.alloc(u8, 1025);
|
||||
const slice2 = try allocator.alloc(u8, 3000);
|
||||
|
||||
allocator.free(slice);
|
||||
allocator.free(slice2);
|
||||
}
|
||||
|
||||
test "setting a memory cap" {
|
||||
var gpa = GeneralPurposeAllocator(.{ .enable_memory_limit = true }){};
|
||||
defer std.testing.expect(!gpa.deinit());
|
||||
const allocator = &gpa.allocator;
|
||||
|
||||
gpa.setRequestedMemoryLimit(1010);
|
||||
|
||||
const small = try allocator.create(i32);
|
||||
std.testing.expect(gpa.total_requested_bytes == 4);
|
||||
|
||||
const big = try allocator.alloc(u8, 1000);
|
||||
std.testing.expect(gpa.total_requested_bytes == 1004);
|
||||
|
||||
std.testing.expectError(error.OutOfMemory, allocator.create(u64));
|
||||
|
||||
allocator.destroy(small);
|
||||
std.testing.expect(gpa.total_requested_bytes == 1000);
|
||||
|
||||
allocator.free(big);
|
||||
std.testing.expect(gpa.total_requested_bytes == 0);
|
||||
|
||||
const exact = try allocator.alloc(u8, 1010);
|
||||
std.testing.expect(gpa.total_requested_bytes == 1010);
|
||||
allocator.free(exact);
|
||||
}
|
||||
@ -23,10 +23,16 @@ pub fn LoggingAllocator(comptime OutStreamType: type) type {
|
||||
};
|
||||
}
|
||||
|
||||
fn alloc(allocator: *Allocator, len: usize, ptr_align: u29, len_align: u29) error{OutOfMemory}![]u8 {
|
||||
fn alloc(
|
||||
allocator: *Allocator,
|
||||
len: usize,
|
||||
ptr_align: u29,
|
||||
len_align: u29,
|
||||
ra: usize,
|
||||
) error{OutOfMemory}![]u8 {
|
||||
const self = @fieldParentPtr(Self, "allocator", allocator);
|
||||
self.out_stream.print("alloc : {}", .{len}) catch {};
|
||||
const result = self.parent_allocator.callAllocFn(len, ptr_align, len_align);
|
||||
const result = self.parent_allocator.allocFn(self.parent_allocator, len, ptr_align, len_align, ra);
|
||||
if (result) |buff| {
|
||||
self.out_stream.print(" success!\n", .{}) catch {};
|
||||
} else |err| {
|
||||
@ -35,7 +41,14 @@ pub fn LoggingAllocator(comptime OutStreamType: type) type {
|
||||
return result;
|
||||
}
|
||||
|
||||
fn resize(allocator: *Allocator, buf: []u8, new_len: usize, len_align: u29) error{OutOfMemory}!usize {
|
||||
fn resize(
|
||||
allocator: *Allocator,
|
||||
buf: []u8,
|
||||
buf_align: u29,
|
||||
new_len: usize,
|
||||
len_align: u29,
|
||||
ra: usize,
|
||||
) error{OutOfMemory}!usize {
|
||||
const self = @fieldParentPtr(Self, "allocator", allocator);
|
||||
if (new_len == 0) {
|
||||
self.out_stream.print("free : {}\n", .{buf.len}) catch {};
|
||||
@ -44,7 +57,7 @@ pub fn LoggingAllocator(comptime OutStreamType: type) type {
|
||||
} else {
|
||||
self.out_stream.print("expand: {} to {}", .{ buf.len, new_len }) catch {};
|
||||
}
|
||||
if (self.parent_allocator.callResizeFn(buf, new_len, len_align)) |resized_len| {
|
||||
if (self.parent_allocator.resizeFn(self.parent_allocator, buf, buf_align, new_len, len_align, ra)) |resized_len| {
|
||||
if (new_len > buf.len) {
|
||||
self.out_stream.print(" success!\n", .{}) catch {};
|
||||
}
|
||||
@ -74,9 +87,9 @@ test "LoggingAllocator" {
|
||||
const allocator = &loggingAllocator(&fixedBufferAllocator.allocator, fbs.outStream()).allocator;
|
||||
|
||||
var a = try allocator.alloc(u8, 10);
|
||||
a.len = allocator.shrinkBytes(a, 5, 0);
|
||||
a = allocator.shrink(a, 5);
|
||||
std.debug.assert(a.len == 5);
|
||||
std.testing.expectError(error.OutOfMemory, allocator.callResizeFn(a, 20, 0));
|
||||
std.testing.expectError(error.OutOfMemory, allocator.resize(a, 20));
|
||||
allocator.free(a);
|
||||
|
||||
std.testing.expectEqualSlices(u8,
|
||||
|
||||
@ -837,6 +837,10 @@ pub fn ceilPowerOfTwo(comptime T: type, value: T) (error{Overflow}!T) {
|
||||
return @intCast(T, x);
|
||||
}
|
||||
|
||||
pub fn ceilPowerOfTwoAssert(comptime T: type, value: T) T {
|
||||
return ceilPowerOfTwo(T, value) catch unreachable;
|
||||
}
|
||||
|
||||
test "math.ceilPowerOfTwoPromote" {
|
||||
testCeilPowerOfTwoPromote();
|
||||
comptime testCeilPowerOfTwoPromote();
|
||||
|
||||
407 lib/std/mem.zig
@@ -8,391 +8,13 @@ const meta = std.meta;
|
||||
const trait = meta.trait;
|
||||
const testing = std.testing;
|
||||
|
||||
// https://github.com/ziglang/zig/issues/2564
|
||||
/// https://github.com/ziglang/zig/issues/2564
|
||||
pub const page_size = switch (builtin.arch) {
|
||||
.wasm32, .wasm64 => 64 * 1024,
|
||||
else => 4 * 1024,
|
||||
};
|
||||
|
||||
pub const Allocator = struct {
|
||||
pub const Error = error{OutOfMemory};
|
||||
|
||||
/// Attempt to allocate at least `len` bytes aligned to `ptr_align`.
|
||||
///
|
||||
/// If `len_align` is `0`, then the length returned MUST be exactly `len` bytes,
|
||||
/// otherwise, the length must be aligned to `len_align`.
|
||||
///
|
||||
/// `len` must be greater than or equal to `len_align` and must be aligned by `len_align`.
|
||||
allocFn: fn (self: *Allocator, len: usize, ptr_align: u29, len_align: u29) Error![]u8,
|
||||
|
||||
/// Attempt to expand or shrink memory in place. `buf.len` must equal the most recent
|
||||
/// length returned by `allocFn` or `resizeFn`.
|
||||
///
|
||||
/// Passing a `new_len` of 0 frees and invalidates the buffer such that it can no
|
||||
/// longer be passed to `resizeFn`.
|
||||
///
|
||||
/// error.OutOfMemory can only be returned if `new_len` is greater than `buf.len`.
|
||||
/// If `buf` cannot be expanded to accommodate `new_len`, then the allocation MUST be
|
||||
/// unmodified and error.OutOfMemory MUST be returned.
|
||||
///
|
||||
/// If `len_align` is `0`, then the length returned MUST be exactly `len` bytes,
|
||||
/// otherwise, the length must be aligned to `len_align`.
|
||||
///
|
||||
/// `new_len` must be greater than or equal to `len_align` and must be aligned by `len_align`.
|
||||
resizeFn: fn (self: *Allocator, buf: []u8, new_len: usize, len_align: u29) Error!usize,
|
||||
|
||||
pub fn callAllocFn(self: *Allocator, new_len: usize, alignment: u29, len_align: u29) Error![]u8 {
|
||||
return self.allocFn(self, new_len, alignment, len_align);
|
||||
}
|
||||
|
||||
pub fn callResizeFn(self: *Allocator, buf: []u8, new_len: usize, len_align: u29) Error!usize {
|
||||
return self.resizeFn(self, buf, new_len, len_align);
|
||||
}
|
||||
|
||||
/// Set to resizeFn if in-place resize is not supported.
|
||||
pub fn noResize(self: *Allocator, buf: []u8, new_len: usize, len_align: u29) Error!usize {
|
||||
if (new_len > buf.len)
|
||||
return error.OutOfMemory;
|
||||
return new_len;
|
||||
}
|
||||
|
||||
/// Call `resizeFn`, but caller guarantees that `new_len` <= `buf.len` meaning
|
||||
/// error.OutOfMemory should be impossible.
|
||||
pub fn shrinkBytes(self: *Allocator, buf: []u8, new_len: usize, len_align: u29) usize {
|
||||
assert(new_len <= buf.len);
|
||||
return self.callResizeFn(buf, new_len, len_align) catch unreachable;
|
||||
}
|
||||
|
||||
/// Realloc is used to modify the size or alignment of an existing allocation,
|
||||
/// as well as to provide the allocator with an opportunity to move an allocation
|
||||
/// to a better location.
|
||||
/// When the size/alignment is greater than the previous allocation, this function
|
||||
/// returns `error.OutOfMemory` when the requested new allocation could not be granted.
|
||||
/// When the size/alignment is less than or equal to the previous allocation,
|
||||
/// this function returns `error.OutOfMemory` when the allocator decides the client
|
||||
/// would be better off keeping the extra alignment/size. Clients will call
|
||||
/// `callResizeFn` when they require the allocator to track a new alignment/size,
|
||||
/// and so this function should only return success when the allocator considers
|
||||
/// the reallocation desirable from the allocator's perspective.
|
||||
/// As an example, `std.ArrayList` tracks a "capacity", and therefore can handle
|
||||
/// reallocation failure, even when `new_n` <= `old_mem.len`. A `FixedBufferAllocator`
|
||||
/// would always return `error.OutOfMemory` for `reallocFn` when the size/alignment
|
||||
/// is less than or equal to the old allocation, because it cannot reclaim the memory,
|
||||
/// and thus the `std.ArrayList` would be better off retaining its capacity.
|
||||
/// When `reallocFn` returns,
|
||||
/// `return_value[0..min(old_mem.len, new_byte_count)]` must be the same
|
||||
/// as `old_mem` was when `reallocFn` is called. The bytes of
|
||||
/// `return_value[old_mem.len..]` have undefined values.
|
||||
/// The returned slice must have its pointer aligned at least to `new_alignment` bytes.
|
||||
fn reallocBytes(
|
||||
self: *Allocator,
|
||||
/// Guaranteed to be the same as what was returned from most recent call to
|
||||
/// `allocFn` or `resizeFn`.
|
||||
/// If `old_mem.len == 0` then this is a new allocation and `new_byte_count`
|
||||
/// is guaranteed to be >= 1.
|
||||
old_mem: []u8,
|
||||
/// If `old_mem.len == 0` then this is `undefined`, otherwise:
|
||||
/// Guaranteed to be the same as what was passed to `allocFn`.
|
||||
/// Guaranteed to be >= 1.
|
||||
/// Guaranteed to be a power of 2.
|
||||
old_alignment: u29,
|
||||
/// If `new_byte_count` is 0 then this is a free and it is guaranteed that
|
||||
/// `old_mem.len != 0`.
|
||||
new_byte_count: usize,
|
||||
/// Guaranteed to be >= 1.
|
||||
/// Guaranteed to be a power of 2.
|
||||
/// Returned slice's pointer must have this alignment.
|
||||
new_alignment: u29,
|
||||
/// 0 indicates the length of the slice returned MUST match `new_byte_count` exactly
|
||||
/// non-zero means the length of the returned slice must be aligned by `len_align`
|
||||
/// `new_len` must be aligned by `len_align`
|
||||
len_align: u29,
|
||||
) Error![]u8 {
|
||||
if (old_mem.len == 0) {
|
||||
const new_mem = try self.callAllocFn(new_byte_count, new_alignment, len_align);
|
||||
@memset(new_mem.ptr, undefined, new_byte_count);
|
||||
return new_mem;
|
||||
}
|
||||
|
||||
if (isAligned(@ptrToInt(old_mem.ptr), new_alignment)) {
|
||||
if (new_byte_count <= old_mem.len) {
|
||||
const shrunk_len = self.shrinkBytes(old_mem, new_byte_count, len_align);
|
||||
return old_mem.ptr[0..shrunk_len];
|
||||
}
|
||||
if (self.callResizeFn(old_mem, new_byte_count, len_align)) |resized_len| {
|
||||
assert(resized_len >= new_byte_count);
|
||||
@memset(old_mem.ptr + new_byte_count, undefined, resized_len - new_byte_count);
|
||||
return old_mem.ptr[0..resized_len];
|
||||
} else |_| {}
|
||||
}
|
||||
if (new_byte_count <= old_mem.len and new_alignment <= old_alignment) {
|
||||
return error.OutOfMemory;
|
||||
}
|
||||
return self.moveBytes(old_mem, new_byte_count, new_alignment, len_align);
|
||||
}
|
||||
|
||||
/// Move the given memory to a new location in the given allocator to accommodate a new
|
||||
/// size and alignment.
|
||||
fn moveBytes(self: *Allocator, old_mem: []u8, new_len: usize, new_alignment: u29, len_align: u29) Error![]u8 {
|
||||
assert(old_mem.len > 0);
|
||||
assert(new_len > 0);
|
||||
const new_mem = try self.callAllocFn(new_len, new_alignment, len_align);
|
||||
@memcpy(new_mem.ptr, old_mem.ptr, std.math.min(new_len, old_mem.len));
|
||||
// DISABLED TO AVOID BUGS IN TRANSLATE C
|
||||
// use './zig build test-translate-c' to reproduce, some of the symbols in the
|
||||
// generated C code will be a sequence of 0xaa (the undefined value), meaning
|
||||
// it is printing data that has been freed
|
||||
//@memset(old_mem.ptr, undefined, old_mem.len);
|
||||
_ = self.shrinkBytes(old_mem, 0, 0);
|
||||
return new_mem;
|
||||
}
|
||||
|
||||
/// Returns a pointer to undefined memory.
|
||||
/// Call `destroy` with the result to free the memory.
|
||||
pub fn create(self: *Allocator, comptime T: type) Error!*T {
|
||||
if (@sizeOf(T) == 0) return &(T{});
|
||||
const slice = try self.alloc(T, 1);
|
||||
return &slice[0];
|
||||
}
|
||||
|
||||
/// `ptr` should be the return value of `create`, or otherwise
|
||||
/// have the same address and alignment property.
|
||||
pub fn destroy(self: *Allocator, ptr: anytype) void {
|
||||
const T = @TypeOf(ptr).Child;
|
||||
if (@sizeOf(T) == 0) return;
|
||||
const non_const_ptr = @intToPtr([*]u8, @ptrToInt(ptr));
|
||||
_ = self.shrinkBytes(non_const_ptr[0..@sizeOf(T)], 0, 0);
|
||||
}
|
||||
|
||||
/// Allocates an array of `n` items of type `T` and sets all the
|
||||
/// items to `undefined`. Depending on the Allocator
|
||||
/// implementation, it may be required to call `free` once the
|
||||
/// memory is no longer needed, to avoid a resource leak. If the
|
||||
/// `Allocator` implementation is unknown, then correct code will
|
||||
/// call `free` when done.
|
||||
///
|
||||
/// For allocating a single item, see `create`.
|
||||
pub fn alloc(self: *Allocator, comptime T: type, n: usize) Error![]T {
|
||||
return self.alignedAlloc(T, null, n);
|
||||
}
|
||||
|
||||
pub fn allocWithOptions(
|
||||
self: *Allocator,
|
||||
comptime Elem: type,
|
||||
n: usize,
|
||||
/// null means naturally aligned
|
||||
comptime optional_alignment: ?u29,
|
||||
comptime optional_sentinel: ?Elem,
|
||||
) Error!AllocWithOptionsPayload(Elem, optional_alignment, optional_sentinel) {
|
||||
if (optional_sentinel) |sentinel| {
|
||||
const ptr = try self.alignedAlloc(Elem, optional_alignment, n + 1);
|
||||
ptr[n] = sentinel;
|
||||
return ptr[0..n :sentinel];
|
||||
} else {
|
||||
return self.alignedAlloc(Elem, optional_alignment, n);
|
||||
}
|
||||
}
|
||||
|
||||
fn AllocWithOptionsPayload(comptime Elem: type, comptime alignment: ?u29, comptime sentinel: ?Elem) type {
|
||||
if (sentinel) |s| {
|
||||
return [:s]align(alignment orelse @alignOf(Elem)) Elem;
|
||||
} else {
|
||||
return []align(alignment orelse @alignOf(Elem)) Elem;
|
||||
}
|
||||
}
|
||||
|
||||
/// Allocates an array of `n + 1` items of type `T` and sets the first `n`
|
||||
/// items to `undefined` and the last item to `sentinel`. Depending on the
|
||||
/// Allocator implementation, it may be required to call `free` once the
|
||||
/// memory is no longer needed, to avoid a resource leak. If the
|
||||
/// `Allocator` implementation is unknown, then correct code will
|
||||
/// call `free` when done.
|
||||
///
|
||||
/// For allocating a single item, see `create`.
|
||||
///
|
||||
/// Deprecated; use `allocWithOptions`.
|
||||
pub fn allocSentinel(self: *Allocator, comptime Elem: type, n: usize, comptime sentinel: Elem) Error![:sentinel]Elem {
|
||||
return self.allocWithOptions(Elem, n, null, sentinel);
|
||||
}
|
||||
|
||||
/// Deprecated: use `allocAdvanced`
|
||||
pub fn alignedAlloc(
|
||||
self: *Allocator,
|
||||
comptime T: type,
|
||||
/// null means naturally aligned
|
||||
comptime alignment: ?u29,
|
||||
n: usize,
|
||||
) Error![]align(alignment orelse @alignOf(T)) T {
|
||||
return self.allocAdvanced(T, alignment, n, .exact);
|
||||
}
|
||||
|
||||
const Exact = enum { exact, at_least };
|
||||
pub fn allocAdvanced(
|
||||
self: *Allocator,
|
||||
comptime T: type,
|
||||
/// null means naturally aligned
|
||||
comptime alignment: ?u29,
|
||||
n: usize,
|
||||
exact: Exact,
|
||||
) Error![]align(alignment orelse @alignOf(T)) T {
|
||||
const a = if (alignment) |a| blk: {
|
||||
if (a == @alignOf(T)) return allocAdvanced(self, T, null, n, exact);
|
||||
break :blk a;
|
||||
} else @alignOf(T);
|
||||
|
||||
if (n == 0) {
|
||||
return @as([*]align(a) T, undefined)[0..0];
|
||||
}
|
||||
|
||||
const byte_count = math.mul(usize, @sizeOf(T), n) catch return Error.OutOfMemory;
|
||||
// TODO The `if (alignment == null)` blocks are workarounds for zig not being able to
|
||||
// access certain type information about T without creating a circular dependency in async
|
||||
// functions that heap-allocate their own frame with @Frame(func).
|
||||
const sizeOfT = if (alignment == null) @intCast(u29, @divExact(byte_count, n)) else @sizeOf(T);
|
||||
const byte_slice = try self.callAllocFn(byte_count, a, if (exact == .exact) @as(u29, 0) else sizeOfT);
|
||||
switch (exact) {
|
||||
.exact => assert(byte_slice.len == byte_count),
|
||||
.at_least => assert(byte_slice.len >= byte_count),
|
||||
}
|
||||
@memset(byte_slice.ptr, undefined, byte_slice.len);
|
||||
if (alignment == null) {
|
||||
// This if block is a workaround (see comment above)
|
||||
return @intToPtr([*]T, @ptrToInt(byte_slice.ptr))[0..@divExact(byte_slice.len, @sizeOf(T))];
|
||||
} else {
|
||||
return mem.bytesAsSlice(T, @alignCast(a, byte_slice));
|
||||
}
|
||||
}
|
||||
|
||||
/// This function requests a new byte size for an existing allocation,
|
||||
/// which can be larger, smaller, or the same size as the old memory
|
||||
/// allocation.
|
||||
/// This function is preferred over `shrink`, because it can fail, even
|
||||
/// when shrinking. This gives the allocator a chance to perform a
|
||||
/// cheap shrink operation if possible, or otherwise return OutOfMemory,
|
||||
/// indicating that the caller should keep their capacity, for example
|
||||
/// in `std.ArrayList.shrink`.
|
||||
/// If you need guaranteed success, call `shrink`.
|
||||
/// If `new_n` is 0, this is the same as `free` and it always succeeds.
|
||||
pub fn realloc(self: *Allocator, old_mem: anytype, new_n: usize) t: {
|
||||
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
|
||||
break :t Error![]align(Slice.alignment) Slice.child;
|
||||
} {
|
||||
const old_alignment = @typeInfo(@TypeOf(old_mem)).Pointer.alignment;
|
||||
return self.reallocAdvanced(old_mem, old_alignment, new_n, .exact);
|
||||
}
|
||||
|
||||
pub fn reallocAtLeast(self: *Allocator, old_mem: anytype, new_n: usize) t: {
|
||||
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
|
||||
break :t Error![]align(Slice.alignment) Slice.child;
|
||||
} {
|
||||
const old_alignment = @typeInfo(@TypeOf(old_mem)).Pointer.alignment;
|
||||
return self.reallocAdvanced(old_mem, old_alignment, new_n, .at_least);
|
||||
}
|
||||
|
||||
// Deprecated: use `reallocAdvanced`
|
||||
pub fn alignedRealloc(
|
||||
self: *Allocator,
|
||||
old_mem: anytype,
|
||||
comptime new_alignment: u29,
|
||||
new_n: usize,
|
||||
) Error![]align(new_alignment) @typeInfo(@TypeOf(old_mem)).Pointer.child {
|
||||
return self.reallocAdvanced(old_mem, new_alignment, new_n, .exact);
|
||||
}
|
||||
|
||||
/// This is the same as `realloc`, except caller may additionally request
|
||||
/// a new alignment, which can be larger, smaller, or the same as the old
|
||||
/// allocation.
|
||||
pub fn reallocAdvanced(
|
||||
self: *Allocator,
|
||||
old_mem: anytype,
|
||||
comptime new_alignment: u29,
|
||||
new_n: usize,
|
||||
exact: Exact,
|
||||
) Error![]align(new_alignment) @typeInfo(@TypeOf(old_mem)).Pointer.child {
|
||||
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
|
||||
const T = Slice.child;
|
||||
if (old_mem.len == 0) {
|
||||
return self.allocAdvanced(T, new_alignment, new_n, exact);
|
||||
}
|
||||
if (new_n == 0) {
|
||||
self.free(old_mem);
|
||||
return @as([*]align(new_alignment) T, undefined)[0..0];
|
||||
}
|
||||
|
||||
const old_byte_slice = mem.sliceAsBytes(old_mem);
|
||||
const byte_count = math.mul(usize, @sizeOf(T), new_n) catch return Error.OutOfMemory;
|
||||
// Note: can't set shrunk memory to undefined as memory shouldn't be modified on realloc failure
|
||||
const new_byte_slice = try self.reallocBytes(old_byte_slice, Slice.alignment, byte_count, new_alignment, if (exact == .exact) @as(u29, 0) else @sizeOf(T));
|
||||
return mem.bytesAsSlice(T, @alignCast(new_alignment, new_byte_slice));
|
||||
}
|
||||
|
||||
/// Prefer calling realloc to shrink if you can tolerate failure, such as
|
||||
/// in an ArrayList data structure with a storage capacity.
|
||||
/// Shrink always succeeds, and `new_n` must be <= `old_mem.len`.
|
||||
/// Returned slice has same alignment as old_mem.
|
||||
/// Shrinking to 0 is the same as calling `free`.
|
||||
pub fn shrink(self: *Allocator, old_mem: anytype, new_n: usize) t: {
|
||||
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
|
||||
break :t []align(Slice.alignment) Slice.child;
|
||||
} {
|
||||
const old_alignment = @typeInfo(@TypeOf(old_mem)).Pointer.alignment;
|
||||
return self.alignedShrink(old_mem, old_alignment, new_n);
|
||||
}
|
||||
|
||||
/// This is the same as `shrink`, except caller may additionally request
|
||||
/// a new alignment, which must be smaller or the same as the old
|
||||
/// allocation.
|
||||
pub fn alignedShrink(
|
||||
self: *Allocator,
|
||||
old_mem: anytype,
|
||||
comptime new_alignment: u29,
|
||||
new_n: usize,
|
||||
) []align(new_alignment) @typeInfo(@TypeOf(old_mem)).Pointer.child {
|
||||
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
|
||||
const T = Slice.child;
|
||||
|
||||
if (new_n == old_mem.len)
|
||||
return old_mem;
|
||||
assert(new_n < old_mem.len);
|
||||
assert(new_alignment <= Slice.alignment);
|
||||
|
||||
// Here we skip the overflow checking on the multiplication because
|
||||
// new_n <= old_mem.len and the multiplication didn't overflow for that operation.
|
||||
const byte_count = @sizeOf(T) * new_n;
|
||||
|
||||
const old_byte_slice = mem.sliceAsBytes(old_mem);
|
||||
@memset(old_byte_slice.ptr + byte_count, undefined, old_byte_slice.len - byte_count);
|
||||
_ = self.shrinkBytes(old_byte_slice, byte_count, 0);
|
||||
return old_mem[0..new_n];
|
||||
}
|
||||
|
||||
/// Free an array allocated with `alloc`. To free a single item,
|
||||
/// see `destroy`.
|
||||
pub fn free(self: *Allocator, memory: anytype) void {
|
||||
const Slice = @typeInfo(@TypeOf(memory)).Pointer;
|
||||
const bytes = mem.sliceAsBytes(memory);
|
||||
const bytes_len = bytes.len + if (Slice.sentinel != null) @sizeOf(Slice.child) else 0;
|
||||
if (bytes_len == 0) return;
|
||||
const non_const_ptr = @intToPtr([*]u8, @ptrToInt(bytes.ptr));
|
||||
@memset(non_const_ptr, undefined, bytes_len);
|
||||
_ = self.shrinkBytes(non_const_ptr[0..bytes_len], 0, 0);
|
||||
}
|
||||
|
||||
/// Copies `m` to newly allocated memory. Caller owns the memory.
|
||||
pub fn dupe(allocator: *Allocator, comptime T: type, m: []const T) ![]T {
|
||||
const new_buf = try allocator.alloc(T, m.len);
|
||||
copy(T, new_buf, m);
|
||||
return new_buf;
|
||||
}
|
||||
|
||||
/// Copies `m` to newly allocated memory, with a null-terminated element. Caller owns the memory.
|
||||
pub fn dupeZ(allocator: *Allocator, comptime T: type, m: []const T) ![:0]T {
|
||||
const new_buf = try allocator.alloc(T, m.len + 1);
|
||||
copy(T, new_buf, m);
|
||||
new_buf[m.len] = 0;
|
||||
return new_buf[0..m.len :0];
|
||||
}
|
||||
};
|
||||
pub const Allocator = @import("mem/Allocator.zig");
|
||||
|
||||
/// Detects and asserts if the std.mem.Allocator interface is violated by the caller
|
||||
/// or the allocator.
|
||||
@ -415,7 +37,13 @@ pub fn ValidationAllocator(comptime T: type) type {
|
||||
if (*T == *Allocator) return &self.underlying_allocator;
|
||||
return &self.underlying_allocator.allocator;
|
||||
}
|
||||
pub fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29) Allocator.Error![]u8 {
|
||||
pub fn alloc(
|
||||
allocator: *Allocator,
|
||||
n: usize,
|
||||
ptr_align: u29,
|
||||
len_align: u29,
|
||||
ret_addr: usize,
|
||||
) Allocator.Error![]u8 {
|
||||
assert(n > 0);
|
||||
assert(mem.isValidAlign(ptr_align));
|
||||
if (len_align != 0) {
|
||||
@ -424,7 +52,8 @@ pub fn ValidationAllocator(comptime T: type) type {
|
||||
}
|
||||
|
||||
const self = @fieldParentPtr(@This(), "allocator", allocator);
|
||||
const result = try self.getUnderlyingAllocatorPtr().callAllocFn(n, ptr_align, len_align);
|
||||
const underlying = self.getUnderlyingAllocatorPtr();
|
||||
const result = try underlying.allocFn(underlying, n, ptr_align, len_align, ret_addr);
|
||||
assert(mem.isAligned(@ptrToInt(result.ptr), ptr_align));
|
||||
if (len_align == 0) {
|
||||
assert(result.len == n);
|
||||
@ -434,14 +63,22 @@ pub fn ValidationAllocator(comptime T: type) type {
|
||||
}
|
||||
return result;
|
||||
}
|
||||
pub fn resize(allocator: *Allocator, buf: []u8, new_len: usize, len_align: u29) Allocator.Error!usize {
|
||||
pub fn resize(
|
||||
allocator: *Allocator,
|
||||
buf: []u8,
|
||||
buf_align: u29,
|
||||
new_len: usize,
|
||||
len_align: u29,
|
||||
ret_addr: usize,
|
||||
) Allocator.Error!usize {
|
||||
assert(buf.len > 0);
|
||||
if (len_align != 0) {
|
||||
assert(mem.isAlignedAnyAlign(new_len, len_align));
|
||||
assert(new_len >= len_align);
|
||||
}
|
||||
const self = @fieldParentPtr(@This(), "allocator", allocator);
|
||||
const result = try self.getUnderlyingAllocatorPtr().callResizeFn(buf, new_len, len_align);
|
||||
const underlying = self.getUnderlyingAllocatorPtr();
|
||||
const result = try underlying.resizeFn(underlying, buf, buf_align, new_len, len_align, ret_addr);
|
||||
if (len_align == 0) {
|
||||
assert(result == new_len);
|
||||
} else {
|
||||
@ -481,7 +118,7 @@ var failAllocator = Allocator{
|
||||
.allocFn = failAllocatorAlloc,
|
||||
.resizeFn = Allocator.noResize,
|
||||
};
|
||||
fn failAllocatorAlloc(self: *Allocator, n: usize, alignment: u29, len_align: u29) Allocator.Error![]u8 {
|
||||
fn failAllocatorAlloc(self: *Allocator, n: usize, alignment: u29, len_align: u29, ra: usize) Allocator.Error![]u8 {
|
||||
return error.OutOfMemory;
|
||||
}
|
||||
|
||||
|
||||
486
lib/std/mem/Allocator.zig
Normal file
486
lib/std/mem/Allocator.zig
Normal file
@ -0,0 +1,486 @@
|
||||
//! The standard memory allocation interface.
|
||||
|
||||
const std = @import("../std.zig");
|
||||
const assert = std.debug.assert;
|
||||
const math = std.math;
|
||||
const mem = std.mem;
|
||||
const Allocator = @This();
|
||||
|
||||
pub const Error = error{OutOfMemory};
|
||||
|
||||
/// Attempt to allocate at least `len` bytes aligned to `ptr_align`.
|
||||
///
|
||||
/// If `len_align` is `0`, then the length returned MUST be exactly `len` bytes,
|
||||
/// otherwise, the length must be aligned to `len_align`.
|
||||
///
|
||||
/// `len` must be greater than or equal to `len_align` and must be aligned by `len_align`.
|
||||
///
|
||||
/// `ret_addr` is optionally provided as the first return address of the allocation call stack.
|
||||
/// If the value is `0` it means no return address has been provided.
|
||||
allocFn: fn (self: *Allocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8,
|
||||
|
||||
/// Attempt to expand or shrink memory in place. `buf.len` must equal the most recent
|
||||
/// length returned by `allocFn` or `resizeFn`. `buf_align` must equal the same value
|
||||
/// that was passed as the `ptr_align` parameter to the original `allocFn` call.
|
||||
///
|
||||
/// Passing a `new_len` of 0 frees and invalidates the buffer such that it can no
|
||||
/// longer be passed to `resizeFn`.
|
||||
///
|
||||
/// error.OutOfMemory can only be returned if `new_len` is greater than `buf.len`.
|
||||
/// If `buf` cannot be expanded to accommodate `new_len`, then the allocation MUST be
|
||||
/// unmodified and error.OutOfMemory MUST be returned.
|
||||
///
|
||||
/// If `len_align` is `0`, then the length returned MUST be exactly `new_len` bytes,
|
||||
/// otherwise, the length must be aligned to `len_align`. Note that `len_align` does *not*
|
||||
/// provide a way to modify the alignment of a pointer. Rather it provides an API for
|
||||
/// accepting more bytes of memory from the allocator than requested.
|
||||
///
|
||||
/// `new_len` must be greater than or equal to `len_align` and must be aligned by `len_align`.
|
||||
///
|
||||
/// `ret_addr` is optionally provided as the first return address of the allocation call stack.
|
||||
/// If the value is `0` it means no return address has been provided.
|
||||
resizeFn: fn (self: *Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Error!usize,
|
||||
|
||||
/// Set to resizeFn if in-place resize is not supported.
|
||||
pub fn noResize(
|
||||
self: *Allocator,
|
||||
buf: []u8,
|
||||
buf_align: u29,
|
||||
new_len: usize,
|
||||
len_align: u29,
|
||||
ret_addr: usize,
|
||||
) Error!usize {
|
||||
if (new_len > buf.len)
|
||||
return error.OutOfMemory;
|
||||
return new_len;
|
||||
}
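// A minimal sketch of an allocator implementing the two-function interface
// above: a hypothetical bump allocator that cannot resize in place, so it
// plugs `Allocator.noResize` into `resizeFn`. The `BumpAllocator` name and
// layout are illustrative only.
const std = @import("std");
const Allocator = std.mem.Allocator;

const BumpAllocator = struct {
    allocator: Allocator,
    buffer: []u8,
    end: usize,

    fn init(buffer: []u8) BumpAllocator {
        return BumpAllocator{
            .allocator = Allocator{ .allocFn = alloc, .resizeFn = Allocator.noResize },
            .buffer = buffer,
            .end = 0,
        };
    }

    fn alloc(allocator: *Allocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Allocator.Error![]u8 {
        const self = @fieldParentPtr(BumpAllocator, "allocator", allocator);
        // Bump past any padding needed so the returned pointer satisfies `ptr_align`.
        const base = @ptrToInt(self.buffer.ptr);
        const start = std.mem.alignForward(base + self.end, ptr_align) - base;
        if (start + len > self.buffer.len) return error.OutOfMemory;
        self.end = start + len;
        // Returning exactly `len` bytes is valid for any `len_align`, because the
        // caller guarantees `len` is already aligned by `len_align`.
        return self.buffer[start..self.end];
    }
};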
|
||||
|
||||
/// Realloc is used to modify the size or alignment of an existing allocation,
|
||||
/// as well as to provide the allocator with an opportunity to move an allocation
|
||||
/// to a better location.
|
||||
/// When the size/alignment is greater than the previous allocation, this function
|
||||
/// returns `error.OutOfMemory` when the requested new allocation could not be granted.
|
||||
/// When the size/alignment is less than or equal to the previous allocation,
|
||||
/// this function returns `error.OutOfMemory` when the allocator decides the client
|
||||
/// would be better off keeping the extra alignment/size. Clients will call
|
||||
/// `resizeFn` when they require the allocator to track a new alignment/size,
|
||||
/// and so this function should only return success when the allocator considers
|
||||
/// the reallocation desirable from the allocator's perspective.
|
||||
/// As an example, `std.ArrayList` tracks a "capacity", and therefore can handle
|
||||
/// reallocation failure, even when `new_n` <= `old_mem.len`. A `FixedBufferAllocator`
|
||||
/// would always return `error.OutOfMemory` for `reallocFn` when the size/alignment
|
||||
/// is less than or equal to the old allocation, because it cannot reclaim the memory,
|
||||
/// and thus the `std.ArrayList` would be better off retaining its capacity.
|
||||
/// When `reallocFn` returns,
|
||||
/// `return_value[0..min(old_mem.len, new_byte_count)]` must be the same
|
||||
/// as `old_mem` was when `reallocFn` is called. The bytes of
|
||||
/// `return_value[old_mem.len..]` have undefined values.
|
||||
/// The returned slice must have its pointer aligned at least to `new_alignment` bytes.
|
||||
fn reallocBytes(
|
||||
self: *Allocator,
|
||||
/// Guaranteed to be the same as what was returned from most recent call to
|
||||
/// `allocFn` or `resizeFn`.
|
||||
/// If `old_mem.len == 0` then this is a new allocation and `new_byte_count`
|
||||
/// is guaranteed to be >= 1.
|
||||
old_mem: []u8,
|
||||
/// If `old_mem.len == 0` then this is `undefined`, otherwise:
|
||||
/// Guaranteed to be the same as what was passed to `allocFn`.
|
||||
/// Guaranteed to be >= 1.
|
||||
/// Guaranteed to be a power of 2.
|
||||
old_alignment: u29,
|
||||
/// If `new_byte_count` is 0 then this is a free and it is guaranteed that
|
||||
/// `old_mem.len != 0`.
|
||||
new_byte_count: usize,
|
||||
/// Guaranteed to be >= 1.
|
||||
/// Guaranteed to be a power of 2.
|
||||
/// Returned slice's pointer must have this alignment.
|
||||
new_alignment: u29,
|
||||
/// 0 indicates the length of the slice returned MUST match `new_byte_count` exactly
|
||||
/// non-zero means the length of the returned slice must be aligned by `len_align`
|
||||
/// `new_len` must be aligned by `len_align`
|
||||
len_align: u29,
|
||||
return_address: usize,
|
||||
) Error![]u8 {
|
||||
if (old_mem.len == 0) {
|
||||
const new_mem = try self.allocFn(self, new_byte_count, new_alignment, len_align, return_address);
|
||||
// TODO: https://github.com/ziglang/zig/issues/4298
|
||||
@memset(new_mem.ptr, undefined, new_byte_count);
|
||||
return new_mem;
|
||||
}
|
||||
|
||||
if (mem.isAligned(@ptrToInt(old_mem.ptr), new_alignment)) {
|
||||
if (new_byte_count <= old_mem.len) {
|
||||
const shrunk_len = self.shrinkBytes(old_mem, old_alignment, new_byte_count, len_align, return_address);
|
||||
return old_mem.ptr[0..shrunk_len];
|
||||
}
|
||||
if (self.resizeFn(self, old_mem, old_alignment, new_byte_count, len_align, return_address)) |resized_len| {
|
||||
assert(resized_len >= new_byte_count);
|
||||
// TODO: https://github.com/ziglang/zig/issues/4298
|
||||
@memset(old_mem.ptr + new_byte_count, undefined, resized_len - new_byte_count);
|
||||
return old_mem.ptr[0..resized_len];
|
||||
} else |_| {}
|
||||
}
|
||||
if (new_byte_count <= old_mem.len and new_alignment <= old_alignment) {
|
||||
return error.OutOfMemory;
|
||||
}
|
||||
return self.moveBytes(old_mem, old_alignment, new_byte_count, new_alignment, len_align, return_address);
|
||||
}
|
||||
|
||||
/// Move the given memory to a new location in the given allocator to accommodate a new
|
||||
/// size and alignment.
|
||||
fn moveBytes(
|
||||
self: *Allocator,
|
||||
old_mem: []u8,
|
||||
old_align: u29,
|
||||
new_len: usize,
|
||||
new_alignment: u29,
|
||||
len_align: u29,
|
||||
return_address: usize,
|
||||
) Error![]u8 {
|
||||
assert(old_mem.len > 0);
|
||||
assert(new_len > 0);
|
||||
const new_mem = try self.allocFn(self, new_len, new_alignment, len_align, return_address);
|
||||
@memcpy(new_mem.ptr, old_mem.ptr, math.min(new_len, old_mem.len));
|
||||
// TODO DISABLED TO AVOID BUGS IN TRANSLATE C
|
||||
// TODO see also https://github.com/ziglang/zig/issues/4298
|
||||
// use './zig build test-translate-c' to reproduce, some of the symbols in the
|
||||
// generated C code will be a sequence of 0xaa (the undefined value), meaning
|
||||
// it is printing data that has been freed
|
||||
//@memset(old_mem.ptr, undefined, old_mem.len);
|
||||
_ = self.shrinkBytes(old_mem, old_align, 0, 0, return_address);
|
||||
return new_mem;
|
||||
}
|
||||
|
||||
/// Returns a pointer to undefined memory.
|
||||
/// Call `destroy` with the result to free the memory.
|
||||
pub fn create(self: *Allocator, comptime T: type) Error!*T {
|
||||
if (@sizeOf(T) == 0) return &(T{});
|
||||
const slice = try self.allocAdvancedWithRetAddr(T, null, 1, .exact, @returnAddress());
|
||||
return &slice[0];
|
||||
}
|
||||
|
||||
/// `ptr` should be the return value of `create`, or otherwise
|
||||
/// have the same address and alignment property.
|
||||
pub fn destroy(self: *Allocator, ptr: anytype) void {
|
||||
const T = @TypeOf(ptr).Child;
|
||||
if (@sizeOf(T) == 0) return;
|
||||
const non_const_ptr = @intToPtr([*]u8, @ptrToInt(ptr));
|
||||
const ptr_align = @typeInfo(@TypeOf(ptr)).Pointer.alignment;
|
||||
_ = self.shrinkBytes(non_const_ptr[0..@sizeOf(T)], ptr_align, 0, 0, @returnAddress());
|
||||
}
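// A minimal usage sketch for `create`/`destroy` (the `Node` type is purely
// illustrative):
const std = @import("std");

test "create and destroy usage sketch" {
    const Node = struct { value: i32, next: ?*@This() };
    const node = try std.testing.allocator.create(Node);
    defer std.testing.allocator.destroy(node);
    node.* = Node{ .value = 42, .next = null };
    std.testing.expect(node.value == 42);
}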
|
||||
|
||||
/// Allocates an array of `n` items of type `T` and sets all the
|
||||
/// items to `undefined`. Depending on the Allocator
|
||||
/// implementation, it may be required to call `free` once the
|
||||
/// memory is no longer needed, to avoid a resource leak. If the
|
||||
/// `Allocator` implementation is unknown, then correct code will
|
||||
/// call `free` when done.
|
||||
///
|
||||
/// For allocating a single item, see `create`.
|
||||
pub fn alloc(self: *Allocator, comptime T: type, n: usize) Error![]T {
|
||||
return self.allocAdvancedWithRetAddr(T, null, n, .exact, @returnAddress());
|
||||
}
|
||||
|
||||
pub fn allocWithOptions(
|
||||
self: *Allocator,
|
||||
comptime Elem: type,
|
||||
n: usize,
|
||||
/// null means naturally aligned
|
||||
comptime optional_alignment: ?u29,
|
||||
comptime optional_sentinel: ?Elem,
|
||||
) Error!AllocWithOptionsPayload(Elem, optional_alignment, optional_sentinel) {
|
||||
return self.allocWithOptionsRetAddr(Elem, n, optional_alignment, optional_sentinel, @returnAddress());
|
||||
}
|
||||
|
||||
pub fn allocWithOptionsRetAddr(
|
||||
self: *Allocator,
|
||||
comptime Elem: type,
|
||||
n: usize,
|
||||
/// null means naturally aligned
|
||||
comptime optional_alignment: ?u29,
|
||||
comptime optional_sentinel: ?Elem,
|
||||
return_address: usize,
|
||||
) Error!AllocWithOptionsPayload(Elem, optional_alignment, optional_sentinel) {
|
||||
if (optional_sentinel) |sentinel| {
|
||||
const ptr = try self.allocAdvancedWithRetAddr(Elem, optional_alignment, n + 1, .exact, return_address);
|
||||
ptr[n] = sentinel;
|
||||
return ptr[0..n :sentinel];
|
||||
} else {
|
||||
return self.allocAdvancedWithRetAddr(Elem, optional_alignment, n, .exact, return_address);
|
||||
}
|
||||
}
|
||||
|
||||
fn AllocWithOptionsPayload(comptime Elem: type, comptime alignment: ?u29, comptime sentinel: ?Elem) type {
|
||||
if (sentinel) |s| {
|
||||
return [:s]align(alignment orelse @alignOf(Elem)) Elem;
|
||||
} else {
|
||||
return []align(alignment orelse @alignOf(Elem)) Elem;
|
||||
}
|
||||
}
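// A minimal usage sketch for `allocWithOptions`, requesting natural alignment
// plus a 0 sentinel so the result type is `[:0]u8`:
const std = @import("std");

test "allocWithOptions usage sketch" {
    const buf = try std.testing.allocator.allocWithOptions(u8, 16, null, 0);
    defer std.testing.allocator.free(buf);
    std.testing.expect(buf.len == 16);
    std.testing.expect(buf[buf.len] == 0); // the sentinel lives just past the end
}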
|
||||
|
||||
/// Allocates an array of `n + 1` items of type `T` and sets the first `n`
|
||||
/// items to `undefined` and the last item to `sentinel`. Depending on the
|
||||
/// Allocator implementation, it may be required to call `free` once the
|
||||
/// memory is no longer needed, to avoid a resource leak. If the
|
||||
/// `Allocator` implementation is unknown, then correct code will
|
||||
/// call `free` when done.
|
||||
///
|
||||
/// For allocating a single item, see `create`.
|
||||
///
|
||||
/// Deprecated; use `allocWithOptions`.
|
||||
pub fn allocSentinel(
|
||||
self: *Allocator,
|
||||
comptime Elem: type,
|
||||
n: usize,
|
||||
comptime sentinel: Elem,
|
||||
) Error![:sentinel]Elem {
|
||||
return self.allocWithOptionsRetAddr(Elem, n, null, sentinel, @returnAddress());
|
||||
}
|
||||
|
||||
/// Deprecated: use `allocAdvanced`
|
||||
pub fn alignedAlloc(
|
||||
self: *Allocator,
|
||||
comptime T: type,
|
||||
/// null means naturally aligned
|
||||
comptime alignment: ?u29,
|
||||
n: usize,
|
||||
) Error![]align(alignment orelse @alignOf(T)) T {
|
||||
return self.allocAdvancedWithRetAddr(T, alignment, n, .exact, @returnAddress());
|
||||
}
|
||||
|
||||
pub fn allocAdvanced(
|
||||
self: *Allocator,
|
||||
comptime T: type,
|
||||
/// null means naturally aligned
|
||||
comptime alignment: ?u29,
|
||||
n: usize,
|
||||
exact: Exact,
|
||||
) Error![]align(alignment orelse @alignOf(T)) T {
|
||||
return self.allocAdvancedWithRetAddr(T, alignment, n, exact, @returnAddress());
|
||||
}
|
||||
|
||||
pub const Exact = enum { exact, at_least };
|
||||
|
||||
pub fn allocAdvancedWithRetAddr(
|
||||
self: *Allocator,
|
||||
comptime T: type,
|
||||
/// null means naturally aligned
|
||||
comptime alignment: ?u29,
|
||||
n: usize,
|
||||
exact: Exact,
|
||||
return_address: usize,
|
||||
) Error![]align(alignment orelse @alignOf(T)) T {
|
||||
const a = if (alignment) |a| blk: {
|
||||
if (a == @alignOf(T)) return allocAdvancedWithRetAddr(self, T, null, n, exact, return_address);
|
||||
break :blk a;
|
||||
} else @alignOf(T);
|
||||
|
||||
if (n == 0) {
|
||||
return @as([*]align(a) T, undefined)[0..0];
|
||||
}
|
||||
|
||||
const byte_count = math.mul(usize, @sizeOf(T), n) catch return Error.OutOfMemory;
|
||||
// TODO The `if (alignment == null)` blocks are workarounds for zig not being able to
|
||||
// access certain type information about T without creating a circular dependency in async
|
||||
// functions that heap-allocate their own frame with @Frame(func).
|
||||
const size_of_T = if (alignment == null) @intCast(u29, @divExact(byte_count, n)) else @sizeOf(T);
|
||||
const len_align: u29 = switch (exact) {
|
||||
.exact => 0,
|
||||
.at_least => size_of_T,
|
||||
};
|
||||
const byte_slice = try self.allocFn(self, byte_count, a, len_align, return_address);
|
||||
switch (exact) {
|
||||
.exact => assert(byte_slice.len == byte_count),
|
||||
.at_least => assert(byte_slice.len >= byte_count),
|
||||
}
|
||||
// TODO: https://github.com/ziglang/zig/issues/4298
|
||||
@memset(byte_slice.ptr, undefined, byte_slice.len);
|
||||
if (alignment == null) {
|
||||
// This if block is a workaround (see comment above)
|
||||
return @intToPtr([*]T, @ptrToInt(byte_slice.ptr))[0..@divExact(byte_slice.len, @sizeOf(T))];
|
||||
} else {
|
||||
return mem.bytesAsSlice(T, @alignCast(a, byte_slice));
|
||||
}
|
||||
}
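// A minimal usage sketch contrasting `.exact` and `.at_least` (how much extra
// an allocator hands back for `.at_least` is allocator-specific):
const std = @import("std");

test "allocAdvanced usage sketch" {
    const allocator = std.testing.allocator;
    // .exact: the returned slice is exactly the requested length.
    const exact = try allocator.allocAdvanced(u8, null, 100, .exact);
    defer allocator.free(exact);
    std.testing.expect(exact.len == 100);
    // .at_least: the allocator may return its whole internal bucket.
    const loose = try allocator.allocAdvanced(u8, null, 100, .at_least);
    defer allocator.free(loose);
    std.testing.expect(loose.len >= 100);
}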
|
||||
|
||||
/// Increases or decreases the size of an allocation. It is guaranteed to not move the pointer.
|
||||
pub fn resize(self: *Allocator, old_mem: anytype, new_n: usize) Error!@TypeOf(old_mem) {
|
||||
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
|
||||
const T = Slice.child;
|
||||
if (new_n == 0) {
|
||||
self.free(old_mem);
|
||||
return &[0]T{};
|
||||
}
|
||||
const old_byte_slice = mem.sliceAsBytes(old_mem);
|
||||
const new_byte_count = math.mul(usize, @sizeOf(T), new_n) catch return Error.OutOfMemory;
|
||||
const rc = try self.resizeFn(self, old_byte_slice, Slice.alignment, new_byte_count, 0, @returnAddress());
|
||||
assert(rc == new_byte_count);
|
||||
const new_byte_slice = old_mem.ptr[0..new_byte_count];
|
||||
return mem.bytesAsSlice(T, new_byte_slice);
|
||||
}
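// A minimal usage sketch for `resize`: growing in place may fail with
// error.OutOfMemory, while the interface contract guarantees that shrinking
// succeeds and never moves the pointer:
const std = @import("std");

test "resize usage sketch" {
    const allocator = std.testing.allocator;
    var buf = try allocator.alloc(u8, 64);
    defer allocator.free(buf);
    buf = try allocator.resize(buf, 48); // shrink in place
    std.testing.expect(buf.len == 48);
}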
|
||||
|
||||
/// This function requests a new byte size for an existing allocation,
|
||||
/// which can be larger, smaller, or the same size as the old memory
|
||||
/// allocation.
|
||||
/// This function is preferred over `shrink`, because it can fail, even
|
||||
/// when shrinking. This gives the allocator a chance to perform a
|
||||
/// cheap shrink operation if possible, or otherwise return OutOfMemory,
|
||||
/// indicating that the caller should keep their capacity, for example
|
||||
/// in `std.ArrayList.shrink`.
|
||||
/// If you need guaranteed success, call `shrink`.
|
||||
/// If `new_n` is 0, this is the same as `free` and it always succeeds.
|
||||
pub fn realloc(self: *Allocator, old_mem: anytype, new_n: usize) t: {
|
||||
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
|
||||
break :t Error![]align(Slice.alignment) Slice.child;
|
||||
} {
|
||||
const old_alignment = @typeInfo(@TypeOf(old_mem)).Pointer.alignment;
|
||||
return self.reallocAdvancedWithRetAddr(old_mem, old_alignment, new_n, .exact, @returnAddress());
|
||||
}
|
||||
|
||||
pub fn reallocAtLeast(self: *Allocator, old_mem: anytype, new_n: usize) t: {
|
||||
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
|
||||
break :t Error![]align(Slice.alignment) Slice.child;
|
||||
} {
|
||||
const old_alignment = @typeInfo(@TypeOf(old_mem)).Pointer.alignment;
|
||||
return self.reallocAdvancedWithRetAddr(old_mem, old_alignment, new_n, .at_least, @returnAddress());
|
||||
}
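// A minimal usage sketch for `realloc`: growing may move the allocation, but
// the contents are preserved up to the smaller of the two lengths:
const std = @import("std");

test "realloc usage sketch" {
    const allocator = std.testing.allocator;
    var items = try allocator.alloc(i32, 4);
    defer allocator.free(items);
    items[0] = 7;
    items = try allocator.realloc(items, 8);
    std.testing.expect(items.len == 8);
    std.testing.expect(items[0] == 7);
}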
|
||||
|
||||
/// This is the same as `realloc`, except caller may additionally request
|
||||
/// a new alignment, which can be larger, smaller, or the same as the old
|
||||
/// allocation.
|
||||
pub fn reallocAdvanced(
|
||||
self: *Allocator,
|
||||
old_mem: anytype,
|
||||
comptime new_alignment: u29,
|
||||
new_n: usize,
|
||||
exact: Exact,
|
||||
) Error![]align(new_alignment) @typeInfo(@TypeOf(old_mem)).Pointer.child {
|
||||
return self.reallocAdvancedWithRetAddr(old_mem, new_alignment, new_n, exact, @returnAddress());
|
||||
}
|
||||
|
||||
pub fn reallocAdvancedWithRetAddr(
|
||||
self: *Allocator,
|
||||
old_mem: anytype,
|
||||
comptime new_alignment: u29,
|
||||
new_n: usize,
|
||||
exact: Exact,
|
||||
return_address: usize,
|
||||
) Error![]align(new_alignment) @typeInfo(@TypeOf(old_mem)).Pointer.child {
|
||||
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
|
||||
const T = Slice.child;
|
||||
if (old_mem.len == 0) {
|
||||
return self.allocAdvanced(T, new_alignment, new_n, exact);
|
||||
}
|
||||
if (new_n == 0) {
|
||||
self.free(old_mem);
|
||||
return @as([*]align(new_alignment) T, undefined)[0..0];
|
||||
}
|
||||
|
||||
const old_byte_slice = mem.sliceAsBytes(old_mem);
|
||||
const byte_count = math.mul(usize, @sizeOf(T), new_n) catch return Error.OutOfMemory;
|
||||
// Note: can't set shrunk memory to undefined as memory shouldn't be modified on realloc failure
|
||||
const len_align: u29 = switch (exact) {
|
||||
.exact => 0,
|
||||
.at_least => @sizeOf(T),
|
||||
};
|
||||
const new_byte_slice = try self.reallocBytes(old_byte_slice, Slice.alignment, byte_count, new_alignment, len_align, return_address);
|
||||
return mem.bytesAsSlice(T, @alignCast(new_alignment, new_byte_slice));
|
||||
}
|
||||
|
||||
/// Prefer calling realloc to shrink if you can tolerate failure, such as
|
||||
/// in an ArrayList data structure with a storage capacity.
|
||||
/// Shrink always succeeds, and `new_n` must be <= `old_mem.len`.
|
||||
/// Returned slice has same alignment as old_mem.
|
||||
/// Shrinking to 0 is the same as calling `free`.
|
||||
pub fn shrink(self: *Allocator, old_mem: anytype, new_n: usize) t: {
|
||||
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
|
||||
break :t []align(Slice.alignment) Slice.child;
|
||||
} {
|
||||
const old_alignment = @typeInfo(@TypeOf(old_mem)).Pointer.alignment;
|
||||
return self.alignedShrinkWithRetAddr(old_mem, old_alignment, new_n, @returnAddress());
|
||||
}
|
||||
|
||||
/// This is the same as `shrink`, except caller may additionally request
|
||||
/// a new alignment, which must be smaller or the same as the old
|
||||
/// allocation.
|
||||
pub fn alignedShrink(
|
||||
self: *Allocator,
|
||||
old_mem: anytype,
|
||||
comptime new_alignment: u29,
|
||||
new_n: usize,
|
||||
) []align(new_alignment) @typeInfo(@TypeOf(old_mem)).Pointer.child {
|
||||
return self.alignedShrinkWithRetAddr(old_mem, new_alignment, new_n, @returnAddress());
|
||||
}
|
||||
|
||||
/// This is the same as `alignedShrink`, except caller may additionally pass
|
||||
/// the return address of the first stack frame, which may be relevant for
|
||||
/// allocators which collect stack traces.
|
||||
pub fn alignedShrinkWithRetAddr(
|
||||
self: *Allocator,
|
||||
old_mem: anytype,
|
||||
comptime new_alignment: u29,
|
||||
new_n: usize,
|
||||
return_address: usize,
|
||||
) []align(new_alignment) @typeInfo(@TypeOf(old_mem)).Pointer.child {
|
||||
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
|
||||
const T = Slice.child;
|
||||
|
||||
if (new_n == old_mem.len)
|
||||
return old_mem;
|
||||
assert(new_n < old_mem.len);
|
||||
assert(new_alignment <= Slice.alignment);
|
||||
|
||||
// Here we skip the overflow checking on the multiplication because
|
||||
// new_n <= old_mem.len and the multiplication didn't overflow for that operation.
|
||||
const byte_count = @sizeOf(T) * new_n;
|
||||
|
||||
const old_byte_slice = mem.sliceAsBytes(old_mem);
|
||||
// TODO: https://github.com/ziglang/zig/issues/4298
|
||||
@memset(old_byte_slice.ptr + byte_count, undefined, old_byte_slice.len - byte_count);
|
||||
_ = self.shrinkBytes(old_byte_slice, Slice.alignment, byte_count, 0, return_address);
|
||||
return old_mem[0..new_n];
|
||||
}
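// A minimal usage sketch for `shrink`, which cannot fail; prefer `realloc`
// when the caller can keep its capacity on failure, as `std.ArrayList` does:
const std = @import("std");

test "shrink usage sketch" {
    const allocator = std.testing.allocator;
    var buf = try allocator.alloc(u8, 64);
    defer allocator.free(buf);
    buf = allocator.shrink(buf, 40);
    std.testing.expect(buf.len == 40);
}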
|
||||
|
||||
/// Free an array allocated with `alloc`. To free a single item,
|
||||
/// see `destroy`.
|
||||
pub fn free(self: *Allocator, memory: anytype) void {
|
||||
const Slice = @typeInfo(@TypeOf(memory)).Pointer;
|
||||
const bytes = mem.sliceAsBytes(memory);
|
||||
const bytes_len = bytes.len + if (Slice.sentinel != null) @sizeOf(Slice.child) else 0;
|
||||
if (bytes_len == 0) return;
|
||||
const non_const_ptr = @intToPtr([*]u8, @ptrToInt(bytes.ptr));
|
||||
// TODO: https://github.com/ziglang/zig/issues/4298
|
||||
@memset(non_const_ptr, undefined, bytes_len);
|
||||
_ = self.shrinkBytes(non_const_ptr[0..bytes_len], Slice.alignment, 0, 0, @returnAddress());
|
||||
}
|
||||
|
||||
/// Copies `m` to newly allocated memory. Caller owns the memory.
|
||||
pub fn dupe(allocator: *Allocator, comptime T: type, m: []const T) ![]T {
|
||||
const new_buf = try allocator.alloc(T, m.len);
|
||||
mem.copy(T, new_buf, m);
|
||||
return new_buf;
|
||||
}
|
||||
|
||||
/// Copies `m` to newly allocated memory, with a null-terminated element. Caller owns the memory.
|
||||
pub fn dupeZ(allocator: *Allocator, comptime T: type, m: []const T) ![:0]T {
|
||||
const new_buf = try allocator.alloc(T, m.len + 1);
|
||||
mem.copy(T, new_buf, m);
|
||||
new_buf[m.len] = 0;
|
||||
return new_buf[0..m.len :0];
|
||||
}
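// A minimal usage sketch for `dupe` and `dupeZ`:
const std = @import("std");

test "dupe usage sketch" {
    const allocator = std.testing.allocator;
    const copy = try allocator.dupe(u8, "hello");
    defer allocator.free(copy);
    std.testing.expect(std.mem.eql(u8, copy, "hello"));

    // dupeZ appends a 0 terminator, yielding a [:0]u8 suitable for C APIs.
    const copy_z = try allocator.dupeZ(u8, "hello");
    defer allocator.free(copy_z);
    std.testing.expect(copy_z[copy_z.len] == 0);
}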
|
||||
|
||||
/// Call `resizeFn`, but caller guarantees that `new_len` <= `buf.len` meaning
|
||||
/// error.OutOfMemory should be impossible.
|
||||
/// This function allows a runtime `buf_align` value. Callers should generally prefer
|
||||
/// to call `shrink` directly.
|
||||
pub fn shrinkBytes(
|
||||
self: *Allocator,
|
||||
buf: []u8,
|
||||
buf_align: u29,
|
||||
new_len: usize,
|
||||
len_align: u29,
|
||||
return_address: usize,
|
||||
) usize {
|
||||
assert(new_len <= buf.len);
|
||||
return self.resizeFn(self, buf, buf_align, new_len, len_align, return_address) catch unreachable;
|
||||
}
|
||||
@ -15,8 +15,7 @@ const ResetEvent = std.ResetEvent;
|
||||
/// deadlock detection.
|
||||
///
|
||||
/// Example usage:
|
||||
/// var m = Mutex.init();
|
||||
/// defer m.deinit();
|
||||
/// var m = Mutex{};
|
||||
///
|
||||
/// const lock = m.acquire();
|
||||
/// defer lock.release();
|
||||
@ -30,141 +29,13 @@ const ResetEvent = std.ResetEvent;
|
||||
/// // ... lock not acquired
|
||||
/// }
|
||||
pub const Mutex = if (builtin.single_threaded)
|
||||
struct {
|
||||
lock: @TypeOf(lock_init),
|
||||
|
||||
const lock_init = if (std.debug.runtime_safety) false else {};
|
||||
|
||||
pub const Held = struct {
|
||||
mutex: *Mutex,
|
||||
|
||||
pub fn release(self: Held) void {
|
||||
if (std.debug.runtime_safety) {
|
||||
self.mutex.lock = false;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
/// Create a new mutex in unlocked state.
|
||||
pub fn init() Mutex {
|
||||
return Mutex{ .lock = lock_init };
|
||||
}
|
||||
|
||||
/// Free a mutex created with init. Calling this while the
|
||||
/// mutex is held is illegal behavior.
|
||||
pub fn deinit(self: *Mutex) void {
|
||||
self.* = undefined;
|
||||
}
|
||||
|
||||
/// Try to acquire the mutex without blocking. Returns null if
|
||||
/// the mutex is unavailable. Otherwise returns Held. Call
|
||||
/// release on Held.
|
||||
pub fn tryAcquire(self: *Mutex) ?Held {
|
||||
if (std.debug.runtime_safety) {
|
||||
if (self.lock) return null;
|
||||
self.lock = true;
|
||||
}
|
||||
return Held{ .mutex = self };
|
||||
}
|
||||
|
||||
/// Acquire the mutex. Will deadlock if the mutex is already
|
||||
/// held by the calling thread.
|
||||
pub fn acquire(self: *Mutex) Held {
|
||||
return self.tryAcquire() orelse @panic("deadlock detected");
|
||||
}
|
||||
}
|
||||
Dummy
|
||||
else if (builtin.os.tag == .windows)
|
||||
// https://locklessinc.com/articles/keyed_events/
|
||||
extern union {
|
||||
locked: u8,
|
||||
waiters: u32,
|
||||
|
||||
const WAKE = 1 << 8;
|
||||
const WAIT = 1 << 9;
|
||||
|
||||
pub fn init() Mutex {
|
||||
return Mutex{ .waiters = 0 };
|
||||
}
|
||||
|
||||
pub fn deinit(self: *Mutex) void {
|
||||
self.* = undefined;
|
||||
}
|
||||
|
||||
pub fn tryAcquire(self: *Mutex) ?Held {
|
||||
if (@atomicRmw(u8, &self.locked, .Xchg, 1, .Acquire) != 0)
|
||||
return null;
|
||||
return Held{ .mutex = self };
|
||||
}
|
||||
|
||||
pub fn acquire(self: *Mutex) Held {
|
||||
return self.tryAcquire() orelse self.acquireSlow();
|
||||
}
|
||||
|
||||
fn acquireSpinning(self: *Mutex) Held {
|
||||
@setCold(true);
|
||||
while (true) : (SpinLock.yield()) {
|
||||
return self.tryAcquire() orelse continue;
|
||||
}
|
||||
}
|
||||
|
||||
fn acquireSlow(self: *Mutex) Held {
|
||||
// try to use NT keyed events for blocking, falling back to spinlock if unavailable
|
||||
@setCold(true);
|
||||
const handle = ResetEvent.OsEvent.Futex.getEventHandle() orelse return self.acquireSpinning();
|
||||
const key = @ptrCast(*const c_void, &self.waiters);
|
||||
|
||||
while (true) : (SpinLock.loopHint(1)) {
|
||||
const waiters = @atomicLoad(u32, &self.waiters, .Monotonic);
|
||||
|
||||
// try and take lock if unlocked
|
||||
if ((waiters & 1) == 0) {
|
||||
if (@atomicRmw(u8, &self.locked, .Xchg, 1, .Acquire) == 0) {
|
||||
return Held{ .mutex = self };
|
||||
}
|
||||
|
||||
// otherwise, try and update the waiting count.
|
||||
// then unset the WAKE bit so that another unlocker can wake up a thread.
|
||||
} else if (@cmpxchgWeak(u32, &self.waiters, waiters, (waiters + WAIT) | 1, .Monotonic, .Monotonic) == null) {
|
||||
const rc = windows.ntdll.NtWaitForKeyedEvent(handle, key, windows.FALSE, null);
|
||||
assert(rc == .SUCCESS);
|
||||
_ = @atomicRmw(u32, &self.waiters, .Sub, WAKE, .Monotonic);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub const Held = struct {
|
||||
mutex: *Mutex,
|
||||
|
||||
pub fn release(self: Held) void {
|
||||
// unlock without a rmw/cmpxchg instruction
|
||||
@atomicStore(u8, @ptrCast(*u8, &self.mutex.locked), 0, .Release);
|
||||
const handle = ResetEvent.OsEvent.Futex.getEventHandle() orelse return;
|
||||
const key = @ptrCast(*const c_void, &self.mutex.waiters);
|
||||
|
||||
while (true) : (SpinLock.loopHint(1)) {
|
||||
const waiters = @atomicLoad(u32, &self.mutex.waiters, .Monotonic);
|
||||
|
||||
// no one is waiting
|
||||
if (waiters < WAIT) return;
|
||||
// someone grabbed the lock and will do the wake instead
|
||||
if (waiters & 1 != 0) return;
|
||||
// someone else is currently waking up
|
||||
if (waiters & WAKE != 0) return;
|
||||
|
||||
// try to decrease the waiter count & set the WAKE bit meaning a thread is waking up
|
||||
if (@cmpxchgWeak(u32, &self.mutex.waiters, waiters, waiters - WAIT + WAKE, .Release, .Monotonic) == null) {
|
||||
const rc = windows.ntdll.NtReleaseKeyedEvent(handle, key, windows.FALSE, null);
|
||||
assert(rc == .SUCCESS);
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
WindowsMutex
|
||||
else if (builtin.link_libc or builtin.os.tag == .linux)
|
||||
// stack-based version of https://github.com/Amanieu/parking_lot/blob/master/core/src/word_lock.rs
|
||||
struct {
|
||||
state: usize,
|
||||
state: usize = 0,
|
||||
|
||||
/// number of times to spin trying to acquire the lock.
|
||||
/// https://webkit.org/blog/6161/locking-in-webkit/
|
||||
@ -179,14 +50,6 @@ else if (builtin.link_libc or builtin.os.tag == .linux)
|
||||
event: ResetEvent,
|
||||
};
|
||||
|
||||
pub fn init() Mutex {
|
||||
return Mutex{ .state = 0 };
|
||||
}
|
||||
|
||||
pub fn deinit(self: *Mutex) void {
|
||||
self.* = undefined;
|
||||
}
|
||||
|
||||
pub fn tryAcquire(self: *Mutex) ?Held {
|
||||
if (@cmpxchgWeak(usize, &self.state, 0, MUTEX_LOCK, .Acquire, .Monotonic) != null)
|
||||
return null;
|
||||
@ -298,6 +161,128 @@ else if (builtin.link_libc or builtin.os.tag == .linux)
|
||||
else
|
||||
SpinLock;
|
||||
|
||||
/// This has the same semantics as `Mutex`, however it does not actually do any
|
||||
/// synchronization. Operations are safety-checked no-ops.
|
||||
pub const Dummy = struct {
|
||||
lock: @TypeOf(lock_init) = lock_init,
|
||||
|
||||
const lock_init = if (std.debug.runtime_safety) false else {};
|
||||
|
||||
pub const Held = struct {
|
||||
mutex: *Dummy,
|
||||
|
||||
pub fn release(self: Held) void {
|
||||
if (std.debug.runtime_safety) {
|
||||
self.mutex.lock = false;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
/// Create a new mutex in unlocked state.
|
||||
pub const init = Dummy{};
|
||||
|
||||
/// Try to acquire the mutex without blocking. Returns null if
|
||||
/// the mutex is unavailable. Otherwise returns Held. Call
|
||||
/// release on Held.
|
||||
pub fn tryAcquire(self: *Dummy) ?Held {
|
||||
if (std.debug.runtime_safety) {
|
||||
if (self.lock) return null;
|
||||
self.lock = true;
|
||||
}
|
||||
return Held{ .mutex = self };
|
||||
}
|
||||
|
||||
/// Acquire the mutex. Will deadlock if the mutex is already
|
||||
/// held by the calling thread.
|
||||
pub fn acquire(self: *Dummy) Held {
|
||||
return self.tryAcquire() orelse @panic("deadlock detected");
|
||||
}
|
||||
};
|
||||
|
||||
// https://locklessinc.com/articles/keyed_events/
|
||||
const WindowsMutex = struct {
|
||||
state: State = State{ .waiters = 0 },
|
||||
|
||||
const State = extern union {
|
||||
locked: u8,
|
||||
waiters: u32,
|
||||
};
|
||||
|
||||
const WAKE = 1 << 8;
|
||||
const WAIT = 1 << 9;
|
||||
|
||||
pub fn tryAcquire(self: *WindowsMutex) ?Held {
|
||||
if (@atomicRmw(u8, &self.state.locked, .Xchg, 1, .Acquire) != 0)
|
||||
return null;
|
||||
return Held{ .mutex = self };
|
||||
}
|
||||
|
||||
pub fn acquire(self: *WindowsMutex) Held {
|
||||
return self.tryAcquire() orelse self.acquireSlow();
|
||||
}
|
||||
|
||||
fn acquireSpinning(self: *WindowsMutex) Held {
|
||||
@setCold(true);
|
||||
while (true) : (SpinLock.yield()) {
|
||||
return self.tryAcquire() orelse continue;
|
||||
}
|
||||
}
|
||||
|
||||
fn acquireSlow(self: *WindowsMutex) Held {
|
||||
// try to use NT keyed events for blocking, falling back to spinlock if unavailable
|
||||
@setCold(true);
|
||||
const handle = ResetEvent.OsEvent.Futex.getEventHandle() orelse return self.acquireSpinning();
|
||||
const key = @ptrCast(*const c_void, &self.state.waiters);
|
||||
|
||||
while (true) : (SpinLock.loopHint(1)) {
|
||||
const waiters = @atomicLoad(u32, &self.state.waiters, .Monotonic);
|
||||
|
||||
// try and take lock if unlocked
|
||||
if ((waiters & 1) == 0) {
|
||||
if (@atomicRmw(u8, &self.state.locked, .Xchg, 1, .Acquire) == 0) {
|
||||
return Held{ .mutex = self };
|
||||
}
|
||||
|
||||
// otherwise, try and update the waiting count.
|
||||
// then unset the WAKE bit so that another unlocker can wake up a thread.
|
||||
} else if (@cmpxchgWeak(u32, &self.state.waiters, waiters, (waiters + WAIT) | 1, .Monotonic, .Monotonic) == null) {
|
||||
const rc = windows.ntdll.NtWaitForKeyedEvent(handle, key, windows.FALSE, null);
|
||||
assert(rc == .SUCCESS);
|
||||
_ = @atomicRmw(u32, &self.state.waiters, .Sub, WAKE, .Monotonic);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub const Held = struct {
|
||||
mutex: *WindowsMutex,
|
||||
|
||||
pub fn release(self: Held) void {
|
||||
// unlock without a rmw/cmpxchg instruction
|
||||
@atomicStore(u8, @ptrCast(*u8, &self.mutex.state.locked), 0, .Release);
|
||||
const handle = ResetEvent.OsEvent.Futex.getEventHandle() orelse return;
|
||||
const key = @ptrCast(*const c_void, &self.mutex.state.waiters);
|
||||
|
||||
while (true) : (SpinLock.loopHint(1)) {
|
||||
const waiters = @atomicLoad(u32, &self.mutex.state.waiters, .Monotonic);
|
||||
|
||||
// no one is waiting
|
||||
if (waiters < WAIT) return;
|
||||
// someone grabbed the lock and will do the wake instead
|
||||
if (waiters & 1 != 0) return;
|
||||
// someone else is currently waking up
|
||||
if (waiters & WAKE != 0) return;
|
||||
|
||||
// try to decrease the waiter count & set the WAKE bit meaning a thread is waking up
|
||||
if (@cmpxchgWeak(u32, &self.mutex.state.waiters, waiters, waiters - WAIT + WAKE, .Release, .Monotonic) == null) {
|
||||
const rc = windows.ntdll.NtReleaseKeyedEvent(handle, key, windows.FALSE, null);
|
||||
assert(rc == .SUCCESS);
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
};
|
||||
|
||||
const TestContext = struct {
|
||||
mutex: *Mutex,
|
||||
data: i128,
|
||||
@ -306,8 +291,7 @@ const TestContext = struct {
|
||||
};
|
||||
|
||||
test "std.Mutex" {
|
||||
var mutex = Mutex.init();
|
||||
defer mutex.deinit();
|
||||
var mutex = Mutex{};
|
||||
|
||||
var context = TestContext{
|
||||
.mutex = &mutex,
|
||||
|
||||
@ -10,7 +10,7 @@ pub fn once(comptime f: fn () void) Once(f) {
|
||||
pub fn Once(comptime f: fn () void) type {
|
||||
return struct {
|
||||
done: bool = false,
|
||||
mutex: std.Mutex = std.Mutex.init(),
|
||||
mutex: std.Mutex = std.Mutex{},
|
||||
|
||||
/// Call the function `f`.
|
||||
/// If `call` is invoked multiple times `f` will be executed only the
|
||||
|
||||
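// A minimal usage sketch for this API (assumes `once` is re-exported as
// `std.once`; the `global_counter`/`incr` names are illustrative):
const std = @import("std");

var global_counter: usize = 0;

fn incr() void {
    global_counter += 1;
}

var incr_once = std.once(incr);

test "once usage sketch" {
    incr_once.call();
    incr_once.call(); // `incr` runs only on the first call
    std.testing.expect(global_counter == 1);
}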
@ -19,15 +19,21 @@ pub fn main() anyerror!void {
|
||||
// ignores the alignment of the slice.
|
||||
async_frame_buffer = &[_]u8{};
|
||||
|
||||
var leaks: usize = 0;
|
||||
for (test_fn_list) |test_fn, i| {
|
||||
std.testing.base_allocator_instance.reset();
|
||||
std.testing.allocator_instance = std.heap.GeneralPurposeAllocator(.{}){};
|
||||
defer {
|
||||
if (std.testing.allocator_instance.deinit()) {
|
||||
leaks += 1;
|
||||
}
|
||||
}
|
||||
std.testing.log_level = .warn;
|
||||
|
||||
var test_node = root_node.start(test_fn.name, null);
|
||||
test_node.activate();
|
||||
progress.refresh();
|
||||
if (progress.terminal == null) {
|
||||
std.debug.warn("{}/{} {}...", .{ i + 1, test_fn_list.len, test_fn.name });
|
||||
std.debug.print("{}/{} {}...", .{ i + 1, test_fn_list.len, test_fn.name });
|
||||
}
|
||||
const result = if (test_fn.async_frame_size) |size| switch (io_mode) {
|
||||
.evented => blk: {
|
||||
@ -42,24 +48,20 @@ pub fn main() anyerror!void {
|
||||
skip_count += 1;
|
||||
test_node.end();
|
||||
progress.log("{}...SKIP (async test)\n", .{test_fn.name});
|
||||
if (progress.terminal == null) std.debug.warn("SKIP (async test)\n", .{});
|
||||
if (progress.terminal == null) std.debug.print("SKIP (async test)\n", .{});
|
||||
continue;
|
||||
},
|
||||
} else test_fn.func();
|
||||
if (result) |_| {
|
||||
ok_count += 1;
|
||||
test_node.end();
|
||||
std.testing.allocator_instance.validate() catch |err| switch (err) {
|
||||
error.Leak => std.debug.panic("", .{}),
|
||||
else => std.debug.panic("error.{}", .{@errorName(err)}),
|
||||
};
|
||||
if (progress.terminal == null) std.debug.warn("OK\n", .{});
|
||||
if (progress.terminal == null) std.debug.print("OK\n", .{});
|
||||
} else |err| switch (err) {
|
||||
error.SkipZigTest => {
|
||||
skip_count += 1;
|
||||
test_node.end();
|
||||
progress.log("{}...SKIP\n", .{test_fn.name});
|
||||
if (progress.terminal == null) std.debug.warn("SKIP\n", .{});
|
||||
if (progress.terminal == null) std.debug.print("SKIP\n", .{});
|
||||
},
|
||||
else => {
|
||||
progress.log("", .{});
|
||||
@ -69,9 +71,13 @@ pub fn main() anyerror!void {
|
||||
}
|
||||
root_node.end();
|
||||
if (ok_count == test_fn_list.len) {
|
||||
std.debug.warn("All {} tests passed.\n", .{ok_count});
|
||||
std.debug.print("All {} tests passed.\n", .{ok_count});
|
||||
} else {
|
||||
std.debug.warn("{} passed; {} skipped.\n", .{ ok_count, skip_count });
|
||||
std.debug.print("{} passed; {} skipped.\n", .{ ok_count, skip_count });
|
||||
}
|
||||
if (leaks != 0) {
|
||||
std.debug.print("{} tests leaked memory\n", .{ok_count});
|
||||
std.process.exit(1);
}
}
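
The leak accounting added to the test runner reduces to a small pattern: give each test a fresh GeneralPurposeAllocator and let deinit() report whether anything leaked. A standalone sketch of that pattern, not the test runner itself; the alloc/free pair exists only so there is something to check:

const std = @import("std");

test "leak check with GeneralPurposeAllocator" {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer {
        // deinit() returns true when allocations were leaked.
        if (gpa.deinit()) @panic("memory leak detected");
    }
    const allocator = &gpa.allocator;

    const buf = try allocator.alloc(u8, 64);
    defer allocator.free(buf);
}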


@ -13,7 +13,8 @@ pub const ComptimeStringMap = @import("comptime_string_map.zig").ComptimeStringM
pub const DynLib = @import("dynamic_library.zig").DynLib;
pub const HashMap = hash_map.HashMap;
pub const HashMapUnmanaged = hash_map.HashMapUnmanaged;
pub const Mutex = @import("mutex.zig").Mutex;
pub const mutex = @import("mutex.zig");
pub const Mutex = mutex.Mutex;
pub const PackedIntArray = @import("packed_int_array.zig").PackedIntArray;
pub const PackedIntArrayEndian = @import("packed_int_array.zig").PackedIntArrayEndian;
pub const PackedIntSlice = @import("packed_int_array.zig").PackedIntSlice;

@ -1,18 +1,16 @@
const std = @import("std.zig");
const warn = std.debug.warn;
const print = std.debug.print;

pub const LeakCountAllocator = @import("testing/leak_count_allocator.zig").LeakCountAllocator;
pub const FailingAllocator = @import("testing/failing_allocator.zig").FailingAllocator;

/// This should only be used in temporary test programs.
pub const allocator = &allocator_instance.allocator;
pub var allocator_instance = LeakCountAllocator.init(&base_allocator_instance.allocator);
pub var allocator_instance = std.heap.GeneralPurposeAllocator(.{}){};

pub const failing_allocator = &failing_allocator_instance.allocator;
pub var failing_allocator_instance = FailingAllocator.init(&base_allocator_instance.allocator, 0);

pub var base_allocator_instance = std.mem.validationWrap(std.heap.ThreadSafeFixedBufferAllocator.init(allocator_mem[0..]));
var allocator_mem: [2 * 1024 * 1024]u8 = undefined;
pub var base_allocator_instance = std.heap.FixedBufferAllocator.init("");

/// TODO https://github.com/ziglang/zig/issues/5738
pub var log_level = std.log.Level.warn;
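
std.testing.failing_allocator is still built from FailingAllocator, now on top of the new base allocator. A hedged sketch of using FailingAllocator to exercise an out-of-memory path, assuming the two-argument init(backing_allocator, fail_index) shown in this file:

const std = @import("std");

test "allocation failure is surfaced" {
    // Fail the very first allocation (fail_index = 0).
    var failing = std.testing.FailingAllocator.init(std.testing.allocator, 0);
    const result = failing.allocator.alloc(u8, 16);
    std.testing.expectError(error.OutOfMemory, result);
}
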
@ -326,22 +324,22 @@ test "expectEqual vector" {

pub fn expectEqualStrings(expected: []const u8, actual: []const u8) void {
if (std.mem.indexOfDiff(u8, actual, expected)) |diff_index| {
warn("\n====== expected this output: =========\n", .{});
print("\n====== expected this output: =========\n", .{});
printWithVisibleNewlines(expected);
warn("\n======== instead found this: =========\n", .{});
print("\n======== instead found this: =========\n", .{});
printWithVisibleNewlines(actual);
warn("\n======================================\n", .{});
print("\n======================================\n", .{});

var diff_line_number: usize = 1;
for (expected[0..diff_index]) |value| {
if (value == '\n') diff_line_number += 1;
}
warn("First difference occurs on line {}:\n", .{diff_line_number});
print("First difference occurs on line {}:\n", .{diff_line_number});

warn("expected:\n", .{});
print("expected:\n", .{});
printIndicatorLine(expected, diff_index);

warn("found:\n", .{});
print("found:\n", .{});
printIndicatorLine(actual, diff_index);

@panic("test failure");
@ -362,9 +360,9 @@ fn printIndicatorLine(source: []const u8, indicator_index: usize) void {
{
var i: usize = line_begin_index;
while (i < indicator_index) : (i += 1)
warn(" ", .{});
print(" ", .{});
}
warn("^\n", .{});
print("^\n", .{});
}

fn printWithVisibleNewlines(source: []const u8) void {
@ -372,15 +370,15 @@ fn printWithVisibleNewlines(source: []const u8) void {
while (std.mem.indexOf(u8, source[i..], "\n")) |nl| : (i += nl + 1) {
printLine(source[i .. i + nl]);
}
warn("{}␃\n", .{source[i..]}); // End of Text symbol (ETX)
print("{}␃\n", .{source[i..]}); // End of Text symbol (ETX)
}

fn printLine(line: []const u8) void {
if (line.len != 0) switch (line[line.len - 1]) {
' ', '\t' => warn("{}⏎\n", .{line}), // Carriage return symbol,
' ', '\t' => print("{}⏎\n", .{line}), // Carriage return symbol,
else => {},
};
warn("{}\n", .{line});
print("{}\n", .{line});
}

test "" {

@ -45,21 +45,34 @@ pub const FailingAllocator = struct {
};
}

fn alloc(allocator: *std.mem.Allocator, len: usize, ptr_align: u29, len_align: u29) error{OutOfMemory}![]u8 {
fn alloc(
allocator: *std.mem.Allocator,
len: usize,
ptr_align: u29,
len_align: u29,
return_address: usize,
) error{OutOfMemory}![]u8 {
const self = @fieldParentPtr(FailingAllocator, "allocator", allocator);
if (self.index == self.fail_index) {
return error.OutOfMemory;
}
const result = try self.internal_allocator.callAllocFn(len, ptr_align, len_align);
const result = try self.internal_allocator.allocFn(self.internal_allocator, len, ptr_align, len_align, return_address);
self.allocated_bytes += result.len;
self.allocations += 1;
self.index += 1;
return result;
}

fn resize(allocator: *std.mem.Allocator, old_mem: []u8, new_len: usize, len_align: u29) error{OutOfMemory}!usize {
fn resize(
allocator: *std.mem.Allocator,
old_mem: []u8,
old_align: u29,
new_len: usize,
len_align: u29,
ra: usize,
) error{OutOfMemory}!usize {
const self = @fieldParentPtr(FailingAllocator, "allocator", allocator);
const r = self.internal_allocator.callResizeFn(old_mem, new_len, len_align) catch |e| {
const r = self.internal_allocator.resizeFn(self.internal_allocator, old_mem, old_align, new_len, len_align, ra) catch |e| {
std.debug.assert(new_len > old_mem.len);
return e;
};
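
The allocator interface now threads a return address through allocFn and resizeFn, as the widened signatures above show. A minimal pass-through wrapper sketched against those signatures; the wrapper type itself is hypothetical, only the field layout and the function shapes mirror this diff:

const std = @import("std");
const Allocator = std.mem.Allocator;

const PassThroughAllocator = struct {
    allocator: Allocator = .{ .allocFn = alloc, .resizeFn = resize },
    backing: *Allocator,

    fn alloc(allocator: *Allocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) error{OutOfMemory}![]u8 {
        const self = @fieldParentPtr(PassThroughAllocator, "allocator", allocator);
        // Forward to the backing allocator, preserving the caller's return address.
        return self.backing.allocFn(self.backing, len, ptr_align, len_align, ret_addr);
    }

    fn resize(allocator: *Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) error{OutOfMemory}!usize {
        const self = @fieldParentPtr(PassThroughAllocator, "allocator", allocator);
        return self.backing.resizeFn(self.backing, buf, buf_align, new_len, len_align, ret_addr);
    }
};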

@ -1,51 +0,0 @@
const std = @import("../std.zig");

/// This allocator is used in front of another allocator and counts the numbers of allocs and frees.
/// The test runner asserts every alloc has a corresponding free at the end of each test.
///
/// The detection algorithm is incredibly primitive and only accounts for number of calls.
/// This should be replaced by the general purpose debug allocator.
pub const LeakCountAllocator = struct {
count: usize,
allocator: std.mem.Allocator,
internal_allocator: *std.mem.Allocator,

pub fn init(allocator: *std.mem.Allocator) LeakCountAllocator {
return .{
.count = 0,
.allocator = .{
.allocFn = alloc,
.resizeFn = resize,
},
.internal_allocator = allocator,
};
}

fn alloc(allocator: *std.mem.Allocator, len: usize, ptr_align: u29, len_align: u29) error{OutOfMemory}![]u8 {
const self = @fieldParentPtr(LeakCountAllocator, "allocator", allocator);
const ptr = try self.internal_allocator.callAllocFn(len, ptr_align, len_align);
self.count += 1;
return ptr;
}

fn resize(allocator: *std.mem.Allocator, old_mem: []u8, new_size: usize, len_align: u29) error{OutOfMemory}!usize {
const self = @fieldParentPtr(LeakCountAllocator, "allocator", allocator);
if (new_size == 0) {
if (self.count == 0) {
std.debug.panic("error - too many calls to free, most likely double free", .{});
}
self.count -= 1;
}
return self.internal_allocator.callResizeFn(old_mem, new_size, len_align) catch |e| {
std.debug.assert(new_size > old_mem.len);
return e;
};
}

pub fn validate(self: LeakCountAllocator) !void {
if (self.count > 0) {
std.debug.warn("error - detected leaked allocations without matching free: {}\n", .{self.count});
return error.Leak;
}
}
};
@ -325,17 +325,17 @@ pub const File = struct {
/// local symbols, they cannot be mixed. So we must buffer all the global symbols and
/// write them at the end. These are only the local symbols. The length of this array
/// is the value used for sh_info in the .symtab section.
local_symbols: std.ArrayListUnmanaged(elf.Elf64_Sym) = std.ArrayListUnmanaged(elf.Elf64_Sym){},
global_symbols: std.ArrayListUnmanaged(elf.Elf64_Sym) = std.ArrayListUnmanaged(elf.Elf64_Sym){},
local_symbols: std.ArrayListUnmanaged(elf.Elf64_Sym) = .{},
global_symbols: std.ArrayListUnmanaged(elf.Elf64_Sym) = .{},

local_symbol_free_list: std.ArrayListUnmanaged(u32) = std.ArrayListUnmanaged(u32){},
global_symbol_free_list: std.ArrayListUnmanaged(u32) = std.ArrayListUnmanaged(u32){},
offset_table_free_list: std.ArrayListUnmanaged(u32) = std.ArrayListUnmanaged(u32){},
local_symbol_free_list: std.ArrayListUnmanaged(u32) = .{},
global_symbol_free_list: std.ArrayListUnmanaged(u32) = .{},
offset_table_free_list: std.ArrayListUnmanaged(u32) = .{},

/// Same order as in the file. The value is the absolute vaddr value.
/// If the vaddr of the executable program header changes, the entire
/// offset table needs to be rewritten.
offset_table: std.ArrayListUnmanaged(u64) = std.ArrayListUnmanaged(u64){},
offset_table: std.ArrayListUnmanaged(u64) = .{},

phdr_table_dirty: bool = false,
shdr_table_dirty: bool = false,
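
These fields rely on ArrayListUnmanaged being default-initializable with an empty struct literal. A small usage sketch; the append/deinit signatures that take an explicit allocator are assumed from the unmanaged API of this period:

const std = @import("std");

test "default-initialized ArrayListUnmanaged" {
    var list: std.ArrayListUnmanaged(u32) = .{}; // same `.{}` shorthand as above
    defer list.deinit(std.testing.allocator);

    try list.append(std.testing.allocator, 42);
    std.testing.expectEqual(@as(usize, 1), list.items.len);
}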

@ -60,9 +60,10 @@ pub fn log(
std.debug.print(prefix ++ format, args);
}

var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{}){};

pub fn main() !void {
// TODO general purpose allocator in the zig std lib
const gpa = if (std.builtin.link_libc) std.heap.c_allocator else std.heap.page_allocator;
const gpa = if (std.builtin.link_libc) std.heap.c_allocator else &general_purpose_allocator.allocator;
var arena_instance = std.heap.ArenaAllocator.init(gpa);
defer arena_instance.deinit();
const arena = &arena_instance.allocator;
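
With the global GeneralPurposeAllocator instance above, the main() shown here picks its general purpose allocator at startup. A condensed sketch of the same wiring; the deinit()-based leak report at exit is an illustrative addition, not part of this diff:

const std = @import("std");

var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{}){};

pub fn main() !void {
    const use_gpa = !std.builtin.link_libc;
    const gpa = if (use_gpa) &general_purpose_allocator.allocator else std.heap.c_allocator;
    defer {
        if (use_gpa) {
            // Reports leaks at exit; returns true if any were found.
            _ = general_purpose_allocator.deinit();
        }
    }

    var arena_instance = std.heap.ArenaAllocator.init(gpa);
    defer arena_instance.deinit();
    const arena = &arena_instance.allocator;

    // Long-lived allocations go through `gpa`, per-invocation scratch through `arena`.
    _ = arena;
}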

@ -407,8 +407,6 @@ pub const TestContext = struct {
defer root_node.end();

for (self.cases.items) |case| {
std.testing.base_allocator_instance.reset();

var prg_node = root_node.start(case.name, case.updates.items.len);
prg_node.activate();
defer prg_node.end();
@ -419,7 +417,6 @@ pub const TestContext = struct {
progress.refresh_rate_ns = 0;

try self.runOneCase(std.testing.allocator, &prg_node, case);
try std.testing.allocator_instance.validate();
}
}


@ -5886,6 +5886,12 @@ static LLVMValueRef ir_render_breakpoint(CodeGen *g, IrExecutableGen *executable
static LLVMValueRef ir_render_return_address(CodeGen *g, IrExecutableGen *executable,
IrInstGenReturnAddress *instruction)
{
if (target_is_wasm(g->zig_target) && g->zig_target->os != OsEmscripten) {
// I got this error from LLVM 10:
// "Non-Emscripten WebAssembly hasn't implemented __builtin_return_address"
return LLVMConstNull(get_llvm_type(g, instruction->base.value->type));
}

LLVMValueRef zero = LLVMConstNull(g->builtin_types.entry_i32->llvm_type);
LLVMValueRef ptr_val = LLVMBuildCall(g->builder, get_return_address_fn_val(g), &zero, 1, "");
return LLVMBuildPtrToInt(g->builder, ptr_val, g->builtin_types.entry_usize->llvm_type, "");

40
src/ir.cpp
@ -25067,12 +25067,12 @@ static PtrLen size_enum_index_to_ptr_len(BuiltinPtrSize size_enum_index) {
zig_unreachable();
}

static ZigValue *create_ptr_like_type_info(IrAnalyze *ira, ZigType *ptr_type_entry) {
Error err;
static ZigValue *create_ptr_like_type_info(IrAnalyze *ira, IrInst *source_instr, ZigType *ptr_type_entry) {
ZigType *attrs_type;
BuiltinPtrSize size_enum_index;
if (is_slice(ptr_type_entry)) {
attrs_type = ptr_type_entry->data.structure.fields[slice_ptr_index]->type_entry;
TypeStructField *ptr_field = ptr_type_entry->data.structure.fields[slice_ptr_index];
attrs_type = resolve_struct_field_type(ira->codegen, ptr_field);
size_enum_index = BuiltinPtrSizeSlice;
} else if (ptr_type_entry->id == ZigTypeIdPointer) {
attrs_type = ptr_type_entry;
@ -25081,9 +25081,6 @@ static ZigValue *create_ptr_like_type_info(IrAnalyze *ira, ZigType *ptr_type_ent
zig_unreachable();
}

if ((err = type_resolve(ira->codegen, attrs_type->data.pointer.child_type, ResolveStatusSizeKnown)))
return nullptr;

ZigType *type_info_pointer_type = ir_type_info_get_type(ira, "Pointer", nullptr);
assertNoError(type_resolve(ira->codegen, type_info_pointer_type, ResolveStatusSizeKnown));

@ -25114,9 +25111,18 @@ static ZigValue *create_ptr_like_type_info(IrAnalyze *ira, ZigType *ptr_type_ent
fields[2]->data.x_bool = attrs_type->data.pointer.is_volatile;
// alignment: u32
ensure_field_index(result->type, "alignment", 3);
fields[3]->special = ConstValSpecialStatic;
fields[3]->type = ira->codegen->builtin_types.entry_num_lit_int;
bigint_init_unsigned(&fields[3]->data.x_bigint, get_ptr_align(ira->codegen, attrs_type));
if (attrs_type->data.pointer.explicit_alignment != 0) {
fields[3]->special = ConstValSpecialStatic;
bigint_init_unsigned(&fields[3]->data.x_bigint, attrs_type->data.pointer.explicit_alignment);
} else {
LazyValueAlignOf *lazy_align_of = heap::c_allocator.create<LazyValueAlignOf>();
lazy_align_of->ira = ira; ira_ref(ira);
fields[3]->special = ConstValSpecialLazy;
fields[3]->data.x_lazy = &lazy_align_of->base;
lazy_align_of->base.id = LazyValueIdAlignOf;
lazy_align_of->target_type = ir_const_type(ira, source_instr, attrs_type->data.pointer.child_type);
}
// child: type
ensure_field_index(result->type, "child", 4);
fields[4]->special = ConstValSpecialStatic;
@ -25130,7 +25136,7 @@ static ZigValue *create_ptr_like_type_info(IrAnalyze *ira, ZigType *ptr_type_ent
// sentinel: anytype
ensure_field_index(result->type, "sentinel", 6);
fields[6]->special = ConstValSpecialStatic;
if (attrs_type->data.pointer.child_type->id != ZigTypeIdOpaque) {
if (attrs_type->data.pointer.sentinel != nullptr) {
fields[6]->type = get_optional_type(ira->codegen, attrs_type->data.pointer.child_type);
set_optional_payload(fields[6], attrs_type->data.pointer.sentinel);
} else {
@ -25165,9 +25171,6 @@ static Error ir_make_type_info_value(IrAnalyze *ira, IrInst* source_instr, ZigTy
assert(type_entry != nullptr);
assert(!type_is_invalid(type_entry));

if ((err = type_resolve(ira->codegen, type_entry, ResolveStatusSizeKnown)))
return err;

auto entry = ira->codegen->type_info_cache.maybe_get(type_entry);
if (entry != nullptr) {
*out = entry->value;
@ -25231,7 +25234,7 @@ static Error ir_make_type_info_value(IrAnalyze *ira, IrInst* source_instr, ZigTy
}
case ZigTypeIdPointer:
{
result = create_ptr_like_type_info(ira, type_entry);
result = create_ptr_like_type_info(ira, source_instr, type_entry);
if (result == nullptr)
return ErrorSemanticAnalyzeFail;
break;
@ -25317,6 +25320,9 @@ static Error ir_make_type_info_value(IrAnalyze *ira, IrInst* source_instr, ZigTy
}
case ZigTypeIdEnum:
{
if ((err = type_resolve(ira->codegen, type_entry, ResolveStatusSizeKnown)))
return err;

result = ira->codegen->pass1_arena->create<ZigValue>();
result->special = ConstValSpecialStatic;
result->type = ir_type_info_get_type(ira, "Enum", nullptr);
@ -25455,6 +25461,9 @@ static Error ir_make_type_info_value(IrAnalyze *ira, IrInst* source_instr, ZigTy
}
case ZigTypeIdUnion:
{
if ((err = type_resolve(ira->codegen, type_entry, ResolveStatusSizeKnown)))
return err;

result = ira->codegen->pass1_arena->create<ZigValue>();
result->special = ConstValSpecialStatic;
result->type = ir_type_info_get_type(ira, "Union", nullptr);
@ -25545,12 +25554,15 @@ static Error ir_make_type_info_value(IrAnalyze *ira, IrInst* source_instr, ZigTy
case ZigTypeIdStruct:
{
if (type_entry->data.structure.special == StructSpecialSlice) {
result = create_ptr_like_type_info(ira, type_entry);
result = create_ptr_like_type_info(ira, source_instr, type_entry);
if (result == nullptr)
return ErrorSemanticAnalyzeFail;
break;
}

if ((err = type_resolve(ira->codegen, type_entry, ResolveStatusSizeKnown)))
return err;

result = ira->codegen->pass1_arena->create<ZigValue>();
result->special = ConstValSpecialStatic;
result->type = ir_type_info_get_type(ira, "Struct", nullptr);
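
At the language level, the lazy alignment value introduced in create_ptr_like_type_info keeps the alignment field of @typeInfo for pointers and slices observable without eagerly resolving the child type's size. A small Zig check of those fields, assuming the capitalized Pointer tag of std.builtin.TypeInfo in this period:

const std = @import("std");

test "pointer @typeInfo still exposes alignment" {
    comptime {
        const ptr_info = @typeInfo(*align(16) u8);
        std.debug.assert(ptr_info.Pointer.alignment == 16);

        // Slices also report through the Pointer variant, with size == .Slice.
        const slice_info = @typeInfo([]const u8);
        std.debug.assert(slice_info.Pointer.size == .Slice);
    }
}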