std: introduce GeneralPurposeAllocator

`std.GeneralPurposeAllocator` is now available. It is a function that
takes a comptime configuration struct (in which every field has a
default value) and returns an allocator type. There is a detailed
description of this allocator in the doc comments at the top of the
new file.

The main feature of this allocator is that it is *safe*. It detects
double-free and use-after-free, and it reports memory leaks.
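
Basic usage looks like this (a minimal sketch distilled from the tests
in the new file; leak detection runs when `deinit` is called):

```zig
const std = @import("std");

test "GeneralPurposeAllocator basic usage" {
    // Default configuration; fields such as enable_memory_limit can be
    // overridden, e.g. GeneralPurposeAllocator(.{ .enable_memory_limit = true }).
    var gpda = std.GeneralPurposeAllocator(.{}){};
    defer gpda.deinit();
    const allocator = &gpda.allocator;

    const ptr = try allocator.create(u64);
    // Forgetting this destroy would cause deinit() to print
    // "Memory leak detected:" along with the allocation's stack trace.
    allocator.destroy(ptr);
}
```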

Some deprecation compile errors are removed.

The Allocator interface gains `old_align` as a new parameter to
`resizeFn`. This allows allocator implementations to quickly look up
the metadata of an existing allocation.
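
Concretely, an allocator written against the new interface looks like
this (a sketch; `stubAlloc`/`stubResize` are hypothetical stand-ins,
but the signatures are the real ones from the new `std.mem.Allocator`):

```zig
const std = @import("std");
const Allocator = std.mem.Allocator;

// Illustrative stub allocator. The only interface change is the added
// buf_align (old_align) parameter to resizeFn.
var stub_allocator = Allocator{
    .allocFn = stubAlloc,
    .resizeFn = stubResize,
};

fn stubAlloc(self: *Allocator, len: usize, ptr_align: u29, len_align: u29) Allocator.Error![]u8 {
    return error.OutOfMemory; // illustrative stub
}

fn stubResize(self: *Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29) Allocator.Error!usize {
    // buf_align equals the ptr_align originally passed to allocFn; an
    // implementation can use it to find the allocation's metadata
    // quickly (e.g. which size-class bucket owns it).
    if (new_len > buf.len) return error.OutOfMemory;
    return new_len;
}
```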

`std.heap.page_allocator` is improved to use mmap address hints, so
that unmapping and then mapping pages tends not to hand back the same
virtual addresses. The new general purpose allocator uses the page
allocator as its backing allocator by default.
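
The hint pattern, condensed from the PageAllocator hunk below into a
self-contained sketch (the `mapPages` wrapper is hypothetical, and a
POSIX target is assumed):

```zig
const std = @import("std");
const os = std.os;
const mem = std.mem;

pub var next_mmap_addr_hint: ?[*]align(mem.page_size) u8 = null;

fn mapPages(alloc_len: usize) ![]align(mem.page_size) u8 {
    // Read the racy hint; a stale value is still a valid mmap hint.
    const hint = @atomicLoad(@TypeOf(next_mmap_addr_hint), &next_mmap_addr_hint, .Unordered);
    const slice = try os.mmap(
        hint,
        alloc_len,
        os.PROT_READ | os.PROT_WRITE,
        os.MAP_PRIVATE | os.MAP_ANONYMOUS,
        -1,
        0,
    );
    // Publish the end of this mapping as the next hint, so future maps
    // land at fresh addresses; losing the race to another thread is
    // harmless, so the cmpxchg result is ignored.
    const new_hint = @alignCast(mem.page_size, slice.ptr + slice.len);
    _ = @cmpxchgStrong(@TypeOf(next_mmap_addr_hint), &next_mmap_addr_hint, hint, new_hint, .Monotonic, .Monotonic);
    return slice;
}
```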

`std.testing.allocator` is now backed by this new allocator, which
performs leak checking, and so the LeakCheckAllocator is retired.
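
That means a test like the following (a hypothetical example) now gets
a leak report with the allocation's stack trace when the test runner
deinitializes the testing allocator:

```zig
const std = @import("std");

test "leaks are reported by std.testing.allocator" {
    // This allocation is intentionally never freed; with the new
    // GeneralPurposeAllocator-backed std.testing.allocator, the leak
    // is reported instead of going unnoticed.
    const buf = try std.testing.allocator.alloc(u8, 16);
    _ = buf;
}
```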

stage1 is improved so that the `@typeInfo` of a pointer has a lazy value
for the alignment of the child type, to avoid false dependency loops
when dealing with pointers to async function frames.

The `std.mem.Allocator` interface is refactored to be in its own file.

`std.Mutex` now exposes the dummy mutex with `std.Mutex.Dummy`.
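
The general purpose allocator uses this to make thread safety a
comptime configuration knob; the same pattern works for any data
structure (a sketch, with a hypothetical `Counter` type):

```zig
const std = @import("std");

// Pick a real or dummy mutex at comptime, as the GPA's mutex_init
// does, so a non-thread-safe build pays no synchronization cost.
pub fn Counter(comptime thread_safe: bool) type {
    return struct {
        const mutex_init = if (thread_safe) std.Mutex.init() else std.Mutex.Dummy.init();

        mutex: @TypeOf(mutex_init) = mutex_init,
        count: usize = 0,

        pub fn incr(self: *@This()) void {
            const held = self.mutex.acquire();
            defer held.release();
            self.count += 1;
        }
    };
}
```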

This allocator is great for debug mode; however, it needs some work to
have better performance in release modes. The next step will be setting
up a series of tests in ziglang/gotta-go-fast and then making
improvements to the implementation.
Andrew Kelley, 2020-08-07 22:35:15 -07:00
commit cc17f84ccc (parent 30bace66d4)
14 changed files with 1497 additions and 551 deletions

lib/std/array_list.zig

@@ -263,6 +263,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
if (better_capacity >= new_capacity) break;
}
// TODO This can be optimized to avoid needlessly copying undefined memory.
const new_memory = try self.allocator.reallocAtLeast(self.allocatedSlice(), better_capacity);
self.items.ptr = new_memory.ptr;
self.capacity = new_memory.len;

lib/std/debug.zig

@@ -19,9 +19,6 @@ const windows = std.os.windows;
pub const leb = @import("debug/leb128.zig");
-pub const global_allocator = @compileError("Please switch to std.testing.allocator.");
-pub const failing_allocator = @compileError("Please switch to std.testing.failing_allocator.");
pub const runtime_safety = switch (builtin.mode) {
.Debug, .ReleaseSafe => true,
.ReleaseFast, .ReleaseSmall => false,

lib/std/heap.zig

@@ -12,6 +12,7 @@ const maxInt = std.math.maxInt;
pub const LoggingAllocator = @import("heap/logging_allocator.zig").LoggingAllocator;
pub const loggingAllocator = @import("heap/logging_allocator.zig").loggingAllocator;
pub const ArenaAllocator = @import("heap/arena_allocator.zig").ArenaAllocator;
+pub const GeneralPurposeAllocator = @import("heap/general_purpose_allocator.zig").GeneralPurposeAllocator;
const Allocator = mem.Allocator;
@@ -53,7 +54,7 @@ fn cAlloc(self: *Allocator, len: usize, ptr_align: u29, len_align: u29) Allocato
return ptr[0..mem.alignBackwardAnyAlign(full_len, len_align)];
}
-fn cResize(self: *Allocator, buf: []u8, new_len: usize, len_align: u29) Allocator.Error!usize {
+fn cResize(self: *Allocator, buf: []u8, old_align: u29, new_len: usize, len_align: u29) Allocator.Error!usize {
if (new_len == 0) {
c.free(buf.ptr);
return 0;
@@ -88,8 +89,6 @@ var wasm_page_allocator_state = Allocator{
.resizeFn = WasmPageAllocator.resize,
};
-pub const direct_allocator = @compileError("deprecated; use std.heap.page_allocator");
/// Verifies that the adjusted length will still map to the full length
pub fn alignPageAllocLen(full_len: usize, len: usize, len_align: u29) usize {
const aligned_len = mem.alignAllocLen(full_len, len, len_align);
@@ -97,10 +96,13 @@ pub fn alignPageAllocLen(full_len: usize, len: usize, len_align: u29) usize {
return aligned_len;
}
+/// TODO Utilize this on Windows.
+pub var next_mmap_addr_hint: ?[*]align(mem.page_size) u8 = null;
const PageAllocator = struct {
fn alloc(allocator: *Allocator, n: usize, alignment: u29, len_align: u29) error{OutOfMemory}![]u8 {
assert(n > 0);
-const alignedLen = mem.alignForward(n, mem.page_size);
+const aligned_len = mem.alignForward(n, mem.page_size);
if (builtin.os.tag == .windows) {
const w = os.windows;
@@ -112,14 +114,14 @@ const PageAllocator = struct {
// see https://devblogs.microsoft.com/oldnewthing/?p=42223
const addr = w.VirtualAlloc(
null,
-alignedLen,
+aligned_len,
w.MEM_COMMIT | w.MEM_RESERVE,
w.PAGE_READWRITE,
) catch return error.OutOfMemory;
// If the allocation is sufficiently aligned, use it.
if (@ptrToInt(addr) & (alignment - 1) == 0) {
-return @ptrCast([*]u8, addr)[0..alignPageAllocLen(alignedLen, n, len_align)];
+return @ptrCast([*]u8, addr)[0..alignPageAllocLen(aligned_len, n, len_align)];
}
// If it wasn't, actually do an explicitly aligned allocation.
@@ -146,20 +148,24 @@ const PageAllocator = struct {
// until it succeeds.
const ptr = w.VirtualAlloc(
@intToPtr(*c_void, aligned_addr),
-alignedLen,
+aligned_len,
w.MEM_COMMIT | w.MEM_RESERVE,
w.PAGE_READWRITE,
) catch continue;
-return @ptrCast([*]u8, ptr)[0..alignPageAllocLen(alignedLen, n, len_align)];
+return @ptrCast([*]u8, ptr)[0..alignPageAllocLen(aligned_len, n, len_align)];
}
}
-const maxDropLen = alignment - std.math.min(alignment, mem.page_size);
-const allocLen = if (maxDropLen <= alignedLen - n) alignedLen else mem.alignForward(alignedLen + maxDropLen, mem.page_size);
+const max_drop_len = alignment - std.math.min(alignment, mem.page_size);
+const alloc_len = if (max_drop_len <= aligned_len - n)
+aligned_len
+else
+mem.alignForward(aligned_len + max_drop_len, mem.page_size);
+const hint = @atomicLoad(@TypeOf(next_mmap_addr_hint), &next_mmap_addr_hint, .Unordered);
const slice = os.mmap(
-null,
-allocLen,
+hint,
+alloc_len,
os.PROT_READ | os.PROT_WRITE,
os.MAP_PRIVATE | os.MAP_ANONYMOUS,
-1,
@@ -168,25 +174,29 @@ const PageAllocator = struct {
assert(mem.isAligned(@ptrToInt(slice.ptr), mem.page_size));
const aligned_addr = mem.alignForward(@ptrToInt(slice.ptr), alignment);
+const result_ptr = @alignCast(mem.page_size, @intToPtr([*]u8, aligned_addr));
// Unmap the extra bytes that were only requested in order to guarantee
// that the range of memory we were provided had a proper alignment in
// it somewhere. The extra bytes could be at the beginning, or end, or both.
-const dropLen = aligned_addr - @ptrToInt(slice.ptr);
-if (dropLen != 0) {
-os.munmap(slice[0..dropLen]);
+const drop_len = aligned_addr - @ptrToInt(slice.ptr);
+if (drop_len != 0) {
+os.munmap(slice[0..drop_len]);
}
// Unmap extra pages
-const alignedBufferLen = allocLen - dropLen;
-if (alignedBufferLen > alignedLen) {
-os.munmap(@alignCast(mem.page_size, @intToPtr([*]u8, aligned_addr))[alignedLen..alignedBufferLen]);
+const aligned_buffer_len = alloc_len - drop_len;
+if (aligned_buffer_len > aligned_len) {
+os.munmap(result_ptr[aligned_len..aligned_buffer_len]);
}
-return @intToPtr([*]u8, aligned_addr)[0..alignPageAllocLen(alignedLen, n, len_align)];
+const new_hint = @alignCast(mem.page_size, result_ptr + aligned_len);
+_ = @cmpxchgStrong(@TypeOf(next_mmap_addr_hint), &next_mmap_addr_hint, hint, new_hint, .Monotonic, .Monotonic);
+return result_ptr[0..alignPageAllocLen(aligned_len, n, len_align)];
}
-fn resize(allocator: *Allocator, buf_unaligned: []u8, new_size: usize, len_align: u29) Allocator.Error!usize {
+fn resize(allocator: *Allocator, buf_unaligned: []u8, buf_align: u29, new_size: usize, len_align: u29) Allocator.Error!usize {
const new_size_aligned = mem.alignForward(new_size, mem.page_size);
if (builtin.os.tag == .windows) {
@@ -229,6 +239,7 @@ const PageAllocator = struct {
if (new_size_aligned < buf_aligned_len) {
const ptr = @intToPtr([*]align(mem.page_size) u8, @ptrToInt(buf_unaligned.ptr) + new_size_aligned);
+// TODO: if the next_mmap_addr_hint is within the unmapped range, update it
os.munmap(ptr[0 .. buf_aligned_len - new_size_aligned]);
if (new_size_aligned == 0)
return 0;
@@ -236,6 +247,7 @@ const PageAllocator = struct {
}
// TODO: call mremap
+// TODO: if the next_mmap_addr_hint is within the remapped range, update it
return error.OutOfMemory;
}
};
@@ -538,7 +550,7 @@ pub const FixedBufferAllocator = struct {
return result;
}
-fn resize(allocator: *Allocator, buf: []u8, new_size: usize, len_align: u29) Allocator.Error!usize {
+fn resize(allocator: *Allocator, buf: []u8, buf_align: u29, new_size: usize, len_align: u29) Allocator.Error!usize {
const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
assert(self.ownsSlice(buf)); // sanity check

lib/std/heap/arena_allocator.zig

@@ -49,7 +49,7 @@ pub const ArenaAllocator = struct {
const actual_min_size = minimum_size + (@sizeOf(BufNode) + 16);
const big_enough_len = prev_len + actual_min_size;
const len = big_enough_len + big_enough_len / 2;
-const buf = try self.child_allocator.callAllocFn(len, @alignOf(BufNode), 1);
+const buf = try self.child_allocator.allocFn(self.child_allocator, len, @alignOf(BufNode), 1);
const buf_node = @ptrCast(*BufNode, @alignCast(@alignOf(BufNode), buf.ptr));
buf_node.* = BufNode{
.data = buf,

lib/std/heap/general_purpose_allocator.zig (new file)

@@ -0,0 +1,920 @@
//! # General Purpose Allocator
//!
//! ## Design Priorities
//!
//! ### `OptimizationMode.debug` and `OptimizationMode.release_safe`:
//!
//! * Detect double free, and print stack trace of:
//! - Where it was first allocated
//! - Where it was freed the first time
//! - Where it was freed the second time
//!
//! * Detect leaks and print stack trace of:
//! - Where it was allocated
//!
//! * When a page of memory is no longer needed, give it back to the OS
//! as soon as possible, so that using it again causes page faults.
//!
//! * Do not re-use memory slots, so that memory safety is upheld. For small
//! allocations, this is handled here; for larger ones it is handled in the
//! backing allocator (by default `std.heap.page_allocator`).
//!
//! * Make pointer math errors unlikely to harm memory from
//! unrelated allocations.
//!
//! * It's OK for these mechanisms to cost some extra overhead bytes.
//!
//! * It's OK for these mechanisms to incur some performance cost.
//!
//! * Rogue memory writes should not harm the allocator's state.
//!
//! * Cross platform. Operates based on a backing allocator which makes it work
//! everywhere, even freestanding.
//!
//! * Compile-time configuration.
//!
//! ### `OptimizationMode.release_fast` (note: not much work has gone into this use case yet):
//!
//! * Low fragmentation is the primary concern.
//! * Worst-case latency performance is the secondary concern.
//! * Average-case latency performance comes next.
//! * Finally, having freed memory unmapped, and pointer math errors unlikely to
//! harm memory from unrelated allocations are nice-to-haves.
//!
//! ### `OptimizationMode.release_small` (note: not much work has gone into this use case yet):
//!
//! * Small binary code size of the executable is the primary concern.
//! * Next, defer to the `.release_fast` priority list.
//!
//! ## Basic Design:
//!
//! Small allocations are divided into buckets:
//!
//! ```
//! index obj_size
//! 0 1
//! 1 2
//! 2 4
//! 3 8
//! 4 16
//! 5 32
//! 6 64
//! 7 128
//! 8 256
//! 9 512
//! 10 1024
//! 11 2048
//! ```
//!
//! The main allocator state has an array of all the "current" buckets for each
//! size class. Each slot in the array can be null, meaning the bucket for that
//! size class is not allocated. When the first object is allocated for a given
//! size class, it allocates 1 page of memory from the OS. This page is
//! divided into "slots" - one per allocated object. Along with the page of memory
//! for object slots, as many pages as necessary are allocated to store the
//! BucketHeader, followed by "used bits", and two stack traces for each slot
//! (allocation trace and free trace).
//!
//! The "used bits" are 1 bit per slot representing whether the slot is used.
//! Allocations iterate over these bits to find a free slot. Frees assert that the
//! corresponding bit is 1 and set it to 0.
//!
//! Buckets have prev and next pointers. When there is only one bucket for a given
//! size class, both prev and next point to itself. When all slots of a bucket are
//! used, a new bucket is allocated, and enters the doubly linked list. The main
//! allocator state tracks the "current" bucket for each size class. Leak detection
//! currently only checks the current bucket.
//!
//! Resizing detects if the size class is unchanged or smaller, in which case the same
//! pointer is returned unmodified. If a larger size class is required,
//! `error.OutOfMemory` is returned.
//!
//! Large objects are allocated directly using the backing allocator and their metadata is stored
//! in a `std.HashMap` using the backing allocator.
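//!
//! For example, a request for 5 bytes with natural alignment maps to the
//! size class 8 bucket (index 3), following the same steps that `alloc`
//! and `allocSlot` below perform:
//!
//! ```
//! const aligned_size = math.max(len, ptr_align); // max(5, 1) == 5
//! const size_class = up_to_nearest_power_of_2(usize, aligned_size); // 8
//! const bucket_index = math.log2(size_class); // 3
//! ```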
const std = @import("std");
const math = std.math;
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const page_size = std.mem.page_size;
const StackTrace = std.builtin.StackTrace;
/// Integer type for pointing to slots in a small allocation
const SlotIndex = std.meta.Int(false, math.log2(page_size) + 1);
pub const Config = struct {
/// Number of stack frames to capture.
stack_trace_frames: usize = if (std.debug.runtime_safety) @as(usize, 6) else @as(usize, 0),
/// If true, the allocator will have two fields:
/// * `total_requested_bytes` which tracks the total allocated bytes of memory requested.
/// * `requested_memory_limit` which causes allocations to return `error.OutOfMemory`
/// when the `total_requested_bytes` exceeds this limit.
/// If false, these fields will be `void`.
enable_memory_limit: bool = false,
/// Whether to enable safety checks.
safety: bool = std.debug.runtime_safety,
/// Whether the allocator may be used simultaneously from multiple threads.
thread_safe: bool = !std.builtin.single_threaded,
};
pub fn GeneralPurposeAllocator(comptime config: Config) type {
return struct {
allocator: Allocator = Allocator{
.allocFn = alloc,
.resizeFn = resize,
},
backing_allocator: *Allocator = std.heap.page_allocator,
buckets: [small_bucket_count]?*BucketHeader = [1]?*BucketHeader{null} ** small_bucket_count,
large_allocations: LargeAllocTable = .{},
total_requested_bytes: @TypeOf(total_requested_bytes_init) = total_requested_bytes_init,
requested_memory_limit: @TypeOf(requested_memory_limit_init) = requested_memory_limit_init,
mutex: @TypeOf(mutex_init) = mutex_init,
const Self = @This();
const total_requested_bytes_init = if (config.enable_memory_limit) @as(usize, 0) else {};
const requested_memory_limit_init = if (config.enable_memory_limit) @as(usize, math.maxInt(usize)) else {};
const mutex_init = if (config.thread_safe) std.Mutex.init() else std.Mutex.Dummy.init();
const stack_n = config.stack_trace_frames;
const one_trace_size = @sizeOf(usize) * stack_n;
const traces_per_slot = 2;
pub const Error = std.mem.Allocator.Error;
const small_bucket_count = math.log2(page_size);
const largest_bucket_object_size = 1 << (small_bucket_count - 1);
const LargeAlloc = struct {
bytes: []u8,
stack_addresses: [stack_n]usize,
fn dumpStackTrace(self: *LargeAlloc) void {
var len: usize = 0;
while (len < stack_n and self.stack_addresses[len] != 0) {
len += 1;
}
const stack_trace = StackTrace{
.instruction_addresses = &self.stack_addresses,
.index = len,
};
std.debug.dumpStackTrace(stack_trace);
}
};
const LargeAllocTable = std.HashMapUnmanaged(usize, LargeAlloc, hash_addr, eql_addr, false);
// Bucket: In memory, in order:
// * BucketHeader
// * bucket_used_bits: [N]u8, // 1 bit for every slot; 1 byte for every 8 slots
// * stack_trace_addresses: [N]usize, // traces_per_slot for every allocation
const BucketHeader = struct {
prev: *BucketHeader,
next: *BucketHeader,
page: [*]align(page_size) u8,
alloc_cursor: SlotIndex,
used_count: SlotIndex,
fn usedBits(bucket: *BucketHeader, index: usize) *u8 {
return @intToPtr(*u8, @ptrToInt(bucket) + @sizeOf(BucketHeader) + index);
}
fn stackTracePtr(
bucket: *BucketHeader,
size_class: usize,
slot_index: SlotIndex,
trace_kind: TraceKind,
) *[stack_n]usize {
const start_ptr = @ptrCast([*]u8, bucket) + bucketStackFramesStart(size_class);
const addr = start_ptr + one_trace_size * traces_per_slot * slot_index +
@enumToInt(trace_kind) * @as(usize, one_trace_size);
return @ptrCast(*[stack_n]usize, @alignCast(@alignOf(usize), addr));
}
fn captureStackTrace(
bucket: *BucketHeader,
return_address: usize,
size_class: usize,
slot_index: SlotIndex,
trace_kind: TraceKind,
) void {
// Initialize them to 0. When determining the count we must look
// for non-zero addresses.
const stack_addresses = bucket.stackTracePtr(size_class, slot_index, trace_kind);
collectStackTrace(return_address, stack_addresses);
}
};
fn bucketStackTrace(
bucket: *BucketHeader,
size_class: usize,
slot_index: SlotIndex,
trace_kind: TraceKind,
) StackTrace {
const stack_addresses = bucket.stackTracePtr(size_class, slot_index, trace_kind);
var len: usize = 0;
while (len < stack_n and stack_addresses[len] != 0) {
len += 1;
}
return StackTrace{
.instruction_addresses = stack_addresses,
.index = len,
};
}
fn bucketStackFramesStart(size_class: usize) usize {
return std.mem.alignForward(
@sizeOf(BucketHeader) + usedBitsCount(size_class),
@alignOf(usize),
);
}
fn bucketSize(size_class: usize) usize {
const slot_count = @divExact(page_size, size_class);
return bucketStackFramesStart(size_class) + one_trace_size * traces_per_slot * slot_count;
}
fn usedBitsCount(size_class: usize) usize {
const slot_count = @divExact(page_size, size_class);
if (slot_count < 8) return 1;
return @divExact(slot_count, 8);
}
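// Worked example of the layout math above (illustrative): with
// 4096-byte pages and size_class == 64, slot_count == 64, so the used
// bits occupy 8 bytes; with stack_n == 6, the stack trace area occupies
// 6 * @sizeOf(usize) * 2 * 64 bytes after the aligned header.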
fn detectLeaksInBucket(
bucket: *BucketHeader,
size_class: usize,
used_bits_count: usize,
) void {
var used_bits_byte: usize = 0;
while (used_bits_byte < used_bits_count) : (used_bits_byte += 1) {
const used_byte = bucket.usedBits(used_bits_byte).*;
if (used_byte != 0) {
var bit_index: u3 = 0;
while (true) : (bit_index += 1) {
const is_used = @truncate(u1, used_byte >> bit_index) != 0;
if (is_used) {
std.debug.print("\nMemory leak detected:\n", .{});
const slot_index = @intCast(SlotIndex, used_bits_byte * 8 + bit_index);
const stack_trace = bucketStackTrace(
bucket,
size_class,
slot_index,
.alloc,
);
std.debug.dumpStackTrace(stack_trace);
}
if (bit_index == math.maxInt(u3))
break;
}
}
}
}
pub fn deinit(self: *Self) void {
for (self.buckets) |optional_bucket, bucket_i| {
const first_bucket = optional_bucket orelse continue;
const size_class = @as(usize, 1) << @intCast(u6, bucket_i);
const used_bits_count = usedBitsCount(size_class);
var bucket = first_bucket;
while (true) {
detectLeaksInBucket(bucket, size_class, used_bits_count);
bucket = bucket.next;
if (bucket == first_bucket)
break;
}
}
for (self.large_allocations.items()) |*large_alloc| {
std.debug.print("\nMemory leak detected:\n", .{});
large_alloc.value.dumpStackTrace();
}
self.large_allocations.deinit(self.backing_allocator);
self.* = undefined;
}
fn collectStackTrace(first_trace_addr: usize, addresses: *[stack_n]usize) void {
std.mem.set(usize, addresses, 0);
var stack_trace = StackTrace{
.instruction_addresses = addresses,
.index = 0,
};
std.debug.captureStackTrace(first_trace_addr, &stack_trace);
}
fn allocSlot(self: *Self, size_class: usize, trace_addr: usize) Error![*]u8 {
const bucket_index = math.log2(size_class);
const first_bucket = self.buckets[bucket_index] orelse try self.createBucket(
size_class,
bucket_index,
);
var bucket = first_bucket;
const slot_count = @divExact(page_size, size_class);
while (bucket.alloc_cursor == slot_count) {
const prev_bucket = bucket;
bucket = prev_bucket.next;
if (bucket == first_bucket) {
// make a new one
bucket = try self.createBucket(size_class, bucket_index);
bucket.prev = prev_bucket;
bucket.next = prev_bucket.next;
prev_bucket.next = bucket;
bucket.next.prev = bucket;
}
}
// change the allocator's current bucket to be this one
self.buckets[bucket_index] = bucket;
const slot_index = bucket.alloc_cursor;
bucket.alloc_cursor += 1;
var used_bits_byte = bucket.usedBits(slot_index / 8);
const used_bit_index: u3 = @intCast(u3, slot_index % 8); // TODO cast should be unnecessary
used_bits_byte.* |= (@as(u8, 1) << used_bit_index);
bucket.used_count += 1;
bucket.captureStackTrace(trace_addr, size_class, slot_index, .alloc);
return bucket.page + slot_index * size_class;
}
fn searchBucket(
self: *Self,
bucket_index: usize,
addr: usize,
) ?*BucketHeader {
const first_bucket = self.buckets[bucket_index] orelse return null;
var bucket = first_bucket;
while (true) {
const in_bucket_range = (addr >= @ptrToInt(bucket.page) and
addr < @ptrToInt(bucket.page) + page_size);
if (in_bucket_range) return bucket;
bucket = bucket.prev;
if (bucket == first_bucket) {
return null;
}
self.buckets[bucket_index] = bucket;
}
}
fn freeSlot(
self: *Self,
bucket: *BucketHeader,
bucket_index: usize,
size_class: usize,
slot_index: SlotIndex,
used_byte: *u8,
used_bit_index: u3,
trace_addr: usize,
) void {
// Capture stack trace to be the "first free", in case a double free happens.
bucket.captureStackTrace(@returnAddress(), size_class, slot_index, .free);
used_byte.* &= ~(@as(u8, 1) << used_bit_index);
bucket.used_count -= 1;
if (bucket.used_count == 0) {
if (bucket.next == bucket) {
// it's the only bucket and therefore the current one
self.buckets[bucket_index] = null;
} else {
bucket.next.prev = bucket.prev;
bucket.prev.next = bucket.next;
self.buckets[bucket_index] = bucket.prev;
}
self.backing_allocator.free(bucket.page[0..page_size]);
const bucket_size = bucketSize(size_class);
const bucket_slice = @ptrCast([*]align(@alignOf(BucketHeader)) u8, bucket)[0..bucket_size];
self.backing_allocator.free(bucket_slice);
} else {
// TODO Set the slot data to undefined.
// Related: https://github.com/ziglang/zig/issues/4298
}
}
/// This function assumes the object is in the large object storage regardless
/// of the parameters.
fn resizeLarge(
self: *Self,
old_mem: []u8,
old_align: u29,
new_size: usize,
len_align: u29,
return_addr: usize,
) Error!usize {
const entry = self.large_allocations.getEntry(@ptrToInt(old_mem.ptr)) orelse {
if (config.safety) {
@panic("Invalid free");
} else {
unreachable;
}
};
if (config.safety and old_mem.len != entry.value.bytes.len) {
std.debug.print("\nAllocation size {} bytes does not match free size {}. Allocated here:\n", .{
entry.value.bytes.len,
old_mem.len,
});
entry.value.dumpStackTrace();
@panic("\nFree here:");
}
const result_len = try self.backing_allocator.resizeFn(self.backing_allocator, old_mem, old_align, new_size, len_align);
if (result_len == 0) {
self.large_allocations.removeAssertDiscard(@ptrToInt(old_mem.ptr));
return 0;
}
entry.value.bytes = old_mem.ptr[0..result_len];
collectStackTrace(return_addr, &entry.value.stack_addresses);
return result_len;
}
pub fn setRequestedMemoryLimit(self: *Self, limit: usize) void {
self.requested_memory_limit = limit;
}
fn resize(
allocator: *Allocator,
old_mem: []u8,
old_align: u29,
new_size: usize,
len_align: u29,
) Error!usize {
const self = @fieldParentPtr(Self, "allocator", allocator);
const held = self.mutex.acquire();
defer held.release();
const prev_req_bytes = self.total_requested_bytes;
if (config.enable_memory_limit) {
const new_req_bytes = prev_req_bytes + new_size - old_mem.len;
if (new_req_bytes > prev_req_bytes and new_req_bytes > self.requested_memory_limit) {
return error.OutOfMemory;
}
self.total_requested_bytes = new_req_bytes;
}
errdefer if (config.enable_memory_limit) {
self.total_requested_bytes = prev_req_bytes;
};
assert(old_mem.len != 0);
const aligned_size = math.max(old_mem.len, old_align);
if (aligned_size > largest_bucket_object_size) {
return self.resizeLarge(old_mem, old_align, new_size, len_align, @returnAddress());
}
const size_class_hint = up_to_nearest_power_of_2(usize, aligned_size);
var bucket_index = math.log2(size_class_hint);
var size_class: usize = size_class_hint;
const bucket = while (bucket_index < small_bucket_count) : (bucket_index += 1) {
if (self.searchBucket(bucket_index, @ptrToInt(old_mem.ptr))) |bucket| {
break bucket;
}
size_class *= 2;
} else {
return self.resizeLarge(old_mem, old_align, new_size, len_align, @returnAddress());
};
const byte_offset = @ptrToInt(old_mem.ptr) - @ptrToInt(bucket.page);
const slot_index = @intCast(SlotIndex, byte_offset / size_class);
const used_byte_index = slot_index / 8;
const used_bit_index = @intCast(u3, slot_index % 8);
const used_byte = bucket.usedBits(used_byte_index);
const is_used = @truncate(u1, used_byte.* >> used_bit_index) != 0;
if (!is_used) {
if (config.safety) {
// print allocation stack trace
std.debug.print("\nDouble free detected, allocated here:\n", .{});
const alloc_stack_trace = bucketStackTrace(bucket, size_class, slot_index, .alloc);
std.debug.dumpStackTrace(alloc_stack_trace);
std.debug.print("\nFirst free here:\n", .{});
const free_stack_trace = bucketStackTrace(bucket, size_class, slot_index, .free);
std.debug.dumpStackTrace(free_stack_trace);
@panic("\nSecond free here:");
} else {
unreachable;
}
}
if (new_size == 0) {
self.freeSlot(bucket, bucket_index, size_class, slot_index, used_byte, used_bit_index, @returnAddress());
return @as(usize, 0);
}
const new_aligned_size = math.max(new_size, old_align);
const new_size_class = up_to_nearest_power_of_2(usize, new_aligned_size);
if (new_size_class <= size_class) {
return new_size;
}
return error.OutOfMemory;
}
fn alloc(allocator: *Allocator, len: usize, ptr_align: u29, len_align: u29) Error![]u8 {
const self = @fieldParentPtr(Self, "allocator", allocator);
const held = self.mutex.acquire();
defer held.release();
const prev_req_bytes = self.total_requested_bytes;
if (config.enable_memory_limit) {
const new_req_bytes = prev_req_bytes + len;
if (new_req_bytes > self.requested_memory_limit) {
return error.OutOfMemory;
}
self.total_requested_bytes = new_req_bytes;
}
errdefer if (config.enable_memory_limit) {
self.total_requested_bytes = prev_req_bytes;
};
const new_aligned_size = math.max(len, ptr_align);
if (new_aligned_size > largest_bucket_object_size) {
try self.large_allocations.ensureCapacity(
self.backing_allocator,
self.large_allocations.entries.items.len + 1,
);
const slice = try self.backing_allocator.allocFn(self.backing_allocator, len, ptr_align, len_align);
const gop = self.large_allocations.getOrPutAssumeCapacity(@ptrToInt(slice.ptr));
assert(!gop.found_existing); // This would mean the kernel double-mapped pages.
gop.entry.value.bytes = slice;
collectStackTrace(@returnAddress(), &gop.entry.value.stack_addresses);
return slice;
} else {
const new_size_class = up_to_nearest_power_of_2(usize, new_aligned_size);
const ptr = try self.allocSlot(new_size_class, @returnAddress());
return ptr[0..len];
}
}
fn createBucket(self: *Self, size_class: usize, bucket_index: usize) Error!*BucketHeader {
const page = try self.backing_allocator.allocAdvanced(u8, page_size, page_size, .exact);
errdefer self.backing_allocator.free(page);
const bucket_size = bucketSize(size_class);
const bucket_bytes = try self.backing_allocator.allocAdvanced(u8, @alignOf(BucketHeader), bucket_size, .exact);
const ptr = @ptrCast(*BucketHeader, bucket_bytes.ptr);
ptr.* = BucketHeader{
.prev = ptr,
.next = ptr,
.page = page.ptr,
.alloc_cursor = 0,
.used_count = 0,
};
self.buckets[bucket_index] = ptr;
// Set the used bits to all zeroes
@memset(@as(*[1]u8, ptr.usedBits(0)), 0, usedBitsCount(size_class));
return ptr;
}
};
}
const TraceKind = enum {
alloc,
free,
};
fn up_to_nearest_power_of_2(comptime T: type, n: T) T {
var power: T = 1;
while (power < n)
power *= 2;
return power;
}
fn hash_addr(addr: usize) u32 {
if (@sizeOf(usize) == @sizeOf(u32))
return addr;
comptime assert(@sizeOf(usize) == 8);
return @intCast(u32, addr >> 32) ^ @truncate(u32, addr);
}
fn eql_addr(a: usize, b: usize) bool {
return a == b;
}
const test_config = Config{};
test "small allocations - free in same order" {
var gpda = GeneralPurposeAllocator(test_config){};
defer gpda.deinit();
const allocator = &gpda.allocator;
var list = std.ArrayList(*u64).init(std.testing.allocator);
defer list.deinit();
var i: usize = 0;
while (i < 513) : (i += 1) {
const ptr = try allocator.create(u64);
try list.append(ptr);
}
for (list.items) |ptr| {
allocator.destroy(ptr);
}
}
test "small allocations - free in reverse order" {
var gpda = GeneralPurposeAllocator(test_config){};
defer gpda.deinit();
const allocator = &gpda.allocator;
var list = std.ArrayList(*u64).init(std.testing.allocator);
defer list.deinit();
var i: usize = 0;
while (i < 513) : (i += 1) {
const ptr = try allocator.create(u64);
try list.append(ptr);
}
while (list.popOrNull()) |ptr| {
allocator.destroy(ptr);
}
}
test "large allocations" {
var gpda = GeneralPurposeAllocator(test_config){};
defer gpda.deinit();
const allocator = &gpda.allocator;
const ptr1 = try allocator.alloc(u64, 42768);
const ptr2 = try allocator.alloc(u64, 52768);
allocator.free(ptr1);
const ptr3 = try allocator.alloc(u64, 62768);
allocator.free(ptr3);
allocator.free(ptr2);
}
test "realloc" {
var gpda = GeneralPurposeAllocator(test_config){};
defer gpda.deinit();
const allocator = &gpda.allocator;
var slice = try allocator.alignedAlloc(u8, @alignOf(u32), 1);
defer allocator.free(slice);
slice[0] = 0x12;
// This reallocation should keep its pointer address.
const old_slice = slice;
slice = try allocator.realloc(slice, 2);
assert(old_slice.ptr == slice.ptr);
assert(slice[0] == 0x12);
slice[1] = 0x34;
// This requires upgrading to a larger size class
slice = try allocator.realloc(slice, 17);
assert(slice[0] == 0x12);
assert(slice[1] == 0x34);
}
test "shrink" {
var gpda = GeneralPurposeAllocator(test_config){};
defer gpda.deinit();
const allocator = &gpda.allocator;
var slice = try allocator.alloc(u8, 20);
defer allocator.free(slice);
std.mem.set(u8, slice, 0x11);
slice = allocator.shrink(slice, 17);
for (slice) |b| {
assert(b == 0x11);
}
slice = allocator.shrink(slice, 16);
for (slice) |b| {
assert(b == 0x11);
}
}
test "large object - grow" {
var gpda = GeneralPurposeAllocator(test_config){};
defer gpda.deinit();
const allocator = &gpda.allocator;
var slice1 = try allocator.alloc(u8, page_size * 2 - 20);
defer allocator.free(slice1);
var old = slice1;
slice1 = try allocator.realloc(slice1, page_size * 2 - 10);
assert(slice1.ptr == old.ptr);
slice1 = try allocator.realloc(slice1, page_size * 2);
assert(slice1.ptr == old.ptr);
slice1 = try allocator.realloc(slice1, page_size * 2 + 1);
}
test "realloc small object to large object" {
var gpda = GeneralPurposeAllocator(test_config){};
defer gpda.deinit();
const allocator = &gpda.allocator;
var slice = try allocator.alloc(u8, 70);
defer allocator.free(slice);
slice[0] = 0x12;
slice[60] = 0x34;
// This requires upgrading to a large object
const large_object_size = page_size * 2 + 50;
slice = try allocator.realloc(slice, large_object_size);
assert(slice[0] == 0x12);
assert(slice[60] == 0x34);
}
test "shrink large object to large object" {
var gpda = GeneralPurposeAllocator(test_config){};
defer gpda.deinit();
const allocator = &gpda.allocator;
var slice = try allocator.alloc(u8, page_size * 2 + 50);
defer allocator.free(slice);
slice[0] = 0x12;
slice[60] = 0x34;
slice = try allocator.resize(slice, page_size * 2 + 1);
assert(slice[0] == 0x12);
assert(slice[60] == 0x34);
slice = allocator.shrink(slice, page_size * 2 + 1);
assert(slice[0] == 0x12);
assert(slice[60] == 0x34);
slice = try allocator.realloc(slice, page_size * 2);
assert(slice[0] == 0x12);
assert(slice[60] == 0x34);
}
test "shrink large object to large object with larger alignment" {
var gpda = GeneralPurposeAllocator(test_config){};
defer gpda.deinit();
const allocator = &gpda.allocator;
var debug_buffer: [1000]u8 = undefined;
const debug_allocator = &std.heap.FixedBufferAllocator.init(&debug_buffer).allocator;
const alloc_size = page_size * 2 + 50;
var slice = try allocator.alignedAlloc(u8, 16, alloc_size);
defer allocator.free(slice);
var stuff_to_free = std.ArrayList([]align(16) u8).init(debug_allocator);
while (isAligned(@ptrToInt(slice.ptr), page_size * 2)) {
try stuff_to_free.append(slice);
slice = try allocator.alignedAlloc(u8, 16, alloc_size);
}
while (stuff_to_free.popOrNull()) |item| {
allocator.free(item);
}
slice[0] = 0x12;
slice[60] = 0x34;
slice = try allocator.alignedRealloc(slice, page_size * 2, alloc_size / 2);
assert(slice[0] == 0x12);
assert(slice[60] == 0x34);
}
test "realloc large object to small object" {
var gpda = GeneralPurposeAllocator(test_config){};
defer gpda.deinit();
const allocator = &gpda.allocator;
var slice = try allocator.alloc(u8, page_size * 2 + 50);
defer allocator.free(slice);
slice[0] = 0x12;
slice[16] = 0x34;
slice = try allocator.realloc(slice, 19);
assert(slice[0] == 0x12);
assert(slice[16] == 0x34);
}
test "non-page-allocator backing allocator" {
var gpda = GeneralPurposeAllocator(.{}){ .backing_allocator = std.testing.allocator };
defer gpda.deinit();
const allocator = &gpda.allocator;
const ptr = try allocator.create(i32);
defer allocator.destroy(ptr);
}
test "realloc large object to larger alignment" {
var gpda = GeneralPurposeAllocator(test_config){};
defer gpda.deinit();
const allocator = &gpda.allocator;
var debug_buffer: [1000]u8 = undefined;
const debug_allocator = &std.heap.FixedBufferAllocator.init(&debug_buffer).allocator;
var slice = try allocator.alignedAlloc(u8, 16, page_size * 2 + 50);
defer allocator.free(slice);
var stuff_to_free = std.ArrayList([]align(16) u8).init(debug_allocator);
while (isAligned(@ptrToInt(slice.ptr), page_size * 2)) {
try stuff_to_free.append(slice);
slice = try allocator.alignedAlloc(u8, 16, page_size * 2 + 50);
}
while (stuff_to_free.popOrNull()) |item| {
allocator.free(item);
}
slice[0] = 0x12;
slice[16] = 0x34;
slice = try allocator.alignedRealloc(slice, 32, page_size * 2 + 100);
assert(slice[0] == 0x12);
assert(slice[16] == 0x34);
slice = try allocator.alignedRealloc(slice, 32, page_size * 2 + 25);
assert(slice[0] == 0x12);
assert(slice[16] == 0x34);
slice = try allocator.alignedRealloc(slice, page_size * 2, page_size * 2 + 100);
assert(slice[0] == 0x12);
assert(slice[16] == 0x34);
}
fn isAligned(addr: usize, alignment: usize) bool {
// 000010000 // example addr
// 000001111 // subtract 1
// 111110000 // binary not
const aligned_addr = (addr & ~(alignment - 1));
return aligned_addr == addr;
}
test "isAligned works" {
assert(isAligned(0, 4));
assert(isAligned(1, 1));
assert(isAligned(2, 1));
assert(isAligned(2, 2));
assert(!isAligned(2, 4));
assert(isAligned(3, 1));
assert(!isAligned(3, 2));
assert(!isAligned(3, 4));
assert(isAligned(4, 4));
assert(isAligned(4, 2));
assert(isAligned(4, 1));
assert(!isAligned(4, 8));
assert(!isAligned(4, 16));
}
test "large object shrinks to small but allocation fails during shrink" {
var failing_allocator = std.testing.FailingAllocator.init(std.heap.page_allocator, 3);
var gpda = GeneralPurposeAllocator(.{}){ .backing_allocator = &failing_allocator.allocator };
defer gpda.deinit();
const allocator = &gpda.allocator;
var slice = try allocator.alloc(u8, page_size * 2 + 50);
defer allocator.free(slice);
slice[0] = 0x12;
slice[3] = 0x34;
// Next allocation will fail in the backing allocator of the GeneralPurposeAllocator
slice = allocator.shrink(slice, 4);
assert(slice[0] == 0x12);
assert(slice[3] == 0x34);
}
test "objects of size 1024 and 2048" {
var gpda = GeneralPurposeAllocator(test_config){};
defer gpda.deinit();
const allocator = &gpda.allocator;
const slice = try allocator.alloc(u8, 1025);
const slice2 = try allocator.alloc(u8, 3000);
allocator.free(slice);
allocator.free(slice2);
}
test "setting a memory cap" {
var gpda = GeneralPurposeAllocator(.{ .enable_memory_limit = true }){};
defer gpda.deinit();
const allocator = &gpda.allocator;
gpda.setRequestedMemoryLimit(1010);
const small = try allocator.create(i32);
assert(gpda.total_requested_bytes == 4);
const big = try allocator.alloc(u8, 1000);
assert(gpda.total_requested_bytes == 1004);
std.testing.expectError(error.OutOfMemory, allocator.create(u64));
allocator.destroy(small);
assert(gpda.total_requested_bytes == 1000);
allocator.free(big);
assert(gpda.total_requested_bytes == 0);
const exact = try allocator.alloc(u8, 1010);
assert(gpda.total_requested_bytes == 1010);
allocator.free(exact);
}

lib/std/heap/logging_allocator.zig

@@ -26,7 +26,7 @@ pub fn LoggingAllocator(comptime OutStreamType: type) type {
fn alloc(allocator: *Allocator, len: usize, ptr_align: u29, len_align: u29) error{OutOfMemory}![]u8 {
const self = @fieldParentPtr(Self, "allocator", allocator);
self.out_stream.print("alloc : {}", .{len}) catch {};
-const result = self.parent_allocator.callAllocFn(len, ptr_align, len_align);
+const result = self.parent_allocator.allocFn(self.parent_allocator, len, ptr_align, len_align);
if (result) |buff| {
self.out_stream.print(" success!\n", .{}) catch {};
} else |err| {
@@ -35,7 +35,13 @@ pub fn LoggingAllocator(comptime OutStreamType: type) type {
return result;
}
-fn resize(allocator: *Allocator, buf: []u8, new_len: usize, len_align: u29) error{OutOfMemory}!usize {
+fn resize(
+allocator: *Allocator,
+buf: []u8,
+buf_align: u29,
+new_len: usize,
+len_align: u29,
+) error{OutOfMemory}!usize {
const self = @fieldParentPtr(Self, "allocator", allocator);
if (new_len == 0) {
self.out_stream.print("free : {}\n", .{buf.len}) catch {};
@@ -44,7 +50,7 @@ pub fn LoggingAllocator(comptime OutStreamType: type) type {
} else {
self.out_stream.print("expand: {} to {}", .{ buf.len, new_len }) catch {};
}
-if (self.parent_allocator.callResizeFn(buf, new_len, len_align)) |resized_len| {
+if (self.parent_allocator.resizeFn(self.parent_allocator, buf, buf_align, new_len, len_align)) |resized_len| {
if (new_len > buf.len) {
self.out_stream.print(" success!\n", .{}) catch {};
}
@@ -74,9 +80,9 @@ test "LoggingAllocator" {
const allocator = &loggingAllocator(&fixedBufferAllocator.allocator, fbs.outStream()).allocator;
var a = try allocator.alloc(u8, 10);
-a.len = allocator.shrinkBytes(a, 5, 0);
+a.len = allocator.shrinkBytes(a, 1, 5, 0);
std.debug.assert(a.len == 5);
-std.testing.expectError(error.OutOfMemory, allocator.callResizeFn(a, 20, 0));
+std.testing.expectError(error.OutOfMemory, allocator.resizeFn(allocator, a, 1, 20, 0));
allocator.free(a);
std.testing.expectEqualSlices(u8,

lib/std/mem.zig

@@ -8,391 +8,13 @@ const meta = std.meta;
const trait = meta.trait;
const testing = std.testing;
-// https://github.com/ziglang/zig/issues/2564
+/// https://github.com/ziglang/zig/issues/2564
pub const page_size = switch (builtin.arch) {
.wasm32, .wasm64 => 64 * 1024,
else => 4 * 1024,
};
pub const Allocator = struct {
pub const Error = error{OutOfMemory};
/// Attempt to allocate at least `len` bytes aligned to `ptr_align`.
///
/// If `len_align` is `0`, then the length returned MUST be exactly `len` bytes,
/// otherwise, the length must be aligned to `len_align`.
///
/// `len` must be greater than or equal to `len_align` and must be aligned by `len_align`.
allocFn: fn (self: *Allocator, len: usize, ptr_align: u29, len_align: u29) Error![]u8,
/// Attempt to expand or shrink memory in place. `buf.len` must equal the most recent
/// length returned by `allocFn` or `resizeFn`.
///
/// Passing a `new_len` of 0 frees and invalidates the buffer such that it can no
/// longer be passed to `resizeFn`.
///
/// error.OutOfMemory can only be returned if `new_len` is greater than `buf.len`.
/// If `buf` cannot be expanded to accommodate `new_len`, then the allocation MUST be
/// unmodified and error.OutOfMemory MUST be returned.
///
/// If `len_align` is `0`, then the length returned MUST be exactly `len` bytes,
/// otherwise, the length must be aligned to `len_align`.
///
/// `new_len` must be greater than or equal to `len_align` and must be aligned by `len_align`.
resizeFn: fn (self: *Allocator, buf: []u8, new_len: usize, len_align: u29) Error!usize,
pub fn callAllocFn(self: *Allocator, new_len: usize, alignment: u29, len_align: u29) Error![]u8 {
return self.allocFn(self, new_len, alignment, len_align);
}
pub fn callResizeFn(self: *Allocator, buf: []u8, new_len: usize, len_align: u29) Error!usize {
return self.resizeFn(self, buf, new_len, len_align);
}
/// Set to resizeFn if in-place resize is not supported.
pub fn noResize(self: *Allocator, buf: []u8, new_len: usize, len_align: u29) Error!usize {
if (new_len > buf.len)
return error.OutOfMemory;
return new_len;
}
/// Call `resizeFn`, but caller guarantees that `new_len` <= `buf.len` meaning
/// error.OutOfMemory should be impossible.
pub fn shrinkBytes(self: *Allocator, buf: []u8, new_len: usize, len_align: u29) usize {
assert(new_len <= buf.len);
return self.callResizeFn(buf, new_len, len_align) catch unreachable;
}
/// Realloc is used to modify the size or alignment of an existing allocation,
/// as well as to provide the allocator with an opportunity to move an allocation
/// to a better location.
/// When the size/alignment is greater than the previous allocation, this function
/// returns `error.OutOfMemory` when the requested new allocation could not be granted.
/// When the size/alignment is less than or equal to the previous allocation,
/// this function returns `error.OutOfMemory` when the allocator decides the client
/// would be better off keeping the extra alignment/size. Clients will call
/// `callResizeFn` when they require the allocator to track a new alignment/size,
/// and so this function should only return success when the allocator considers
/// the reallocation desirable from the allocator's perspective.
/// As an example, `std.ArrayList` tracks a "capacity", and therefore can handle
/// reallocation failure, even when `new_n` <= `old_mem.len`. A `FixedBufferAllocator`
/// would always return `error.OutOfMemory` for `reallocFn` when the size/alignment
/// is less than or equal to the old allocation, because it cannot reclaim the memory,
/// and thus the `std.ArrayList` would be better off retaining its capacity.
/// When `reallocFn` returns,
/// `return_value[0..min(old_mem.len, new_byte_count)]` must be the same
/// as `old_mem` was when `reallocFn` is called. The bytes of
/// `return_value[old_mem.len..]` have undefined values.
/// The returned slice must have its pointer aligned at least to `new_alignment` bytes.
fn reallocBytes(
self: *Allocator,
/// Guaranteed to be the same as what was returned from most recent call to
/// `allocFn` or `resizeFn`.
/// If `old_mem.len == 0` then this is a new allocation and `new_byte_count`
/// is guaranteed to be >= 1.
old_mem: []u8,
/// If `old_mem.len == 0` then this is `undefined`, otherwise:
/// Guaranteed to be the same as what was passed to `allocFn`.
/// Guaranteed to be >= 1.
/// Guaranteed to be a power of 2.
old_alignment: u29,
/// If `new_byte_count` is 0 then this is a free and it is guaranteed that
/// `old_mem.len != 0`.
new_byte_count: usize,
/// Guaranteed to be >= 1.
/// Guaranteed to be a power of 2.
/// Returned slice's pointer must have this alignment.
new_alignment: u29,
/// 0 indicates the length of the slice returned MUST match `new_byte_count` exactly
/// non-zero means the length of the returned slice must be aligned by `len_align`
/// `new_len` must be aligned by `len_align`
len_align: u29,
) Error![]u8 {
if (old_mem.len == 0) {
const new_mem = try self.callAllocFn(new_byte_count, new_alignment, len_align);
@memset(new_mem.ptr, undefined, new_byte_count);
return new_mem;
}
if (isAligned(@ptrToInt(old_mem.ptr), new_alignment)) {
if (new_byte_count <= old_mem.len) {
const shrunk_len = self.shrinkBytes(old_mem, new_byte_count, len_align);
return old_mem.ptr[0..shrunk_len];
}
if (self.callResizeFn(old_mem, new_byte_count, len_align)) |resized_len| {
assert(resized_len >= new_byte_count);
@memset(old_mem.ptr + new_byte_count, undefined, resized_len - new_byte_count);
return old_mem.ptr[0..resized_len];
} else |_| {}
}
if (new_byte_count <= old_mem.len and new_alignment <= old_alignment) {
return error.OutOfMemory;
}
return self.moveBytes(old_mem, new_byte_count, new_alignment, len_align);
}
/// Move the given memory to a new location in the given allocator to accommodate a new
/// size and alignment.
fn moveBytes(self: *Allocator, old_mem: []u8, new_len: usize, new_alignment: u29, len_align: u29) Error![]u8 {
assert(old_mem.len > 0);
assert(new_len > 0);
const new_mem = try self.callAllocFn(new_len, new_alignment, len_align);
@memcpy(new_mem.ptr, old_mem.ptr, std.math.min(new_len, old_mem.len));
// DISABLED TO AVOID BUGS IN TRANSLATE C
// use './zig build test-translate-c' to reproduce, some of the symbols in the
// generated C code will be a sequence of 0xaa (the undefined value), meaning
// it is printing data that has been freed
//@memset(old_mem.ptr, undefined, old_mem.len);
_ = self.shrinkBytes(old_mem, 0, 0);
return new_mem;
}
/// Returns a pointer to undefined memory.
/// Call `destroy` with the result to free the memory.
pub fn create(self: *Allocator, comptime T: type) Error!*T {
if (@sizeOf(T) == 0) return &(T{});
const slice = try self.alloc(T, 1);
return &slice[0];
}
/// `ptr` should be the return value of `create`, or otherwise
/// have the same address and alignment property.
pub fn destroy(self: *Allocator, ptr: anytype) void {
const T = @TypeOf(ptr).Child;
if (@sizeOf(T) == 0) return;
const non_const_ptr = @intToPtr([*]u8, @ptrToInt(ptr));
_ = self.shrinkBytes(non_const_ptr[0..@sizeOf(T)], 0, 0);
}
/// Allocates an array of `n` items of type `T` and sets all the
/// items to `undefined`. Depending on the Allocator
/// implementation, it may be required to call `free` once the
/// memory is no longer needed, to avoid a resource leak. If the
/// `Allocator` implementation is unknown, then correct code will
/// call `free` when done.
///
/// For allocating a single item, see `create`.
pub fn alloc(self: *Allocator, comptime T: type, n: usize) Error![]T {
return self.alignedAlloc(T, null, n);
}
pub fn allocWithOptions(
self: *Allocator,
comptime Elem: type,
n: usize,
/// null means naturally aligned
comptime optional_alignment: ?u29,
comptime optional_sentinel: ?Elem,
) Error!AllocWithOptionsPayload(Elem, optional_alignment, optional_sentinel) {
if (optional_sentinel) |sentinel| {
const ptr = try self.alignedAlloc(Elem, optional_alignment, n + 1);
ptr[n] = sentinel;
return ptr[0..n :sentinel];
} else {
return self.alignedAlloc(Elem, optional_alignment, n);
}
}
fn AllocWithOptionsPayload(comptime Elem: type, comptime alignment: ?u29, comptime sentinel: ?Elem) type {
if (sentinel) |s| {
return [:s]align(alignment orelse @alignOf(Elem)) Elem;
} else {
return []align(alignment orelse @alignOf(Elem)) Elem;
}
}
/// Allocates an array of `n + 1` items of type `T` and sets the first `n`
/// items to `undefined` and the last item to `sentinel`. Depending on the
/// Allocator implementation, it may be required to call `free` once the
/// memory is no longer needed, to avoid a resource leak. If the
/// `Allocator` implementation is unknown, then correct code will
/// call `free` when done.
///
/// For allocating a single item, see `create`.
///
/// Deprecated; use `allocWithOptions`.
pub fn allocSentinel(self: *Allocator, comptime Elem: type, n: usize, comptime sentinel: Elem) Error![:sentinel]Elem {
return self.allocWithOptions(Elem, n, null, sentinel);
}
/// Deprecated: use `allocAdvanced`
pub fn alignedAlloc(
self: *Allocator,
comptime T: type,
/// null means naturally aligned
comptime alignment: ?u29,
n: usize,
) Error![]align(alignment orelse @alignOf(T)) T {
return self.allocAdvanced(T, alignment, n, .exact);
}
const Exact = enum { exact, at_least };
pub fn allocAdvanced(
self: *Allocator,
comptime T: type,
/// null means naturally aligned
comptime alignment: ?u29,
n: usize,
exact: Exact,
) Error![]align(alignment orelse @alignOf(T)) T {
const a = if (alignment) |a| blk: {
if (a == @alignOf(T)) return allocAdvanced(self, T, null, n, exact);
break :blk a;
} else @alignOf(T);
if (n == 0) {
return @as([*]align(a) T, undefined)[0..0];
}
const byte_count = math.mul(usize, @sizeOf(T), n) catch return Error.OutOfMemory;
// TODO The `if (alignment == null)` blocks are workarounds for zig not being able to
// access certain type information about T without creating a circular dependency in async
// functions that heap-allocate their own frame with @Frame(func).
const sizeOfT = if (alignment == null) @intCast(u29, @divExact(byte_count, n)) else @sizeOf(T);
const byte_slice = try self.callAllocFn(byte_count, a, if (exact == .exact) @as(u29, 0) else sizeOfT);
switch (exact) {
.exact => assert(byte_slice.len == byte_count),
.at_least => assert(byte_slice.len >= byte_count),
}
@memset(byte_slice.ptr, undefined, byte_slice.len);
if (alignment == null) {
// This if block is a workaround (see comment above)
return @intToPtr([*]T, @ptrToInt(byte_slice.ptr))[0..@divExact(byte_slice.len, @sizeOf(T))];
} else {
return mem.bytesAsSlice(T, @alignCast(a, byte_slice));
}
}
/// This function requests a new byte size for an existing allocation,
/// which can be larger, smaller, or the same size as the old memory
/// allocation.
/// This function is preferred over `shrink`, because it can fail, even
/// when shrinking. This gives the allocator a chance to perform a
/// cheap shrink operation if possible, or otherwise return OutOfMemory,
/// indicating that the caller should keep their capacity, for example
/// in `std.ArrayList.shrink`.
/// If you need guaranteed success, call `shrink`.
/// If `new_n` is 0, this is the same as `free` and it always succeeds.
pub fn realloc(self: *Allocator, old_mem: anytype, new_n: usize) t: {
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
break :t Error![]align(Slice.alignment) Slice.child;
} {
const old_alignment = @typeInfo(@TypeOf(old_mem)).Pointer.alignment;
return self.reallocAdvanced(old_mem, old_alignment, new_n, .exact);
}
pub fn reallocAtLeast(self: *Allocator, old_mem: anytype, new_n: usize) t: {
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
break :t Error![]align(Slice.alignment) Slice.child;
} {
const old_alignment = @typeInfo(@TypeOf(old_mem)).Pointer.alignment;
return self.reallocAdvanced(old_mem, old_alignment, new_n, .at_least);
}
// Deprecated: use `reallocAdvanced`
pub fn alignedRealloc(
self: *Allocator,
old_mem: anytype,
comptime new_alignment: u29,
new_n: usize,
) Error![]align(new_alignment) @typeInfo(@TypeOf(old_mem)).Pointer.child {
return self.reallocAdvanced(old_mem, new_alignment, new_n, .exact);
}
/// This is the same as `realloc`, except caller may additionally request
/// a new alignment, which can be larger, smaller, or the same as the old
/// allocation.
pub fn reallocAdvanced(
self: *Allocator,
old_mem: anytype,
comptime new_alignment: u29,
new_n: usize,
exact: Exact,
) Error![]align(new_alignment) @typeInfo(@TypeOf(old_mem)).Pointer.child {
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
const T = Slice.child;
if (old_mem.len == 0) {
return self.allocAdvanced(T, new_alignment, new_n, exact);
}
if (new_n == 0) {
self.free(old_mem);
return @as([*]align(new_alignment) T, undefined)[0..0];
}
const old_byte_slice = mem.sliceAsBytes(old_mem);
const byte_count = math.mul(usize, @sizeOf(T), new_n) catch return Error.OutOfMemory;
// Note: can't set shrunk memory to undefined as memory shouldn't be modified on realloc failure
const new_byte_slice = try self.reallocBytes(old_byte_slice, Slice.alignment, byte_count, new_alignment, if (exact == .exact) @as(u29, 0) else @sizeOf(T));
return mem.bytesAsSlice(T, @alignCast(new_alignment, new_byte_slice));
}
/// Prefer calling realloc to shrink if you can tolerate failure, such as
/// in an ArrayList data structure with a storage capacity.
/// Shrink always succeeds, and `new_n` must be <= `old_mem.len`.
/// Returned slice has same alignment as old_mem.
/// Shrinking to 0 is the same as calling `free`.
pub fn shrink(self: *Allocator, old_mem: anytype, new_n: usize) t: {
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
break :t []align(Slice.alignment) Slice.child;
} {
const old_alignment = @typeInfo(@TypeOf(old_mem)).Pointer.alignment;
return self.alignedShrink(old_mem, old_alignment, new_n);
}
/// This is the same as `shrink`, except caller may additionally request
/// a new alignment, which must be smaller or the same as the old
/// allocation.
pub fn alignedShrink(
self: *Allocator,
old_mem: anytype,
comptime new_alignment: u29,
new_n: usize,
) []align(new_alignment) @typeInfo(@TypeOf(old_mem)).Pointer.child {
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
const T = Slice.child;
if (new_n == old_mem.len)
return old_mem;
assert(new_n < old_mem.len);
assert(new_alignment <= Slice.alignment);
// Here we skip the overflow checking on the multiplication because
// new_n <= old_mem.len and the multiplication didn't overflow for that operation.
const byte_count = @sizeOf(T) * new_n;
const old_byte_slice = mem.sliceAsBytes(old_mem);
@memset(old_byte_slice.ptr + byte_count, undefined, old_byte_slice.len - byte_count);
_ = self.shrinkBytes(old_byte_slice, byte_count, 0);
return old_mem[0..new_n];
}
/// Free an array allocated with `alloc`. To free a single item,
/// see `destroy`.
pub fn free(self: *Allocator, memory: anytype) void {
const Slice = @typeInfo(@TypeOf(memory)).Pointer;
const bytes = mem.sliceAsBytes(memory);
const bytes_len = bytes.len + if (Slice.sentinel != null) @sizeOf(Slice.child) else 0;
if (bytes_len == 0) return;
const non_const_ptr = @intToPtr([*]u8, @ptrToInt(bytes.ptr));
@memset(non_const_ptr, undefined, bytes_len);
_ = self.shrinkBytes(non_const_ptr[0..bytes_len], 0, 0);
}
/// Copies `m` to newly allocated memory. Caller owns the memory.
pub fn dupe(allocator: *Allocator, comptime T: type, m: []const T) ![]T {
const new_buf = try allocator.alloc(T, m.len);
copy(T, new_buf, m);
return new_buf;
}
/// Copies `m` to newly allocated memory, with a null-terminated element. Caller owns the memory.
pub fn dupeZ(allocator: *Allocator, comptime T: type, m: []const T) ![:0]T {
const new_buf = try allocator.alloc(T, m.len + 1);
copy(T, new_buf, m);
new_buf[m.len] = 0;
return new_buf[0..m.len :0];
}
};
+pub const Allocator = @import("mem/Allocator.zig");
/// Detects and asserts if the std.mem.Allocator interface is violated by the caller
/// or the allocator.
@@ -424,7 +46,8 @@ pub fn ValidationAllocator(comptime T: type) type {
}
const self = @fieldParentPtr(@This(), "allocator", allocator);
-const result = try self.getUnderlyingAllocatorPtr().callAllocFn(n, ptr_align, len_align);
+const underlying = self.getUnderlyingAllocatorPtr();
+const result = try underlying.allocFn(underlying, n, ptr_align, len_align);
assert(mem.isAligned(@ptrToInt(result.ptr), ptr_align));
if (len_align == 0) {
assert(result.len == n);
@@ -434,14 +57,21 @@ pub fn ValidationAllocator(comptime T: type) type {
}
return result;
}
-pub fn resize(allocator: *Allocator, buf: []u8, new_len: usize, len_align: u29) Allocator.Error!usize {
+pub fn resize(
+allocator: *Allocator,
+buf: []u8,
+buf_align: u29,
+new_len: usize,
+len_align: u29,
+) Allocator.Error!usize {
assert(buf.len > 0);
if (len_align != 0) {
assert(mem.isAlignedAnyAlign(new_len, len_align));
assert(new_len >= len_align);
}
const self = @fieldParentPtr(@This(), "allocator", allocator);
-const result = try self.getUnderlyingAllocatorPtr().callResizeFn(buf, new_len, len_align);
+const underlying = self.getUnderlyingAllocatorPtr();
+const result = try underlying.resizeFn(underlying, buf, buf_align, new_len, len_align);
if (len_align == 0) {
assert(result == new_len);
} else {

lib/std/mem/Allocator.zig (new file, 410 lines)

@@ -0,0 +1,410 @@
//! The standard memory allocation interface.
const std = @import("../std.zig");
const assert = std.debug.assert;
const math = std.math;
const mem = std.mem;
const Allocator = @This();
pub const Error = error{OutOfMemory};
/// Attempt to allocate at least `len` bytes aligned to `ptr_align`.
///
/// If `len_align` is `0`, then the length returned MUST be exactly `len` bytes,
/// otherwise, the length must be aligned to `len_align`.
///
/// `len` must be greater than or equal to `len_align` and must be aligned by `len_align`.
allocFn: fn (self: *Allocator, len: usize, ptr_align: u29, len_align: u29) Error![]u8,
/// Attempt to expand or shrink memory in place. `buf.len` must equal the most recent
/// length returned by `allocFn` or `resizeFn`. `buf_align` must equal the same value
/// that was passed as the `ptr_align` parameter to the original `allocFn` call.
///
/// Passing a `new_len` of 0 frees and invalidates the buffer such that it can no
/// longer be passed to `resizeFn`.
///
/// error.OutOfMemory can only be returned if `new_len` is greater than `buf.len`.
/// If `buf` cannot be expanded to accommodate `new_len`, then the allocation MUST be
/// unmodified and error.OutOfMemory MUST be returned.
///
/// If `len_align` is `0`, then the length returned MUST be exactly `len` bytes,
/// otherwise, the length must be aligned to `len_align`. Note that `len_align` does *not*
/// provide a way to modify the alignment of a pointer. Rather it provides an API for
/// accepting more bytes of memory from the allocator than requested.
///
/// `new_len` must be greater than or equal to `len_align` and must be aligned by `len_align`.
resizeFn: fn (self: *Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29) Error!usize,
/// Use this as `resizeFn` if in-place resize is not supported.
pub fn noResize(self: *Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29) Error!usize {
if (new_len > buf.len)
return error.OutOfMemory;
return new_len;
}
/// Calls `resizeFn`; the caller guarantees that `new_len` <= `buf.len`, meaning
/// error.OutOfMemory is impossible.
pub fn shrinkBytes(self: *Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29) usize {
assert(new_len <= buf.len);
return self.resizeFn(self, buf, buf_align, new_len, len_align) catch unreachable;
}
/// Realloc is used to modify the size or alignment of an existing allocation,
/// as well as to provide the allocator with an opportunity to move an allocation
/// to a better location.
/// When the size/alignment is greater than the previous allocation, this function
/// returns `error.OutOfMemory` when the requested new allocation could not be granted.
/// When the size/alignment is less than or equal to the previous allocation,
/// this function returns `error.OutOfMemory` when the allocator decides the client
/// would be better off keeping the extra alignment/size. Clients will call
/// `resizeFn` when they require the allocator to track a new alignment/size,
/// and so this function should only return success when the allocator considers
/// the reallocation desirable from the allocator's perspective.
/// As an example, `std.ArrayList` tracks a "capacity", and therefore can handle
/// reallocation failure, even when `new_byte_count` <= `old_mem.len`. A `FixedBufferAllocator`
/// would always return `error.OutOfMemory` here when the size/alignment
/// is less than or equal to the old allocation, because it cannot reclaim the memory,
/// and thus the `std.ArrayList` would be better off retaining its capacity.
/// When this function returns,
/// `return_value[0..min(old_mem.len, new_byte_count)]` must be the same
/// as `old_mem` was when this function was called. The bytes of
/// `return_value[old_mem.len..]` have undefined values.
/// The returned slice must have its pointer aligned at least to `new_alignment` bytes.
fn reallocBytes(
self: *Allocator,
/// Guaranteed to be the same as what was returned from the most recent call to
/// `allocFn` or `resizeFn`.
/// If `old_mem.len == 0` then this is a new allocation and `new_byte_count`
/// is guaranteed to be >= 1.
old_mem: []u8,
/// If `old_mem.len == 0` then this is `undefined`, otherwise:
/// Guaranteed to be the same as what was passed to `allocFn`.
/// Guaranteed to be >= 1.
/// Guaranteed to be a power of 2.
old_alignment: u29,
/// If `new_byte_count` is 0 then this is a free and it is guaranteed that
/// `old_mem.len != 0`.
new_byte_count: usize,
/// Guaranteed to be >= 1.
/// Guaranteed to be a power of 2.
/// Returned slice's pointer must have this alignment.
new_alignment: u29,
/// `0` indicates the length of the returned slice MUST match `new_byte_count` exactly;
/// non-zero means the length of the returned slice must be aligned by `len_align`.
/// `new_byte_count` must be aligned by `len_align`.
len_align: u29,
) Error![]u8 {
if (old_mem.len == 0) {
const new_mem = try self.allocFn(self, new_byte_count, new_alignment, len_align);
// TODO: https://github.com/ziglang/zig/issues/4298
@memset(new_mem.ptr, undefined, new_byte_count);
return new_mem;
}
if (mem.isAligned(@ptrToInt(old_mem.ptr), new_alignment)) {
if (new_byte_count <= old_mem.len) {
const shrunk_len = self.shrinkBytes(old_mem, old_alignment, new_byte_count, len_align);
return old_mem.ptr[0..shrunk_len];
}
if (self.resizeFn(self, old_mem, old_alignment, new_byte_count, len_align)) |resized_len| {
assert(resized_len >= new_byte_count);
// TODO: https://github.com/ziglang/zig/issues/4298
@memset(old_mem.ptr + new_byte_count, undefined, resized_len - new_byte_count);
return old_mem.ptr[0..resized_len];
} else |_| {}
}
if (new_byte_count <= old_mem.len and new_alignment <= old_alignment) {
return error.OutOfMemory;
}
return self.moveBytes(old_mem, old_alignment, new_byte_count, new_alignment, len_align);
}
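// Example (illustrative, using the public `realloc` wrapper defined below): a
// capacity-tracking caller can treat a refused shrink as "keep the larger buffer":
//     const smaller = allocator.realloc(buf, new_n) catch buf[0..new_n];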
/// Move the given memory to a new location in the given allocator to accommodate a new
/// size and alignment.
fn moveBytes(
self: *Allocator,
old_mem: []u8,
old_align: u29,
new_len: usize,
new_alignment: u29,
len_align: u29,
) Error![]u8 {
assert(old_mem.len > 0);
assert(new_len > 0);
const new_mem = try self.allocFn(self, new_len, new_alignment, len_align);
@memcpy(new_mem.ptr, old_mem.ptr, math.min(new_len, old_mem.len));
// TODO DISABLED TO AVOID BUGS IN TRANSLATE C
// TODO see also https://github.com/ziglang/zig/issues/4298
// use './zig build test-translate-c' to reproduce, some of the symbols in the
// generated C code will be a sequence of 0xaa (the undefined value), meaning
// it is printing data that has been freed
//@memset(old_mem.ptr, undefined, old_mem.len);
_ = self.shrinkBytes(old_mem, old_align, 0, 0);
return new_mem;
}
/// Returns a pointer to undefined memory.
/// Call `destroy` with the result to free the memory.
pub fn create(self: *Allocator, comptime T: type) Error!*T {
if (@sizeOf(T) == 0) return &(T{});
const slice = try self.alloc(T, 1);
return &slice[0];
}
/// `ptr` should be the return value of `create`, or otherwise
/// have the same address and alignment property.
pub fn destroy(self: *Allocator, ptr: anytype) void {
const T = @TypeOf(ptr).Child;
if (@sizeOf(T) == 0) return;
const non_const_ptr = @intToPtr([*]u8, @ptrToInt(ptr));
const ptr_align = @typeInfo(@TypeOf(ptr)).Pointer.alignment;
_ = self.shrinkBytes(non_const_ptr[0..@sizeOf(T)], ptr_align, 0, 0);
}
/// Allocates an array of `n` items of type `T` and sets all the
/// items to `undefined`. Depending on the Allocator
/// implementation, it may be required to call `free` once the
/// memory is no longer needed, to avoid a resource leak. If the
/// `Allocator` implementation is unknown, then correct code will
/// call `free` when done.
///
/// For allocating a single item, see `create`.
pub fn alloc(self: *Allocator, comptime T: type, n: usize) Error![]T {
return self.alignedAlloc(T, null, n);
}
pub fn allocWithOptions(
self: *Allocator,
comptime Elem: type,
n: usize,
/// null means naturally aligned
comptime optional_alignment: ?u29,
comptime optional_sentinel: ?Elem,
) Error!AllocWithOptionsPayload(Elem, optional_alignment, optional_sentinel) {
if (optional_sentinel) |sentinel| {
const ptr = try self.alignedAlloc(Elem, optional_alignment, n + 1);
ptr[n] = sentinel;
return ptr[0..n :sentinel];
} else {
return self.alignedAlloc(Elem, optional_alignment, n);
}
}
fn AllocWithOptionsPayload(comptime Elem: type, comptime alignment: ?u29, comptime sentinel: ?Elem) type {
if (sentinel) |s| {
return [:s]align(alignment orelse @alignOf(Elem)) Elem;
} else {
return []align(alignment orelse @alignOf(Elem)) Elem;
}
}
/// Allocates an array of `n + 1` items of type `T` and sets the first `n`
/// items to `undefined` and the last item to `sentinel`. Depending on the
/// Allocator implementation, it may be required to call `free` once the
/// memory is no longer needed, to avoid a resource leak. If the
/// `Allocator` implementation is unknown, then correct code will
/// call `free` when done.
///
/// For allocating a single item, see `create`.
///
/// Deprecated; use `allocWithOptions`.
pub fn allocSentinel(self: *Allocator, comptime Elem: type, n: usize, comptime sentinel: Elem) Error![:sentinel]Elem {
return self.allocWithOptions(Elem, n, null, sentinel);
}
/// Deprecated: use `allocAdvanced`
pub fn alignedAlloc(
self: *Allocator,
comptime T: type,
/// null means naturally aligned
comptime alignment: ?u29,
n: usize,
) Error![]align(alignment orelse @alignOf(T)) T {
return self.allocAdvanced(T, alignment, n, .exact);
}
const Exact = enum { exact, at_least };
pub fn allocAdvanced(
self: *Allocator,
comptime T: type,
/// null means naturally aligned
comptime alignment: ?u29,
n: usize,
exact: Exact,
) Error![]align(alignment orelse @alignOf(T)) T {
const a = if (alignment) |a| blk: {
if (a == @alignOf(T)) return allocAdvanced(self, T, null, n, exact);
break :blk a;
} else @alignOf(T);
if (n == 0) {
return @as([*]align(a) T, undefined)[0..0];
}
const byte_count = math.mul(usize, @sizeOf(T), n) catch return Error.OutOfMemory;
// TODO The `if (alignment == null)` blocks are workarounds for zig not being able to
// access certain type information about T without creating a circular dependency in async
// functions that heap-allocate their own frame with @Frame(func).
const sizeOfT = if (alignment == null) @intCast(u29, @divExact(byte_count, n)) else @sizeOf(T);
const byte_slice = try self.allocFn(self, byte_count, a, if (exact == .exact) @as(u29, 0) else sizeOfT);
switch (exact) {
.exact => assert(byte_slice.len == byte_count),
.at_least => assert(byte_slice.len >= byte_count),
}
// TODO: https://github.com/ziglang/zig/issues/4298
@memset(byte_slice.ptr, undefined, byte_slice.len);
if (alignment == null) {
// This if block is a workaround (see comment above)
return @intToPtr([*]T, @ptrToInt(byte_slice.ptr))[0..@divExact(byte_slice.len, @sizeOf(T))];
} else {
return mem.bytesAsSlice(T, @alignCast(a, byte_slice));
}
}
/// Increases or decreases the size of an allocation. It is guaranteed to not move the pointer.
pub fn resize(self: *Allocator, old_mem: anytype, new_n: usize) Error!@TypeOf(old_mem) {
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
const T = Slice.child;
if (new_n == 0) {
self.free(old_mem);
return &[0]T{};
}
const old_byte_slice = mem.sliceAsBytes(old_mem);
const new_byte_count = math.mul(usize, @sizeOf(T), new_n) catch return Error.OutOfMemory;
const rc = try self.resizeFn(self, old_byte_slice, Slice.alignment, new_byte_count, 0);
assert(rc == new_byte_count);
const new_byte_slice = old_mem.ptr[0..new_byte_count];
return mem.bytesAsSlice(T, new_byte_slice);
}
/// This function requests a new byte size for an existing allocation,
/// which can be larger, smaller, or the same size as the old memory
/// allocation.
/// This function is preferred over `shrink`, because it can fail, even
/// when shrinking. This gives the allocator a chance to perform a
/// cheap shrink operation if possible, or otherwise return OutOfMemory,
/// indicating that the caller should keep their capacity, for example
/// in `std.ArrayList.shrink`.
/// If you need guaranteed success, call `shrink`.
/// If `new_n` is 0, this is the same as `free` and it always succeeds.
pub fn realloc(self: *Allocator, old_mem: anytype, new_n: usize) t: {
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
break :t Error![]align(Slice.alignment) Slice.child;
} {
const old_alignment = @typeInfo(@TypeOf(old_mem)).Pointer.alignment;
return self.reallocAdvanced(old_mem, old_alignment, new_n, .exact);
}
pub fn reallocAtLeast(self: *Allocator, old_mem: anytype, new_n: usize) t: {
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
break :t Error![]align(Slice.alignment) Slice.child;
} {
const old_alignment = @typeInfo(@TypeOf(old_mem)).Pointer.alignment;
return self.reallocAdvanced(old_mem, old_alignment, new_n, .at_least);
}
/// Deprecated: use `reallocAdvanced`
pub fn alignedRealloc(
self: *Allocator,
old_mem: anytype,
comptime new_alignment: u29,
new_n: usize,
) Error![]align(new_alignment) @typeInfo(@TypeOf(old_mem)).Pointer.child {
return self.reallocAdvanced(old_mem, new_alignment, new_n, .exact);
}
/// This is the same as `realloc`, except caller may additionally request
/// a new alignment, which can be larger, smaller, or the same as the old
/// allocation.
pub fn reallocAdvanced(
self: *Allocator,
old_mem: anytype,
comptime new_alignment: u29,
new_n: usize,
exact: Exact,
) Error![]align(new_alignment) @typeInfo(@TypeOf(old_mem)).Pointer.child {
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
const T = Slice.child;
if (old_mem.len == 0) {
return self.allocAdvanced(T, new_alignment, new_n, exact);
}
if (new_n == 0) {
self.free(old_mem);
return @as([*]align(new_alignment) T, undefined)[0..0];
}
const old_byte_slice = mem.sliceAsBytes(old_mem);
const byte_count = math.mul(usize, @sizeOf(T), new_n) catch return Error.OutOfMemory;
// Note: can't set shrunk memory to undefined as memory shouldn't be modified on realloc failure
const new_byte_slice = try self.reallocBytes(old_byte_slice, Slice.alignment, byte_count, new_alignment, if (exact == .exact) @as(u29, 0) else @sizeOf(T));
return mem.bytesAsSlice(T, @alignCast(new_alignment, new_byte_slice));
}
/// Prefer calling realloc to shrink if you can tolerate failure, such as
/// in an ArrayList data structure with a storage capacity.
/// Shrink always succeeds, and `new_n` must be <= `old_mem.len`.
/// Returned slice has same alignment as old_mem.
/// Shrinking to 0 is the same as calling `free`.
pub fn shrink(self: *Allocator, old_mem: anytype, new_n: usize) t: {
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
break :t []align(Slice.alignment) Slice.child;
} {
const old_alignment = @typeInfo(@TypeOf(old_mem)).Pointer.alignment;
return self.alignedShrink(old_mem, old_alignment, new_n);
}
/// This is the same as `shrink`, except caller may additionally request
/// a new alignment, which must be smaller or the same as the old
/// allocation.
pub fn alignedShrink(
self: *Allocator,
old_mem: anytype,
comptime new_alignment: u29,
new_n: usize,
) []align(new_alignment) @typeInfo(@TypeOf(old_mem)).Pointer.child {
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
const T = Slice.child;
if (new_n == old_mem.len)
return old_mem;
assert(new_n < old_mem.len);
assert(new_alignment <= Slice.alignment);
// Here we skip the overflow checking on the multiplication because
// new_n <= old_mem.len and the multiplication didn't overflow for that operation.
const byte_count = @sizeOf(T) * new_n;
const old_byte_slice = mem.sliceAsBytes(old_mem);
// TODO: https://github.com/ziglang/zig/issues/4298
@memset(old_byte_slice.ptr + byte_count, undefined, old_byte_slice.len - byte_count);
_ = self.shrinkBytes(old_byte_slice, Slice.alignment, byte_count, 0);
return old_mem[0..new_n];
}
/// Free an array allocated with `alloc`. To free a single item,
/// see `destroy`.
pub fn free(self: *Allocator, memory: anytype) void {
const Slice = @typeInfo(@TypeOf(memory)).Pointer;
const bytes = mem.sliceAsBytes(memory);
const bytes_len = bytes.len + if (Slice.sentinel != null) @sizeOf(Slice.child) else 0;
if (bytes_len == 0) return;
const non_const_ptr = @intToPtr([*]u8, @ptrToInt(bytes.ptr));
// TODO: https://github.com/ziglang/zig/issues/4298
@memset(non_const_ptr, undefined, bytes_len);
_ = self.shrinkBytes(non_const_ptr[0..bytes_len], Slice.alignment, 0, 0);
}
/// Copies `m` to newly allocated memory. Caller owns the memory.
pub fn dupe(allocator: *Allocator, comptime T: type, m: []const T) ![]T {
const new_buf = try allocator.alloc(T, m.len);
mem.copy(T, new_buf, m);
return new_buf;
}
/// Copies `m` to newly allocated memory, with a null-terminated element. Caller owns the memory.
pub fn dupeZ(allocator: *Allocator, comptime T: type, m: []const T) ![:0]T {
const new_buf = try allocator.alloc(T, m.len + 1);
mem.copy(T, new_buf, m);
new_buf[m.len] = 0;
return new_buf[0..m.len :0];
}


@ -30,49 +30,7 @@ const ResetEvent = std.ResetEvent;
/// // ... lock not acquired
/// }
pub const Mutex = if (builtin.single_threaded)
struct {
lock: @TypeOf(lock_init),
const lock_init = if (std.debug.runtime_safety) false else {};
pub const Held = struct {
mutex: *Mutex,
pub fn release(self: Held) void {
if (std.debug.runtime_safety) {
self.mutex.lock = false;
}
}
};
/// Create a new mutex in unlocked state.
pub fn init() Mutex {
return Mutex{ .lock = lock_init };
}
/// Free a mutex created with init. Calling this while the
/// mutex is held is illegal behavior.
pub fn deinit(self: *Mutex) void {
self.* = undefined;
}
/// Try to acquire the mutex without blocking. Returns null if
/// the mutex is unavailable. Otherwise returns Held. Call
/// release on Held.
pub fn tryAcquire(self: *Mutex) ?Held {
if (std.debug.runtime_safety) {
if (self.lock) return null;
self.lock = true;
}
return Held{ .mutex = self };
}
/// Acquire the mutex. Will deadlock if the mutex is already
/// held by the calling thread.
pub fn acquire(self: *Mutex) Held {
return self.tryAcquire() orelse @panic("deadlock detected");
}
}
Dummy
else if (builtin.os.tag == .windows)
// https://locklessinc.com/articles/keyed_events/
extern union {
@ -82,6 +40,8 @@ else if (builtin.os.tag == .windows)
const WAKE = 1 << 8;
const WAIT = 1 << 9;
pub const Dummy = Dummy;
pub fn init() Mutex {
return Mutex{ .waiters = 0 };
}
@ -166,6 +126,8 @@ else if (builtin.link_libc or builtin.os.tag == .linux)
struct {
state: usize,
pub const Dummy = Dummy;
/// number of times to spin trying to acquire the lock.
/// https://webkit.org/blog/6161/locking-in-webkit/
const SPIN_COUNT = 40;
@ -298,6 +260,52 @@ else if (builtin.link_libc or builtin.os.tag == .linux)
else
SpinLock;
/// This has the same semantics as `Mutex`; however, it does not actually do any
/// synchronization. Operations are safety-checked no-ops.
pub const Dummy = struct {
lock: @TypeOf(lock_init),
const lock_init = if (std.debug.runtime_safety) false else {};
pub const Held = struct {
mutex: *Dummy,
pub fn release(self: Held) void {
if (std.debug.runtime_safety) {
self.mutex.lock = false;
}
}
};
/// Create a new mutex in unlocked state.
pub fn init() Dummy {
return Dummy{ .lock = lock_init };
}
/// Free a mutex created with init. Calling this while the
/// mutex is held is illegal behavior.
pub fn deinit(self: *Dummy) void {
self.* = undefined;
}
/// Try to acquire the mutex without blocking. Returns null if
/// the mutex is unavailable. Otherwise returns Held. Call
/// release on Held.
pub fn tryAcquire(self: *Dummy) ?Held {
if (std.debug.runtime_safety) {
if (self.lock) return null;
self.lock = true;
}
return Held{ .mutex = self };
}
/// Acquire the mutex. Will deadlock if the mutex is already
/// held by the calling thread.
pub fn acquire(self: *Dummy) Held {
return self.tryAcquire() orelse @panic("deadlock detected");
}
};
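Because `Dummy` exposes the same API as the real mutexes, it can stand in wherever locking is unnecessary. A brief sketch (test name illustrative):

const std = @import("std");

test "Dummy mutex usage" {
    var mutex = std.Mutex.Dummy.init();
    defer mutex.deinit();

    const held = mutex.acquire();
    defer held.release();
    // No real synchronization happens; misuse is caught only in safe builds.
}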
const TestContext = struct {
mutex: *Mutex,
data: i128,


@ -20,14 +20,15 @@ pub fn main() anyerror!void {
async_frame_buffer = &[_]u8{};
for (test_fn_list) |test_fn, i| {
std.testing.base_allocator_instance.reset();
std.testing.allocator_instance = std.heap.GeneralPurposeAllocator(.{}){};
defer std.testing.allocator_instance.deinit();
std.testing.log_level = .warn;
var test_node = root_node.start(test_fn.name, null);
test_node.activate();
progress.refresh();
if (progress.terminal == null) {
std.debug.warn("{}/{} {}...", .{ i + 1, test_fn_list.len, test_fn.name });
std.debug.print("{}/{} {}...", .{ i + 1, test_fn_list.len, test_fn.name });
}
const result = if (test_fn.async_frame_size) |size| switch (io_mode) {
.evented => blk: {
@ -42,24 +43,20 @@ pub fn main() anyerror!void {
skip_count += 1;
test_node.end();
progress.log("{}...SKIP (async test)\n", .{test_fn.name});
if (progress.terminal == null) std.debug.warn("SKIP (async test)\n", .{});
if (progress.terminal == null) std.debug.print("SKIP (async test)\n", .{});
continue;
},
} else test_fn.func();
if (result) |_| {
ok_count += 1;
test_node.end();
std.testing.allocator_instance.validate() catch |err| switch (err) {
error.Leak => std.debug.panic("", .{}),
else => std.debug.panic("error.{}", .{@errorName(err)}),
};
if (progress.terminal == null) std.debug.warn("OK\n", .{});
if (progress.terminal == null) std.debug.print("OK\n", .{});
} else |err| switch (err) {
error.SkipZigTest => {
skip_count += 1;
test_node.end();
progress.log("{}...SKIP\n", .{test_fn.name});
if (progress.terminal == null) std.debug.warn("SKIP\n", .{});
if (progress.terminal == null) std.debug.print("SKIP\n", .{});
},
else => {
progress.log("", .{});
@ -69,9 +66,9 @@ pub fn main() anyerror!void {
}
root_node.end();
if (ok_count == test_fn_list.len) {
std.debug.warn("All {} tests passed.\n", .{ok_count});
std.debug.print("All {} tests passed.\n", .{ok_count});
} else {
std.debug.warn("{} passed; {} skipped.\n", .{ ok_count, skip_count });
std.debug.print("{} passed; {} skipped.\n", .{ ok_count, skip_count });
}
}
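The same per-instance pattern works outside the test runner; a minimal sketch of using the new allocator directly, with all configuration fields left at their defaults:

const std = @import("std");

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    // deinit reports leaked allocations; double-free and use-after-free
    // are caught at the offending call in safe builds.
    defer gpa.deinit();
    const allocator = &gpa.allocator;

    const buf = try allocator.alloc(u8, 100);
    defer allocator.free(buf);
}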


@ -1,18 +1,16 @@
const std = @import("std.zig");
const warn = std.debug.warn;
const print = std.debug.print;
pub const LeakCountAllocator = @import("testing/leak_count_allocator.zig").LeakCountAllocator;
pub const FailingAllocator = @import("testing/failing_allocator.zig").FailingAllocator;
/// This should only be used in temporary test programs.
pub const allocator = &allocator_instance.allocator;
pub var allocator_instance = LeakCountAllocator.init(&base_allocator_instance.allocator);
pub var allocator_instance: std.heap.GeneralPurposeAllocator(.{}) = undefined;
pub const failing_allocator = &failing_allocator_instance.allocator;
pub var failing_allocator_instance = FailingAllocator.init(&base_allocator_instance.allocator, 0);
pub var base_allocator_instance = std.mem.validationWrap(std.heap.ThreadSafeFixedBufferAllocator.init(allocator_mem[0..]));
var allocator_mem: [2 * 1024 * 1024]u8 = undefined;
pub var base_allocator_instance = std.heap.FixedBufferAllocator.init("");
/// TODO https://github.com/ziglang/zig/issues/5738
pub var log_level = std.log.Level.warn;
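Since `allocator_instance` is now a `GeneralPurposeAllocator`, any allocation a test fails to free is reported when the runner deinitializes the instance after that test. A sketch of the pattern the checking enforces (test name and size illustrative):

const std = @import("std");

test "freed allocations pass the leak check" {
    const slice = try std.testing.allocator.alloc(u8, 16);
    // Omitting this free would make the test runner report a leak when it
    // deinitializes the general purpose allocator after the test.
    defer std.testing.allocator.free(slice);
}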
@ -326,22 +324,22 @@ test "expectEqual vector" {
pub fn expectEqualStrings(expected: []const u8, actual: []const u8) void {
if (std.mem.indexOfDiff(u8, actual, expected)) |diff_index| {
warn("\n====== expected this output: =========\n", .{});
print("\n====== expected this output: =========\n", .{});
printWithVisibleNewlines(expected);
warn("\n======== instead found this: =========\n", .{});
print("\n======== instead found this: =========\n", .{});
printWithVisibleNewlines(actual);
warn("\n======================================\n", .{});
print("\n======================================\n", .{});
var diff_line_number: usize = 1;
for (expected[0..diff_index]) |value| {
if (value == '\n') diff_line_number += 1;
}
warn("First difference occurs on line {}:\n", .{diff_line_number});
print("First difference occurs on line {}:\n", .{diff_line_number});
warn("expected:\n", .{});
print("expected:\n", .{});
printIndicatorLine(expected, diff_index);
warn("found:\n", .{});
print("found:\n", .{});
printIndicatorLine(actual, diff_index);
@panic("test failure");
@ -362,9 +360,9 @@ fn printIndicatorLine(source: []const u8, indicator_index: usize) void {
{
var i: usize = line_begin_index;
while (i < indicator_index) : (i += 1)
warn(" ", .{});
print(" ", .{});
}
warn("^\n", .{});
print("^\n", .{});
}
fn printWithVisibleNewlines(source: []const u8) void {
@ -372,15 +370,15 @@ fn printWithVisibleNewlines(source: []const u8) void {
while (std.mem.indexOf(u8, source[i..], "\n")) |nl| : (i += nl + 1) {
printLine(source[i .. i + nl]);
}
warn("{}␃\n", .{source[i..]}); // End of Text symbol (ETX)
print("{}␃\n", .{source[i..]}); // End of Text symbol (ETX)
}
fn printLine(line: []const u8) void {
if (line.len != 0) switch (line[line.len - 1]) {
' ', '\t' => warn("{}⏎\n", .{line}), // Carriage return symbol,
' ', '\t' => print("{}⏎\n", .{line}), // Carriage return symbol,
else => {},
};
warn("{}\n", .{line});
print("{}\n", .{line});
}
test "" {


@ -50,16 +50,22 @@ pub const FailingAllocator = struct {
if (self.index == self.fail_index) {
return error.OutOfMemory;
}
const result = try self.internal_allocator.callAllocFn(len, ptr_align, len_align);
const result = try self.internal_allocator.allocFn(self.internal_allocator, len, ptr_align, len_align);
self.allocated_bytes += result.len;
self.allocations += 1;
self.index += 1;
return result;
}
fn resize(allocator: *std.mem.Allocator, old_mem: []u8, new_len: usize, len_align: u29) error{OutOfMemory}!usize {
fn resize(
allocator: *std.mem.Allocator,
old_mem: []u8,
old_align: u29,
new_len: usize,
len_align: u29,
) error{OutOfMemory}!usize {
const self = @fieldParentPtr(FailingAllocator, "allocator", allocator);
const r = self.internal_allocator.callResizeFn(old_mem, new_len, len_align) catch |e| {
const r = self.internal_allocator.resizeFn(self.internal_allocator, old_mem, old_align, new_len, len_align) catch |e| {
std.debug.assert(new_len > old_mem.len);
return e;
};
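A typical use of `FailingAllocator` is to force `error.OutOfMemory` on the Nth allocation (here the 0th) to exercise failure paths; a brief sketch (test name illustrative):

const std = @import("std");

test "OutOfMemory path" {
    var failing = std.testing.FailingAllocator.init(std.testing.allocator, 0);
    std.testing.expectError(error.OutOfMemory, failing.allocator.alloc(u8, 8));
}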


@ -1,51 +0,0 @@
const std = @import("../std.zig");
/// This allocator is used in front of another allocator and counts the number of allocs and frees.
/// The test runner asserts every alloc has a corresponding free at the end of each test.
///
/// The detection algorithm is incredibly primitive and only accounts for number of calls.
/// This should be replaced by the general purpose debug allocator.
pub const LeakCountAllocator = struct {
count: usize,
allocator: std.mem.Allocator,
internal_allocator: *std.mem.Allocator,
pub fn init(allocator: *std.mem.Allocator) LeakCountAllocator {
return .{
.count = 0,
.allocator = .{
.allocFn = alloc,
.resizeFn = resize,
},
.internal_allocator = allocator,
};
}
fn alloc(allocator: *std.mem.Allocator, len: usize, ptr_align: u29, len_align: u29) error{OutOfMemory}![]u8 {
const self = @fieldParentPtr(LeakCountAllocator, "allocator", allocator);
const ptr = try self.internal_allocator.callAllocFn(len, ptr_align, len_align);
self.count += 1;
return ptr;
}
fn resize(allocator: *std.mem.Allocator, old_mem: []u8, new_size: usize, len_align: u29) error{OutOfMemory}!usize {
const self = @fieldParentPtr(LeakCountAllocator, "allocator", allocator);
if (new_size == 0) {
if (self.count == 0) {
std.debug.panic("error - too many calls to free, most likely double free", .{});
}
self.count -= 1;
}
return self.internal_allocator.callResizeFn(old_mem, new_size, len_align) catch |e| {
std.debug.assert(new_size > old_mem.len);
return e;
};
}
pub fn validate(self: LeakCountAllocator) !void {
if (self.count > 0) {
std.debug.warn("error - detected leaked allocations without matching free: {}\n", .{self.count});
return error.Leak;
}
}
};


@ -25067,12 +25067,12 @@ static PtrLen size_enum_index_to_ptr_len(BuiltinPtrSize size_enum_index) {
zig_unreachable();
}
static ZigValue *create_ptr_like_type_info(IrAnalyze *ira, ZigType *ptr_type_entry) {
Error err;
static ZigValue *create_ptr_like_type_info(IrAnalyze *ira, IrInst *source_instr, ZigType *ptr_type_entry) {
ZigType *attrs_type;
BuiltinPtrSize size_enum_index;
if (is_slice(ptr_type_entry)) {
attrs_type = ptr_type_entry->data.structure.fields[slice_ptr_index]->type_entry;
TypeStructField *ptr_field = ptr_type_entry->data.structure.fields[slice_ptr_index];
attrs_type = resolve_struct_field_type(ira->codegen, ptr_field);
size_enum_index = BuiltinPtrSizeSlice;
} else if (ptr_type_entry->id == ZigTypeIdPointer) {
attrs_type = ptr_type_entry;
@ -25081,9 +25081,6 @@ static ZigValue *create_ptr_like_type_info(IrAnalyze *ira, ZigType *ptr_type_ent
zig_unreachable();
}
if ((err = type_resolve(ira->codegen, attrs_type->data.pointer.child_type, ResolveStatusSizeKnown)))
return nullptr;
ZigType *type_info_pointer_type = ir_type_info_get_type(ira, "Pointer", nullptr);
assertNoError(type_resolve(ira->codegen, type_info_pointer_type, ResolveStatusSizeKnown));
@ -25114,9 +25111,18 @@ static ZigValue *create_ptr_like_type_info(IrAnalyze *ira, ZigType *ptr_type_ent
fields[2]->data.x_bool = attrs_type->data.pointer.is_volatile;
// alignment: u32
ensure_field_index(result->type, "alignment", 3);
fields[3]->special = ConstValSpecialStatic;
fields[3]->type = ira->codegen->builtin_types.entry_num_lit_int;
bigint_init_unsigned(&fields[3]->data.x_bigint, get_ptr_align(ira->codegen, attrs_type));
if (attrs_type->data.pointer.explicit_alignment != 0) {
fields[3]->special = ConstValSpecialStatic;
bigint_init_unsigned(&fields[3]->data.x_bigint, attrs_type->data.pointer.explicit_alignment);
} else {
LazyValueAlignOf *lazy_align_of = heap::c_allocator.create<LazyValueAlignOf>();
lazy_align_of->ira = ira; ira_ref(ira);
fields[3]->special = ConstValSpecialLazy;
fields[3]->data.x_lazy = &lazy_align_of->base;
lazy_align_of->base.id = LazyValueIdAlignOf;
lazy_align_of->target_type = ir_const_type(ira, source_instr, attrs_type->data.pointer.child_type);
}
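    // Note (illustrative rationale): producing the alignment as a lazy value
    // means @typeInfo of a pointer such as *@Frame(func) no longer forces the
    // child type's alignment to resolve eagerly, avoiding the false dependency
    // loops previously hit for pointers to async function frames.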
// child: type
ensure_field_index(result->type, "child", 4);
fields[4]->special = ConstValSpecialStatic;
@ -25130,7 +25136,7 @@ static ZigValue *create_ptr_like_type_info(IrAnalyze *ira, ZigType *ptr_type_ent
// sentinel: anytype
ensure_field_index(result->type, "sentinel", 6);
fields[6]->special = ConstValSpecialStatic;
if (attrs_type->data.pointer.child_type->id != ZigTypeIdOpaque) {
if (attrs_type->data.pointer.sentinel != nullptr) {
fields[6]->type = get_optional_type(ira->codegen, attrs_type->data.pointer.child_type);
set_optional_payload(fields[6], attrs_type->data.pointer.sentinel);
} else {
@ -25165,9 +25171,6 @@ static Error ir_make_type_info_value(IrAnalyze *ira, IrInst* source_instr, ZigTy
assert(type_entry != nullptr);
assert(!type_is_invalid(type_entry));
if ((err = type_resolve(ira->codegen, type_entry, ResolveStatusSizeKnown)))
return err;
auto entry = ira->codegen->type_info_cache.maybe_get(type_entry);
if (entry != nullptr) {
*out = entry->value;
@ -25231,7 +25234,7 @@ static Error ir_make_type_info_value(IrAnalyze *ira, IrInst* source_instr, ZigTy
}
case ZigTypeIdPointer:
{
result = create_ptr_like_type_info(ira, type_entry);
result = create_ptr_like_type_info(ira, source_instr, type_entry);
if (result == nullptr)
return ErrorSemanticAnalyzeFail;
break;
@ -25317,6 +25320,9 @@ static Error ir_make_type_info_value(IrAnalyze *ira, IrInst* source_instr, ZigTy
}
case ZigTypeIdEnum:
{
if ((err = type_resolve(ira->codegen, type_entry, ResolveStatusSizeKnown)))
return err;
result = ira->codegen->pass1_arena->create<ZigValue>();
result->special = ConstValSpecialStatic;
result->type = ir_type_info_get_type(ira, "Enum", nullptr);
@ -25455,6 +25461,9 @@ static Error ir_make_type_info_value(IrAnalyze *ira, IrInst* source_instr, ZigTy
}
case ZigTypeIdUnion:
{
if ((err = type_resolve(ira->codegen, type_entry, ResolveStatusSizeKnown)))
return err;
result = ira->codegen->pass1_arena->create<ZigValue>();
result->special = ConstValSpecialStatic;
result->type = ir_type_info_get_type(ira, "Union", nullptr);
@ -25545,12 +25554,15 @@ static Error ir_make_type_info_value(IrAnalyze *ira, IrInst* source_instr, ZigTy
case ZigTypeIdStruct:
{
if (type_entry->data.structure.special == StructSpecialSlice) {
result = create_ptr_like_type_info(ira, type_entry);
result = create_ptr_like_type_info(ira, source_instr, type_entry);
if (result == nullptr)
return ErrorSemanticAnalyzeFail;
break;
}
if ((err = type_resolve(ira->codegen, type_entry, ResolveStatusSizeKnown)))
return err;
result = ira->codegen->pass1_arena->create<ZigValue>();
result->special = ConstValSpecialStatic;
result->type = ir_type_info_get_type(ira, "Struct", nullptr);