diff --git a/lib/std/heap.zig b/lib/std/heap.zig
index 2e881dff94..88f27fad49 100644
--- a/lib/std/heap.zig
+++ b/lib/std/heap.zig
@@ -100,7 +100,7 @@ pub fn alignPageAllocLen(full_len: usize, len: usize, len_align: u29) usize {
 pub var next_mmap_addr_hint: ?[*]align(mem.page_size) u8 = null;
 
 const PageAllocator = struct {
-    fn alloc(allocator: *Allocator, n: usize, alignment: u29, len_align: u29) error{OutOfMemory}![]u8 {
+    fn alloc(allocator: *Allocator, n: usize, alignment: u29, len_align: u29, ra: usize) error{OutOfMemory}![]u8 {
         assert(n > 0);
         const aligned_len = mem.alignForward(n, mem.page_size);
 
@@ -196,7 +196,14 @@ const PageAllocator = struct {
         return result_ptr[0..alignPageAllocLen(aligned_len, n, len_align)];
     }
 
-    fn resize(allocator: *Allocator, buf_unaligned: []u8, buf_align: u29, new_size: usize, len_align: u29) Allocator.Error!usize {
+    fn resize(
+        allocator: *Allocator,
+        buf_unaligned: []u8,
+        buf_align: u29,
+        new_size: usize,
+        len_align: u29,
+        return_address: usize,
+    ) Allocator.Error!usize {
         const new_size_aligned = mem.alignForward(new_size, mem.page_size);
 
         if (builtin.os.tag == .windows) {
@@ -344,7 +351,7 @@ const WasmPageAllocator = struct {
         return mem.alignForward(memsize, mem.page_size) / mem.page_size;
     }
 
-    fn alloc(allocator: *Allocator, len: usize, alignment: u29, len_align: u29) error{OutOfMemory}![]u8 {
+    fn alloc(allocator: *Allocator, len: usize, alignment: u29, len_align: u29, ra: usize) error{OutOfMemory}![]u8 {
         const page_count = nPages(len);
         const page_idx = try allocPages(page_count, alignment);
         return @intToPtr([*]u8, page_idx * mem.page_size)[0..alignPageAllocLen(page_count * mem.page_size, len, len_align)];
@@ -397,7 +404,14 @@ const WasmPageAllocator = struct {
         }
     }
 
-    fn resize(allocator: *Allocator, buf: []u8, new_len: usize, len_align: u29) error{OutOfMemory}!usize {
+    fn resize(
+        allocator: *Allocator,
+        buf: []u8,
+        buf_align: u29,
+        new_len: usize,
+        len_align: u29,
+        return_address: usize,
+    ) error{OutOfMemory}!usize {
         const aligned_len = mem.alignForward(buf.len, mem.page_size);
         if (new_len > aligned_len) return error.OutOfMemory;
         const current_n = nPages(aligned_len);
@@ -437,7 +451,13 @@ pub const HeapAllocator = switch (builtin.os.tag) {
             return @intToPtr(*align(1) usize, @ptrToInt(buf.ptr) + buf.len);
         }
 
-        fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29) error{OutOfMemory}![]u8 {
+        fn alloc(
+            allocator: *Allocator,
+            n: usize,
+            ptr_align: u29,
+            len_align: u29,
+            return_address: usize,
+        ) error{OutOfMemory}![]u8 {
             const self = @fieldParentPtr(HeapAllocator, "allocator", allocator);
 
             const amt = n + ptr_align - 1 + @sizeOf(usize);
@@ -470,6 +490,7 @@ pub const HeapAllocator = switch (builtin.os.tag) {
             buf_align: u29,
             new_size: usize,
             len_align: u29,
+            return_address: usize,
         ) error{OutOfMemory}!usize {
             const self = @fieldParentPtr(HeapAllocator, "allocator", allocator);
             if (new_size == 0) {
@@ -542,7 +563,7 @@ pub const FixedBufferAllocator = struct {
         return buf.ptr + buf.len == self.buffer.ptr + self.end_index;
     }
 
-    fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29) ![]u8 {
+    fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 {
        const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
         const aligned_addr = mem.alignForward(@ptrToInt(self.buffer.ptr) + self.end_index, ptr_align);
         const adjusted_index = aligned_addr - @ptrToInt(self.buffer.ptr);
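Note on the hunks above: implementations that collect no stack traces simply accept the new trailing parameter (`ra`/`return_address`) and ignore it. A minimal sketch of an implementation conforming to the new signatures — modeled on the `failAllocator` that appears later in this patch; the `noop` names are illustrative, not part of the change:

```zig
const std = @import("std");
const Allocator = std.mem.Allocator;

// Smallest conforming implementation under the new vtable: it never
// allocates, and ignores `ret_addr` just like PageAllocator's `ra` above.
var noop_allocator = Allocator{
    .allocFn = noopAlloc,
    .resizeFn = Allocator.noResize,
};

fn noopAlloc(self: *Allocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Allocator.Error![]u8 {
    return error.OutOfMemory;
}
```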
@@ -556,7 +577,14 @@ pub const FixedBufferAllocator = struct {
         return result;
     }
 
-    fn resize(allocator: *Allocator, buf: []u8, buf_align: u29, new_size: usize, len_align: u29) Allocator.Error!usize {
+    fn resize(
+        allocator: *Allocator,
+        buf: []u8,
+        buf_align: u29,
+        new_size: usize,
+        len_align: u29,
+        return_address: usize,
+    ) Allocator.Error!usize {
         const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
         assert(self.ownsSlice(buf)); // sanity check
 
@@ -606,7 +634,7 @@ pub const ThreadSafeFixedBufferAllocator = blk: {
         };
     }
 
-    fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29) ![]u8 {
+    fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 {
         const self = @fieldParentPtr(ThreadSafeFixedBufferAllocator, "allocator", allocator);
         var end_index = @atomicLoad(usize, &self.end_index, builtin.AtomicOrder.SeqCst);
         while (true) {
@@ -654,18 +682,31 @@ pub fn StackFallbackAllocator(comptime size: usize) type {
             return &self.allocator;
         }
 
-        fn alloc(allocator: *Allocator, len: usize, ptr_align: u29, len_align: u29) error{OutOfMemory}![*]u8 {
+        fn alloc(
+            allocator: *Allocator,
+            len: usize,
+            ptr_align: u29,
+            len_align: u29,
+            return_address: usize,
+        ) error{OutOfMemory}![*]u8 {
             const self = @fieldParentPtr(Self, "allocator", allocator);
             return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator, len, ptr_align) catch
                 return fallback_allocator.alloc(len, ptr_align);
         }
 
-        fn resize(self: *Allocator, buf: []u8, new_len: usize, len_align: u29) error{OutOfMemory}!void {
+        fn resize(
+            self: *Allocator,
+            buf: []u8,
+            buf_align: u29,
+            new_len: usize,
+            len_align: u29,
+            return_address: usize,
+        ) error{OutOfMemory}!void {
             const self = @fieldParentPtr(Self, "allocator", allocator);
             if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) {
-                try self.fixed_buffer_allocator.callResizeFn(buf, new_len);
+                try self.fixed_buffer_allocator.resize(buf, new_len);
            } else {
-                try self.fallback_allocator.callResizeFn(buf, new_len);
+                try self.fallback_allocator.resize(buf, new_len);
            }
        }
    };
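The test hunk that follows migrates from the now-removed `alignedRealloc` to `reallocAdvanced` with an explicit `.exact` policy; at call sites the two are interchangeable. A usage sketch (the slice, alignment, and size values are taken from the test below, purely for illustration):

```zig
// Before this patch: alignment given, exact length implied.
slice = try allocator.alignedRealloc(slice, mem.page_size * 32, alloc_size / 2);

// After this patch: the same request, with the length policy spelled out.
slice = try allocator.reallocAdvanced(slice, mem.page_size * 32, alloc_size / 2, .exact);
```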
@@ -950,7 +991,7 @@ pub fn testAllocatorAlignedShrink(base_allocator: *mem.Allocator) mem.Allocator.
     slice[60] = 0x34;
 
     // realloc to a smaller size but with a larger alignment
-    slice = try allocator.alignedRealloc(slice, mem.page_size * 32, alloc_size / 2);
+    slice = try allocator.reallocAdvanced(slice, mem.page_size * 32, alloc_size / 2, .exact);
     testing.expect(slice[0] == 0x12);
     testing.expect(slice[60] == 0x34);
 }
diff --git a/lib/std/heap/arena_allocator.zig b/lib/std/heap/arena_allocator.zig
index 4a833bcb28..191f0c19e9 100644
--- a/lib/std/heap/arena_allocator.zig
+++ b/lib/std/heap/arena_allocator.zig
@@ -49,7 +49,7 @@ pub const ArenaAllocator = struct {
         const actual_min_size = minimum_size + (@sizeOf(BufNode) + 16);
         const big_enough_len = prev_len + actual_min_size;
         const len = big_enough_len + big_enough_len / 2;
-        const buf = try self.child_allocator.allocFn(self.child_allocator, len, @alignOf(BufNode), 1);
+        const buf = try self.child_allocator.allocFn(self.child_allocator, len, @alignOf(BufNode), 1, @returnAddress());
         const buf_node = @ptrCast(*BufNode, @alignCast(@alignOf(BufNode), buf.ptr));
         buf_node.* = BufNode{
             .data = buf,
@@ -60,7 +60,7 @@ pub const ArenaAllocator = struct {
         return buf_node;
     }
 
-    fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29) ![]u8 {
+    fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 {
         const self = @fieldParentPtr(ArenaAllocator, "allocator", allocator);
         var cur_node = if (self.state.buffer_list.first) |first_node| first_node else try self.createNode(0, n + ptr_align);
diff --git a/lib/std/heap/general_purpose_allocator.zig b/lib/std/heap/general_purpose_allocator.zig
index 6ed9980a4f..9abf9cf253 100644
--- a/lib/std/heap/general_purpose_allocator.zig
+++ b/lib/std/heap/general_purpose_allocator.zig
@@ -104,7 +104,7 @@ const SlotIndex = std.meta.Int(false, math.log2(page_size) + 1);
 
 pub const Config = struct {
     /// Number of stack frames to capture.
-    stack_trace_frames: usize = if (std.debug.runtime_safety) @as(usize, 6) else @as(usize, 0),
+    stack_trace_frames: usize = if (std.debug.runtime_safety) @as(usize, 4) else @as(usize, 0),
 
     /// If true, the allocator will have two fields:
     ///  * `total_requested_bytes` which tracks the total allocated bytes of memory requested.
@@ -199,7 +199,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
 
         fn captureStackTrace(
             bucket: *BucketHeader,
-            return_address: usize,
+            ret_addr: usize,
             size_class: usize,
             slot_index: SlotIndex,
             trace_kind: TraceKind,
@@ -207,7 +207,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
             // Initialize them to 0. When determining the count we must look
             // for non zero addresses.
             const stack_addresses = bucket.stackTracePtr(size_class, slot_index, trace_kind);
-            collectStackTrace(return_address, stack_addresses);
+            collectStackTrace(ret_addr, stack_addresses);
         }
     };
 
@@ -284,7 +284,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
             var leaks = false;
             for (self.buckets) |optional_bucket, bucket_i| {
                 const first_bucket = optional_bucket orelse continue;
-                const size_class = @as(usize, 1) << @intCast(u6, bucket_i);
+                const size_class = @as(usize, 1) << @intCast(math.Log2Int(usize), bucket_i);
                 const used_bits_count = usedBitsCount(size_class);
                 var bucket = first_bucket;
                 while (true) {
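Two side changes ride along in the hunks above: the shift amount now uses `math.Log2Int(usize)` instead of a hard-coded `u6`, which keeps the leak check correct on 32-bit targets, and the default trace depth drops from 6 to 4 frames, presumably to cut per-allocation bookkeeping. Callers wanting deeper traces can still opt in; a usage sketch, assuming this era's `GeneralPurposeAllocator` initialization style:

```zig
// Hypothetical configuration: capture 8 frames instead of the new default 4.
var gpa = std.heap.GeneralPurposeAllocator(.{ .stack_trace_frames = 8 }){};
defer {
    _ = gpa.deinit(); // leak report uses the captured traces
}
const allocator = &gpa.allocator;
```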
@@ -377,7 +377,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
             trace_addr: usize,
         ) void {
             // Capture stack trace to be the "first free", in case a double free happens.
-            bucket.captureStackTrace(@returnAddress(), size_class, slot_index, .free);
+            bucket.captureStackTrace(trace_addr, size_class, slot_index, .free);
 
             used_byte.* &= ~(@as(u8, 1) << used_bit_index);
             bucket.used_count -= 1;
@@ -408,7 +408,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
             old_align: u29,
             new_size: usize,
             len_align: u29,
-            return_addr: usize,
+            ret_addr: usize,
         ) Error!usize {
             const entry = self.large_allocations.getEntry(@ptrToInt(old_mem.ptr)) orelse {
                 if (config.safety) {
@@ -428,7 +428,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
                 @panic("\nFree here:");
             }
 
-            const result_len = try self.backing_allocator.resizeFn(self.backing_allocator, old_mem, old_align, new_size, len_align);
+            const result_len = try self.backing_allocator.resizeFn(self.backing_allocator, old_mem, old_align, new_size, len_align, ret_addr);
 
             if (result_len == 0) {
                 self.large_allocations.removeAssertDiscard(@ptrToInt(old_mem.ptr));
@@ -436,7 +436,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
             }
 
             entry.value.bytes = old_mem.ptr[0..result_len];
-            collectStackTrace(return_addr, &entry.value.stack_addresses);
+            collectStackTrace(ret_addr, &entry.value.stack_addresses);
             return result_len;
         }
 
@@ -450,6 +450,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
             old_align: u29,
             new_size: usize,
             len_align: u29,
+            ret_addr: usize,
         ) Error!usize {
             const self = @fieldParentPtr(Self, "allocator", allocator);
 
@@ -472,7 +473,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
             const aligned_size = math.max(old_mem.len, old_align);
             if (aligned_size > largest_bucket_object_size) {
-                return self.resizeLarge(old_mem, old_align, new_size, len_align, @returnAddress());
+                return self.resizeLarge(old_mem, old_align, new_size, len_align, ret_addr);
             }
             const size_class_hint = up_to_nearest_power_of_2(usize, aligned_size);
@@ -484,7 +485,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
                 }
                 size_class *= 2;
             } else {
-                return self.resizeLarge(old_mem, old_align, new_size, len_align, @returnAddress());
+                return self.resizeLarge(old_mem, old_align, new_size, len_align, ret_addr);
             };
             const byte_offset = @ptrToInt(old_mem.ptr) - @ptrToInt(bucket.page);
             const slot_index = @intCast(SlotIndex, byte_offset / size_class);
@@ -507,7 +508,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
                 }
             }
             if (new_size == 0) {
-                self.freeSlot(bucket, bucket_index, size_class, slot_index, used_byte, used_bit_index, @returnAddress());
+                self.freeSlot(bucket, bucket_index, size_class, slot_index, used_byte, used_bit_index, ret_addr);
                 return @as(usize, 0);
             }
             const new_aligned_size = math.max(new_size, old_align);
@@ -518,7 +519,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
             return error.OutOfMemory;
         }
 
-        fn alloc(allocator: *Allocator, len: usize, ptr_align: u29, len_align: u29) Error![]u8 {
+        fn alloc(allocator: *Allocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8 {
             const self = @fieldParentPtr(Self, "allocator", allocator);
 
             const held = self.mutex.acquire();
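This is the core of the patch: every internal `@returnAddress()` becomes the `ret_addr` threaded in from the public `Allocator` wrappers, so a captured trace begins at the user's call site rather than inside `resize` or `alloc` themselves. For context, a sketch of how such an address seeds a trace — a hypothetical helper mirroring what this file's `collectStackTrace` does, and assuming the two-argument `StackIterator.init` of this era:

```zig
fn collect(first_ret_addr: usize, addresses: []usize) void {
    std.mem.set(usize, addresses, 0); // unused slots stay zero
    var it = std.debug.StackIterator.init(first_ret_addr, null);
    for (addresses) |*addr| {
        addr.* = it.next() orelse break;
    }
}
```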
@@ -543,17 +544,17 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
                     self.large_allocations.entries.items.len + 1,
                 );
 
-                const slice = try self.backing_allocator.allocFn(self.backing_allocator, len, ptr_align, len_align);
+                const slice = try self.backing_allocator.allocFn(self.backing_allocator, len, ptr_align, len_align, ret_addr);
 
                 const gop = self.large_allocations.getOrPutAssumeCapacity(@ptrToInt(slice.ptr));
                 assert(!gop.found_existing); // This would mean the kernel double-mapped pages.
                 gop.entry.value.bytes = slice;
-                collectStackTrace(@returnAddress(), &gop.entry.value.stack_addresses);
+                collectStackTrace(ret_addr, &gop.entry.value.stack_addresses);
 
                 return slice;
             } else {
                 const new_size_class = up_to_nearest_power_of_2(usize, new_aligned_size);
-                const ptr = try self.allocSlot(new_size_class, @returnAddress());
+                const ptr = try self.allocSlot(new_size_class, ret_addr);
                 return ptr[0..len];
             }
         }
@@ -782,7 +783,7 @@ test "shrink large object to large object with larger alignment" {
     slice[0] = 0x12;
     slice[60] = 0x34;
 
-    slice = try allocator.alignedRealloc(slice, page_size * 2, alloc_size / 2);
+    slice = try allocator.reallocAdvanced(slice, page_size * 2, alloc_size / 2, .exact);
     assert(slice[0] == 0x12);
     assert(slice[60] == 0x34);
 }
@@ -833,15 +834,15 @@ test "realloc large object to larger alignment" {
     slice[0] = 0x12;
     slice[16] = 0x34;
 
-    slice = try allocator.alignedRealloc(slice, 32, page_size * 2 + 100);
+    slice = try allocator.reallocAdvanced(slice, 32, page_size * 2 + 100, .exact);
     assert(slice[0] == 0x12);
     assert(slice[16] == 0x34);
 
-    slice = try allocator.alignedRealloc(slice, 32, page_size * 2 + 25);
+    slice = try allocator.reallocAdvanced(slice, 32, page_size * 2 + 25, .exact);
     assert(slice[0] == 0x12);
     assert(slice[16] == 0x34);
 
-    slice = try allocator.alignedRealloc(slice, page_size * 2, page_size * 2 + 100);
+    slice = try allocator.reallocAdvanced(slice, page_size * 2, page_size * 2 + 100, .exact);
     assert(slice[0] == 0x12);
     assert(slice[16] == 0x34);
 }
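The `LoggingAllocator` changes in the next file show the rule for wrapper allocators: a wrapper must pass the `ra` it received straight through, never capture a fresh `@returnAddress()`, or every trace would point into the wrapper itself. A sketch of the forwarding shape, condensed from the hunks below:

```zig
// Wrapper pattern: log, then forward all five arguments unchanged so the
// parent allocator attributes the allocation to the original caller.
fn alloc(allocator: *Allocator, len: usize, ptr_align: u29, len_align: u29, ra: usize) Allocator.Error![]u8 {
    const self = @fieldParentPtr(Self, "allocator", allocator);
    return self.parent_allocator.allocFn(self.parent_allocator, len, ptr_align, len_align, ra);
}
```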
diff --git a/lib/std/heap/logging_allocator.zig b/lib/std/heap/logging_allocator.zig
index 5f91efa10a..eff20a5c46 100644
--- a/lib/std/heap/logging_allocator.zig
+++ b/lib/std/heap/logging_allocator.zig
@@ -23,10 +23,16 @@ pub fn LoggingAllocator(comptime OutStreamType: type) type {
             };
         }
 
-        fn alloc(allocator: *Allocator, len: usize, ptr_align: u29, len_align: u29) error{OutOfMemory}![]u8 {
+        fn alloc(
+            allocator: *Allocator,
+            len: usize,
+            ptr_align: u29,
+            len_align: u29,
+            ra: usize,
+        ) error{OutOfMemory}![]u8 {
             const self = @fieldParentPtr(Self, "allocator", allocator);
             self.out_stream.print("alloc : {}", .{len}) catch {};
-            const result = self.parent_allocator.allocFn(self.parent_allocator, len, ptr_align, len_align);
+            const result = self.parent_allocator.allocFn(self.parent_allocator, len, ptr_align, len_align, ra);
             if (result) |buff| {
                 self.out_stream.print(" success!\n", .{}) catch {};
             } else |err| {
@@ -41,6 +47,7 @@ pub fn LoggingAllocator(comptime OutStreamType: type) type {
             buf_align: u29,
             new_len: usize,
             len_align: u29,
+            ra: usize,
         ) error{OutOfMemory}!usize {
             const self = @fieldParentPtr(Self, "allocator", allocator);
             if (new_len == 0) {
@@ -50,7 +57,7 @@ pub fn LoggingAllocator(comptime OutStreamType: type) type {
             } else {
                 self.out_stream.print("expand: {} to {}", .{ buf.len, new_len }) catch {};
             }
-            if (self.parent_allocator.resizeFn(self.parent_allocator, buf, buf_align, new_len, len_align)) |resized_len| {
+            if (self.parent_allocator.resizeFn(self.parent_allocator, buf, buf_align, new_len, len_align, ra)) |resized_len| {
                 if (new_len > buf.len) {
                     self.out_stream.print(" success!\n", .{}) catch {};
                 }
@@ -80,9 +87,9 @@ test "LoggingAllocator" {
     const allocator = &loggingAllocator(&fixedBufferAllocator.allocator, fbs.outStream()).allocator;
 
     var a = try allocator.alloc(u8, 10);
-    a.len = allocator.shrinkBytes(a, 1, 5, 0);
+    a = allocator.shrink(a, 5);
     std.debug.assert(a.len == 5);
-    std.testing.expectError(error.OutOfMemory, allocator.resizeFn(allocator, a, 1, 20, 0));
+    std.testing.expectError(error.OutOfMemory, allocator.resize(a, 20));
     allocator.free(a);
 
     std.testing.expectEqualSlices(u8,
diff --git a/lib/std/mem.zig b/lib/std/mem.zig
index a8ca09fb74..5a0927ea6e 100644
--- a/lib/std/mem.zig
+++ b/lib/std/mem.zig
@@ -37,7 +37,13 @@ pub fn ValidationAllocator(comptime T: type) type {
         if (*T == *Allocator) return &self.underlying_allocator;
         return &self.underlying_allocator.allocator;
     }
-    pub fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29) Allocator.Error![]u8 {
+    pub fn alloc(
+        allocator: *Allocator,
+        n: usize,
+        ptr_align: u29,
+        len_align: u29,
+        ret_addr: usize,
+    ) Allocator.Error![]u8 {
         assert(n > 0);
         assert(mem.isValidAlign(ptr_align));
         if (len_align != 0) {
@@ -47,7 +53,7 @@ pub fn ValidationAllocator(comptime T: type) type {
 
         const self = @fieldParentPtr(@This(), "allocator", allocator);
         const underlying = self.getUnderlyingAllocatorPtr();
-        const result = try underlying.allocFn(underlying, n, ptr_align, len_align);
+        const result = try underlying.allocFn(underlying, n, ptr_align, len_align, ret_addr);
         assert(mem.isAligned(@ptrToInt(result.ptr), ptr_align));
         if (len_align == 0) {
             assert(result.len == n);
@@ -63,6 +69,7 @@ pub fn ValidationAllocator(comptime T: type) type {
         buf_align: u29,
         new_len: usize,
         len_align: u29,
+        ret_addr: usize,
     ) Allocator.Error!usize {
         assert(buf.len > 0);
         if (len_align != 0) {
@@ -71,7 +78,7 @@ pub fn ValidationAllocator(comptime T: type) type {
         }
         const self = @fieldParentPtr(@This(), "allocator", allocator);
         const underlying = self.getUnderlyingAllocatorPtr();
-        const result = try underlying.resizeFn(underlying, buf, buf_align, new_len, len_align);
+        const result = try underlying.resizeFn(underlying, buf, buf_align, new_len, len_align, ret_addr);
         if (len_align == 0) {
             assert(result == new_len);
         } else {
@@ -111,7 +118,7 @@ var failAllocator = Allocator{
     .allocFn = failAllocatorAlloc,
     .resizeFn = Allocator.noResize,
 };
-fn failAllocatorAlloc(self: *Allocator, n: usize, alignment: u29, len_align: u29) Allocator.Error![]u8 {
+fn failAllocatorAlloc(self: *Allocator, n: usize, alignment: u29, len_align: u29, ra: usize) Allocator.Error![]u8 {
     return error.OutOfMemory;
 }
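The doc comments added in the next file define `0` as the "no return address provided" sentinel. Call sites that have nothing useful to report can therefore pass `0` when invoking the vtable directly. An illustrative call — the allocator variable and sizes are arbitrary:

```zig
// Equivalent to an untraced allocation: the implementation sees ret_addr == 0.
const bytes = try some_allocator.allocFn(some_allocator, 64, @alignOf(u64), 0, 0);
defer _ = some_allocator.shrinkBytes(bytes, @alignOf(u64), 0, 0, 0);
```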
diff --git a/lib/std/mem/Allocator.zig b/lib/std/mem/Allocator.zig
index f7a036d5ee..8bdab81bc6 100644
--- a/lib/std/mem/Allocator.zig
+++ b/lib/std/mem/Allocator.zig
@@ -14,7 +14,10 @@ pub const Error = error{OutOfMemory};
 /// otherwise, the length must be aligned to `len_align`.
 ///
 /// `len` must be greater than or equal to `len_align` and must be aligned by `len_align`.
-allocFn: fn (self: *Allocator, len: usize, ptr_align: u29, len_align: u29) Error![]u8,
+///
+/// `ret_addr` is optionally provided as the first return address of the allocation call stack.
+/// If the value is `0` it means no return address has been provided.
+allocFn: fn (self: *Allocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8,
 
 /// Attempt to expand or shrink memory in place. `buf.len` must equal the most recent
 /// length returned by `allocFn` or `resizeFn`. `buf_align` must equal the same value
@@ -33,22 +36,25 @@ allocFn: fn (self: *Allocator, len: usize, ptr_align: u29, len_align: u29) Error
 /// accepting more bytes of memory from the allocator than requested.
 ///
 /// `new_len` must be greater than or equal to `len_align` and must be aligned by `len_align`.
-resizeFn: fn (self: *Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29) Error!usize,
+///
+/// `ret_addr` is optionally provided as the first return address of the allocation call stack.
+/// If the value is `0` it means no return address has been provided.
+resizeFn: fn (self: *Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Error!usize,
 
 /// Set to resizeFn if in-place resize is not supported.
-pub fn noResize(self: *Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29) Error!usize {
+pub fn noResize(
+    self: *Allocator,
+    buf: []u8,
+    buf_align: u29,
+    new_len: usize,
+    len_align: u29,
+    ret_addr: usize,
+) Error!usize {
     if (new_len > buf.len)
         return error.OutOfMemory;
     return new_len;
 }
 
-/// Call `resizeFn`, but caller guarantees that `new_len` <= `buf.len` meaning
-/// error.OutOfMemory should be impossible.
-pub fn shrinkBytes(self: *Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29) usize {
-    assert(new_len <= buf.len);
-    return self.resizeFn(self, buf, buf_align, new_len, len_align) catch unreachable;
-}
-
 /// Realloc is used to modify the size or alignment of an existing allocation,
 /// as well as to provide the allocator with an opportunity to move an allocation
 /// to a better location.
@@ -93,9 +99,10 @@ fn reallocBytes(
     /// non-zero means the length of the returned slice must be aligned by `len_align`
     /// `new_len` must be aligned by `len_align`
     len_align: u29,
+    return_address: usize,
 ) Error![]u8 {
     if (old_mem.len == 0) {
-        const new_mem = try self.allocFn(self, new_byte_count, new_alignment, len_align);
+        const new_mem = try self.allocFn(self, new_byte_count, new_alignment, len_align, return_address);
         // TODO: https://github.com/ziglang/zig/issues/4298
         @memset(new_mem.ptr, undefined, new_byte_count);
         return new_mem;
@@ -103,10 +110,10 @@ fn reallocBytes(
 
     if (mem.isAligned(@ptrToInt(old_mem.ptr), new_alignment)) {
         if (new_byte_count <= old_mem.len) {
-            const shrunk_len = self.shrinkBytes(old_mem, old_alignment, new_byte_count, len_align);
+            const shrunk_len = self.shrinkBytes(old_mem, old_alignment, new_byte_count, len_align, return_address);
             return old_mem.ptr[0..shrunk_len];
         }
-        if (self.resizeFn(self, old_mem, old_alignment, new_byte_count, len_align)) |resized_len| {
+        if (self.resizeFn(self, old_mem, old_alignment, new_byte_count, len_align, return_address)) |resized_len| {
             assert(resized_len >= new_byte_count);
             // TODO: https://github.com/ziglang/zig/issues/4298
             @memset(old_mem.ptr + new_byte_count, undefined, resized_len - new_byte_count);
@@ -116,7 +123,7 @@ fn reallocBytes(
     if (new_byte_count <= old_mem.len and new_alignment <= old_alignment) {
         return error.OutOfMemory;
     }
-    return self.moveBytes(old_mem, old_alignment, new_byte_count, new_alignment, len_align);
+    return self.moveBytes(old_mem, old_alignment, new_byte_count, new_alignment, len_align, return_address);
 }
 
 /// Move the given memory to a new location in the given allocator to accomodate a new
@@ -128,10 +135,11 @@ fn moveBytes(
     self: *Allocator,
     old_mem: []u8,
     old_align: u29,
     new_len: usize,
     new_alignment: u29,
     len_align: u29,
+    return_address: usize,
 ) Error![]u8 {
     assert(old_mem.len > 0);
     assert(new_len > 0);
-    const new_mem = try self.allocFn(self, new_len, new_alignment, len_align);
+    const new_mem = try self.allocFn(self, new_len, new_alignment, len_align, return_address);
     @memcpy(new_mem.ptr, old_mem.ptr, math.min(new_len, old_mem.len));
     // TODO DISABLED TO AVOID BUGS IN TRANSLATE C
     // TODO see also https://github.com/ziglang/zig/issues/4298
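In the hunk that follows, `moveBytes` releases the old block with `shrinkBytes(old_mem, old_align, 0, 0, ...)`: in this interface a free is just a shrink to length zero, which `resizeFn` is never allowed to fail for (`new_len <= buf.len`). Usage sketch — `buf` and `buf_alignment` are hypothetical names:

```zig
// Freeing by hand through the low-level API; since new_len <= buf.len,
// error.OutOfMemory is impossible and shrinkBytes catches unreachable.
_ = allocator.shrinkBytes(buf, buf_alignment, 0, 0, @returnAddress());
```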
@@ -139,7 +147,7 @@ fn moveBytes(
     // generated C code will be a sequence of 0xaa (the undefined value), meaning
     // it is printing data that has been freed
     //@memset(old_mem.ptr, undefined, old_mem.len);
-    _ = self.shrinkBytes(old_mem, old_align, 0, 0);
+    _ = self.shrinkBytes(old_mem, old_align, 0, 0, return_address);
     return new_mem;
 }
 
@@ -147,7 +155,7 @@ fn moveBytes(
 /// Call `destroy` with the result to free the memory.
 pub fn create(self: *Allocator, comptime T: type) Error!*T {
     if (@sizeOf(T) == 0) return &(T{});
-    const slice = try self.alloc(T, 1);
+    const slice = try self.allocAdvancedWithRetAddr(T, null, 1, .exact, @returnAddress());
     return &slice[0];
 }
 
@@ -158,7 +166,7 @@ pub fn destroy(self: *Allocator, ptr: anytype) void {
     if (@sizeOf(T) == 0) return;
     const non_const_ptr = @intToPtr([*]u8, @ptrToInt(ptr));
     const ptr_align = @typeInfo(@TypeOf(ptr)).Pointer.alignment;
-    _ = self.shrinkBytes(non_const_ptr[0..@sizeOf(T)], ptr_align, 0, 0);
+    _ = self.shrinkBytes(non_const_ptr[0..@sizeOf(T)], ptr_align, 0, 0, @returnAddress());
 }
 
 /// Allocates an array of `n` items of type `T` and sets all the
@@ -170,7 +178,7 @@ pub fn destroy(self: *Allocator, ptr: anytype) void {
 ///
 /// For allocating a single item, see `create`.
 pub fn alloc(self: *Allocator, comptime T: type, n: usize) Error![]T {
-    return self.alignedAlloc(T, null, n);
+    return self.allocAdvancedWithRetAddr(T, null, n, .exact, @returnAddress());
 }
 
 pub fn allocWithOptions(
@@ -180,13 +188,25 @@ pub fn allocWithOptions(
     comptime Elem: type,
     n: usize,
     /// null means naturally aligned
     comptime optional_alignment: ?u29,
     comptime optional_sentinel: ?Elem,
+) Error!AllocWithOptionsPayload(Elem, optional_alignment, optional_sentinel) {
+    return self.allocWithOptionsRetAddr(Elem, n, optional_alignment, optional_sentinel, @returnAddress());
+}
+
+pub fn allocWithOptionsRetAddr(
+    self: *Allocator,
+    comptime Elem: type,
+    n: usize,
+    /// null means naturally aligned
+    comptime optional_alignment: ?u29,
+    comptime optional_sentinel: ?Elem,
+    return_address: usize,
 ) Error!AllocWithOptionsPayload(Elem, optional_alignment, optional_sentinel) {
     if (optional_sentinel) |sentinel| {
-        const ptr = try self.alignedAlloc(Elem, optional_alignment, n + 1);
+        const ptr = try self.allocAdvancedWithRetAddr(Elem, optional_alignment, n + 1, .exact, return_address);
         ptr[n] = sentinel;
         return ptr[0..n :sentinel];
     } else {
-        return self.alignedAlloc(Elem, optional_alignment, n);
+        return self.allocAdvancedWithRetAddr(Elem, optional_alignment, n, .exact, return_address);
     }
 }
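For reference, the split-out `allocWithOptions` keeps its public behavior; only the return-address plumbing changed underneath. A usage sketch with a sentinel — the buffer size and sentinel value are illustrative:

```zig
// Allocates 80 items plus a 0 terminator and returns a [:0]u8 slice.
const line = try allocator.allocWithOptions(u8, 80, null, 0);
defer allocator.free(line);
```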
@@ -208,8 +228,13 @@ fn AllocWithOptionsPayload(comptime Elem: type, comptime alignment: ?u29, compti
 /// For allocating a single item, see `create`.
 ///
 /// Deprecated; use `allocWithOptions`.
-pub fn allocSentinel(self: *Allocator, comptime Elem: type, n: usize, comptime sentinel: Elem) Error![:sentinel]Elem {
-    return self.allocWithOptions(Elem, n, null, sentinel);
+pub fn allocSentinel(
+    self: *Allocator,
+    comptime Elem: type,
+    n: usize,
+    comptime sentinel: Elem,
+) Error![:sentinel]Elem {
+    return self.allocWithOptionsRetAddr(Elem, n, null, sentinel, @returnAddress());
 }
 
 /// Deprecated: use `allocAdvanced`
@@ -220,10 +245,9 @@ pub fn alignedAlloc(
     comptime alignment: ?u29,
     n: usize,
 ) Error![]align(alignment orelse @alignOf(T)) T {
-    return self.allocAdvanced(T, alignment, n, .exact);
+    return self.allocAdvancedWithRetAddr(T, alignment, n, .exact, @returnAddress());
 }
 
-const Exact = enum { exact, at_least };
 pub fn allocAdvanced(
     self: *Allocator,
     comptime T: type,
@@ -231,9 +255,23 @@ pub fn allocAdvanced(
     comptime alignment: ?u29,
     n: usize,
     exact: Exact,
+) Error![]align(alignment orelse @alignOf(T)) T {
+    return self.allocAdvancedWithRetAddr(T, alignment, n, exact, @returnAddress());
+}
+
+pub const Exact = enum { exact, at_least };
+
+pub fn allocAdvancedWithRetAddr(
+    self: *Allocator,
+    comptime T: type,
+    /// null means naturally aligned
+    comptime alignment: ?u29,
+    n: usize,
+    exact: Exact,
+    return_address: usize,
 ) Error![]align(alignment orelse @alignOf(T)) T {
     const a = if (alignment) |a| blk: {
-        if (a == @alignOf(T)) return allocAdvanced(self, T, null, n, exact);
+        if (a == @alignOf(T)) return allocAdvancedWithRetAddr(self, T, null, n, exact, return_address);
         break :blk a;
     } else @alignOf(T);
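`Exact` becomes `pub` here because the new `*WithRetAddr` entry points expose it in their signatures. `.at_least` lets an implementation round the returned length up (via a nonzero `len_align`), e.g. a page allocator handing back whole pages. Usage sketch:

```zig
// May return more than 100 bytes; with .exact the slice length
// would be exactly 100.
const buf = try allocator.allocAdvanced(u8, null, 100, .at_least);
std.debug.assert(buf.len >= 100);
defer allocator.free(buf);
```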
@@ -245,8 +283,12 @@ pub fn allocAdvanced(
     // TODO The `if (alignment == null)` blocks are workarounds for zig not being able to
     // access certain type information about T without creating a circular dependency in async
     // functions that heap-allocate their own frame with @Frame(func).
-    const sizeOfT = if (alignment == null) @intCast(u29, @divExact(byte_count, n)) else @sizeOf(T);
-    const byte_slice = try self.allocFn(self, byte_count, a, if (exact == .exact) @as(u29, 0) else sizeOfT);
+    const size_of_T = if (alignment == null) @intCast(u29, @divExact(byte_count, n)) else @sizeOf(T);
+    const len_align: u29 = switch (exact) {
+        .exact => 0,
+        .at_least => size_of_T,
+    };
+    const byte_slice = try self.allocFn(self, byte_count, a, len_align, return_address);
     switch (exact) {
         .exact => assert(byte_slice.len == byte_count),
         .at_least => assert(byte_slice.len >= byte_count),
@@ -271,7 +313,7 @@ pub fn resize(self: *Allocator, old_mem: anytype, new_n: usize) Error!@TypeOf(ol
     }
     const old_byte_slice = mem.sliceAsBytes(old_mem);
     const new_byte_count = math.mul(usize, @sizeOf(T), new_n) catch return Error.OutOfMemory;
-    const rc = try self.resizeFn(self, old_byte_slice, Slice.alignment, new_byte_count, 0);
+    const rc = try self.resizeFn(self, old_byte_slice, Slice.alignment, new_byte_count, 0, @returnAddress());
     assert(rc == new_byte_count);
     const new_byte_slice = old_mem.ptr[0..new_byte_count];
     return mem.bytesAsSlice(T, new_byte_slice);
@@ -292,7 +334,7 @@ pub fn realloc(self: *Allocator, old_mem: anytype, new_n: usize) t: {
     break :t Error![]align(Slice.alignment) Slice.child;
 } {
     const old_alignment = @typeInfo(@TypeOf(old_mem)).Pointer.alignment;
-    return self.reallocAdvanced(old_mem, old_alignment, new_n, .exact);
+    return self.reallocAdvancedWithRetAddr(old_mem, old_alignment, new_n, .exact, @returnAddress());
 }
 
 pub fn reallocAtLeast(self: *Allocator, old_mem: anytype, new_n: usize) t: {
@@ -300,17 +342,7 @@ pub fn reallocAtLeast(self: *Allocator, old_mem: anytype, new_n: usize) t: {
     break :t Error![]align(Slice.alignment) Slice.child;
 } {
     const old_alignment = @typeInfo(@TypeOf(old_mem)).Pointer.alignment;
-    return self.reallocAdvanced(old_mem, old_alignment, new_n, .at_least);
-}
-
-// Deprecated: use `reallocAdvanced`
-pub fn alignedRealloc(
-    self: *Allocator,
-    old_mem: anytype,
-    comptime new_alignment: u29,
-    new_n: usize,
-) Error![]align(new_alignment) @typeInfo(@TypeOf(old_mem)).Pointer.child {
-    return self.reallocAdvanced(old_mem, new_alignment, new_n, .exact);
+    return self.reallocAdvancedWithRetAddr(old_mem, old_alignment, new_n, .at_least, @returnAddress());
 }
 
 /// This is the same as `realloc`, except caller may additionally request
@@ -322,6 +354,17 @@ pub fn reallocAdvanced(
     self: *Allocator,
     old_mem: anytype,
     comptime new_alignment: u29,
     new_n: usize,
     exact: Exact,
+) Error![]align(new_alignment) @typeInfo(@TypeOf(old_mem)).Pointer.child {
+    return self.reallocAdvancedWithRetAddr(old_mem, new_alignment, new_n, exact, @returnAddress());
+}
+
+pub fn reallocAdvancedWithRetAddr(
+    self: *Allocator,
+    old_mem: anytype,
+    comptime new_alignment: u29,
+    new_n: usize,
+    exact: Exact,
+    return_address: usize,
 ) Error![]align(new_alignment) @typeInfo(@TypeOf(old_mem)).Pointer.child {
     const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
     const T = Slice.child;
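The pattern above is used throughout this file: each public function is now a one-liner that captures `@returnAddress()` and forwards to a `*WithRetAddr` twin. Capturing exactly once at the user-facing boundary matters because `@returnAddress()` is frame-relative; any deeper and the recorded address would sit inside `Allocator` itself. A usage sketch of the `.at_least` path that benefits from `len_align`:

```zig
// reallocAtLeast may hand back more capacity than requested
// (len_align = @sizeOf(T)); the caller keeps the extra length.
var items = try allocator.reallocAtLeast(old_items, 1000);
std.debug.assert(items.len >= 1000);
```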
@@ -336,7 +379,11 @@ pub fn reallocAdvanced(
     const old_byte_slice = mem.sliceAsBytes(old_mem);
     const byte_count = math.mul(usize, @sizeOf(T), new_n) catch return Error.OutOfMemory;
     // Note: can't set shrunk memory to undefined as memory shouldn't be modified on realloc failure
-    const new_byte_slice = try self.reallocBytes(old_byte_slice, Slice.alignment, byte_count, new_alignment, if (exact == .exact) @as(u29, 0) else @sizeOf(T));
+    const len_align: u29 = switch (exact) {
+        .exact => 0,
+        .at_least => @sizeOf(T),
+    };
+    const new_byte_slice = try self.reallocBytes(old_byte_slice, Slice.alignment, byte_count, new_alignment, len_align, return_address);
     return mem.bytesAsSlice(T, @alignCast(new_alignment, new_byte_slice));
 }
 
@@ -350,7 +397,7 @@ pub fn shrink(self: *Allocator, old_mem: anytype, new_n: usize) t: {
     break :t []align(Slice.alignment) Slice.child;
 } {
     const old_alignment = @typeInfo(@TypeOf(old_mem)).Pointer.alignment;
-    return self.alignedShrink(old_mem, old_alignment, new_n);
+    return self.alignedShrinkWithRetAddr(old_mem, old_alignment, new_n, @returnAddress());
 }
 
 /// This is the same as `shrink`, except caller may additionally request
@@ -361,6 +408,19 @@ pub fn alignedShrink(
     old_mem: anytype,
     comptime new_alignment: u29,
     new_n: usize,
+) []align(new_alignment) @typeInfo(@TypeOf(old_mem)).Pointer.child {
+    return self.alignedShrinkWithRetAddr(old_mem, new_alignment, new_n, @returnAddress());
+}
+
+/// This is the same as `alignedShrink`, except caller may additionally pass
+/// the return address of the first stack frame, which may be relevant for
+/// allocators which collect stack traces.
+pub fn alignedShrinkWithRetAddr(
+    self: *Allocator,
+    old_mem: anytype,
+    comptime new_alignment: u29,
+    new_n: usize,
+    return_address: usize,
 ) []align(new_alignment) @typeInfo(@TypeOf(old_mem)).Pointer.child {
     const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
     const T = Slice.child;
@@ -377,7 +437,7 @@ pub fn alignedShrink(
     const old_byte_slice = mem.sliceAsBytes(old_mem);
     // TODO: https://github.com/ziglang/zig/issues/4298
     @memset(old_byte_slice.ptr + byte_count, undefined, old_byte_slice.len - byte_count);
-    _ = self.shrinkBytes(old_byte_slice, Slice.alignment, byte_count, 0);
+    _ = self.shrinkBytes(old_byte_slice, Slice.alignment, byte_count, 0, return_address);
     return old_mem[0..new_n];
 }
 
@@ -391,7 +451,7 @@ pub fn free(self: *Allocator, memory: anytype) void {
     const non_const_ptr = @intToPtr([*]u8, @ptrToInt(bytes.ptr));
     // TODO: https://github.com/ziglang/zig/issues/4298
     @memset(non_const_ptr, undefined, bytes_len);
-    _ = self.shrinkBytes(non_const_ptr[0..bytes_len], Slice.alignment, 0, 0);
+    _ = self.shrinkBytes(non_const_ptr[0..bytes_len], Slice.alignment, 0, 0, @returnAddress());
 }
 
 /// Copies `m` to newly allocated memory. Caller owns the memory.
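With `free` and `destroy` recording their caller too, a safety allocator can show where a block was released when it later detects a double free (the GPA hunks earlier capture a `.free` trace for exactly this purpose). Illustrative sequence; the exact diagnostics belong to `GeneralPurposeAllocator`:

```zig
const p = try allocator.create(u32);
allocator.destroy(p);
// A second destroy of `p` would let a tracing allocator report both this
// frame and the first destroy's frame, since each call passed @returnAddress().
```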
@@ -408,3 +468,19 @@ pub fn dupeZ(allocator: *Allocator, comptime T: type, m: []const T) ![:0]T {
     new_buf[m.len] = 0;
     return new_buf[0..m.len :0];
 }
+
+/// Call `resizeFn`, but caller guarantees that `new_len` <= `buf.len` meaning
+/// error.OutOfMemory should be impossible.
+/// This function allows a runtime `buf_align` value. Callers should generally prefer
+/// to call `shrink` directly.
+pub fn shrinkBytes(
+    self: *Allocator,
+    buf: []u8,
+    buf_align: u29,
+    new_len: usize,
+    len_align: u29,
+    return_address: usize,
+) usize {
+    assert(new_len <= buf.len);
+    return self.resizeFn(self, buf, buf_align, new_len, len_align, return_address) catch unreachable;
+}
diff --git a/lib/std/testing/failing_allocator.zig b/lib/std/testing/failing_allocator.zig
index 7febaaac64..d8b243d0fa 100644
--- a/lib/std/testing/failing_allocator.zig
+++ b/lib/std/testing/failing_allocator.zig
@@ -45,12 +45,18 @@ pub const FailingAllocator = struct {
         };
     }
 
-    fn alloc(allocator: *std.mem.Allocator, len: usize, ptr_align: u29, len_align: u29) error{OutOfMemory}![]u8 {
+    fn alloc(
+        allocator: *std.mem.Allocator,
+        len: usize,
+        ptr_align: u29,
+        len_align: u29,
+        return_address: usize,
+    ) error{OutOfMemory}![]u8 {
         const self = @fieldParentPtr(FailingAllocator, "allocator", allocator);
         if (self.index == self.fail_index) {
             return error.OutOfMemory;
         }
-        const result = try self.internal_allocator.allocFn(self.internal_allocator, len, ptr_align, len_align);
+        const result = try self.internal_allocator.allocFn(self.internal_allocator, len, ptr_align, len_align, return_address);
         self.allocated_bytes += result.len;
         self.allocations += 1;
         self.index += 1;
@@ -63,9 +69,10 @@ pub const FailingAllocator = struct {
         old_align: u29,
         new_len: usize,
         len_align: u29,
+        ra: usize,
     ) error{OutOfMemory}!usize {
         const self = @fieldParentPtr(FailingAllocator, "allocator", allocator);
-        const r = self.internal_allocator.resizeFn(self.internal_allocator, old_mem, old_align, new_len, len_align) catch |e| {
+        const r = self.internal_allocator.resizeFn(self.internal_allocator, old_mem, old_align, new_len, len_align, ra) catch |e| {
             std.debug.assert(new_len > old_mem.len);
             return e;
         };
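Putting the new interface together, here is a sketch of a complete custom wrapper under the changed vtable. Everything about `TraceAllocator` is hypothetical — it only illustrates the shape an out-of-tree implementation needs after this patch: accept `ret_addr`, use it or forward it, and treat `0` as "no address provided".

```zig
const std = @import("std");
const Allocator = std.mem.Allocator;

/// Hypothetical example: records the most recent caller's return address
/// while delegating the actual memory management to a parent allocator.
const TraceAllocator = struct {
    allocator: Allocator = .{ .allocFn = alloc, .resizeFn = resize },
    parent: *Allocator,
    last_ret_addr: usize = 0,

    fn alloc(allocator: *Allocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Allocator.Error![]u8 {
        const self = @fieldParentPtr(TraceAllocator, "allocator", allocator);
        self.last_ret_addr = ret_addr; // 0 means no address was provided
        return self.parent.allocFn(self.parent, len, ptr_align, len_align, ret_addr);
    }

    fn resize(allocator: *Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Allocator.Error!usize {
        const self = @fieldParentPtr(TraceAllocator, "allocator", allocator);
        self.last_ret_addr = ret_addr;
        return self.parent.resizeFn(self.parent, buf, buf_align, new_len, len_align, ret_addr);
    }
};
```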