std.heap: remove HeapAllocator

Windows-only, depends on kernel32 in violation of the Zig standard library's
policy, and redundant with existing cross-platform APIs that provide the same
functionality.
Andrew Kelley 2025-02-05 18:03:14 -08:00
parent f82ec3f02a
commit 5e9b8c38d3
3 changed files with 0 additions and 158 deletions
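
The cross-platform replacements the commit message refers to already live in `std.heap`. A minimal migration sketch, not part of this commit, assuming `std.heap.GeneralPurposeAllocator` (which newer Zig versions expose as `std.heap.DebugAllocator`):

```zig
const std = @import("std");

pub fn main() !void {
    // Before (Windows-only): var heap = std.heap.HeapAllocator.init(); defer heap.deinit();
    // After: any cross-platform allocator; here, a general-purpose allocator.
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    const allocator = gpa.allocator();

    const buf = try allocator.alloc(u8, 1024);
    defer allocator.free(buf);
    @memset(buf, 0xaa);
}
```

Depending on the allocation pattern, `std.heap.page_allocator`, `std.heap.ArenaAllocator`, or (when linking libc) `std.heap.c_allocator` are other drop-in options.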

View File

@@ -363,127 +363,6 @@ pub const wasm_allocator: Allocator = .{
    .vtable = &WasmAllocator.vtable,
};

pub const HeapAllocator = switch (builtin.os.tag) {
    .windows => struct {
        heap_handle: ?HeapHandle,

        const HeapHandle = windows.HANDLE;

        pub fn init() HeapAllocator {
            return HeapAllocator{
                .heap_handle = null,
            };
        }

        pub fn allocator(self: *HeapAllocator) Allocator {
            return .{
                .ptr = self,
                .vtable = &.{
                    .alloc = alloc,
                    .resize = resize,
                    .remap = remap,
                    .free = free,
                },
            };
        }

        pub fn deinit(self: *HeapAllocator) void {
            if (self.heap_handle) |heap_handle| {
                windows.HeapDestroy(heap_handle);
            }
        }

        fn getRecordPtr(buf: []u8) *align(1) usize {
            return @as(*align(1) usize, @ptrFromInt(@intFromPtr(buf.ptr) + buf.len));
        }

        fn alloc(
            ctx: *anyopaque,
            n: usize,
            alignment: mem.Alignment,
            return_address: usize,
        ) ?[*]u8 {
            _ = return_address;
            const self: *HeapAllocator = @ptrCast(@alignCast(ctx));
            const ptr_align = alignment.toByteUnits();
            const amt = n + ptr_align - 1 + @sizeOf(usize);
            const optional_heap_handle = @atomicLoad(?HeapHandle, &self.heap_handle, .seq_cst);
            const heap_handle = optional_heap_handle orelse blk: {
                const options = if (builtin.single_threaded) windows.HEAP_NO_SERIALIZE else 0;
                const hh = windows.kernel32.HeapCreate(options, amt, 0) orelse return null;
                const other_hh = @cmpxchgStrong(?HeapHandle, &self.heap_handle, null, hh, .seq_cst, .seq_cst) orelse break :blk hh;
                windows.HeapDestroy(hh);
                break :blk other_hh.?; // can't be null because of the cmpxchg
            };
            const ptr = windows.kernel32.HeapAlloc(heap_handle, 0, amt) orelse return null;
            const root_addr = @intFromPtr(ptr);
            const aligned_addr = mem.alignForward(usize, root_addr, ptr_align);
            const buf = @as([*]u8, @ptrFromInt(aligned_addr))[0..n];
            getRecordPtr(buf).* = root_addr;
            return buf.ptr;
        }

        fn resize(
            ctx: *anyopaque,
            buf: []u8,
            alignment: mem.Alignment,
            new_size: usize,
            return_address: usize,
        ) bool {
            _ = alignment;
            _ = return_address;
            const self: *HeapAllocator = @ptrCast(@alignCast(ctx));
            const root_addr = getRecordPtr(buf).*;
            const align_offset = @intFromPtr(buf.ptr) - root_addr;
            const amt = align_offset + new_size + @sizeOf(usize);
            const new_ptr = windows.kernel32.HeapReAlloc(
                self.heap_handle.?,
                windows.HEAP_REALLOC_IN_PLACE_ONLY,
                @as(*anyopaque, @ptrFromInt(root_addr)),
                amt,
            ) orelse return false;
            assert(new_ptr == @as(*anyopaque, @ptrFromInt(root_addr)));
            getRecordPtr(buf.ptr[0..new_size]).* = root_addr;
            return true;
        }

        fn remap(
            ctx: *anyopaque,
            buf: []u8,
            alignment: mem.Alignment,
            new_size: usize,
            return_address: usize,
        ) ?[*]u8 {
            _ = alignment;
            _ = return_address;
            const self: *HeapAllocator = @ptrCast(@alignCast(ctx));
            const root_addr = getRecordPtr(buf).*;
            const align_offset = @intFromPtr(buf.ptr) - root_addr;
            const amt = align_offset + new_size + @sizeOf(usize);
            const new_ptr = windows.kernel32.HeapReAlloc(self.heap_handle.?, 0, @ptrFromInt(root_addr), amt) orelse return null;
            assert(new_ptr == @as(*anyopaque, @ptrFromInt(root_addr)));
            getRecordPtr(buf.ptr[0..new_size]).* = root_addr;
            return @ptrCast(new_ptr);
        }

        fn free(
            ctx: *anyopaque,
            buf: []u8,
            alignment: mem.Alignment,
            return_address: usize,
        ) void {
            _ = alignment;
            _ = return_address;
            const self: *HeapAllocator = @ptrCast(@alignCast(ctx));
            windows.HeapFree(self.heap_handle.?, 0, @as(*anyopaque, @ptrFromInt(getRecordPtr(buf).*)));
        }
    },
    else => @compileError("Unsupported OS"),
};

/// Returns a `StackFallbackAllocator` that allocates from a
/// `FixedBufferAllocator` backed by an array of size `size`, falling back to
/// `fallback_allocator` if that fails.
@@ -628,22 +507,6 @@ test PageAllocator {
    }
}

test HeapAllocator {
    if (builtin.os.tag == .windows) {
        // https://github.com/ziglang/zig/issues/13702
        if (builtin.cpu.arch == .aarch64) return error.SkipZigTest;

        var heap_allocator = HeapAllocator.init();
        defer heap_allocator.deinit();
        const allocator = heap_allocator.allocator();

        try testAllocator(allocator);
        try testAllocatorAligned(allocator);
        try testAllocatorLargeAlignment(allocator);
        try testAllocatorAlignedShrink(allocator);
    }
}

test ArenaAllocator {
    var arena_allocator = ArenaAllocator.init(page_allocator);
    defer arena_allocator.deinit();
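
The `stackFallback` doc comment retained as context above describes a two-tier allocator: a fixed stack buffer is tried first, with a fallback allocator used once it overflows. A usage sketch, assuming the current `std.heap.stackFallback` signature and its `get()` method:

```zig
const std = @import("std");

test "stackFallback sketch" {
    // 256 bytes on the stack; anything beyond that comes from the fallback.
    var sfa = std.heap.stackFallback(256, std.heap.page_allocator);
    const allocator = sfa.get();

    const small = try allocator.alloc(u8, 64); // served by the stack buffer
    defer allocator.free(small);

    const big = try allocator.alloc(u8, 4096); // too large, falls back to page_allocator
    defer allocator.free(big);
}
```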

View File

@@ -2016,18 +2016,6 @@ pub fn InitOnceExecuteOnce(InitOnce: *INIT_ONCE, InitFn: INIT_ONCE_FN, Parameter
    assert(kernel32.InitOnceExecuteOnce(InitOnce, InitFn, Parameter, Context) != 0);
}

pub fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: *anyopaque) void {
    assert(kernel32.HeapFree(hHeap, dwFlags, lpMem) != 0);
}

pub fn HeapDestroy(hHeap: HANDLE) void {
    assert(kernel32.HeapDestroy(hHeap) != 0);
}

pub fn LocalFree(hMem: HLOCAL) void {
    assert(kernel32.LocalFree(hMem) == null);
}

pub const SetFileTimeError = error{Unexpected};

pub fn SetFileTime(

View File

@@ -528,11 +528,6 @@ pub extern "kernel32" fn HeapCreate(
    dwMaximumSize: SIZE_T,
) callconv(.winapi) ?HANDLE;

// TODO: Wrapper around RtlDestroyHeap (BOOLEAN -> BOOL).
pub extern "kernel32" fn HeapDestroy(
    hHeap: HANDLE,
) callconv(.winapi) BOOL;

// TODO: Forwarder to RtlReAllocateHeap.
pub extern "kernel32" fn HeapReAlloc(
    hHeap: HANDLE,
@@ -585,10 +580,6 @@ pub extern "kernel32" fn VirtualQuery(
    dwLength: SIZE_T,
) callconv(.winapi) SIZE_T;

pub extern "kernel32" fn LocalFree(
    hMem: HLOCAL,
) callconv(.winapi) ?HLOCAL;

// TODO: Getter for peb.ProcessHeap
pub extern "kernel32" fn GetProcessHeap() callconv(.winapi) ?HANDLE;