From eff926b4545fea9f87559d2e1d7372d6cbeba780 Mon Sep 17 00:00:00 2001
From: Benjamin Feng
Date: Wed, 27 Nov 2019 18:46:42 -0600
Subject: [PATCH 01/20] Brain dump new wasm allocator

---
 lib/std/heap.zig | 106 +++++++++++++++++++++--------------------------
 1 file changed, 47 insertions(+), 59 deletions(-)

diff --git a/lib/std/heap.zig b/lib/std/heap.zig
index 997f1fa06f..8740c9171f 100644
--- a/lib/std/heap.zig
+++ b/lib/std/heap.zig
@@ -246,84 +246,72 @@ const PageAllocator = struct {
     }
 };
 
+extern const __heap_base: [*]u8;
 // TODO Exposed LLVM intrinsics is a bug
 // See: https://github.com/ziglang/zig/issues/2291
 extern fn @"llvm.wasm.memory.size.i32"(u32) u32;
 extern fn @"llvm.wasm.memory.grow.i32"(u32, u32) i32;
 
-/// TODO: make this re-use freed pages, and cooperate with other callers of these global intrinsics
-/// by better utilizing the return value of grow()
-const WasmPageAllocator = struct {
-    var start_ptr: [*]u8 = undefined;
-    var num_pages: usize = 0;
-    var end_index: usize = 0;
+test "" {
+    _ = WasmPageAllocator.alloc;
+}
 
-    comptime {
-        if (builtin.arch != .wasm32) {
-            @compileError("WasmPageAllocator is only available for wasm32 arch");
+const WasmPageAllocator = struct {
+    const FreeBlock = struct {
+        offset: usize = 0,
+        data: []u8 = &[_]u8{},
+
+        fn alloc(self: FreeBlock, num_pages: usize) ?[]u8 {
+            return null;
         }
+    };
+    var base = FreeBlock{};
+    var additional = FreeBlock{};
+
+    fn nPages(memsize: usize) usize {
+        return std.mem.alignForward(memsize, std.mem.page_size) / std.mem.page_size;
     }
 
-    fn alloc(allocator: *Allocator, size: usize, alignment: u29) ![]u8 {
-        const addr = @ptrToInt(start_ptr) + end_index;
-        const adjusted_addr = mem.alignForward(addr, alignment);
-        const adjusted_index = end_index + (adjusted_addr - addr);
-        const new_end_index = adjusted_index + size;
+    fn alloc(allocator: *Allocator, n: usize, alignment: u29) error{OutOfMemory}![]u8 {
+        if (alignment > std.mem.page_size) {
+            return error.OutOfMemory;
+        }
 
-        if (new_end_index > num_pages * mem.page_size) {
-            const required_memory = new_end_index - (num_pages * mem.page_size);
-
-            var inner_num_pages: usize = required_memory / mem.page_size;
-            if (required_memory % mem.page_size != 0) {
-                inner_num_pages += 1;
-            }
-
-            const prev_page = @"llvm.wasm.memory.grow.i32"(0, @intCast(u32, inner_num_pages));
-            if (prev_page == -1) {
+        const n_pages = nPages(n);
+        return base.alloc(n_pages) orelse additional.alloc(n_pages) orelse {
+            const prev_page_count = @"llvm.wasm.memory.grow.i32"(0, @intCast(u32, n_pages));
+            if (prev_page_count < 0) {
                 return error.OutOfMemory;
             }
 
-            num_pages += inner_num_pages;
-        }
-
-        const result = start_ptr[adjusted_index..new_end_index];
-        end_index = new_end_index;
-
-        return result;
+            const start_ptr = @intToPtr([*]u8, @intCast(usize, prev_page_count) * std.mem.page_size);
+            return start_ptr[0..n];
+        };
     }
 
-    // Check if memory is the last "item" and is aligned correctly
-    fn is_last_item(memory: []u8, alignment: u29) bool {
-        return memory.ptr == start_ptr + end_index - memory.len and mem.alignForward(@ptrToInt(memory.ptr), alignment) == @ptrToInt(memory.ptr);
-    }
-
-    fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
-        // Initialize start_ptr at the first realloc
-        if (num_pages == 0) {
-            start_ptr = @intToPtr([*]u8, @intCast(usize, @"llvm.wasm.memory.size.i32"(0)) * mem.page_size);
-        }
-
-        if (is_last_item(old_mem, new_align)) {
-            const start_index = end_index - old_mem.len;
-            const new_end_index = start_index + new_size;
-
-            if (new_end_index > num_pages * mem.page_size) {
-                _ = try alloc(allocator, new_end_index - end_index, new_align);
-            }
-            const result = start_ptr[start_index..new_end_index];
-
-            end_index = new_end_index;
-            return result;
-        } else if (new_size <= old_mem.len and new_align <= old_align) {
-            return error.OutOfMemory;
+    pub fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) Allocator.Error![]u8 {
+        if (old_mem.len == 0) {
+            return alloc(allocator, new_size, new_align);
+        } else if (new_size < old_mem.len) {
+            return shrink(allocator, old_mem, old_align, new_size, new_align);
+        } else if (nPages(new_size) == nPages(old_mem.len)) {
+            return old_mem.ptr[0..new_size];
         } else {
-            const result = try alloc(allocator, new_size, new_align);
-            mem.copy(u8, result, old_mem);
-            return result;
+            const new_mem = try alloc(allocator, new_size, new_align);
+            std.mem.copy(u8, new_mem, old_mem);
+            _ = shrink(allocator, old_mem, old_align, 0, 0);
+            return new_mem[0..new_size];
         }
     }
 
-    fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
+    pub fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
+        var shrinkage = nPages(old_mem.len) - nPages(new_size);
+        if (shrinkage > 0) {
+            const success = base.recycle(old_mem[new_size..old_mem.len]);
+            if (!success) {
+                std.debug.assert(additional.recycle(old_mem[new_size..old_mem.len]));
+            }
+        }
         return old_mem[0..new_size];
     }
 };

From eb1628b03325787504d46bfaef3046fef5ed1d59 Mon Sep 17 00:00:00 2001
From: Benjamin Feng
Date: Wed, 27 Nov 2019 21:19:08 -0600
Subject: [PATCH 02/20] Initialize memory segments

---
 lib/std/heap.zig | 47 +++++++++++++++++++++++++++++++++----------
 1 file changed, 37 insertions(+), 10 deletions(-)

diff --git a/lib/std/heap.zig b/lib/std/heap.zig
index 8740c9171f..86cec05f86 100644
--- a/lib/std/heap.zig
+++ b/lib/std/heap.zig
@@ -253,7 +253,7 @@ extern fn @"llvm.wasm.memory.size.i32"(u32) u32;
 extern fn @"llvm.wasm.memory.grow.i32"(u32, u32) i32;
 
 test "" {
-    _ = WasmPageAllocator.alloc;
+    _ = WasmPageAllocator.realloc;
 }
 
 const WasmPageAllocator = struct {
@@ -261,12 +261,24 @@ const WasmPageAllocator = struct {
         offset: usize = 0,
         data: []u8 = &[_]u8{},
 
-        fn alloc(self: FreeBlock, num_pages: usize) ?[]u8 {
+        fn initData(self: *FreeBlock, data: []u8) void {
+            std.mem.set(u8, data, 0);
+            self.data = data;
+        }
+
+        fn totalPages(self: FreeBlock) usize {
+            return self.data.len * @typeInfo(usize).Int.bits;
+        }
+
+        fn alloc(self: *FreeBlock, num_pages: usize) ?[]u8 {
             return null;
         }
+
+        fn reclaim(self: *FreeBlock, start_page: usize, end_page: usize) void {}
     };
-    var base = FreeBlock{};
-    var additional = FreeBlock{};
+
+    var conventional = FreeBlock{};
+    var extended = FreeBlock{};
 
     fn nPages(memsize: usize) usize {
         return std.mem.alignForward(memsize, std.mem.page_size) / std.mem.page_size;
@@ -278,7 +290,7 @@ const WasmPageAllocator = struct {
         }
 
         const n_pages = nPages(n);
-        return base.alloc(n_pages) orelse additional.alloc(n_pages) orelse {
+        return conventional.alloc(n_pages) orelse extended.alloc(n_pages) orelse {
             const prev_page_count = @"llvm.wasm.memory.grow.i32"(0, @intCast(u32, n_pages));
             if (prev_page_count < 0) {
                 return error.OutOfMemory;
@@ -305,13 +317,28 @@ const WasmPageAllocator = struct {
     }
 
     pub fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
-        var shrinkage = nPages(old_mem.len) - nPages(new_size);
-        if (shrinkage > 0) {
-            const success = base.recycle(old_mem[new_size..old_mem.len]);
-            if (!success) {
-                std.debug.assert(additional.recycle(old_mem[new_size..old_mem.len]));
+        const free_start = nPages(@ptrToInt(old_mem.ptr) + new_size);
+        var free_end = nPages(@ptrToInt(old_mem.ptr) + old_mem.len);
+
+        if (free_end > free_start) {
+            if (conventional.data.len == 0) {
+                conventional.initData(__heap_base[0..@intCast(usize, @"llvm.wasm.memory.size.i32"(0) * std.mem.page_size)]);
+            }
+
+            if (free_start < conventional.totalPages()) {
+                conventional.reclaim(free_start, free_end);
+            } else {
+                if (extended.data.len == 0) {
+                    extended.offset = conventional.totalPages();
+
+                    // Steal the last page from the memory currently being reclaimed
+                    free_end -= 1;
+                    extended.initData(@intToPtr([*]u8, free_end)[0..std.mem.page_size]);
+                }
+                conventional.reclaim(free_start, free_end);
             }
         }
+
         return old_mem[0..new_size];
     }
 };

From ba38a6d122ff0f7307fe7319af8c25184010e080 Mon Sep 17 00:00:00 2001
From: Benjamin Feng
Date: Wed, 27 Nov 2019 22:02:54 -0600
Subject: [PATCH 03/20] Get stuff vaguely working

---
 lib/std/heap.zig | 39 +++++++++++++++++++++++++++++++++------
 1 file changed, 33 insertions(+), 6 deletions(-)

diff --git a/lib/std/heap.zig b/lib/std/heap.zig
index 86cec05f86..5e1fc92068 100644
--- a/lib/std/heap.zig
+++ b/lib/std/heap.zig
@@ -259,22 +259,49 @@ test "" {
 const WasmPageAllocator = struct {
     const FreeBlock = struct {
         offset: usize = 0,
-        data: []u8 = &[_]u8{},
+        packed_data: std.PackedIntSlice(u1) = std.PackedIntSlice(u1).init(&[_]u8{}, 0),
 
         fn initData(self: *FreeBlock, data: []u8) void {
+            // 0 == used, 1 == free
             std.mem.set(u8, data, 0);
-            self.data = data;
+            self.packed_data = std.PackedIntSlice(u1).init(data, data.len * 8);
         }
 
         fn totalPages(self: FreeBlock) usize {
-            return self.data.len * @typeInfo(usize).Int.bits;
+            return self.packed_data.int_count;
         }
 
+        // TODO: optimize this terribleness
         fn alloc(self: *FreeBlock, num_pages: usize) ?[]u8 {
+            var found_idx: usize = 0;
+            var found_size: usize = 0;
+
+            var i: usize = 0;
+            while (i < self.packed_data.int_count) : (i += 1) {
+                if (self.packed_data.get(i) == 0) {
+                    found_size = 0;
+                } else {
+                    if (found_size == 0) {
+                        found_idx = i;
+                    }
+                    found_size += 1;
+
+                    if (found_size >= num_pages) {
+                        const page_ptr = @intToPtr([*]u8, (found_idx + self.offset) * std.mem.page_size);
+                        return page_ptr[0 .. found_size * std.mem.page_size];
+                    }
+                }
+            }
             return null;
         }
 
-        fn reclaim(self: *FreeBlock, start_page: usize, end_page: usize) void {}
+        fn reclaim(self: *FreeBlock, start_index: usize, end_index: usize) void {
+            var i = start_index - self.offset;
+            while (i < end_index - self.offset) : (i += 1) {
+                std.debug.assert(self.packed_data.get(i) == 0);
+                self.packed_data.set(i, 1);
+            }
+        }
     };
 
     var conventional = FreeBlock{};
@@ -321,14 +348,14 @@ const WasmPageAllocator = struct {
         var free_end = nPages(@ptrToInt(old_mem.ptr) + old_mem.len);
 
         if (free_end > free_start) {
-            if (conventional.data.len == 0) {
+            if (conventional.totalPages() == 0) {
                 conventional.initData(__heap_base[0..@intCast(usize, @"llvm.wasm.memory.size.i32"(0) * std.mem.page_size)]);
             }
 
             if (free_start < conventional.totalPages()) {
                 conventional.reclaim(free_start, free_end);
             } else {
-                if (extended.data.len == 0) {
+                if (extended.totalPages() == 0) {
                     extended.offset = conventional.totalPages();
 
                     // Steal the last page from the memory currently being reclaimed

From f32555aa087f69e1d2f4c214b180d945cca9f37b Mon Sep 17 00:00:00 2001
From: Benjamin Feng
Date: Wed, 27 Nov 2019 22:14:16 -0600
Subject: [PATCH 04/20] Work around __heap_base for now

---
 lib/std/heap.zig | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/lib/std/heap.zig b/lib/std/heap.zig
index 5e1fc92068..8f451ea3cb 100644
--- a/lib/std/heap.zig
+++ b/lib/std/heap.zig
@@ -257,6 +257,9 @@ test "" {
 }
 
 const WasmPageAllocator = struct {
+    // TODO: figure out why __heap_base cannot be found
+    var heap_base_wannabe: [256]u8 = undefined;
+
     const FreeBlock = struct {
         offset: usize = 0,
         packed_data: std.PackedIntSlice(u1) = std.PackedIntSlice(u1).init(&[_]u8{}, 0),
@@ -349,7 +352,8 @@ const WasmPageAllocator = struct {
 
         if (free_end > free_start) {
             if (conventional.totalPages() == 0) {
-                conventional.initData(__heap_base[0..@intCast(usize, @"llvm.wasm.memory.size.i32"(0) * std.mem.page_size)]);
+                //conventional.initData(__heap_base[0..@intCast(usize, @"llvm.wasm.memory.size.i32"(0) * std.mem.page_size)]);
+                conventional.initData(heap_base_wannabe[0..]);
             }
 
             if (free_start < conventional.totalPages()) {

From 45e04412786db37ecaf31a5fe6c4c1f4186e6d13 Mon Sep 17 00:00:00 2001
From: Benjamin Feng
Date: Mon, 2 Dec 2019 12:26:14 -0600
Subject: [PATCH 05/20] Fix bugs

---
 lib/std/heap.zig | 44 +++++++++++++++++++++++---------------------
 1 file changed, 23 insertions(+), 21 deletions(-)

diff --git a/lib/std/heap.zig b/lib/std/heap.zig
index 8f451ea3cb..d3cd3d662e 100644
--- a/lib/std/heap.zig
+++ b/lib/std/heap.zig
@@ -252,10 +252,6 @@ extern const __heap_base: [*]u8;
 extern fn @"llvm.wasm.memory.size.i32"(u32) u32;
 extern fn @"llvm.wasm.memory.grow.i32"(u32, u32) i32;
 
-test "" {
-    _ = WasmPageAllocator.realloc;
-}
-
 const WasmPageAllocator = struct {
     // TODO: figure out why __heap_base cannot be found
     var heap_base_wannabe: [256]u8 = undefined;
@@ -275,7 +271,7 @@ const WasmPageAllocator = struct {
         }
 
         // TODO: optimize this terribleness
-        fn alloc(self: *FreeBlock, num_pages: usize) ?[]u8 {
+        fn useRecycled(self: *FreeBlock, num_pages: usize) ?[*]u8 {
             var found_idx: usize = 0;
             var found_size: usize = 0;
 
@@ -290,15 +286,18 @@ const WasmPageAllocator = struct {
                     found_size += 1;
 
                     if (found_size >= num_pages) {
-                        const page_ptr = @intToPtr([*]u8, (found_idx + self.offset) * std.mem.page_size);
-                        return page_ptr[0 .. found_size * std.mem.page_size];
+                        while (found_size > 0) {
+                            found_size -= 1;
+                            self.packed_data.set(found_idx + found_size, 0);
+                        }
+                        return @intToPtr([*]u8, (found_idx + self.offset) * std.mem.page_size);
                     }
                 }
             }
             return null;
         }
 
-        fn reclaim(self: *FreeBlock, start_index: usize, end_index: usize) void {
+        fn recycle(self: *FreeBlock, start_index: usize, end_index: usize) void {
             var i = start_index - self.offset;
             while (i < end_index - self.offset) : (i += 1) {
                 std.debug.assert(self.packed_data.get(i) == 0);
@@ -315,23 +314,24 @@ const WasmPageAllocator = struct {
     }
 
     fn nPages(memsize: usize) usize {
        return std.mem.alignForward(memsize, std.mem.page_size) / std.mem.page_size;
     }
 
     fn alloc(allocator: *Allocator, n: usize, alignment: u29) error{OutOfMemory}![]u8 {
-        if (alignment > std.mem.page_size) {
-            return error.OutOfMemory;
-        }
-
         const n_pages = nPages(n);
-        return conventional.alloc(n_pages) orelse extended.alloc(n_pages) orelse {
+        const page = conventional.useRecycled(n_pages) orelse extended.useRecycled(n_pages) orelse blk: {
             const prev_page_count = @"llvm.wasm.memory.grow.i32"(0, @intCast(u32, n_pages));
             if (prev_page_count < 0) {
                 return error.OutOfMemory;
             }
 
-            const start_ptr = @intToPtr([*]u8, @intCast(usize, prev_page_count) * std.mem.page_size);
-            return start_ptr[0..n];
+            break :blk @intToPtr([*]u8, @intCast(usize, prev_page_count) * std.mem.page_size);
         };
+
+        return page[0..n];
     }
 
     pub fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) Allocator.Error![]u8 {
+        if (new_align > std.mem.page_size) {
+            return error.OutOfMemory;
+        }
+
         if (old_mem.len == 0) {
             return alloc(allocator, new_size, new_align);
         } else if (new_size < old_mem.len) {
@@ -352,21 +352,22 @@ const WasmPageAllocator = struct {
 
         if (free_end > free_start) {
             if (conventional.totalPages() == 0) {
+                conventional.offset = 0;
                 //conventional.initData(__heap_base[0..@intCast(usize, @"llvm.wasm.memory.size.i32"(0) * std.mem.page_size)]);
                 conventional.initData(heap_base_wannabe[0..]);
             }
 
             if (free_start < conventional.totalPages()) {
-                conventional.reclaim(free_start, free_end);
+                conventional.recycle(free_start, free_end);
             } else {
                 if (extended.totalPages() == 0) {
-                    extended.offset = conventional.totalPages();
+                    extended.offset = conventional.offset + conventional.totalPages();
 
-                    // Steal the last page from the memory currently being reclaimed
+                    // Steal the last page from the memory currently being recycled
                     free_end -= 1;
-                    extended.initData(@intToPtr([*]u8, free_end)[0..std.mem.page_size]);
+                    extended.initData(@intToPtr([*]u8, free_end * std.mem.page_size)[0..std.mem.page_size]);
                 }
-                conventional.reclaim(free_start, free_end);
+                extended.recycle(free_start, free_end);
             }
         }
 
@@ -932,7 +933,8 @@ fn testAllocatorAligned(allocator: *mem.Allocator, comptime alignment: u29) !voi
 fn testAllocatorLargeAlignment(allocator: *mem.Allocator) mem.Allocator.Error!void {
     //Maybe a platform's page_size is actually the same as or
     //  very near usize?
-    if (mem.page_size << 2 > maxInt(usize)) return;
+    //if (mem.page_size << 2 > maxInt(usize)) return;
+    if (mem.page_size << 2 > 32768) return;
 
     const USizeShift = @IntType(false, std.math.log2(usize.bit_count));
     const large_align = @as(u29, mem.page_size << 2);

From b33211ed51901bfbc36d23cb5a02d1c8ca3b072e Mon Sep 17 00:00:00 2001
From: Benjamin Feng
Date: Tue, 3 Dec 2019 17:24:50 -0600
Subject: [PATCH 06/20] Implement block-based skipping

---
 lib/std/heap.zig | 47 +++++++++++++++++++++++++++++------------------
 1 file changed, 29 insertions(+), 18 deletions(-)

diff --git a/lib/std/heap.zig b/lib/std/heap.zig
index d3cd3d662e..7b1a571c54 100644
--- a/lib/std/heap.zig
+++ b/lib/std/heap.zig
@@ -254,43 +254,54 @@ extern fn @"llvm.wasm.memory.grow.i32"(u32, u32) i32;
 
 const WasmPageAllocator = struct {
     // TODO: figure out why __heap_base cannot be found
-    var heap_base_wannabe: [256]u8 = undefined;
+    var heap_base_wannabe: [256]u8 align(16) = undefined;
 
     const FreeBlock = struct {
         offset: usize = 0,
         packed_data: std.PackedIntSlice(u1) = std.PackedIntSlice(u1).init(&[_]u8{}, 0),
+        block_data: []u128 = &[_]u128{},
 
-        fn initData(self: *FreeBlock, data: []u8) void {
+        fn initData(self: *FreeBlock, data: []align(16) u8) void {
             // 0 == used, 1 == free
             std.mem.set(u8, data, 0);
             self.packed_data = std.PackedIntSlice(u1).init(data, data.len * 8);
+            self.block_data = @bytesToSlice(u128, data);
         }
 
         fn totalPages(self: FreeBlock) usize {
             return self.packed_data.int_count;
         }
 
-        // TODO: optimize this terribleness
         fn useRecycled(self: *FreeBlock, num_pages: usize) ?[*]u8 {
             var found_idx: usize = 0;
             var found_size: usize = 0;
 
-            var i: usize = 0;
-            while (i < self.packed_data.int_count) : (i += 1) {
-                if (self.packed_data.get(i) == 0) {
-                    found_size = 0;
-                } else {
-                    if (found_size == 0) {
-                        found_idx = i;
-                    }
-                    found_size += 1;
-
-                    if (found_size >= num_pages) {
-                        while (found_size > 0) {
-                            found_size -= 1;
-                            self.packed_data.set(found_idx + found_size, 0);
-                        }
-                        return @intToPtr([*]u8, (found_idx + self.offset) * std.mem.page_size);
+            for (self.block_data) |segment, i| {
+                const spills_into_next = @bitCast(i128, segment) < 0;
+                const has_enough_bits = @popCount(u128, segment) >= num_pages;
+
+                if (!spills_into_next and !has_enough_bits) continue;
+
+                var j: usize = i * 128;
+                while (j < self.packed_data.int_count) : (j += 1) {
+                    if (self.packed_data.get(j) == 0) {
+                        found_size = 0;
+                        if (j > (i + 1) * 128) {
+                            break;
+                        }
+                    } else {
+                        if (found_size == 0) {
+                            found_idx = j;
+                        }
+                        found_size += 1;
+
+                        if (found_size >= num_pages) {
+                            while (found_size > 0) {
+                                found_size -= 1;
+                                self.packed_data.set(found_idx + found_size, 0);
+                            }
+                            return @intToPtr([*]u8, (found_idx + self.offset) * std.mem.page_size);
+                        }
                     }
                 }
             }
@@ -365,7 +376,7 @@ const WasmPageAllocator = struct {
                 if (extended.totalPages() == 0) {
                     // Steal the last page from the memory currently being recycled
                     free_end -= 1;
-                    extended.initData(@intToPtr([*]u8, free_end * std.mem.page_size)[0..std.mem.page_size]);
+                    extended.initData(@intToPtr([*]align(16) u8, free_end * std.mem.page_size)[0..std.mem.page_size]);
                 }
                 extended.recycle(free_start, free_end);
             }

From baffaf7986d34b47580819eb70b66a5311556046 Mon Sep 17 00:00:00 2001
From: Benjamin Feng
Date: Tue, 3 Dec 2019 17:41:05 -0600
Subject: [PATCH 07/20] Extract setBits

---
 lib/std/heap.zig | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/lib/std/heap.zig b/lib/std/heap.zig
index 7b1a571c54..8a2d7f0cf7 100644
--- a/lib/std/heap.zig
+++ b/lib/std/heap.zig
@@ -272,6 +272,13 @@ fn totalPages(self: FreeBlock) usize {
             return self.packed_data.int_count;
         }
 
+        fn setBits(self: *FreeBlock, start_idx: usize, len: usize, val: u1) void {
+            var i: usize = 0;
+            while (i < len) : (i += 1) {
+                self.packed_data.set(i + start_idx + self.offset, val);
+            }
+        }
+
         fn useRecycled(self: *FreeBlock, num_pages: usize) ?[*]u8 {
             var found_idx: usize = 0;
             var found_size: usize = 0;
@@ -296,10 +303,7 @@ const WasmPageAllocator = struct {
                     found_size += 1;
 
                     if (found_size >= num_pages) {
-                        while (found_size > 0) {
-                            found_size -= 1;
-                            self.packed_data.set(found_idx + found_size, 0);
-                        }
+                        self.setBits(found_idx, found_size, 0);
                         return @intToPtr([*]u8, (found_idx + self.offset) * std.mem.page_size);
                     }
                 }
@@ -308,12 +312,8 @@ const WasmPageAllocator = struct {
             return null;
         }
 
-        fn recycle(self: *FreeBlock, start_index: usize, end_index: usize) void {
-            var i = start_index - self.offset;
-            while (i < end_index - self.offset) : (i += 1) {
-                std.debug.assert(self.packed_data.get(i) == 0);
-                self.packed_data.set(i, 1);
-            }
+        fn recycle(self: *FreeBlock, start_idx: usize, len: usize) void {
+            self.setBits(start_idx, len, 1);
         }
     };
 
@@ -369,7 +369,7 @@ const WasmPageAllocator = struct {
             }
 
             if (free_start < conventional.totalPages()) {
-                conventional.recycle(free_start, free_end);
+                conventional.recycle(free_start, free_end - free_start);
             } else {
                 if (extended.totalPages() == 0) {
                     extended.offset = conventional.offset + conventional.totalPages();
@@ -378,7 +378,7 @@ const WasmPageAllocator = struct {
                     free_end -= 1;
                     extended.initData(@intToPtr([*]align(16) u8, free_end * std.mem.page_size)[0..std.mem.page_size]);
                 }
-                extended.recycle(free_start, free_end);
+                extended.recycle(free_start, free_end - free_start);
             }
         }
 

From 01e73bba8d8c98558de35a43e82a5f8de837d8a5 Mon Sep 17 00:00:00 2001
From: Benjamin Feng
Date: Tue, 3 Dec 2019 23:49:56 -0600
Subject: [PATCH 08/20] Tighten recycled search

---
 lib/std/heap.zig | 27 +++++++++------------------
 1 file changed, 9 insertions(+), 18 deletions(-)

diff --git a/lib/std/heap.zig b/lib/std/heap.zig
index 8a2d7f0cf7..20f2b94766 100644
--- a/lib/std/heap.zig
+++ b/lib/std/heap.zig
@@ -280,9 +280,7 @@ const WasmPageAllocator = struct {
         }
 
         fn useRecycled(self: *FreeBlock, num_pages: usize) ?[*]u8 {
-            var found_idx: usize = 0;
-            var found_size: usize = 0;
-
+            @setCold(true);
             for (self.block_data) |segment, i| {
                 const spills_into_next = @bitCast(i128, segment) < 0;
                 const has_enough_bits = @popCount(u128, segment) >= num_pages;
@@ -290,23 +288,16 @@ const WasmPageAllocator = struct {
                 if (!spills_into_next and !has_enough_bits) continue;
 
                 var j: usize = i * 128;
-                while (j < self.packed_data.int_count) : (j += 1) {
-                    if (self.packed_data.get(j) == 0) {
-                        found_size = 0;
-                        if (j > (i + 1) * 128) {
-                            break;
-                        }
-                    } else {
-                        if (found_size == 0) {
-                            found_idx = j;
-                        }
-                        found_size += 1;
-
-                        if (found_size >= num_pages) {
-                            self.setBits(found_idx, found_size, 0);
-                            return @intToPtr([*]u8, (found_idx + self.offset) * std.mem.page_size);
+                while (j < (i + 1) * 128) : (j += 1) {
+                    var count: usize = 0;
+                    while (j + count < self.packed_data.int_count and self.packed_data.get(j + count) == 1) {
+                        count += 1;
+                        if (count >= num_pages) {
+                            self.setBits(j, num_pages, 0);
+                            return @intToPtr([*]u8, (j + self.offset) * std.mem.page_size);
                         }
                     }
+                    j += count;
                 }
             }
             return null;

From a6f838aab2678be491a774316c02a339487fe34d Mon Sep 17 00:00:00 2001
From: Benjamin Feng
Date: Wed, 4 Dec 2019 00:10:37 -0600
Subject: [PATCH 09/20] Remove redundant alloc

---
 lib/std/heap.zig | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/lib/std/heap.zig b/lib/std/heap.zig
index 20f2b94766..d7334c3a54 100644
--- a/lib/std/heap.zig
+++ b/lib/std/heap.zig
@@ -334,12 +334,10 @@ const WasmPageAllocator = struct {
             return error.OutOfMemory;
         }
 
-        if (old_mem.len == 0) {
-            return alloc(allocator, new_size, new_align);
+        if (nPages(new_size) == nPages(old_mem.len)) {
+            return old_mem.ptr[0..new_size];
         } else if (new_size < old_mem.len) {
             return shrink(allocator, old_mem, old_align, new_size, new_align);
-        } else if (nPages(new_size) == nPages(old_mem.len)) {
-            return old_mem.ptr[0..new_size];
         } else {
             const new_mem = try alloc(allocator, new_size, new_align);
             std.mem.copy(u8, new_mem, old_mem);

From a910a6c871d1840c900287d68f0d5c9f8888247d Mon Sep 17 00:00:00 2001
From: Benjamin Feng
Date: Wed, 4 Dec 2019 18:12:25 -0600
Subject: [PATCH 10/20] Rejuggle how offsets are calculated

---
 lib/std/heap.zig | 59 ++++++++++++++++++++++++++++++------------------
 1 file changed, 37 insertions(+), 22 deletions(-)

diff --git a/lib/std/heap.zig b/lib/std/heap.zig
index d7334c3a54..4630e845a3 100644
--- a/lib/std/heap.zig
+++ b/lib/std/heap.zig
@@ -257,7 +257,6 @@ const WasmPageAllocator = struct {
     var heap_base_wannabe: [256]u8 align(16) = undefined;
 
     const FreeBlock = struct {
-        offset: usize = 0,
         packed_data: std.PackedIntSlice(u1) = std.PackedIntSlice(u1).init(&[_]u8{}, 0),
         block_data: []u128 = &[_]u128{},
 
@@ -275,11 +274,20 @@ const WasmPageAllocator = struct {
         fn setBits(self: *FreeBlock, start_idx: usize, len: usize, val: u1) void {
             var i: usize = 0;
             while (i < len) : (i += 1) {
-                self.packed_data.set(i + start_idx + self.offset, val);
+                self.packed_data.set(i + start_idx, val);
             }
         }
 
-        fn useRecycled(self: *FreeBlock, num_pages: usize) ?[*]u8 {
+        // Use '0xFFFFFFFF' as a _missing_ sentinel
+        // This saves ~50 bytes compared to returning a nullable
+
+        // We can guarantee that conventional memory never gets this big,
+        // and wasm32 would not be able to address this block (32 GB > usize).
+
+        // Revisit if this is settled: https://github.com/ziglang/zig/issues/3806
+        const not_found = std.math.maxInt(usize);
+
+        fn useRecycled(self: *FreeBlock, num_pages: usize) usize {
             @setCold(true);
             for (self.block_data) |segment, i| {
                 const spills_into_next = @bitCast(i128, segment) < 0;
@@ -302,13 +310,13 @@ const WasmPageAllocator = struct {
                         count += 1;
                         if (count >= num_pages) {
                             self.setBits(j, num_pages, 0);
-                            return @intToPtr([*]u8, (j + self.offset) * std.mem.page_size);
+                            return j;
                         }
                     }
                     j += count;
                 }
             }
-            return null;
+            return not_found;
         }
 
         fn recycle(self: *FreeBlock, start_idx: usize, len: usize) void {
@@ -319,22 +327,31 @@ const WasmPageAllocator = struct {
     var conventional = FreeBlock{};
     var extended = FreeBlock{};
 
+    fn extendedOffset() usize {
+        return conventional.totalPages();
+    }
+
     fn nPages(memsize: usize) usize {
         return std.mem.alignForward(memsize, std.mem.page_size) / std.mem.page_size;
     }
 
-    fn alloc(allocator: *Allocator, n: usize, alignment: u29) error{OutOfMemory}![]u8 {
-        const n_pages = nPages(n);
-        const page = conventional.useRecycled(n_pages) orelse extended.useRecycled(n_pages) orelse blk: {
-            const prev_page_count = @"llvm.wasm.memory.grow.i32"(0, @intCast(u32, n_pages));
-            if (prev_page_count < 0) {
-                return error.OutOfMemory;
-            }
+    fn alloc(allocator: *Allocator, page_count: usize, alignment: u29) error{OutOfMemory}!usize {
+        var idx = conventional.useRecycled(page_count);
+        if (idx != FreeBlock.not_found) {
+            return idx;
+        }
 
-            break :blk @intToPtr([*]u8, @intCast(usize, prev_page_count) * std.mem.page_size);
-        };
+        idx = extended.useRecycled(page_count);
+        if (idx != FreeBlock.not_found) {
+            return idx + extendedOffset();
+        }
 
-        return page[0..n];
+        const prev_page_count = @"llvm.wasm.memory.grow.i32"(0, @intCast(u32, page_count));
+        if (prev_page_count <= 0) {
+            return error.OutOfMemory;
+        }
+
+        return @intCast(usize, prev_page_count);
     }
 
     pub fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) Allocator.Error![]u8 {
@@ -345,10 +362,11 @@ const WasmPageAllocator = struct {
         } else if (new_size < old_mem.len) {
             return shrink(allocator, old_mem, old_align, new_size, new_align);
         } else {
-            const new_mem = try alloc(allocator, new_size, new_align);
+            const page_idx = try alloc(allocator, nPages(new_size), new_align);
+            const new_mem = @intToPtr([*]u8, page_idx * std.mem.page_size)[0..new_size];
             std.mem.copy(u8, new_mem, old_mem);
             _ = shrink(allocator, old_mem, old_align, 0, 0);
-            return new_mem[0..new_size];
+            return new_mem;
         }
     }
 
@@ -358,21 +376,19 @@ const WasmPageAllocator = struct {
 
         if (free_end > free_start) {
             if (conventional.totalPages() == 0) {
-                conventional.offset = 0;
                 //conventional.initData(__heap_base[0..@intCast(usize, @"llvm.wasm.memory.size.i32"(0) * std.mem.page_size)]);
                 conventional.initData(heap_base_wannabe[0..]);
             }
 
-            if (free_start < conventional.totalPages()) {
+            if (free_start < extendedOffset()) {
                 conventional.recycle(free_start, free_end - free_start);
             } else {
                 if (extended.totalPages() == 0) {
-                    extended.offset = conventional.offset + conventional.totalPages();
-
                     // Steal the last page from the memory currently being recycled
                     free_end -= 1;
                     extended.initData(@intToPtr([*]align(16) u8, free_end * std.mem.page_size)[0..std.mem.page_size]);
                 }
-                extended.recycle(free_start, free_end - free_start);
+                extended.recycle(free_start - extendedOffset(), free_end - free_start);
             }
         }

From 5784985bb8a4c6aa686ad0b9fb98532c44c74b85 Mon Sep 17 00:00:00 2001
From: Benjamin Feng
Date: Wed, 4 Dec 2019 21:21:54 -0600
Subject: [PATCH 11/20] Use raw PackedIo to shave ~150b

---
 lib/std/heap.zig | 31 ++++++++++++++++++-------------
 1 file changed, 18 insertions(+), 13 deletions(-)

diff --git a/lib/std/heap.zig b/lib/std/heap.zig
index 4630e845a3..4bdee3539a 100644
--- a/lib/std/heap.zig
+++ b/lib/std/heap.zig
@@ -257,24 +257,28 @@ const WasmPageAllocator = struct {
     var heap_base_wannabe: [256]u8 align(16) = undefined;
 
     const FreeBlock = struct {
-        packed_data: std.PackedIntSlice(u1) = std.PackedIntSlice(u1).init(&[_]u8{}, 0),
-        block_data: []u128 = &[_]u128{},
+        const Io = std.packed_int_array.PackedIntIo(u1, .Little);
 
-        fn initData(self: *FreeBlock, data: []align(16) u8) void {
+        bytes: []align(16) u8,
+
+        fn initData(self: *FreeBlock, bytes: []align(16) u8) void {
             // 0 == used, 1 == free
-            std.mem.set(u8, data, 0);
-            self.packed_data = std.PackedIntSlice(u1).init(data, data.len * 8);
-            self.block_data = @bytesToSlice(u128, data);
+            std.mem.set(u8, bytes, 0);
+            self.bytes = bytes;
         }
 
         fn totalPages(self: FreeBlock) usize {
-            return self.packed_data.int_count;
+            return self.bytes.len * 8;
+        }
+
+        fn getBit(self: *FreeBlock, idx: usize) u1 {
+            return Io.get(self.bytes, idx, 0);
         }
 
         fn setBits(self: *FreeBlock, start_idx: usize, len: usize, val: u1) void {
             var i: usize = 0;
             while (i < len) : (i += 1) {
-                self.packed_data.set(i + start_idx, val);
+                Io.set(self.bytes, start_idx + i, 0, val);
             }
         }
 
@@ -282,14 +286,15 @@ const WasmPageAllocator = struct {
        // This saves ~50 bytes compared to returning a nullable
 
         // We can guarantee that conventional memory never gets this big,
-        // and wasm32 would not be able to address this block (32 GB > usize).
+        // and wasm32 would not be able to address this memory (32 GB > usize).
 
         // Revisit if this is settled: https://github.com/ziglang/zig/issues/3806
         const not_found = std.math.maxInt(usize);
 
         fn useRecycled(self: *FreeBlock, num_pages: usize) usize {
             @setCold(true);
-            for (self.block_data) |segment, i| {
+            const segments = @bytesToSlice(u128, self.bytes);
+            for (segments) |segment, i| {
                 const spills_into_next = @bitCast(i128, segment) < 0;
                 const has_enough_bits = @popCount(u128, segment) >= num_pages;
@@ -298,7 +303,7 @@ const WasmPageAllocator = struct {
                 var j: usize = i * 128;
                 while (j < (i + 1) * 128) : (j += 1) {
                     var count: usize = 0;
-                    while (j + count < self.packed_data.int_count and self.packed_data.get(j + count) == 1) {
+                    while (j + count < self.totalPages() and self.getBit(j + count) == 1) {
                         count += 1;
                         if (count >= num_pages) {
                             self.setBits(j, num_pages, 0);
@@ -316,8 +321,8 @@ const WasmPageAllocator = struct {
         }
     };
 
-    var conventional = FreeBlock{};
-    var extended = FreeBlock{};
+    var conventional = FreeBlock{ .bytes = &[_]u8{} };
+    var extended = FreeBlock{ .bytes = &[_]u8{} };
 
     fn extendedOffset() usize {
         return conventional.totalPages();

From 86ae75363e3fbf6b3835b87c1cfb79fe4bf97790 Mon Sep 17 00:00:00 2001
From: Benjamin Feng
Date: Wed, 4 Dec 2019 21:41:01 -0600
Subject: [PATCH 12/20] Strip out an unnecessary memset

---
 lib/std/heap.zig | 33 ++++++++++++++++++---------------
 1 file changed, 18 insertions(+), 15 deletions(-)

diff --git a/lib/std/heap.zig b/lib/std/heap.zig
index 4bdee3539a..25580a974b 100644
--- a/lib/std/heap.zig
+++ b/lib/std/heap.zig
@@ -254,31 +254,30 @@ extern fn @"llvm.wasm.memory.grow.i32"(u32, u32) i32;
 
 const WasmPageAllocator = struct {
     // TODO: figure out why __heap_base cannot be found
-    var heap_base_wannabe: [256]u8 align(16) = undefined;
+    var heap_base_wannabe align(16) = [_]u8{0} ** 256;
 
     const FreeBlock = struct {
-        const Io = std.packed_int_array.PackedIntIo(u1, .Little);
-
         bytes: []align(16) u8,
 
-        fn initData(self: *FreeBlock, bytes: []align(16) u8) void {
-            // 0 == used, 1 == free
-            std.mem.set(u8, bytes, 0);
-            self.bytes = bytes;
-        }
+        const Io = std.packed_int_array.PackedIntIo(u1, .Little);
+
+        const used = 0;
+        const free = 1;
 
         fn totalPages(self: FreeBlock) usize {
             return self.bytes.len * 8;
         }
 
         fn getBit(self: *FreeBlock, idx: usize) u1 {
-            return Io.get(self.bytes, idx, 0);
+            const bit_offset = 0;
+            return Io.get(self.bytes, idx, bit_offset);
         }
 
         fn setBits(self: *FreeBlock, start_idx: usize, len: usize, val: u1) void {
+            const bit_offset = 0;
             var i: usize = 0;
             while (i < len) : (i += 1) {
-                Io.set(self.bytes, start_idx + i, 0, val);
+                Io.set(self.bytes, start_idx + i, bit_offset, val);
             }
         }
 
@@ -306,7 +305,7 @@ const WasmPageAllocator = struct {
                     while (j + count < self.totalPages() and self.getBit(j + count) == 1) {
                         count += 1;
                         if (count >= num_pages) {
-                            self.setBits(j, num_pages, 0);
+                            self.setBits(j, num_pages, used);
                             return j;
                         }
                     }
@@ -317,7 +316,7 @@ const WasmPageAllocator = struct {
         }
 
         fn recycle(self: *FreeBlock, start_idx: usize, len: usize) void {
-            self.setBits(start_idx, len, 1);
+            self.setBits(start_idx, len, free);
         }
     };
 
@@ -375,8 +374,8 @@ const WasmPageAllocator = struct {
 
         if (free_end > free_start) {
             if (conventional.totalPages() == 0) {
-                //conventional.initData(__heap_base[0..@intCast(usize, @"llvm.wasm.memory.size.i32"(0) * std.mem.page_size)]);
-                conventional.initData(heap_base_wannabe[0..]);
+                //conventional.bytes = __heap_base[0..@intCast(usize, @"llvm.wasm.memory.size.i32"(0) * std.mem.page_size)];
+                conventional.bytes = heap_base_wannabe[0..];
             }
 
             if (free_start < extendedOffset()) {
@@ -384,8 +383,12 @@ const WasmPageAllocator = struct {
             } else {
                 if (extended.totalPages() == 0) {
                     // Steal the last page from the memory currently being recycled
+                    // TODO: would it be better if we use the first page instead?
                     free_end -= 1;
-                    extended.initData(@intToPtr([*]align(16) u8, free_end * std.mem.page_size)[0..std.mem.page_size]);
+
+                    extended.bytes = @intToPtr([*]align(16) u8, free_end * std.mem.page_size)[0..std.mem.page_size];
+                    // Since this is the first page being freed and we consume it, assume *nothing* is free.
+                    std.mem.set(u8, extended.bytes, FreeBlock.used);
                 }
                 extended.recycle(free_start - extendedOffset(), free_end - free_start);
             }

From 30da6d49f435b7ef317b059113ec1fab21d72d00 Mon Sep 17 00:00:00 2001
From: Benjamin Feng
Date: Wed, 4 Dec 2019 22:43:02 -0600
Subject: [PATCH 13/20] Fix freeing memory across bounds

---
 lib/std/heap.zig | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/lib/std/heap.zig b/lib/std/heap.zig
index 25580a974b..fcc9420cf7 100644
--- a/lib/std/heap.zig
+++ b/lib/std/heap.zig
@@ -379,8 +379,11 @@ const WasmPageAllocator = struct {
             }
 
             if (free_start < extendedOffset()) {
-                conventional.recycle(free_start, free_end - free_start);
-            } else {
+                const clamped_end = std.math.min(extendedOffset(), free_end);
+                conventional.recycle(free_start, clamped_end - free_start);
+            }
+
+            if (free_end > extendedOffset()) {
                 if (extended.totalPages() == 0) {
                     // Steal the last page from the memory currently being recycled
                     // TODO: would it be better if we use the first page instead?
@@ -390,7 +393,8 @@ const WasmPageAllocator = struct {
                     // Since this is the first page being freed and we consume it, assume *nothing* is free.
                     std.mem.set(u8, extended.bytes, FreeBlock.used);
                 }
-                extended.recycle(free_start - extendedOffset(), free_end - free_start);
+                const clamped_start = std.math.max(extendedOffset(), free_start);
+                extended.recycle(clamped_start - extendedOffset(), free_end - clamped_start);
             }
         }

From 7d1c4fe4dcb6188d2af0f65bf713aca8d326ea46 Mon Sep 17 00:00:00 2001
From: Benjamin Feng
Date: Thu, 5 Dec 2019 17:59:43 -0600
Subject: [PATCH 14/20] Switch bitmask to enums

---
 lib/std/heap.zig | 26 +++++++++++++++-----------
 1 file changed, 15 insertions(+), 11 deletions(-)

diff --git a/lib/std/heap.zig b/lib/std/heap.zig
index fcc9420cf7..a32db9862a 100644
--- a/lib/std/heap.zig
+++ b/lib/std/heap.zig
@@ -256,28 +256,32 @@ const WasmPageAllocator = struct {
     // TODO: figure out why __heap_base cannot be found
     var heap_base_wannabe align(16) = [_]u8{0} ** 256;
 
+    const PageStatus = enum(u1) {
+        used = 0,
+        free = 1,
+
+        pub const all_used: u8 = 0;
+    };
+
     const FreeBlock = struct {
         bytes: []align(16) u8,
 
         const Io = std.packed_int_array.PackedIntIo(u1, .Little);
 
-        const used = 0;
-        const free = 1;
-
         fn totalPages(self: FreeBlock) usize {
             return self.bytes.len * 8;
         }
 
-        fn getBit(self: *FreeBlock, idx: usize) u1 {
+        fn getBit(self: *FreeBlock, idx: usize) PageStatus {
             const bit_offset = 0;
-            return Io.get(self.bytes, idx, bit_offset);
+            return @intToEnum(PageStatus, Io.get(self.bytes, idx, bit_offset));
         }
 
-        fn setBits(self: *FreeBlock, start_idx: usize, len: usize, val: u1) void {
+        fn setBits(self: *FreeBlock, start_idx: usize, len: usize, val: PageStatus) void {
             const bit_offset = 0;
             var i: usize = 0;
             while (i < len) : (i += 1) {
-                Io.set(self.bytes, start_idx + i, bit_offset, val);
+                Io.set(self.bytes, start_idx + i, bit_offset, @enumToInt(val));
             }
         }
 
@@ -302,10 +306,10 @@ const WasmPageAllocator = struct {
                 var j: usize = i * 128;
                 while (j < (i + 1) * 128) : (j += 1) {
                     var count: usize = 0;
-                    while (j + count < self.totalPages() and self.getBit(j + count) == 1) {
+                    while (j + count < self.totalPages() and self.getBit(j + count) == .free) {
                         count += 1;
                         if (count >= num_pages) {
-                            self.setBits(j, num_pages, used);
+                            self.setBits(j, num_pages, .used);
                             return j;
                         }
                     }
@@ -316,7 +320,7 @@ const WasmPageAllocator = struct {
         }
 
         fn recycle(self: *FreeBlock, start_idx: usize, len: usize) void {
-            self.setBits(start_idx, len, free);
+            self.setBits(start_idx, len, .free);
         }
     };
 
@@ -391,7 +395,7 @@ const WasmPageAllocator = struct {
 
                     extended.bytes = @intToPtr([*]align(16) u8, free_end * std.mem.page_size)[0..std.mem.page_size];
                     // Since this is the first page being freed and we consume it, assume *nothing* is free.
-                    std.mem.set(u8, extended.bytes, FreeBlock.used);
+                    std.mem.set(u8, extended.bytes, PageStatus.all_used);
                 }
                 const clamped_start = std.math.max(extendedOffset(), free_start);
                 extended.recycle(clamped_start - extendedOffset(), free_end - clamped_start);

From 694616adb549fc0dc2d7416e120e17992087e75c Mon Sep 17 00:00:00 2001
From: Benjamin Feng
Date: Thu, 5 Dec 2019 18:28:32 -0600
Subject: [PATCH 15/20] Standardize around bigger slices

---
 lib/std/heap.zig | 37 ++++++++++++++-----------------------
 1 file changed, 14 insertions(+), 23 deletions(-)

diff --git a/lib/std/heap.zig b/lib/std/heap.zig
index a32db9862a..7c0ea4174c 100644
--- a/lib/std/heap.zig
+++ b/lib/std/heap.zig
@@ -246,16 +246,12 @@ const PageAllocator = struct {
     }
 };
 
-extern const __heap_base: [*]u8;
 // TODO Exposed LLVM intrinsics is a bug
 // See: https://github.com/ziglang/zig/issues/2291
 extern fn @"llvm.wasm.memory.size.i32"(u32) u32;
 extern fn @"llvm.wasm.memory.grow.i32"(u32, u32) i32;
 
 const WasmPageAllocator = struct {
-    // TODO: figure out why __heap_base cannot be found
-    var heap_base_wannabe align(16) = [_]u8{0} ** 256;
-
     const PageStatus = enum(u1) {
         used = 0,
         free = 1,
@@ -264,24 +260,24 @@ const WasmPageAllocator = struct {
     };
 
     const FreeBlock = struct {
-        bytes: []align(16) u8,
+        data: []u128,
 
         const Io = std.packed_int_array.PackedIntIo(u1, .Little);
 
         fn totalPages(self: FreeBlock) usize {
-            return self.bytes.len * 8;
+            return self.data.len * 8;
         }
 
-        fn getBit(self: *FreeBlock, idx: usize) PageStatus {
+        fn getBit(self: FreeBlock, idx: usize) PageStatus {
             const bit_offset = 0;
-            return @intToEnum(PageStatus, Io.get(self.bytes, idx, bit_offset));
+            return @intToEnum(PageStatus, Io.get(@sliceToBytes(self.data), idx, bit_offset));
         }
 
-        fn setBits(self: *FreeBlock, start_idx: usize, len: usize, val: PageStatus) void {
+        fn setBits(self: FreeBlock, start_idx: usize, len: usize, val: PageStatus) void {
             const bit_offset = 0;
             var i: usize = 0;
             while (i < len) : (i += 1) {
-                Io.set(self.bytes, start_idx + i, bit_offset, @enumToInt(val));
+                Io.set(@sliceToBytes(self.data), start_idx + i, bit_offset, @enumToInt(val));
             }
         }
 
@@ -290,9 +286,8 @@ const WasmPageAllocator = struct {
         // Revisit if this is settled: https://github.com/ziglang/zig/issues/3806
         const not_found = std.math.maxInt(usize);
 
-        fn useRecycled(self: *FreeBlock, num_pages: usize) usize {
+        fn useRecycled(self: FreeBlock, num_pages: usize) usize {
             @setCold(true);
-            const segments = @bytesToSlice(u128, self.bytes);
-            for (segments) |segment, i| {
+            for (self.data) |segment, i| {
                 const spills_into_next = @bitCast(i128, segment) < 0;
                 const has_enough_bits = @popCount(u128, segment) >= num_pages;
 
@@ -314,13 +309,13 @@ const WasmPageAllocator = struct {
             return not_found;
         }
 
-        fn recycle(self: *FreeBlock, start_idx: usize, len: usize) void {
+        fn recycle(self: FreeBlock, start_idx: usize, len: usize) void {
             self.setBits(start_idx, len, .free);
         }
     };
 
-    var conventional = FreeBlock{ .bytes = &[_]u8{} };
-    var extended = FreeBlock{ .bytes = &[_]u8{} };
+    const conventional = FreeBlock{ .data = &[_]u128{ 0, 0 } };
+    var extended = FreeBlock{ .data = &[_]u128{} };
 
     fn extendedOffset() usize {
         return conventional.totalPages();
@@ -368,14 +363,11 @@ const WasmPageAllocator = struct {
     }
 
     pub fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
+        @setCold(true);
         const free_start = nPages(@ptrToInt(old_mem.ptr) + new_size);
         var free_end = nPages(@ptrToInt(old_mem.ptr) + old_mem.len);
 
         if (free_end > free_start) {
-            if (conventional.totalPages() == 0) {
-                //conventional.bytes = __heap_base[0..@intCast(usize, @"llvm.wasm.memory.size.i32"(0) * std.mem.page_size)];
-                conventional.bytes = heap_base_wannabe[0..];
-            }
-
             if (free_start < extendedOffset()) {
                 const clamped_end = std.math.min(extendedOffset(), free_end);
                 conventional.recycle(free_start, clamped_end - free_start);
@@ -384,9 +380,9 @@ const WasmPageAllocator = struct {
                 if (extended.totalPages() == 0) {
                     // Steal the last page from the memory currently being recycled
                     // TODO: would it be better if we use the first page instead?
                     free_end -= 1;
 
-                    extended.bytes = @intToPtr([*]align(16) u8, free_end * std.mem.page_size)[0..std.mem.page_size];
+                    extended.data = @intToPtr([*]u128, free_end * std.mem.page_size)[0 .. std.mem.page_size / @sizeOf(u128)];
                     // Since this is the first page being freed and we consume it, assume *nothing* is free.
-                    std.mem.set(u8, extended.bytes, PageStatus.all_used);
+                    std.mem.set(u128, extended.data, PageStatus.all_used);
                 }
                 const clamped_start = std.math.max(extendedOffset(), free_start);
                 extended.recycle(clamped_start - extendedOffset(), free_end - clamped_start);

From f2b0dbea748ecc927a9c2482dbb1dad8843cf9f8 Mon Sep 17 00:00:00 2001
From: Benjamin Feng
Date: Thu, 5 Dec 2019 19:31:49 -0600
Subject: [PATCH 16/20] Resolve tests to work with or skip WasmPageAllocator

---
 lib/std/heap.zig | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/lib/std/heap.zig b/lib/std/heap.zig
index 7c0ea4174c..e7fab637c0 100644
--- a/lib/std/heap.zig
+++ b/lib/std/heap.zig
@@ -794,8 +794,10 @@ test "PageAllocator" {
     const allocator = page_allocator;
     try testAllocator(allocator);
     try testAllocatorAligned(allocator, 16);
-    try testAllocatorLargeAlignment(allocator);
-    try testAllocatorAlignedShrink(allocator);
+    if (!std.Target.current.isWasm()) {
+        try testAllocatorLargeAlignment(allocator);
+        try testAllocatorAlignedShrink(allocator);
+    }
 
     if (builtin.os == .windows) {
         // Trying really large alignment. As mentionned in the implementation,
@@ -832,7 +834,7 @@ test "ArenaAllocator" {
     try testAllocatorAlignedShrink(&arena_allocator.allocator);
 }
 
-var test_fixed_buffer_allocator_memory: [80000 * @sizeOf(u64)]u8 = undefined;
+var test_fixed_buffer_allocator_memory: [800000 * @sizeOf(u64)]u8 = undefined;
 
 test "FixedBufferAllocator" {
     var fixed_buffer_allocator = FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]);
@@ -955,8 +957,7 @@ fn testAllocatorAligned(allocator: *mem.Allocator, comptime alignment: u29) !voi
 fn testAllocatorLargeAlignment(allocator: *mem.Allocator) mem.Allocator.Error!void {
     //Maybe a platform's page_size is actually the same as or
     //  very near usize?
-    //if (mem.page_size << 2 > maxInt(usize)) return;
-    if (mem.page_size << 2 > 32768) return;
+    if (mem.page_size << 2 > maxInt(usize)) return;
 
     const USizeShift = @IntType(false, std.math.log2(usize.bit_count));
     const large_align = @as(u29, mem.page_size << 2);

From eb495d934b266cc9083f5caf093ec2c437f19788 Mon Sep 17 00:00:00 2001
From: Benjamin Feng
Date: Thu, 5 Dec 2019 21:54:57 -0600
Subject: [PATCH 17/20] Add WasmPageAllocator tests

---
 lib/std/heap.zig | 48 +++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 43 insertions(+), 5 deletions(-)

diff --git a/lib/std/heap.zig b/lib/std/heap.zig
index e7fab637c0..9ff8b9f7b9 100644
--- a/lib/std/heap.zig
+++ b/lib/std/heap.zig
@@ -256,7 +256,7 @@ const WasmPageAllocator = struct {
         used = 0,
         free = 1,
 
-        pub const all_used: u8 = 0;
+        pub const none_free: u8 = 0;
     };
 
     const FreeBlock = struct {
@@ -265,7 +265,11 @@ const WasmPageAllocator = struct {
         const Io = std.packed_int_array.PackedIntIo(u1, .Little);
 
         fn totalPages(self: FreeBlock) usize {
-            return self.data.len * 8;
+            return self.data.len * 128;
+        }
+
+        fn isInitialized(self: FreeBlock) bool {
+            return self.data.len > 0;
         }
 
         fn getBit(self: FreeBlock, idx: usize) PageStatus {
@@ -319,7 +323,9 @@ const WasmPageAllocator = struct {
         }
     };
 
-    const conventional = FreeBlock{ .data = &[_]u128{ 0, 0 } };
+    var _conventional_data = [_]u128{0} ** 16;
+    // Marking `conventional` as const saves ~40 bytes
+    var conventional = FreeBlock{ .data = &_conventional_data };
     var extended = FreeBlock{ .data = &[_]u128{} };
 
     fn extendedOffset() usize {
@@ -379,14 +385,14 @@ const WasmPageAllocator = struct {
         }
 
         if (free_end > extendedOffset()) {
-            if (extended.totalPages() == 0) {
+            if (!extended.isInitialized()) {
                 // Steal the last page from the memory currently being recycled
                 // TODO: would it be better if we use the first page instead?
                 free_end -= 1;
 
                 extended.data = @intToPtr([*]u128, free_end * std.mem.page_size)[0 .. std.mem.page_size / @sizeOf(u128)];
                 // Since this is the first page being freed and we consume it, assume *nothing* is free.
-                std.mem.set(u128, extended.data, PageStatus.all_used);
+                std.mem.set(u128, extended.data, PageStatus.none_free);
             }
             const clamped_start = std.math.max(extendedOffset(), free_start);
             extended.recycle(clamped_start - extendedOffset(), free_end - clamped_start);
@@ -790,6 +796,38 @@ test "c_allocator" {
     }
 }
 
+test "WasmPageAllocator internals" {
+    if (std.Target.current.isWasm()) {
+        const none_free = WasmPageAllocator.PageStatus.none_free;
+        std.debug.assert(none_free == WasmPageAllocator.conventional.data[0]); // Passes if this test runs first
+        std.debug.assert(!WasmPageAllocator.extended.isInitialized()); // Passes if this test runs first
+
+        const tmp = try page_allocator.alloc(u8, 1);
+        testing.expect(none_free == WasmPageAllocator.conventional.data[0]);
+        page_allocator.free(tmp);
+        testing.expect(none_free < WasmPageAllocator.conventional.data[0]);
+
+        const a_little_free = WasmPageAllocator.conventional.data[0];
+        const tmp_large = try page_allocator.alloc(u8, std.mem.page_size + 1);
+        testing.expect(a_little_free == WasmPageAllocator.conventional.data[0]);
+        const tmp_small = try page_allocator.alloc(u8, 1);
+        testing.expect(none_free == WasmPageAllocator.conventional.data[0]);
+
+        page_allocator.free(tmp_small);
+        testing.expect(a_little_free == WasmPageAllocator.conventional.data[0]);
+        page_allocator.free(tmp_large);
+        testing.expect(a_little_free < WasmPageAllocator.conventional.data[0]);
+
+        const more_free = WasmPageAllocator.conventional.data[0];
+        const supersize = try page_allocator.alloc(u8, std.mem.page_size * (WasmPageAllocator.conventional.totalPages() + 1));
+        testing.expect(more_free == WasmPageAllocator.conventional.data[0]);
+        testing.expect(!WasmPageAllocator.extended.isInitialized());
+        page_allocator.free(supersize);
+        testing.expect(WasmPageAllocator.extended.isInitialized());
+        testing.expect(more_free < WasmPageAllocator.conventional.data[0]);
+    }
+}
+
 test "PageAllocator" {
     const allocator = page_allocator;
     try testAllocator(allocator);

From 5a004ed834b62a15ebf1649856580baeb68501fa Mon Sep 17 00:00:00 2001
From: Benjamin Feng
Date: Fri, 6 Dec 2019 15:16:07 -0600
Subject: [PATCH 18/20] Actually use `const conventional` as the comment
 indicates

---
 lib/std/heap.zig | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lib/std/heap.zig b/lib/std/heap.zig
index 9ff8b9f7b9..d456407706 100644
--- a/lib/std/heap.zig
+++ b/lib/std/heap.zig
@@ -325,7 +325,7 @@ const WasmPageAllocator = struct {
 
     var _conventional_data = [_]u128{0} ** 16;
     // Marking `conventional` as const saves ~40 bytes
-    var conventional = FreeBlock{ .data = &_conventional_data };
+    const conventional = FreeBlock{ .data = &_conventional_data };
     var extended = FreeBlock{ .data = &[_]u128{} };
 
     fn extendedOffset() usize {

From e91522b875b72cc3990fd4086a331ab63ac70dc8 Mon Sep 17 00:00:00 2001
From: Benjamin Feng
Date: Fri, 6 Dec 2019 16:03:15 -0600
Subject: [PATCH 19/20] Add back comptime check for wasm

---
 lib/std/heap.zig | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/lib/std/heap.zig b/lib/std/heap.zig
index d456407706..b6a6c6e3fe 100644
--- a/lib/std/heap.zig
+++ b/lib/std/heap.zig
@@ -252,6 +252,12 @@ extern fn @"llvm.wasm.memory.size.i32"(u32) u32;
 extern fn @"llvm.wasm.memory.grow.i32"(u32, u32) i32;
 
 const WasmPageAllocator = struct {
+    comptime {
+        if (!std.Target.current.isWasm()) {
+            @compileError("WasmPageAllocator is only available for wasm32 arch");
+        }
+    }
+
     const PageStatus = enum(u1) {
         used = 0,
         free = 1,
@@ -797,7 +803,7 @@ test "c_allocator" {
 }
 
 test "WasmPageAllocator internals" {
-    if (std.Target.current.isWasm()) {
+    if (comptime std.Target.current.isWasm()) {
         const none_free = WasmPageAllocator.PageStatus.none_free;
         std.debug.assert(none_free == WasmPageAllocator.conventional.data[0]); // Passes if this test runs first
         std.debug.assert(!WasmPageAllocator.extended.isInitialized()); // Passes if this test runs first

From 608d36ad8c910c9a9c112f3d0c047655aa2f91e8 Mon Sep 17 00:00:00 2001
From: Benjamin Feng
Date: Sun, 8 Dec 2019 21:22:07 -0600
Subject: [PATCH 20/20] Rewrite WasmPageAllocator tests to be less flaky on
 environment

---
 lib/std/heap.zig | 51 ++++++++++++++++++++++++++----------------------
 1 file changed, 28 insertions(+), 23 deletions(-)

diff --git a/lib/std/heap.zig b/lib/std/heap.zig
index b6a6c6e3fe..4507d86a0f 100644
--- a/lib/std/heap.zig
+++ b/lib/std/heap.zig
@@ -804,33 +804,38 @@ test "c_allocator" {
 
 test "WasmPageAllocator internals" {
     if (comptime std.Target.current.isWasm()) {
-        const none_free = WasmPageAllocator.PageStatus.none_free;
-        std.debug.assert(none_free == WasmPageAllocator.conventional.data[0]); // Passes if this test runs first
-        std.debug.assert(!WasmPageAllocator.extended.isInitialized()); // Passes if this test runs first
+        const conventional_memsize = WasmPageAllocator.conventional.totalPages() * std.mem.page_size;
+        const initial = try page_allocator.alloc(u8, std.mem.page_size);
+        std.debug.assert(@ptrToInt(initial.ptr) < conventional_memsize); // If this isn't conventional, the rest of these tests don't make sense. Also we have a serious memory leak in the test suite.
 
-        const tmp = try page_allocator.alloc(u8, 1);
-        testing.expect(none_free == WasmPageAllocator.conventional.data[0]);
-        page_allocator.free(tmp);
-        testing.expect(none_free < WasmPageAllocator.conventional.data[0]);
+        var inplace = try page_allocator.realloc(initial, 1);
+        testing.expectEqual(initial.ptr, inplace.ptr);
+        inplace = try page_allocator.realloc(inplace, 4);
+        testing.expectEqual(initial.ptr, inplace.ptr);
+        page_allocator.free(inplace);
 
-        const a_little_free = WasmPageAllocator.conventional.data[0];
-        const tmp_large = try page_allocator.alloc(u8, std.mem.page_size + 1);
-        testing.expect(a_little_free == WasmPageAllocator.conventional.data[0]);
-        const tmp_small = try page_allocator.alloc(u8, 1);
-        testing.expect(none_free == WasmPageAllocator.conventional.data[0]);
+        const reuse = try page_allocator.alloc(u8, 1);
+        testing.expectEqual(initial.ptr, reuse.ptr);
+        page_allocator.free(reuse);
 
-        page_allocator.free(tmp_small);
-        testing.expect(a_little_free == WasmPageAllocator.conventional.data[0]);
-        page_allocator.free(tmp_large);
-        testing.expect(a_little_free < WasmPageAllocator.conventional.data[0]);
+        // This segment may span conventional and extended which has really complex rules so we're just ignoring it for now.
+        const padding = try page_allocator.alloc(u8, conventional_memsize);
+        page_allocator.free(padding);
 
-        const more_free = WasmPageAllocator.conventional.data[0];
-        const supersize = try page_allocator.alloc(u8, std.mem.page_size * (WasmPageAllocator.conventional.totalPages() + 1));
-        testing.expect(more_free == WasmPageAllocator.conventional.data[0]);
-        testing.expect(!WasmPageAllocator.extended.isInitialized());
-        page_allocator.free(supersize);
-        testing.expect(WasmPageAllocator.extended.isInitialized());
-        testing.expect(more_free < WasmPageAllocator.conventional.data[0]);
+        const extended = try page_allocator.alloc(u8, conventional_memsize);
+        testing.expect(@ptrToInt(extended.ptr) >= conventional_memsize);
+
+        const use_small = try page_allocator.alloc(u8, 1);
+        testing.expectEqual(initial.ptr, use_small.ptr);
+        page_allocator.free(use_small);
+
+        inplace = try page_allocator.realloc(extended, 1);
+        testing.expectEqual(extended.ptr, inplace.ptr);
+        page_allocator.free(inplace);
+
+        const reuse_extended = try page_allocator.alloc(u8, conventional_memsize);
+        testing.expectEqual(extended.ptr, reuse_extended.ptr);
+        page_allocator.free(reuse_extended);
     }
 }