remove std.heap.IncrementingAllocator

Use std.heap.FixedBufferAllocator combined with
std.heap.DirectAllocator instead.
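
For example, a minimal sketch of the replacement pattern (the 10 MiB
capacity is a hypothetical choice, and this must run inside a function
that can return an error):

    var direct_allocator = std.heap.DirectAllocator.init();
    defer direct_allocator.deinit();

    // Reserve the entire capacity up front with one direct allocation.
    const buffer = try direct_allocator.allocator.alloc(u8, 10 * 1024 * 1024);
    defer direct_allocator.allocator.free(buffer);

    // Bump-allocate out of that buffer; these allocations never make syscalls.
    var fixed_allocator = std.heap.FixedBufferAllocator.init(buffer);
    const allocator = &fixed_allocator.allocator;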

std.mem.FixedBufferAllocator is moved to std.heap.FixedBufferAllocator
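
As the diffs below show, call sites change only in the qualified name, e.g.:

    -var fixed_allocator = mem.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
    +var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);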
Andrew Kelley 2018-02-12 02:27:02 -05:00
commit 4a4ea92cf3
parent 445b03384a
7 changed files with 61 additions and 141 deletions


@@ -1078,5 +1078,5 @@ fn readILeb128(in_stream: var) !i64 {
 }
 pub const global_allocator = &global_fixed_allocator.allocator;
-var global_fixed_allocator = mem.FixedBufferAllocator.init(global_allocator_mem[0..]);
+var global_fixed_allocator = std.heap.FixedBufferAllocator.init(global_allocator_mem[0..]);
 var global_allocator_mem: [100 * 1024]u8 = undefined;


@@ -39,74 +39,6 @@ fn cFree(self: &Allocator, old_mem: []u8) void {
     c.free(old_ptr);
 }
-
-/// Use this allocator when you want to allocate completely up front and guarantee that individual
-/// allocations will never make syscalls.
-pub const IncrementingAllocator = struct {
-    allocator: Allocator,
-    bytes: []u8,
-    end_index: usize,
-    direct_allocator: DirectAllocator,
-
-    pub fn init(capacity: usize) !IncrementingAllocator {
-        var direct_allocator = DirectAllocator.init();
-        const bytes = try direct_allocator.allocator.alloc(u8, capacity);
-        errdefer direct_allocator.allocator.free(bytes);
-        return IncrementingAllocator {
-            .allocator = Allocator {
-                .allocFn = alloc,
-                .reallocFn = realloc,
-                .freeFn = free,
-            },
-            .bytes = bytes,
-            .direct_allocator = direct_allocator,
-            .end_index = 0,
-        };
-    }
-
-    pub fn deinit(self: &IncrementingAllocator) void {
-        self.direct_allocator.allocator.free(self.bytes);
-        self.direct_allocator.deinit();
-    }
-
-    fn reset(self: &IncrementingAllocator) void {
-        self.end_index = 0;
-    }
-
-    fn bytesLeft(self: &const IncrementingAllocator) usize {
-        return self.bytes.len - self.end_index;
-    }
-
-    fn alloc(allocator: &Allocator, n: usize, alignment: u29) ![]u8 {
-        const self = @fieldParentPtr(IncrementingAllocator, "allocator", allocator);
-        const addr = @ptrToInt(&self.bytes[self.end_index]);
-        const rem = @rem(addr, alignment);
-        const march_forward_bytes = if (rem == 0) 0 else (alignment - rem);
-        const adjusted_index = self.end_index + march_forward_bytes;
-        const new_end_index = adjusted_index + n;
-        if (new_end_index > self.bytes.len) {
-            return error.OutOfMemory;
-        }
-        const result = self.bytes[adjusted_index .. new_end_index];
-        self.end_index = new_end_index;
-        return result;
-    }
-
-    fn realloc(allocator: &Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
-        if (new_size <= old_mem.len) {
-            return old_mem[0..new_size];
-        } else {
-            const result = try alloc(allocator, new_size, alignment);
-            mem.copy(u8, result, old_mem);
-            return result;
-        }
-    }
-
-    fn free(allocator: &Allocator, bytes: []u8) void {
-        // Do nothing. That's the point of an incrementing allocator.
-    }
-};
-
 /// This allocator makes a syscall directly for every allocation and free.
 pub const DirectAllocator = struct {
     allocator: Allocator,
@@ -327,6 +259,52 @@ pub const ArenaAllocator = struct {
     fn free(allocator: &Allocator, bytes: []u8) void { }
 };
+
+pub const FixedBufferAllocator = struct {
+    allocator: Allocator,
+    end_index: usize,
+    buffer: []u8,
+
+    pub fn init(buffer: []u8) FixedBufferAllocator {
+        return FixedBufferAllocator {
+            .allocator = Allocator {
+                .allocFn = alloc,
+                .reallocFn = realloc,
+                .freeFn = free,
+            },
+            .buffer = buffer,
+            .end_index = 0,
+        };
+    }
+
+    fn alloc(allocator: &Allocator, n: usize, alignment: u29) ![]u8 {
+        const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
+        const addr = @ptrToInt(&self.buffer[self.end_index]);
+        const rem = @rem(addr, alignment);
+        const march_forward_bytes = if (rem == 0) 0 else (alignment - rem);
+        const adjusted_index = self.end_index + march_forward_bytes;
+        const new_end_index = adjusted_index + n;
+        if (new_end_index > self.buffer.len) {
+            return error.OutOfMemory;
+        }
+        const result = self.buffer[adjusted_index .. new_end_index];
+        self.end_index = new_end_index;
+        return result;
+    }
+
+    fn realloc(allocator: &Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
+        if (new_size <= old_mem.len) {
+            return old_mem[0..new_size];
+        } else {
+            const result = try alloc(allocator, new_size, alignment);
+            mem.copy(u8, result, old_mem);
+            return result;
+        }
+    }
+
+    fn free(allocator: &Allocator, bytes: []u8) void { }
+};

 test "c_allocator" {
@@ -337,26 +315,6 @@ test "c_allocator" {
     }
 }
-
-test "IncrementingAllocator" {
-    const total_bytes = 10 * 1024 * 1024;
-    var inc_allocator = try IncrementingAllocator.init(total_bytes);
-    defer inc_allocator.deinit();
-    const allocator = &inc_allocator.allocator;
-
-    const slice = try allocator.alloc(&i32, 100);
-
-    for (slice) |*item, i| {
-        *item = try allocator.create(i32);
-        **item = i32(i);
-    }
-
-    assert(inc_allocator.bytesLeft() == total_bytes - @sizeOf(i32) * 100 - @sizeOf(usize) * 100);
-
-    inc_allocator.reset();
-
-    assert(inc_allocator.bytesLeft() == total_bytes);
-}

 test "DirectAllocator" {
     var direct_allocator = DirectAllocator.init();
     defer direct_allocator.deinit();
@@ -375,6 +333,13 @@ test "ArenaAllocator" {
     try testAllocator(&arena_allocator.allocator);
 }
+
+var test_fixed_buffer_allocator_memory: [30000 * @sizeOf(usize)]u8 = undefined;
+
+test "FixedBufferAllocator" {
+    var fixed_buffer_allocator = FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]);
+
+    try testAllocator(&fixed_buffer_allocator.allocator);
+}

 fn testAllocator(allocator: &mem.Allocator) !void {
     var slice = try allocator.alloc(&i32, 100);


@@ -111,51 +111,6 @@ pub const Allocator = struct {
     }
 };
-
-pub const FixedBufferAllocator = struct {
-    allocator: Allocator,
-    end_index: usize,
-    buffer: []u8,
-
-    pub fn init(buffer: []u8) FixedBufferAllocator {
-        return FixedBufferAllocator {
-            .allocator = Allocator {
-                .allocFn = alloc,
-                .reallocFn = realloc,
-                .freeFn = free,
-            },
-            .buffer = buffer,
-            .end_index = 0,
-        };
-    }
-
-    fn alloc(allocator: &Allocator, n: usize, alignment: u29) ![]u8 {
-        const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
-        const addr = @ptrToInt(&self.buffer[self.end_index]);
-        const rem = @rem(addr, alignment);
-        const march_forward_bytes = if (rem == 0) 0 else (alignment - rem);
-        const adjusted_index = self.end_index + march_forward_bytes;
-        const new_end_index = adjusted_index + n;
-        if (new_end_index > self.buffer.len) {
-            return error.OutOfMemory;
-        }
-        const result = self.buffer[adjusted_index .. new_end_index];
-        self.end_index = new_end_index;
-        return result;
-    }
-
-    fn realloc(allocator: &Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
-        if (new_size <= old_mem.len) {
-            return old_mem[0..new_size];
-        } else {
-            const result = try alloc(allocator, new_size, alignment);
-            copy(u8, result, old_mem);
-            return result;
-        }
-    }
-
-    fn free(allocator: &Allocator, bytes: []u8) void { }
-};

 /// Copy all of source into dest at position 0.
 /// dest.len must be >= source.len.
 pub fn copy(comptime T: type, dest: []T, source: []const T) void {


@@ -363,7 +363,7 @@ pub const ChildProcess = struct {
         const dev_null_fd = if (any_ignore) blk: {
             const dev_null_path = "/dev/null";
             var fixed_buffer_mem: [dev_null_path.len + 1]u8 = undefined;
-            var fixed_allocator = mem.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
+            var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
             break :blk try os.posixOpen(&fixed_allocator.allocator, "/dev/null", posix.O_RDWR, 0);
         } else blk: {
             break :blk undefined;
@@ -472,7 +472,7 @@ pub const ChildProcess = struct {
         const nul_handle = if (any_ignore) blk: {
             const nul_file_path = "NUL";
             var fixed_buffer_mem: [nul_file_path.len + 1]u8 = undefined;
-            var fixed_allocator = mem.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
+            var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
             break :blk try os.windowsOpen(&fixed_allocator.allocator, "NUL", windows.GENERIC_READ, windows.FILE_SHARE_READ,
                 windows.OPEN_EXISTING, windows.FILE_ATTRIBUTE_NORMAL);
         } else blk: {


@@ -1702,12 +1702,12 @@ pub fn openSelfExe() !os.File {
        Os.linux => {
            const proc_file_path = "/proc/self/exe";
            var fixed_buffer_mem: [proc_file_path.len + 1]u8 = undefined;
-           var fixed_allocator = mem.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
+           var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
            return os.File.openRead(&fixed_allocator.allocator, proc_file_path);
        },
        Os.macosx, Os.ios => {
            var fixed_buffer_mem: [darwin.PATH_MAX * 2]u8 = undefined;
-           var fixed_allocator = mem.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
+           var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
            const self_exe_path = try selfExePath(&fixed_allocator.allocator);
            return os.File.openRead(&fixed_allocator.allocator, self_exe_path);
        },


@@ -1094,7 +1094,7 @@ var fixed_buffer_mem: [100 * 1024]u8 = undefined;
 fn fuzzTest(rng: &std.rand.Rand) void {
     const array_size = rng.range(usize, 0, 1000);
-    var fixed_allocator = mem.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
+    var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
     var array = fixed_allocator.allocator.alloc(IdAndValue, array_size) catch unreachable;
     // populate with random data
     for (array) |*item, index| {


@@ -1060,7 +1060,7 @@ fn testParse(source: []const u8, allocator: &mem.Allocator) ![]u8 {
 fn testCanonical(source: []const u8) !void {
     const needed_alloc_count = x: {
         // Try it once with unlimited memory, make sure it works
-        var fixed_allocator = mem.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
+        var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
         var failing_allocator = std.debug.FailingAllocator.init(&fixed_allocator.allocator, @maxValue(usize));
         const result_source = try testParse(source, &failing_allocator.allocator);
         if (!mem.eql(u8, result_source, source)) {
@@ -1077,7 +1077,7 @@ fn testCanonical(source: []const u8) !void {
     var fail_index: usize = 0;
     while (fail_index < needed_alloc_count) : (fail_index += 1) {
-        var fixed_allocator = mem.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
+        var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
         var failing_allocator = std.debug.FailingAllocator.init(&fixed_allocator.allocator, fail_index);
         if (testParse(source, &failing_allocator.allocator)) |_| {
             return error.NondeterministicMemoryUsage;