allocgate: std Allocator interface refactor

Repository: https://github.com/ziglang/zig.git (mirror)
Commit: 85de022c56
Parent: 1e0addcf73
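The change below is mechanical but wide-reaching: std.mem.Allocator becomes a value type (a pointer plus a vtable) that is passed by value, so every `allocator: *Allocator` parameter or field becomes `allocator: Allocator`, and call sites stop taking the address of an implementation's `allocator` field. A minimal sketch of the new convention, using a hypothetical dupeGreeting helper that is not part of this diff:

const std = @import("std");

// Hypothetical helper: after allocgate the parameter is the std.mem.Allocator
// interface itself, not a pointer to it.
fn dupeGreeting(allocator: std.mem.Allocator, name: []const u8) ![]u8 {
    return std.fmt.allocPrint(allocator, "hello, {s}", .{name});
}

test "pass the Allocator interface by value" {
    const allocator = std.testing.allocator; // already an Allocator value after this change
    const greeting = try dupeGreeting(allocator, "zig");
    defer allocator.free(greeting);
    try std.testing.expectEqualStrings("hello, zig", greeting);
}

Allocator implementations (GeneralPurposeAllocator, ArenaAllocator, FixedBufferAllocator, and so on) hand out that value through a getAllocator() method in this commit; callers previously wrote &impl.allocator.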
@@ -18,7 +18,7 @@ pub fn main() !void {
}

fn render(
-allocator: *mem.Allocator,
+allocator: mem.Allocator,
in_file: []const u8,
out_file: []const u8,
fmt: enum {

@@ -342,7 +342,7 @@ const Action = enum {
Close,
};

-fn genToc(allocator: *Allocator, tokenizer: *Tokenizer) !Toc {
+fn genToc(allocator: Allocator, tokenizer: *Tokenizer) !Toc {
var urls = std.StringHashMap(Token).init(allocator);
errdefer urls.deinit();

@@ -708,7 +708,7 @@ fn genToc(allocator: *Allocator, tokenizer: *Tokenizer) !Toc {
};
}

-fn urlize(allocator: *Allocator, input: []const u8) ![]u8 {
+fn urlize(allocator: Allocator, input: []const u8) ![]u8 {
var buf = std.ArrayList(u8).init(allocator);
defer buf.deinit();

@@ -727,7 +727,7 @@ fn urlize(allocator: *Allocator, input: []const u8) ![]u8 {
return buf.toOwnedSlice();
}

-fn escapeHtml(allocator: *Allocator, input: []const u8) ![]u8 {
+fn escapeHtml(allocator: Allocator, input: []const u8) ![]u8 {
var buf = std.ArrayList(u8).init(allocator);
defer buf.deinit();

@@ -773,7 +773,7 @@ test "term color" {
try testing.expectEqualSlices(u8, "A<span class=\"t32_1\">green</span>B", result);
}

-fn termColor(allocator: *Allocator, input: []const u8) ![]u8 {
+fn termColor(allocator: Allocator, input: []const u8) ![]u8 {
var buf = std.ArrayList(u8).init(allocator);
defer buf.deinit();

@@ -883,7 +883,7 @@ fn writeEscapedLines(out: anytype, text: []const u8) !void {
}

fn tokenizeAndPrintRaw(
-allocator: *Allocator,
+allocator: Allocator,
docgen_tokenizer: *Tokenizer,
out: anytype,
source_token: Token,

@@ -1137,7 +1137,7 @@ fn tokenizeAndPrintRaw(
}

fn tokenizeAndPrint(
-allocator: *Allocator,
+allocator: Allocator,
docgen_tokenizer: *Tokenizer,
out: anytype,
source_token: Token,

@@ -1146,7 +1146,7 @@ fn tokenizeAndPrint(
return tokenizeAndPrintRaw(allocator, docgen_tokenizer, out, source_token, raw_src);
}

-fn printSourceBlock(allocator: *Allocator, docgen_tokenizer: *Tokenizer, out: anytype, syntax_block: SyntaxBlock) !void {
+fn printSourceBlock(allocator: Allocator, docgen_tokenizer: *Tokenizer, out: anytype, syntax_block: SyntaxBlock) !void {
const source_type = @tagName(syntax_block.source_type);

try out.print("<figure><figcaption class=\"{s}-cap\"><cite class=\"file\">{s}</cite></figcaption><pre>", .{ source_type, syntax_block.name });

@@ -1188,7 +1188,7 @@ fn printShell(out: anytype, shell_content: []const u8) !void {
}

fn genHtml(
-allocator: *Allocator,
+allocator: Allocator,
tokenizer: *Tokenizer,
toc: *Toc,
out: anytype,

@@ -1687,7 +1687,7 @@ fn genHtml(
}
}

-fn exec(allocator: *Allocator, env_map: *std.BufMap, args: []const []const u8) !ChildProcess.ExecResult {
+fn exec(allocator: Allocator, env_map: *std.BufMap, args: []const []const u8) !ChildProcess.ExecResult {
const result = try ChildProcess.exec(.{
.allocator = allocator,
.argv = args,

@@ -1711,7 +1711,7 @@ fn exec(allocator: *Allocator, env_map: *std.BufMap, args: []const []const u8) !
return result;
}

-fn getBuiltinCode(allocator: *Allocator, env_map: *std.BufMap, zig_exe: []const u8) ![]const u8 {
+fn getBuiltinCode(allocator: Allocator, env_map: *std.BufMap, zig_exe: []const u8) ![]const u8 {
const result = try exec(allocator, env_map, &[_][]const u8{ zig_exe, "build-obj", "--show-builtin" });
return result.stdout;
}

@@ -460,7 +460,7 @@ const WindowsThreadImpl = struct {
errdefer assert(windows.kernel32.HeapFree(heap_handle, 0, alloc_ptr) != 0);

const instance_bytes = @ptrCast([*]u8, alloc_ptr)[0..alloc_bytes];
-const instance = std.heap.FixedBufferAllocator.init(instance_bytes).allocator.create(Instance) catch unreachable;
+const instance = std.heap.FixedBufferAllocator.init(instance_bytes).getAllocator().create(Instance) catch unreachable;
instance.* = .{
.fn_args = args,
.thread = .{

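For context, a sketch of how a FixedBufferAllocator is used under the new interface; the buffer size and duplicated string here are arbitrary, and later Zig versions spell the accessor fba.allocator() rather than getAllocator():

const std = @import("std");

test "FixedBufferAllocator hands out an Allocator value" {
    var buffer: [64]u8 = undefined;
    var fba = std.heap.FixedBufferAllocator.init(&buffer);
    const allocator = fba.getAllocator(); // previously: &fba.allocator
    const word = try allocator.dupe(u8, "allocgate");
    defer allocator.free(word);
    try std.testing.expectEqualStrings("allocgate", word);
}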
@@ -79,7 +79,7 @@ pub fn ArrayHashMap(
comptime std.hash_map.verifyContext(Context, K, K, u32);
return struct {
unmanaged: Unmanaged,
-allocator: *Allocator,
+allocator: Allocator,
ctx: Context,

/// The ArrayHashMapUnmanaged type using the same settings as this managed map.

@@ -118,12 +118,12 @@ pub fn ArrayHashMap(
const Self = @This();

/// Create an ArrayHashMap instance which will use a specified allocator.
-pub fn init(allocator: *Allocator) Self {
+pub fn init(allocator: Allocator) Self {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call initContext instead.");
return initContext(allocator, undefined);
}
-pub fn initContext(allocator: *Allocator, ctx: Context) Self {
+pub fn initContext(allocator: Allocator, ctx: Context) Self {
return .{
.unmanaged = .{},
.allocator = allocator,

@@ -383,7 +383,7 @@ pub fn ArrayHashMap(
/// Create a copy of the hash map which can be modified separately.
/// The copy uses the same context as this instance, but the specified
/// allocator.
-pub fn cloneWithAllocator(self: Self, allocator: *Allocator) !Self {
+pub fn cloneWithAllocator(self: Self, allocator: Allocator) !Self {
var other = try self.unmanaged.cloneContext(allocator, self.ctx);
return other.promoteContext(allocator, self.ctx);
}

@@ -396,7 +396,7 @@ pub fn ArrayHashMap(
}
/// Create a copy of the hash map which can be modified separately.
/// The copy uses the specified allocator and context.
-pub fn cloneWithAllocatorAndContext(self: Self, allocator: *Allocator, ctx: anytype) !ArrayHashMap(K, V, @TypeOf(ctx), store_hash) {
+pub fn cloneWithAllocatorAndContext(self: Self, allocator: Allocator, ctx: anytype) !ArrayHashMap(K, V, @TypeOf(ctx), store_hash) {
var other = try self.unmanaged.cloneContext(allocator, ctx);
return other.promoteContext(allocator, ctx);
}

@@ -533,12 +533,12 @@ pub fn ArrayHashMapUnmanaged(

/// Convert from an unmanaged map to a managed map. After calling this,
/// the promoted map should no longer be used.
-pub fn promote(self: Self, allocator: *Allocator) Managed {
+pub fn promote(self: Self, allocator: Allocator) Managed {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call promoteContext instead.");
return self.promoteContext(allocator, undefined);
}
-pub fn promoteContext(self: Self, allocator: *Allocator, ctx: Context) Managed {
+pub fn promoteContext(self: Self, allocator: Allocator, ctx: Context) Managed {
return .{
.unmanaged = self,
.allocator = allocator,

@@ -549,7 +549,7 @@ pub fn ArrayHashMapUnmanaged(
/// Frees the backing allocation and leaves the map in an undefined state.
/// Note that this does not free keys or values. You must take care of that
/// before calling this function, if it is needed.
-pub fn deinit(self: *Self, allocator: *Allocator) void {
+pub fn deinit(self: *Self, allocator: Allocator) void {
self.entries.deinit(allocator);
if (self.index_header) |header| {
header.free(allocator);

@@ -570,7 +570,7 @@ pub fn ArrayHashMapUnmanaged(
}

/// Clears the map and releases the backing allocation
-pub fn clearAndFree(self: *Self, allocator: *Allocator) void {
+pub fn clearAndFree(self: *Self, allocator: Allocator) void {
self.entries.shrinkAndFree(allocator, 0);
if (self.index_header) |header| {
header.free(allocator);

@@ -633,24 +633,24 @@ pub fn ArrayHashMapUnmanaged(
/// Otherwise, puts a new item with undefined value, and
/// the `Entry` pointer points to it. Caller should then initialize
/// the value (but not the key).
-pub fn getOrPut(self: *Self, allocator: *Allocator, key: K) !GetOrPutResult {
+pub fn getOrPut(self: *Self, allocator: Allocator, key: K) !GetOrPutResult {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutContext instead.");
return self.getOrPutContext(allocator, key, undefined);
}
-pub fn getOrPutContext(self: *Self, allocator: *Allocator, key: K, ctx: Context) !GetOrPutResult {
+pub fn getOrPutContext(self: *Self, allocator: Allocator, key: K, ctx: Context) !GetOrPutResult {
const gop = try self.getOrPutContextAdapted(allocator, key, ctx, ctx);
if (!gop.found_existing) {
gop.key_ptr.* = key;
}
return gop;
}
-pub fn getOrPutAdapted(self: *Self, allocator: *Allocator, key: anytype, key_ctx: anytype) !GetOrPutResult {
+pub fn getOrPutAdapted(self: *Self, allocator: Allocator, key: anytype, key_ctx: anytype) !GetOrPutResult {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutContextAdapted instead.");
return self.getOrPutContextAdapted(allocator, key, key_ctx, undefined);
}
-pub fn getOrPutContextAdapted(self: *Self, allocator: *Allocator, key: anytype, key_ctx: anytype, ctx: Context) !GetOrPutResult {
+pub fn getOrPutContextAdapted(self: *Self, allocator: Allocator, key: anytype, key_ctx: anytype, ctx: Context) !GetOrPutResult {
self.ensureTotalCapacityContext(allocator, self.entries.len + 1, ctx) catch |err| {
// "If key exists this function cannot fail."
const index = self.getIndexAdapted(key, key_ctx) orelse return err;

@@ -731,12 +731,12 @@ pub fn ArrayHashMapUnmanaged(
}
}

-pub fn getOrPutValue(self: *Self, allocator: *Allocator, key: K, value: V) !GetOrPutResult {
+pub fn getOrPutValue(self: *Self, allocator: Allocator, key: K, value: V) !GetOrPutResult {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutValueContext instead.");
return self.getOrPutValueContext(allocator, key, value, undefined);
}
-pub fn getOrPutValueContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !GetOrPutResult {
+pub fn getOrPutValueContext(self: *Self, allocator: Allocator, key: K, value: V, ctx: Context) !GetOrPutResult {
const res = try self.getOrPutContextAdapted(allocator, key, ctx, ctx);
if (!res.found_existing) {
res.key_ptr.* = key;

@@ -749,12 +749,12 @@ pub fn ArrayHashMapUnmanaged(

/// Increases capacity, guaranteeing that insertions up until the
/// `expected_count` will not cause an allocation, and therefore cannot fail.
-pub fn ensureTotalCapacity(self: *Self, allocator: *Allocator, new_capacity: usize) !void {
+pub fn ensureTotalCapacity(self: *Self, allocator: Allocator, new_capacity: usize) !void {
if (@sizeOf(ByIndexContext) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call ensureTotalCapacityContext instead.");
return self.ensureTotalCapacityContext(allocator, new_capacity, undefined);
}
-pub fn ensureTotalCapacityContext(self: *Self, allocator: *Allocator, new_capacity: usize, ctx: Context) !void {
+pub fn ensureTotalCapacityContext(self: *Self, allocator: Allocator, new_capacity: usize, ctx: Context) !void {
if (new_capacity <= linear_scan_max) {
try self.entries.ensureTotalCapacity(allocator, new_capacity);
return;

@@ -781,7 +781,7 @@ pub fn ArrayHashMapUnmanaged(
/// therefore cannot fail.
pub fn ensureUnusedCapacity(
self: *Self,
-allocator: *Allocator,
+allocator: Allocator,
additional_capacity: usize,
) !void {
if (@sizeOf(ByIndexContext) != 0)

@@ -790,7 +790,7 @@ pub fn ArrayHashMapUnmanaged(
}
pub fn ensureUnusedCapacityContext(
self: *Self,
-allocator: *Allocator,
+allocator: Allocator,
additional_capacity: usize,
ctx: Context,
) !void {

@@ -808,24 +808,24 @@ pub fn ArrayHashMapUnmanaged(

/// Clobbers any existing data. To detect if a put would clobber
/// existing data, see `getOrPut`.
-pub fn put(self: *Self, allocator: *Allocator, key: K, value: V) !void {
+pub fn put(self: *Self, allocator: Allocator, key: K, value: V) !void {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call putContext instead.");
return self.putContext(allocator, key, value, undefined);
}
-pub fn putContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !void {
+pub fn putContext(self: *Self, allocator: Allocator, key: K, value: V, ctx: Context) !void {
const result = try self.getOrPutContext(allocator, key, ctx);
result.value_ptr.* = value;
}

/// Inserts a key-value pair into the hash map, asserting that no previous
/// entry with the same key is already present
-pub fn putNoClobber(self: *Self, allocator: *Allocator, key: K, value: V) !void {
+pub fn putNoClobber(self: *Self, allocator: Allocator, key: K, value: V) !void {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call putNoClobberContext instead.");
return self.putNoClobberContext(allocator, key, value, undefined);
}
-pub fn putNoClobberContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !void {
+pub fn putNoClobberContext(self: *Self, allocator: Allocator, key: K, value: V, ctx: Context) !void {
const result = try self.getOrPutContext(allocator, key, ctx);
assert(!result.found_existing);
result.value_ptr.* = value;

@@ -859,12 +859,12 @@ pub fn ArrayHashMapUnmanaged(
}

/// Inserts a new `Entry` into the hash map, returning the previous one, if any.
-pub fn fetchPut(self: *Self, allocator: *Allocator, key: K, value: V) !?KV {
+pub fn fetchPut(self: *Self, allocator: Allocator, key: K, value: V) !?KV {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call fetchPutContext instead.");
return self.fetchPutContext(allocator, key, value, undefined);
}
-pub fn fetchPutContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !?KV {
+pub fn fetchPutContext(self: *Self, allocator: Allocator, key: K, value: V, ctx: Context) !?KV {
const gop = try self.getOrPutContext(allocator, key, ctx);
var result: ?KV = null;
if (gop.found_existing) {

@@ -1132,12 +1132,12 @@ pub fn ArrayHashMapUnmanaged(

/// Create a copy of the hash map which can be modified separately.
/// The copy uses the same context and allocator as this instance.
-pub fn clone(self: Self, allocator: *Allocator) !Self {
+pub fn clone(self: Self, allocator: Allocator) !Self {
if (@sizeOf(ByIndexContext) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call cloneContext instead.");
return self.cloneContext(allocator, undefined);
}
-pub fn cloneContext(self: Self, allocator: *Allocator, ctx: Context) !Self {
+pub fn cloneContext(self: Self, allocator: Allocator, ctx: Context) !Self {
var other: Self = .{};
other.entries = try self.entries.clone(allocator);
errdefer other.entries.deinit(allocator);

@@ -1152,12 +1152,12 @@ pub fn ArrayHashMapUnmanaged(

/// Rebuilds the key indexes. If the underlying entries has been modified directly, users
/// can call `reIndex` to update the indexes to account for these new entries.
-pub fn reIndex(self: *Self, allocator: *Allocator) !void {
+pub fn reIndex(self: *Self, allocator: Allocator) !void {
if (@sizeOf(ByIndexContext) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call reIndexContext instead.");
return self.reIndexContext(allocator, undefined);
}
-pub fn reIndexContext(self: *Self, allocator: *Allocator, ctx: Context) !void {
+pub fn reIndexContext(self: *Self, allocator: Allocator, ctx: Context) !void {
if (self.entries.capacity <= linear_scan_max) return;
// We're going to rebuild the index header and replace the existing one (if any). The
// indexes should sized such that they will be at most 60% full.

@@ -1189,12 +1189,12 @@ pub fn ArrayHashMapUnmanaged(

/// Shrinks the underlying `Entry` array to `new_len` elements and discards any associated
/// index entries. Reduces allocated capacity.
-pub fn shrinkAndFree(self: *Self, allocator: *Allocator, new_len: usize) void {
+pub fn shrinkAndFree(self: *Self, allocator: Allocator, new_len: usize) void {
if (@sizeOf(ByIndexContext) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call shrinkAndFreeContext instead.");
return self.shrinkAndFreeContext(allocator, new_len, undefined);
}
-pub fn shrinkAndFreeContext(self: *Self, allocator: *Allocator, new_len: usize, ctx: Context) void {
+pub fn shrinkAndFreeContext(self: *Self, allocator: Allocator, new_len: usize, ctx: Context) void {
// Remove index entries from the new length onwards.
// Explicitly choose to ONLY remove index entries and not the underlying array list
// entries as we're going to remove them in the subsequent shrink call.

@@ -1844,7 +1844,7 @@ const IndexHeader = struct {

/// Allocates an index header, and fills the entryIndexes array with empty.
/// The distance array contents are undefined.
-fn alloc(allocator: *Allocator, new_bit_index: u8) !*IndexHeader {
+fn alloc(allocator: Allocator, new_bit_index: u8) !*IndexHeader {
const len = @as(usize, 1) << @intCast(math.Log2Int(usize), new_bit_index);
const index_size = hash_map.capacityIndexSize(new_bit_index);
const nbytes = @sizeOf(IndexHeader) + index_size * len;

@@ -1858,7 +1858,7 @@ const IndexHeader = struct {
}

/// Releases the memory for a header and its associated arrays.
-fn free(header: *IndexHeader, allocator: *Allocator) void {
+fn free(header: *IndexHeader, allocator: Allocator) void {
const index_size = hash_map.capacityIndexSize(header.bit_index);
const ptr = @ptrCast([*]align(@alignOf(IndexHeader)) u8, header);
const slice = ptr[0 .. @sizeOf(IndexHeader) + header.length() * index_size];

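A short sketch of the two call styles shown above, with assumed key/value types: the managed map stores the Allocator value passed to init, while the unmanaged map takes it on every allocating call:

const std = @import("std");

test "managed and unmanaged array hash maps after allocgate" {
    const allocator = std.testing.allocator;

    // Managed: init copies the Allocator value into the struct.
    var managed = std.AutoArrayHashMap(u32, []const u8).init(allocator);
    defer managed.deinit();
    try managed.put(1, "one");

    // Unmanaged: the Allocator is a parameter of put/deinit/etc.
    var unmanaged = std.AutoArrayHashMapUnmanaged(u32, []const u8){};
    defer unmanaged.deinit(allocator);
    try unmanaged.put(allocator, 2, "two");

    try std.testing.expectEqual(@as(usize, 1), managed.count());
    try std.testing.expectEqual(@as(usize, 1), unmanaged.count());
}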
@@ -42,12 +42,12 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
/// How many T values this list can hold without allocating
/// additional memory.
capacity: usize,
-allocator: *Allocator,
+allocator: Allocator,

pub const Slice = if (alignment) |a| ([]align(a) T) else []T;

/// Deinitialize with `deinit` or use `toOwnedSlice`.
-pub fn init(allocator: *Allocator) Self {
+pub fn init(allocator: Allocator) Self {
return Self{
.items = &[_]T{},
.capacity = 0,

@@ -58,7 +58,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
/// Initialize with capacity to hold at least `num` elements.
/// The resulting capacity is likely to be equal to `num`.
/// Deinitialize with `deinit` or use `toOwnedSlice`.
-pub fn initCapacity(allocator: *Allocator, num: usize) !Self {
+pub fn initCapacity(allocator: Allocator, num: usize) !Self {
var self = Self.init(allocator);
try self.ensureTotalCapacityPrecise(num);
return self;

@@ -74,7 +74,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
/// ArrayList takes ownership of the passed in slice. The slice must have been
/// allocated with `allocator`.
/// Deinitialize with `deinit` or use `toOwnedSlice`.
-pub fn fromOwnedSlice(allocator: *Allocator, slice: Slice) Self {
+pub fn fromOwnedSlice(allocator: Allocator, slice: Slice) Self {
return Self{
.items = slice,
.capacity = slice.len,

@@ -457,33 +457,33 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
/// Initialize with capacity to hold at least num elements.
/// The resulting capacity is likely to be equal to `num`.
/// Deinitialize with `deinit` or use `toOwnedSlice`.
-pub fn initCapacity(allocator: *Allocator, num: usize) !Self {
+pub fn initCapacity(allocator: Allocator, num: usize) !Self {
var self = Self{};
try self.ensureTotalCapacityPrecise(allocator, num);
return self;
}

/// Release all allocated memory.
-pub fn deinit(self: *Self, allocator: *Allocator) void {
+pub fn deinit(self: *Self, allocator: Allocator) void {
allocator.free(self.allocatedSlice());
self.* = undefined;
}

/// Convert this list into an analogous memory-managed one.
/// The returned list has ownership of the underlying memory.
-pub fn toManaged(self: *Self, allocator: *Allocator) ArrayListAligned(T, alignment) {
+pub fn toManaged(self: *Self, allocator: Allocator) ArrayListAligned(T, alignment) {
return .{ .items = self.items, .capacity = self.capacity, .allocator = allocator };
}

/// The caller owns the returned memory. ArrayList becomes empty.
-pub fn toOwnedSlice(self: *Self, allocator: *Allocator) Slice {
+pub fn toOwnedSlice(self: *Self, allocator: Allocator) Slice {
const result = allocator.shrink(self.allocatedSlice(), self.items.len);
self.* = Self{};
return result;
}

/// The caller owns the returned memory. ArrayList becomes empty.
-pub fn toOwnedSliceSentinel(self: *Self, allocator: *Allocator, comptime sentinel: T) ![:sentinel]T {
+pub fn toOwnedSliceSentinel(self: *Self, allocator: Allocator, comptime sentinel: T) ![:sentinel]T {
try self.append(allocator, sentinel);
const result = self.toOwnedSlice(allocator);
return result[0 .. result.len - 1 :sentinel];

@@ -492,7 +492,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
/// Insert `item` at index `n`. Moves `list[n .. list.len]`
/// to higher indices to make room.
/// This operation is O(N).
-pub fn insert(self: *Self, allocator: *Allocator, n: usize, item: T) !void {
+pub fn insert(self: *Self, allocator: Allocator, n: usize, item: T) !void {
try self.ensureUnusedCapacity(allocator, 1);
self.items.len += 1;

@@ -503,7 +503,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
/// Insert slice `items` at index `i`. Moves `list[i .. list.len]` to
/// higher indicices make room.
/// This operation is O(N).
-pub fn insertSlice(self: *Self, allocator: *Allocator, i: usize, items: []const T) !void {
+pub fn insertSlice(self: *Self, allocator: Allocator, i: usize, items: []const T) !void {
try self.ensureUnusedCapacity(allocator, items.len);
self.items.len += items.len;

@@ -515,14 +515,14 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
/// Grows list if `len < new_items.len`.
/// Shrinks list if `len > new_items.len`
/// Invalidates pointers if this ArrayList is resized.
-pub fn replaceRange(self: *Self, allocator: *Allocator, start: usize, len: usize, new_items: []const T) !void {
+pub fn replaceRange(self: *Self, allocator: Allocator, start: usize, len: usize, new_items: []const T) !void {
var managed = self.toManaged(allocator);
try managed.replaceRange(start, len, new_items);
self.* = managed.moveToUnmanaged();
}

/// Extend the list by 1 element. Allocates more memory as necessary.
-pub fn append(self: *Self, allocator: *Allocator, item: T) !void {
+pub fn append(self: *Self, allocator: Allocator, item: T) !void {
const new_item_ptr = try self.addOne(allocator);
new_item_ptr.* = item;
}

@@ -563,7 +563,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ

/// Append the slice of items to the list. Allocates more
/// memory as necessary.
-pub fn appendSlice(self: *Self, allocator: *Allocator, items: []const T) !void {
+pub fn appendSlice(self: *Self, allocator: Allocator, items: []const T) !void {
try self.ensureUnusedCapacity(allocator, items.len);
self.appendSliceAssumeCapacity(items);
}

@@ -580,7 +580,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ

pub const WriterContext = struct {
self: *Self,
-allocator: *Allocator,
+allocator: Allocator,
};

pub const Writer = if (T != u8)

@@ -590,7 +590,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
std.io.Writer(WriterContext, error{OutOfMemory}, appendWrite);

/// Initializes a Writer which will append to the list.
-pub fn writer(self: *Self, allocator: *Allocator) Writer {
+pub fn writer(self: *Self, allocator: Allocator) Writer {
return .{ .context = .{ .self = self, .allocator = allocator } };
}

@@ -603,7 +603,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ

/// Append a value to the list `n` times.
/// Allocates more memory as necessary.
-pub fn appendNTimes(self: *Self, allocator: *Allocator, value: T, n: usize) !void {
+pub fn appendNTimes(self: *Self, allocator: Allocator, value: T, n: usize) !void {
const old_len = self.items.len;
try self.resize(allocator, self.items.len + n);
mem.set(T, self.items[old_len..self.items.len], value);

@@ -621,13 +621,13 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ

/// Adjust the list's length to `new_len`.
/// Does not initialize added items, if any.
-pub fn resize(self: *Self, allocator: *Allocator, new_len: usize) !void {
+pub fn resize(self: *Self, allocator: Allocator, new_len: usize) !void {
try self.ensureTotalCapacity(allocator, new_len);
self.items.len = new_len;
}

/// Reduce allocated capacity to `new_len`.
-pub fn shrinkAndFree(self: *Self, allocator: *Allocator, new_len: usize) void {
+pub fn shrinkAndFree(self: *Self, allocator: Allocator, new_len: usize) void {
assert(new_len <= self.items.len);

self.items = allocator.realloc(self.allocatedSlice(), new_len) catch |e| switch (e) {

@@ -653,7 +653,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
}

/// Invalidates all element pointers.
-pub fn clearAndFree(self: *Self, allocator: *Allocator) void {
+pub fn clearAndFree(self: *Self, allocator: Allocator) void {
allocator.free(self.allocatedSlice());
self.items.len = 0;
self.capacity = 0;

@@ -663,7 +663,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ

/// Modify the array so that it can hold at least `new_capacity` items.
/// Invalidates pointers if additional memory is needed.
-pub fn ensureTotalCapacity(self: *Self, allocator: *Allocator, new_capacity: usize) !void {
+pub fn ensureTotalCapacity(self: *Self, allocator: Allocator, new_capacity: usize) !void {
var better_capacity = self.capacity;
if (better_capacity >= new_capacity) return;

@@ -679,7 +679,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
/// Like `ensureTotalCapacity`, but the resulting capacity is much more likely
/// (but not guaranteed) to be equal to `new_capacity`.
/// Invalidates pointers if additional memory is needed.
-pub fn ensureTotalCapacityPrecise(self: *Self, allocator: *Allocator, new_capacity: usize) !void {
+pub fn ensureTotalCapacityPrecise(self: *Self, allocator: Allocator, new_capacity: usize) !void {
if (self.capacity >= new_capacity) return;

const new_memory = try allocator.reallocAtLeast(self.allocatedSlice(), new_capacity);

@@ -691,7 +691,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
/// Invalidates pointers if additional memory is needed.
pub fn ensureUnusedCapacity(
self: *Self,
-allocator: *Allocator,
+allocator: Allocator,
additional_count: usize,
) !void {
return self.ensureTotalCapacity(allocator, self.items.len + additional_count);

@@ -706,7 +706,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ

/// Increase length by 1, returning pointer to the new item.
/// The returned pointer becomes invalid when the list resized.
-pub fn addOne(self: *Self, allocator: *Allocator) !*T {
+pub fn addOne(self: *Self, allocator: Allocator) !*T {
const newlen = self.items.len + 1;
try self.ensureTotalCapacity(allocator, newlen);
return self.addOneAssumeCapacity();

@@ -726,7 +726,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
/// Resize the array, adding `n` new elements, which have `undefined` values.
/// The return value is an array pointing to the newly allocated elements.
/// The returned pointer becomes invalid when the list is resized.
-pub fn addManyAsArray(self: *Self, allocator: *Allocator, comptime n: usize) !*[n]T {
+pub fn addManyAsArray(self: *Self, allocator: Allocator, comptime n: usize) !*[n]T {
const prev_len = self.items.len;
try self.resize(allocator, self.items.len + n);
return self.items[prev_len..][0..n];

@@ -1119,7 +1119,7 @@ test "std.ArrayList/ArrayListUnmanaged.insertSlice" {
test "std.ArrayList/ArrayListUnmanaged.replaceRange" {
var arena = std.heap.ArenaAllocator.init(testing.allocator);
defer arena.deinit();
-const a = &arena.allocator;
+const a = arena.getAllocator();

const init = [_]i32{ 1, 2, 3, 4, 5 };
const new = [_]i32{ 0, 0, 0 };

@@ -1263,7 +1263,7 @@ test "std.ArrayList/ArrayListUnmanaged.shrink still sets length on error.OutOfMe
// use an arena allocator to make sure realloc returns error.OutOfMemory
var arena = std.heap.ArenaAllocator.init(testing.allocator);
defer arena.deinit();
-const a = &arena.allocator;
+const a = arena.getAllocator();

{
var list = ArrayList(i32).init(a);

@@ -1361,7 +1361,7 @@ test "ArrayListAligned/ArrayListAlignedUnmanaged accepts unaligned slices" {

test "std.ArrayList(u0)" {
// An ArrayList on zero-sized types should not need to allocate
-const a = &testing.FailingAllocator.init(testing.allocator, 0).allocator;
+const a = testing.FailingAllocator.init(testing.allocator, 0).getAllocator();

var list = ArrayList(u0).init(a);
defer list.deinit();

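The same split applies to ArrayList; a minimal sketch of the unmanaged variant, where every growing operation now takes an Allocator value:

const std = @import("std");

test "ArrayListUnmanaged takes the Allocator on each call" {
    const allocator = std.testing.allocator;
    var list = std.ArrayListUnmanaged(u8){};
    defer list.deinit(allocator);
    try list.appendSlice(allocator, "alloc");
    try list.append(allocator, '!');
    try std.testing.expectEqualStrings("alloc!", list.items);
}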
@@ -301,7 +301,7 @@ test "lowerString" {

/// Allocates a lower case copy of `ascii_string`.
/// Caller owns returned string and must free with `allocator`.
-pub fn allocLowerString(allocator: *std.mem.Allocator, ascii_string: []const u8) ![]u8 {
+pub fn allocLowerString(allocator: std.mem.Allocator, ascii_string: []const u8) ![]u8 {
const result = try allocator.alloc(u8, ascii_string.len);
return lowerString(result, ascii_string);
}

@@ -330,7 +330,7 @@ test "upperString" {

/// Allocates an upper case copy of `ascii_string`.
/// Caller owns returned string and must free with `allocator`.
-pub fn allocUpperString(allocator: *std.mem.Allocator, ascii_string: []const u8) ![]u8 {
+pub fn allocUpperString(allocator: std.mem.Allocator, ascii_string: []const u8) ![]u8 {
const result = try allocator.alloc(u8, ascii_string.len);
return upperString(result, ascii_string);
}

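Usage is otherwise unchanged; a small sketch with an arbitrary input string:

const std = @import("std");

test "allocLowerString with an Allocator value" {
    const allocator = std.testing.allocator;
    const lower = try std.ascii.allocLowerString(allocator, "AllocGate");
    defer allocator.free(lower);
    try std.testing.expectEqualStrings("allocgate", lower);
}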
@@ -156,7 +156,7 @@ pub fn Queue(comptime T: type) type {
}

const Context = struct {
-allocator: *std.mem.Allocator,
+allocator: std.mem.Allocator,
queue: *Queue(i32),
put_sum: isize,
get_sum: isize,

@@ -176,8 +176,8 @@ test "std.atomic.Queue" {
var plenty_of_memory = try std.heap.page_allocator.alloc(u8, 300 * 1024);
defer std.heap.page_allocator.free(plenty_of_memory);

-var fixed_buffer_allocator = std.heap.ThreadSafeFixedBufferAllocator.init(plenty_of_memory);
-var a = &fixed_buffer_allocator.allocator;
+var fixed_buffer_allocator = std.heap.FixedBufferAllocator.init(plenty_of_memory);
+var a = fixed_buffer_allocator.getThreadSafeAllocator();

var queue = Queue(i32).init();
var context = Context{

@@ -69,7 +69,7 @@ pub fn Stack(comptime T: type) type {
}

const Context = struct {
-allocator: *std.mem.Allocator,
+allocator: std.mem.Allocator,
stack: *Stack(i32),
put_sum: isize,
get_sum: isize,

@@ -88,8 +88,8 @@ test "std.atomic.stack" {
var plenty_of_memory = try std.heap.page_allocator.alloc(u8, 300 * 1024);
defer std.heap.page_allocator.free(plenty_of_memory);

-var fixed_buffer_allocator = std.heap.ThreadSafeFixedBufferAllocator.init(plenty_of_memory);
-var a = &fixed_buffer_allocator.allocator;
+var fixed_buffer_allocator = std.heap.FixedBufferAllocator.init(plenty_of_memory);
+var a = fixed_buffer_allocator.getThreadSafeAllocator();

var stack = Stack(i32).init();
var context = Context{

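The two tests above replace the dedicated ThreadSafeFixedBufferAllocator with a thread-safe view of a plain FixedBufferAllocator. A sketch of that pattern with an arbitrary buffer size; the getThreadSafeAllocator accessor follows this commit, and later Zig versions spell it threadSafeAllocator():

const std = @import("std");

test "thread-safe Allocator view of a FixedBufferAllocator" {
    var buffer: [1024]u8 = undefined;
    var fba = std.heap.FixedBufferAllocator.init(&buffer);
    const allocator = fba.getThreadSafeAllocator();
    const nums = try allocator.alloc(u32, 4);
    defer allocator.free(nums);
    nums[0] = 7;
    try std.testing.expectEqual(@as(u32, 7), nums[0]);
}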
@@ -476,7 +476,7 @@ pub const DynamicBitSetUnmanaged = struct {

/// Creates a bit set with no elements present.
/// If bit_length is not zero, deinit must eventually be called.
-pub fn initEmpty(bit_length: usize, allocator: *Allocator) !Self {
+pub fn initEmpty(bit_length: usize, allocator: Allocator) !Self {
var self = Self{};
try self.resize(bit_length, false, allocator);
return self;

@@ -484,7 +484,7 @@ pub const DynamicBitSetUnmanaged = struct {

/// Creates a bit set with all elements present.
/// If bit_length is not zero, deinit must eventually be called.
-pub fn initFull(bit_length: usize, allocator: *Allocator) !Self {
+pub fn initFull(bit_length: usize, allocator: Allocator) !Self {
var self = Self{};
try self.resize(bit_length, true, allocator);
return self;

@@ -493,7 +493,7 @@ pub const DynamicBitSetUnmanaged = struct {
/// Resizes to a new bit_length. If the new length is larger
/// than the old length, fills any added bits with `fill`.
/// If new_len is not zero, deinit must eventually be called.
-pub fn resize(self: *@This(), new_len: usize, fill: bool, allocator: *Allocator) !void {
+pub fn resize(self: *@This(), new_len: usize, fill: bool, allocator: Allocator) !void {
const old_len = self.bit_length;

const old_masks = numMasks(old_len);

@@ -556,12 +556,12 @@ pub const DynamicBitSetUnmanaged = struct {
/// deinitializes the array and releases its memory.
/// The passed allocator must be the same one used for
/// init* or resize in the past.
-pub fn deinit(self: *Self, allocator: *Allocator) void {
+pub fn deinit(self: *Self, allocator: Allocator) void {
self.resize(0, false, allocator) catch unreachable;
}

/// Creates a duplicate of this bit set, using the new allocator.
-pub fn clone(self: *const Self, new_allocator: *Allocator) !Self {
+pub fn clone(self: *const Self, new_allocator: Allocator) !Self {
const num_masks = numMasks(self.bit_length);
var copy = Self{};
try copy.resize(self.bit_length, false, new_allocator);

@@ -742,13 +742,13 @@ pub const DynamicBitSet = struct {
pub const ShiftInt = std.math.Log2Int(MaskInt);

/// The allocator used by this bit set
-allocator: *Allocator,
+allocator: Allocator,

/// The number of valid items in this bit set
unmanaged: DynamicBitSetUnmanaged = .{},

/// Creates a bit set with no elements present.
-pub fn initEmpty(bit_length: usize, allocator: *Allocator) !Self {
+pub fn initEmpty(bit_length: usize, allocator: Allocator) !Self {
return Self{
.unmanaged = try DynamicBitSetUnmanaged.initEmpty(bit_length, allocator),
.allocator = allocator,

@@ -756,7 +756,7 @@ pub const DynamicBitSet = struct {
}

/// Creates a bit set with all elements present.
-pub fn initFull(bit_length: usize, allocator: *Allocator) !Self {
+pub fn initFull(bit_length: usize, allocator: Allocator) !Self {
return Self{
.unmanaged = try DynamicBitSetUnmanaged.initFull(bit_length, allocator),
.allocator = allocator,

@@ -777,7 +777,7 @@ pub const DynamicBitSet = struct {
}

/// Creates a duplicate of this bit set, using the new allocator.
-pub fn clone(self: *const Self, new_allocator: *Allocator) !Self {
+pub fn clone(self: *const Self, new_allocator: Allocator) !Self {
return Self{
.unmanaged = try self.unmanaged.clone(new_allocator),
.allocator = new_allocator,

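A sketch of the managed DynamicBitSet under the new signatures; note that in this commit initEmpty still takes the length before the allocator:

const std = @import("std");

test "DynamicBitSet.initEmpty takes an Allocator value" {
    var bits = try std.bit_set.DynamicBitSet.initEmpty(10, std.testing.allocator);
    defer bits.deinit();
    bits.set(3);
    try std.testing.expect(bits.isSet(3));
    try std.testing.expect(!bits.isSet(4));
}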
@@ -14,7 +14,7 @@ pub const BufMap = struct {
/// Create a BufMap backed by a specific allocator.
/// That allocator will be used for both backing allocations
/// and string deduplication.
-pub fn init(allocator: *Allocator) BufMap {
+pub fn init(allocator: Allocator) BufMap {
var self = BufMap{ .hash_map = BufMapHashMap.init(allocator) };
return self;
}

@@ -16,7 +16,7 @@ pub const BufSet = struct {
/// Create a BufSet using an allocator. The allocator will
/// be used internally for both backing allocations and
/// string duplication.
-pub fn init(a: *Allocator) BufSet {
+pub fn init(a: Allocator) BufSet {
var self = BufSet{ .hash_map = BufSetHashMap.init(a) };
return self;
}

@@ -67,7 +67,7 @@ pub const BufSet = struct {
}

/// Get the allocator used by this set
-pub fn allocator(self: *const BufSet) *Allocator {
+pub fn allocator(self: *const BufSet) Allocator {
return self.hash_map.allocator;
}

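A sketch of BufSet with the value-type allocator; the insert/contains method names are assumed from the std of this era:

const std = @import("std");

test "BufSet.init with an Allocator value" {
    var set = std.BufSet.init(std.testing.allocator);
    defer set.deinit();
    try set.insert("argon2");
    try std.testing.expect(set.contains("argon2"));
    try std.testing.expect(!set.contains("scrypt"));
}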
@@ -28,7 +28,7 @@ pub const OptionsStep = @import("build/OptionsStep.zig");
pub const Builder = struct {
install_tls: TopLevelStep,
uninstall_tls: TopLevelStep,
-allocator: *Allocator,
+allocator: Allocator,
user_input_options: UserInputOptionsMap,
available_options_map: AvailableOptionsMap,
available_options_list: ArrayList(AvailableOption),

@@ -134,7 +134,7 @@ pub const Builder = struct {
};

pub fn create(
-allocator: *Allocator,
+allocator: Allocator,
zig_exe: []const u8,
build_root: []const u8,
cache_root: []const u8,

@@ -1285,7 +1285,7 @@ test "builder.findProgram compiles" {
defer arena.deinit();

const builder = try Builder.create(
-&arena.allocator,
+arena.getAllocator(),
"zig",
"zig-cache",
"zig-cache",

@@ -3077,7 +3077,7 @@ pub const Step = struct {
custom,
};

-pub fn init(id: Id, name: []const u8, allocator: *Allocator, makeFn: fn (*Step) anyerror!void) Step {
+pub fn init(id: Id, name: []const u8, allocator: Allocator, makeFn: fn (*Step) anyerror!void) Step {
return Step{
.id = id,
.name = allocator.dupe(u8, name) catch unreachable,

@@ -3087,7 +3087,7 @@ pub const Step = struct {
.done_flag = false,
};
}
-pub fn initNoOp(id: Id, name: []const u8, allocator: *Allocator) Step {
+pub fn initNoOp(id: Id, name: []const u8, allocator: Allocator) Step {
return init(id, name, allocator, makeNoOp);
}

@@ -3114,7 +3114,7 @@ pub const Step = struct {
}
};

-fn doAtomicSymLinks(allocator: *Allocator, output_path: []const u8, filename_major_only: []const u8, filename_name_only: []const u8) !void {
+fn doAtomicSymLinks(allocator: Allocator, output_path: []const u8, filename_major_only: []const u8, filename_name_only: []const u8) !void {
const out_dir = fs.path.dirname(output_path) orelse ".";
const out_basename = fs.path.basename(output_path);
// sym link for libfoo.so.1 to libfoo.so.1.2.3

@@ -3138,7 +3138,7 @@ fn doAtomicSymLinks(allocator: *Allocator, output_path: []const u8, filename_maj
}

/// Returned slice must be freed by the caller.
-fn findVcpkgRoot(allocator: *Allocator) !?[]const u8 {
+fn findVcpkgRoot(allocator: Allocator) !?[]const u8 {
const appdata_path = try fs.getAppDataDir(allocator, "vcpkg");
defer allocator.free(appdata_path);

@@ -3207,7 +3207,7 @@ test "Builder.dupePkg()" {
var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
defer arena.deinit();
var builder = try Builder.create(
-&arena.allocator,
+arena.getAllocator(),
"test",
"test",
"test",

@@ -3252,7 +3252,7 @@ test "LibExeObjStep.addPackage" {
defer arena.deinit();

var builder = try Builder.create(
-&arena.allocator,
+arena.getAllocator(),
"test",
"test",
"test",

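The build.zig tests above switch from &arena.allocator to arena.getAllocator(); the same pattern applies to any ArenaAllocator user. A minimal sketch:

const std = @import("std");

test "ArenaAllocator hands out an Allocator value" {
    var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
    defer arena.deinit();
    const allocator = arena.getAllocator(); // previously: &arena.allocator
    const nums = try allocator.alloc(u64, 8);
    nums[0] = 42;
    try std.testing.expectEqual(@as(u64, 42), nums[0]);
}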
@@ -40,7 +40,7 @@ const BinaryElfOutput = struct {
self.segments.deinit();
}

-pub fn parse(allocator: *Allocator, elf_file: File) !Self {
+pub fn parse(allocator: Allocator, elf_file: File) !Self {
var self: Self = .{
.segments = ArrayList(*BinaryElfSegment).init(allocator),
.sections = ArrayList(*BinaryElfSection).init(allocator),

@@ -298,7 +298,7 @@ fn containsValidAddressRange(segments: []*BinaryElfSegment) bool {
return true;
}

-fn emitRaw(allocator: *Allocator, elf_path: []const u8, raw_path: []const u8, format: RawFormat) !void {
+fn emitRaw(allocator: Allocator, elf_path: []const u8, raw_path: []const u8, format: RawFormat) !void {
var elf_file = try fs.cwd().openFile(elf_path, .{});
defer elf_file.close();

@@ -274,7 +274,7 @@ test "OptionsStep" {
var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
defer arena.deinit();
var builder = try Builder.create(
-&arena.allocator,
+arena.getAllocator(),
"test",
"test",
"test",

@@ -75,7 +75,7 @@ pub const StackTrace = struct {
};
const tty_config = std.debug.detectTTYConfig();
try writer.writeAll("\n");
-std.debug.writeStackTrace(self, writer, &arena.allocator, debug_info, tty_config) catch |err| {
+std.debug.writeStackTrace(self, writer, arena.getAllocator(), debug_info, tty_config) catch |err| {
try writer.print("Unable to print stack trace: {s}\n", .{@errorName(err)});
};
try writer.writeAll("\n");

@@ -23,7 +23,7 @@ pub const ChildProcess = struct {
handle: if (builtin.os.tag == .windows) windows.HANDLE else void,
thread_handle: if (builtin.os.tag == .windows) windows.HANDLE else void,

-allocator: *mem.Allocator,
+allocator: mem.Allocator,

stdin: ?File,
stdout: ?File,

@@ -90,7 +90,7 @@ pub const ChildProcess = struct {

/// First argument in argv is the executable.
/// On success must call deinit.
-pub fn init(argv: []const []const u8, allocator: *mem.Allocator) !*ChildProcess {
+pub fn init(argv: []const []const u8, allocator: mem.Allocator) !*ChildProcess {
const child = try allocator.create(ChildProcess);
child.* = ChildProcess{
.allocator = allocator,

@@ -329,7 +329,7 @@ pub const ChildProcess = struct {
/// Spawns a child process, waits for it, collecting stdout and stderr, and then returns.
/// If it succeeds, the caller owns result.stdout and result.stderr memory.
pub fn exec(args: struct {
-allocator: *mem.Allocator,
+allocator: mem.Allocator,
argv: []const []const u8,
cwd: ?[]const u8 = null,
cwd_dir: ?fs.Dir = null,

@@ -541,7 +541,7 @@ pub const ChildProcess = struct {

var arena_allocator = std.heap.ArenaAllocator.init(self.allocator);
defer arena_allocator.deinit();
-const arena = &arena_allocator.allocator;
+const arena = arena_allocator.getAllocator();

// The POSIX standard does not allow malloc() between fork() and execve(),
// and `self.allocator` may be a libc allocator.

@@ -931,7 +931,7 @@ fn windowsCreateProcess(app_name: [*:0]u16, cmd_line: [*:0]u16, envp_ptr: ?[*]u1
}

/// Caller must dealloc.
-fn windowsCreateCommandLine(allocator: *mem.Allocator, argv: []const []const u8) ![:0]u8 {
+fn windowsCreateCommandLine(allocator: mem.Allocator, argv: []const []const u8) ![:0]u8 {
var buf = std.ArrayList(u8).init(allocator);
defer buf.deinit();

@@ -1081,7 +1081,7 @@ fn readIntFd(fd: i32) !ErrInt {
}

/// Caller must free result.
-pub fn createWindowsEnvBlock(allocator: *mem.Allocator, env_map: *const BufMap) ![]u16 {
+pub fn createWindowsEnvBlock(allocator: mem.Allocator, env_map: *const BufMap) ![]u16 {
// count bytes needed
const max_chars_needed = x: {
var max_chars_needed: usize = 4; // 4 for the final 4 null bytes

@@ -1117,7 +1117,7 @@ pub fn createWindowsEnvBlock(allocator: *mem.Allocator, env_map: *const BufMap)
return allocator.shrink(result, i);
}

-pub fn createNullDelimitedEnvMap(arena: *mem.Allocator, env_map: *const std.BufMap) ![:null]?[*:0]u8 {
+pub fn createNullDelimitedEnvMap(arena: mem.Allocator, env_map: *const std.BufMap) ![:null]?[*:0]u8 {
const envp_count = env_map.count();
const envp_buf = try arena.allocSentinel(?[*:0]u8, envp_count, null);
{

@@ -1149,7 +1149,7 @@ test "createNullDelimitedEnvMap" {

var arena = std.heap.ArenaAllocator.init(allocator);
defer arena.deinit();
-const environ = try createNullDelimitedEnvMap(&arena.allocator, &envmap);
+const environ = try createNullDelimitedEnvMap(arena.getAllocator(), &envmap);

try testing.expectEqual(@as(usize, 5), environ.len);

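A sketch of calling ChildProcess.exec with the new allocator field; the echo argv is an arbitrary example and the getAllocator() accessor matches this commit:

const std = @import("std");

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    const allocator = gpa.getAllocator();

    const result = try std.ChildProcess.exec(.{
        .allocator = allocator,
        .argv = &[_][]const u8{ "echo", "hello" },
    });
    defer allocator.free(result.stdout);
    defer allocator.free(result.stderr);
    std.debug.print("{s}", .{result.stdout});
}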
@@ -98,7 +98,7 @@ pub const CoffError = error{
// Official documentation of the format: https://docs.microsoft.com/en-us/windows/win32/debug/pe-format
pub const Coff = struct {
in_file: File,
-allocator: *mem.Allocator,
+allocator: mem.Allocator,

coff_header: CoffHeader,
pe_header: OptionalHeader,

@@ -107,7 +107,7 @@ pub const Coff = struct {
guid: [16]u8,
age: u32,

-pub fn init(allocator: *mem.Allocator, in_file: File) Coff {
+pub fn init(allocator: mem.Allocator, in_file: File) Coff {
return Coff{
.in_file = in_file,
.allocator = allocator,

@@ -324,7 +324,7 @@ pub const Coff = struct {
}

// Return an owned slice full of the section data
-pub fn getSectionData(self: *Coff, comptime name: []const u8, allocator: *mem.Allocator) ![]u8 {
+pub fn getSectionData(self: *Coff, comptime name: []const u8, allocator: mem.Allocator) ![]u8 {
const sec = for (self.sections.items) |*sec| {
if (mem.eql(u8, sec.header.name[0..name.len], name)) {
break sec;

@@ -24,7 +24,7 @@ pub fn GzipStream(comptime ReaderType: type) type {
error{ CorruptedData, WrongChecksum };
pub const Reader = io.Reader(*Self, Error, read);

-allocator: *mem.Allocator,
+allocator: mem.Allocator,
inflater: deflate.InflateStream(ReaderType),
in_reader: ReaderType,
hasher: std.hash.Crc32,

@@ -37,7 +37,7 @@ pub fn GzipStream(comptime ReaderType: type) type {
modification_time: u32,
},

-fn init(allocator: *mem.Allocator, source: ReaderType) !Self {
+fn init(allocator: mem.Allocator, source: ReaderType) !Self {
// gzip header format is specified in RFC1952
const header = try source.readBytesNoEof(10);

@@ -152,7 +152,7 @@ pub fn GzipStream(comptime ReaderType: type) type {
};
}

-pub fn gzipStream(allocator: *mem.Allocator, reader: anytype) !GzipStream(@TypeOf(reader)) {
+pub fn gzipStream(allocator: mem.Allocator, reader: anytype) !GzipStream(@TypeOf(reader)) {
return GzipStream(@TypeOf(reader)).init(allocator, reader);
}

@@ -17,13 +17,13 @@ pub fn ZlibStream(comptime ReaderType: type) type {
error{ WrongChecksum, Unsupported };
pub const Reader = io.Reader(*Self, Error, read);

-allocator: *mem.Allocator,
+allocator: mem.Allocator,
inflater: deflate.InflateStream(ReaderType),
in_reader: ReaderType,
hasher: std.hash.Adler32,
window_slice: []u8,

-fn init(allocator: *mem.Allocator, source: ReaderType) !Self {
+fn init(allocator: mem.Allocator, source: ReaderType) !Self {
// Zlib header format is specified in RFC1950
const header = try source.readBytesNoEof(2);

@@ -88,7 +88,7 @@ pub fn ZlibStream(comptime ReaderType: type) type {
};
}

-pub fn zlibStream(allocator: *mem.Allocator, reader: anytype) !ZlibStream(@TypeOf(reader)) {
+pub fn zlibStream(allocator: mem.Allocator, reader: anytype) !ZlibStream(@TypeOf(reader)) {
return ZlibStream(@TypeOf(reader)).init(allocator, reader);
}

@@ -201,7 +201,7 @@ fn initBlocks(
}

fn processBlocks(
-allocator: *mem.Allocator,
+allocator: mem.Allocator,
blocks: *Blocks,
time: u32,
memory: u32,

@@ -240,7 +240,7 @@ fn processBlocksSt(
}

fn processBlocksMt(
-allocator: *mem.Allocator,
+allocator: mem.Allocator,
blocks: *Blocks,
time: u32,
memory: u32,

@@ -480,7 +480,7 @@ fn indexAlpha(
///
/// Salt has to be at least 8 bytes length.
pub fn kdf(
-allocator: *mem.Allocator,
+allocator: mem.Allocator,
derived_key: []u8,
password: []const u8,
salt: []const u8,

@@ -524,7 +524,7 @@ const PhcFormatHasher = struct {
};

pub fn create(
-allocator: *mem.Allocator,
+allocator: mem.Allocator,
password: []const u8,
params: Params,
mode: Mode,

@@ -550,7 +550,7 @@ const PhcFormatHasher = struct {
}

pub fn verify(
-allocator: *mem.Allocator,
+allocator: mem.Allocator,
str: []const u8,
password: []const u8,
) HasherError!void {

@@ -579,7 +579,7 @@ const PhcFormatHasher = struct {
///
/// Only phc encoding is supported.
pub const HashOptions = struct {
-allocator: ?*mem.Allocator,
+allocator: ?mem.Allocator,
params: Params,
mode: Mode = .argon2id,
encoding: pwhash.Encoding = .phc,

@@ -609,7 +609,7 @@ pub fn strHash(
///
/// Allocator is required for argon2.
pub const VerifyOptions = struct {
-allocator: ?*mem.Allocator,
+allocator: ?mem.Allocator,
};

/// Verify that a previously computed hash is valid for a given password.

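Optional allocators follow the same rule: ?*mem.Allocator becomes ?mem.Allocator. A hypothetical options struct (not the argon2 API itself) showing the unwrap-and-use pattern:

const std = @import("std");

const Options = struct {
    allocator: ?std.mem.Allocator = null, // was ?*std.mem.Allocator before this refactor
    iterations: u32 = 3,
};

test "optional Allocator field stored by value" {
    const opts = Options{ .allocator = std.testing.allocator };
    const allocator = opts.allocator orelse return error.AllocatorRequired;
    const buf = try allocator.alloc(u8, 16);
    defer allocator.free(buf);
    try std.testing.expectEqual(@as(usize, 16), buf.len);
}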
@@ -368,7 +368,7 @@ const CryptFormatHasher = struct {

/// Options for hashing a password.
pub const HashOptions = struct {
-allocator: ?*mem.Allocator = null,
+allocator: ?mem.Allocator = null,
params: Params,
encoding: pwhash.Encoding,
};

@@ -394,7 +394,7 @@ pub fn strHash(

/// Options for hash verification.
pub const VerifyOptions = struct {
-allocator: ?*mem.Allocator = null,
+allocator: ?mem.Allocator = null,
};

/// Verify that a previously computed hash is valid for a given password.

@@ -363,7 +363,7 @@ pub fn main() !void {

var buffer: [1024]u8 = undefined;
var fixed = std.heap.FixedBufferAllocator.init(buffer[0..]);
-const args = try std.process.argsAlloc(&fixed.allocator);
+const args = try std.process.argsAlloc(fixed.getAllocator());

var filter: ?[]u8 = "";

@@ -161,7 +161,7 @@ pub const Params = struct {
///
/// scrypt is defined in RFC 7914.
///
-/// allocator: *mem.Allocator.
+/// allocator: mem.Allocator.
///
/// derived_key: Slice of appropriate size for generated key. Generally 16 or 32 bytes in length.
/// May be uninitialized. All bytes will be overwritten.

@@ -173,7 +173,7 @@ pub const Params = struct {
///
/// params: Params.
pub fn kdf(
-allocator: *mem.Allocator,
+allocator: mem.Allocator,
derived_key: []u8,
password: []const u8,
salt: []const u8,

@@ -406,7 +406,7 @@ const PhcFormatHasher = struct {

/// Return a non-deterministic hash of the password encoded as a PHC-format string
pub fn create(
-allocator: *mem.Allocator,
+allocator: mem.Allocator,
password: []const u8,
params: Params,
buf: []u8,

@@ -429,7 +429,7 @@ const PhcFormatHasher = struct {

/// Verify a password against a PHC-format encoded string
pub fn verify(
-allocator: *mem.Allocator,
+allocator: mem.Allocator,
str: []const u8,
password: []const u8,
) HasherError!void {

@@ -455,7 +455,7 @@ const CryptFormatHasher = struct {

/// Return a non-deterministic hash of the password encoded into the modular crypt format
pub fn create(
-allocator: *mem.Allocator,
+allocator: mem.Allocator,
password: []const u8,
params: Params,
buf: []u8,

@@ -478,7 +478,7 @@ const CryptFormatHasher = struct {

/// Verify a password against a string in modular crypt format
pub fn verify(
-allocator: *mem.Allocator,
+allocator: mem.Allocator,
str: []const u8,
password: []const u8,
) HasherError!void {

@@ -497,7 +497,7 @@ const CryptFormatHasher = struct {
///
/// Allocator is required for scrypt.
pub const HashOptions = struct {
-allocator: ?*mem.Allocator,
+allocator: ?mem.Allocator,
params: Params,
encoding: pwhash.Encoding,
};

@@ -520,7 +520,7 @@ pub fn strHash(
///
/// Allocator is required for scrypt.
pub const VerifyOptions = struct {
-allocator: ?*mem.Allocator,
+allocator: ?mem.Allocator,
};

/// Verify that a previously computed hash is valid for a given password.

@ -33,7 +33,7 @@ fn testCStrFnsImpl() !void {

/// Returns a mutable, null-terminated slice with the same length as `slice`.
/// Caller owns the returned memory.
pub fn addNullByte(allocator: *mem.Allocator, slice: []const u8) ![:0]u8 {
pub fn addNullByte(allocator: mem.Allocator, slice: []const u8) ![:0]u8 {
const result = try allocator.alloc(u8, slice.len + 1);
mem.copy(u8, result, slice);
result[slice.len] = 0;

@ -48,13 +48,13 @@ test "addNullByte" {

}

pub const NullTerminated2DArray = struct {
allocator: *mem.Allocator,
allocator: mem.Allocator,
byte_count: usize,
ptr: ?[*:null]?[*:0]u8,

/// Takes N lists of strings, concatenates the lists together, and adds a null terminator
/// Caller must deinit result
pub fn fromSlices(allocator: *mem.Allocator, slices: []const []const []const u8) !NullTerminated2DArray {
pub fn fromSlices(allocator: mem.Allocator, slices: []const []const []const u8) !NullTerminated2DArray {
var new_len: usize = 1; // 1 for the list null
var byte_count: usize = 0;
for (slices) |slice| {
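With the interface passed by value, call sites simply forward the Allocator struct instead of a pointer to it. A small usage sketch for addNullByte as changed above; the test name is illustrative, and the returned slice must still be freed with the same allocator:

const std = @import("std");
const cstr = std.cstr;

test "addNullByte with a by-value allocator" {
    const allocator = std.testing.allocator;

    const terminated = try cstr.addNullByte(allocator, "hello");
    defer allocator.free(terminated);

    // The slice keeps its length but gains a 0 sentinel one past the end.
    try std.testing.expectEqual(@as(usize, 5), terminated.len);
    try std.testing.expectEqual(@as(u8, 0), terminated[terminated.len]);
}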
@ -29,7 +29,7 @@ pub const LineInfo = struct {
|
||||
line: u64,
|
||||
column: u64,
|
||||
file_name: []const u8,
|
||||
allocator: ?*mem.Allocator,
|
||||
allocator: ?mem.Allocator,
|
||||
|
||||
pub fn deinit(self: LineInfo) void {
|
||||
const allocator = self.allocator orelse return;
|
||||
@ -339,7 +339,7 @@ const RESET = "\x1b[0m";
|
||||
pub fn writeStackTrace(
|
||||
stack_trace: std.builtin.StackTrace,
|
||||
out_stream: anytype,
|
||||
allocator: *mem.Allocator,
|
||||
allocator: mem.Allocator,
|
||||
debug_info: *DebugInfo,
|
||||
tty_config: TTY.Config,
|
||||
) !void {
|
||||
@ -662,7 +662,7 @@ pub const OpenSelfDebugInfoError = error{
|
||||
};
|
||||
|
||||
/// TODO resources https://github.com/ziglang/zig/issues/4353
|
||||
pub fn openSelfDebugInfo(allocator: *mem.Allocator) anyerror!DebugInfo {
|
||||
pub fn openSelfDebugInfo(allocator: mem.Allocator) anyerror!DebugInfo {
|
||||
nosuspend {
|
||||
if (builtin.strip_debug_info)
|
||||
return error.MissingDebugInfo;
|
||||
@ -688,7 +688,7 @@ pub fn openSelfDebugInfo(allocator: *mem.Allocator) anyerror!DebugInfo {
|
||||
/// it themselves, even on error.
|
||||
/// TODO resources https://github.com/ziglang/zig/issues/4353
|
||||
/// TODO it's weird to take ownership even on error, rework this code.
|
||||
fn readCoffDebugInfo(allocator: *mem.Allocator, coff_file: File) !ModuleDebugInfo {
|
||||
fn readCoffDebugInfo(allocator: mem.Allocator, coff_file: File) !ModuleDebugInfo {
|
||||
nosuspend {
|
||||
errdefer coff_file.close();
|
||||
|
||||
@ -755,7 +755,7 @@ fn chopSlice(ptr: []const u8, offset: u64, size: u64) ![]const u8 {
|
||||
/// it themselves, even on error.
|
||||
/// TODO resources https://github.com/ziglang/zig/issues/4353
|
||||
/// TODO it's weird to take ownership even on error, rework this code.
|
||||
pub fn readElfDebugInfo(allocator: *mem.Allocator, elf_file: File) !ModuleDebugInfo {
|
||||
pub fn readElfDebugInfo(allocator: mem.Allocator, elf_file: File) !ModuleDebugInfo {
|
||||
nosuspend {
|
||||
const mapped_mem = try mapWholeFile(elf_file);
|
||||
const hdr = @ptrCast(*const elf.Ehdr, &mapped_mem[0]);
|
||||
@ -827,7 +827,7 @@ pub fn readElfDebugInfo(allocator: *mem.Allocator, elf_file: File) !ModuleDebugI
|
||||
/// This takes ownership of macho_file: users of this function should not close
|
||||
/// it themselves, even on error.
|
||||
/// TODO it's weird to take ownership even on error, rework this code.
|
||||
fn readMachODebugInfo(allocator: *mem.Allocator, macho_file: File) !ModuleDebugInfo {
|
||||
fn readMachODebugInfo(allocator: mem.Allocator, macho_file: File) !ModuleDebugInfo {
|
||||
const mapped_mem = try mapWholeFile(macho_file);
|
||||
|
||||
const hdr = @ptrCast(
|
||||
@ -1025,10 +1025,10 @@ fn mapWholeFile(file: File) ![]align(mem.page_size) const u8 {
|
||||
}
|
||||
|
||||
pub const DebugInfo = struct {
|
||||
allocator: *mem.Allocator,
|
||||
allocator: mem.Allocator,
|
||||
address_map: std.AutoHashMap(usize, *ModuleDebugInfo),
|
||||
|
||||
pub fn init(allocator: *mem.Allocator) DebugInfo {
|
||||
pub fn init(allocator: mem.Allocator) DebugInfo {
|
||||
return DebugInfo{
|
||||
.allocator = allocator,
|
||||
.address_map = std.AutoHashMap(usize, *ModuleDebugInfo).init(allocator),
|
||||
@ -1278,7 +1278,7 @@ pub const ModuleDebugInfo = switch (native_os) {
|
||||
addr_table: std.StringHashMap(u64),
|
||||
};
|
||||
|
||||
pub fn allocator(self: @This()) *mem.Allocator {
|
||||
pub fn allocator(self: @This()) mem.Allocator {
|
||||
return self.ofiles.allocator;
|
||||
}
|
||||
|
||||
@ -1470,7 +1470,7 @@ pub const ModuleDebugInfo = switch (native_os) {
|
||||
debug_data: PdbOrDwarf,
|
||||
coff: *coff.Coff,
|
||||
|
||||
pub fn allocator(self: @This()) *mem.Allocator {
|
||||
pub fn allocator(self: @This()) mem.Allocator {
|
||||
return self.coff.allocator;
|
||||
}
|
||||
|
||||
@ -1560,14 +1560,15 @@ fn getSymbolFromDwarf(address: u64, di: *DW.DwarfInfo) !SymbolInfo {

}

/// TODO multithreaded awareness
var debug_info_allocator: ?*mem.Allocator = null;
var debug_info_allocator: ?mem.Allocator = null;
var debug_info_arena_allocator: std.heap.ArenaAllocator = undefined;
fn getDebugInfoAllocator() *mem.Allocator {
fn getDebugInfoAllocator() mem.Allocator {
if (debug_info_allocator) |a| return a;

debug_info_arena_allocator = std.heap.ArenaAllocator.init(std.heap.page_allocator);
debug_info_allocator = &debug_info_arena_allocator.allocator;
return &debug_info_arena_allocator.allocator;
const allocator = debug_info_arena_allocator.getAllocator();
debug_info_allocator = allocator;
return allocator;
}

/// Whether or not the current target can print useful debug information when a segfault occurs.
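getDebugInfoAllocator above shows the arena pattern after this change: the ArenaAllocator lives in stable storage and hands out a by-value Allocator via getAllocator(), instead of callers taking the address of its allocator field. A minimal sketch of the same pattern in user code; the main function and allocPrint call are illustrative, using only APIs touched by this diff:

const std = @import("std");

pub fn main() !void {
    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena.deinit(); // frees everything allocated through the arena at once
    const allocator = arena.getAllocator();

    // Individual allocations do not need to be freed; the arena owns them.
    const scratch = try allocator.alloc(u8, 64);
    const msg = try std.fmt.allocPrint(allocator, "{d} bytes of scratch", .{scratch.len});
    std.debug.print("{s}\n", .{msg});
}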
@ -466,7 +466,7 @@ fn readUnitLength(in_stream: anytype, endian: std.builtin.Endian, is_64: *bool)
|
||||
}
|
||||
|
||||
// TODO the nosuspends here are workarounds
|
||||
fn readAllocBytes(allocator: *mem.Allocator, in_stream: anytype, size: usize) ![]u8 {
|
||||
fn readAllocBytes(allocator: mem.Allocator, in_stream: anytype, size: usize) ![]u8 {
|
||||
const buf = try allocator.alloc(u8, size);
|
||||
errdefer allocator.free(buf);
|
||||
if ((try nosuspend in_stream.read(buf)) < size) return error.EndOfFile;
|
||||
@ -481,18 +481,18 @@ fn readAddress(in_stream: anytype, endian: std.builtin.Endian, is_64: bool) !u64
|
||||
@as(u64, try in_stream.readInt(u32, endian));
|
||||
}
|
||||
|
||||
fn parseFormValueBlockLen(allocator: *mem.Allocator, in_stream: anytype, size: usize) !FormValue {
|
||||
fn parseFormValueBlockLen(allocator: mem.Allocator, in_stream: anytype, size: usize) !FormValue {
|
||||
const buf = try readAllocBytes(allocator, in_stream, size);
|
||||
return FormValue{ .Block = buf };
|
||||
}
|
||||
|
||||
// TODO the nosuspends here are workarounds
|
||||
fn parseFormValueBlock(allocator: *mem.Allocator, in_stream: anytype, endian: std.builtin.Endian, size: usize) !FormValue {
|
||||
fn parseFormValueBlock(allocator: mem.Allocator, in_stream: anytype, endian: std.builtin.Endian, size: usize) !FormValue {
|
||||
const block_len = try nosuspend in_stream.readVarInt(usize, endian, size);
|
||||
return parseFormValueBlockLen(allocator, in_stream, block_len);
|
||||
}
|
||||
|
||||
fn parseFormValueConstant(allocator: *mem.Allocator, in_stream: anytype, signed: bool, endian: std.builtin.Endian, comptime size: i32) !FormValue {
|
||||
fn parseFormValueConstant(allocator: mem.Allocator, in_stream: anytype, signed: bool, endian: std.builtin.Endian, comptime size: i32) !FormValue {
|
||||
_ = allocator;
|
||||
// TODO: Please forgive me, I've worked around zig not properly spilling some intermediate values here.
|
||||
// `nosuspend` should be removed from all the function calls once it is fixed.
|
||||
@ -520,7 +520,7 @@ fn parseFormValueConstant(allocator: *mem.Allocator, in_stream: anytype, signed:
|
||||
}
|
||||
|
||||
// TODO the nosuspends here are workarounds
|
||||
fn parseFormValueRef(allocator: *mem.Allocator, in_stream: anytype, endian: std.builtin.Endian, size: i32) !FormValue {
|
||||
fn parseFormValueRef(allocator: mem.Allocator, in_stream: anytype, endian: std.builtin.Endian, size: i32) !FormValue {
|
||||
_ = allocator;
|
||||
return FormValue{
|
||||
.Ref = switch (size) {
|
||||
@ -535,7 +535,7 @@ fn parseFormValueRef(allocator: *mem.Allocator, in_stream: anytype, endian: std.
|
||||
}
|
||||
|
||||
// TODO the nosuspends here are workarounds
|
||||
fn parseFormValue(allocator: *mem.Allocator, in_stream: anytype, form_id: u64, endian: std.builtin.Endian, is_64: bool) anyerror!FormValue {
|
||||
fn parseFormValue(allocator: mem.Allocator, in_stream: anytype, form_id: u64, endian: std.builtin.Endian, is_64: bool) anyerror!FormValue {
|
||||
return switch (form_id) {
|
||||
FORM.addr => FormValue{ .Address = try readAddress(in_stream, endian, @sizeOf(usize) == 8) },
|
||||
FORM.block1 => parseFormValueBlock(allocator, in_stream, endian, 1),
|
||||
@ -604,7 +604,7 @@ pub const DwarfInfo = struct {
|
||||
compile_unit_list: ArrayList(CompileUnit) = undefined,
|
||||
func_list: ArrayList(Func) = undefined,
|
||||
|
||||
pub fn allocator(self: DwarfInfo) *mem.Allocator {
|
||||
pub fn allocator(self: DwarfInfo) mem.Allocator {
|
||||
return self.abbrev_table_list.allocator;
|
||||
}
|
||||
|
||||
@ -1092,7 +1092,7 @@ pub const DwarfInfo = struct {
|
||||
/// the DwarfInfo fields before calling. These fields can be left undefined:
|
||||
/// * abbrev_table_list
|
||||
/// * compile_unit_list
|
||||
pub fn openDwarfDebugInfo(di: *DwarfInfo, allocator: *mem.Allocator) !void {
|
||||
pub fn openDwarfDebugInfo(di: *DwarfInfo, allocator: mem.Allocator) !void {
|
||||
di.abbrev_table_list = ArrayList(AbbrevTableHeader).init(allocator);
|
||||
di.compile_unit_list = ArrayList(CompileUnit).init(allocator);
|
||||
di.func_list = ArrayList(Func).init(allocator);
|
||||
|
||||
@ -15,7 +15,7 @@ pub fn Group(comptime ReturnType: type) type {
|
||||
frame_stack: Stack,
|
||||
alloc_stack: AllocStack,
|
||||
lock: Lock,
|
||||
allocator: *Allocator,
|
||||
allocator: Allocator,
|
||||
|
||||
const Self = @This();
|
||||
|
||||
@ -31,7 +31,7 @@ pub fn Group(comptime ReturnType: type) type {
|
||||
handle: anyframe->ReturnType,
|
||||
};
|
||||
|
||||
pub fn init(allocator: *Allocator) Self {
|
||||
pub fn init(allocator: Allocator) Self {
|
||||
return Self{
|
||||
.frame_stack = Stack.init(),
|
||||
.alloc_stack = AllocStack.init(),
|
||||
@ -127,7 +127,7 @@ test "std.event.Group" {
|
||||
|
||||
_ = async testGroup(std.heap.page_allocator);
|
||||
}
|
||||
fn testGroup(allocator: *Allocator) callconv(.Async) void {
|
||||
fn testGroup(allocator: Allocator) callconv(.Async) void {
|
||||
var count: usize = 0;
|
||||
var group = Group(void).init(allocator);
|
||||
var sleep_a_little_frame = async sleepALittle(&count);
|
||||
|
||||
@ -727,7 +727,7 @@ pub const Loop = struct {
|
||||
/// with `allocator` and freed when the function returns.
|
||||
/// `func` must return void and it can be an async function.
|
||||
/// Yields to the event loop, running the function on the next tick.
|
||||
pub fn runDetached(self: *Loop, alloc: *mem.Allocator, comptime func: anytype, args: anytype) error{OutOfMemory}!void {
|
||||
pub fn runDetached(self: *Loop, alloc: mem.Allocator, comptime func: anytype, args: anytype) error{OutOfMemory}!void {
|
||||
if (!std.io.is_async) @compileError("Can't use runDetached in non-async mode!");
|
||||
if (@TypeOf(@call(.{}, func, args)) != void) {
|
||||
@compileError("`func` must not have a return value");
|
||||
@ -735,7 +735,7 @@ pub const Loop = struct {
|
||||
|
||||
const Wrapper = struct {
|
||||
const Args = @TypeOf(args);
|
||||
fn run(func_args: Args, loop: *Loop, allocator: *mem.Allocator) void {
|
||||
fn run(func_args: Args, loop: *Loop, allocator: mem.Allocator) void {
|
||||
loop.beginOneEvent();
|
||||
loop.yield();
|
||||
@call(.{}, func, func_args); // compile error when called with non-void ret type
|
||||
|
||||
@ -226,7 +226,7 @@ test "std.event.RwLock" {
|
||||
const expected_result = [1]i32{shared_it_count * @intCast(i32, shared_test_data.len)} ** shared_test_data.len;
|
||||
try testing.expectEqualSlices(i32, expected_result, shared_test_data);
|
||||
}
|
||||
fn testLock(allocator: *Allocator, lock: *RwLock) callconv(.Async) void {
|
||||
fn testLock(allocator: Allocator, lock: *RwLock) callconv(.Async) void {
|
||||
var read_nodes: [100]Loop.NextTickNode = undefined;
|
||||
for (read_nodes) |*read_node| {
|
||||
const frame = allocator.create(@Frame(readRunner)) catch @panic("memory");
|
||||
|
||||
@ -33,7 +33,7 @@ pub fn LinearFifo(
|
||||
};
|
||||
|
||||
return struct {
|
||||
allocator: if (buffer_type == .Dynamic) *Allocator else void,
|
||||
allocator: if (buffer_type == .Dynamic) Allocator else void,
|
||||
buf: if (buffer_type == .Static) [buffer_type.Static]T else []T,
|
||||
head: usize,
|
||||
count: usize,
|
||||
@ -69,7 +69,7 @@ pub fn LinearFifo(
|
||||
}
|
||||
},
|
||||
.Dynamic => struct {
|
||||
pub fn init(allocator: *Allocator) Self {
|
||||
pub fn init(allocator: Allocator) Self {
|
||||
return .{
|
||||
.allocator = allocator,
|
||||
.buf = &[_]T{},
|
||||
|
||||
@ -1803,7 +1803,7 @@ pub fn count(comptime fmt: []const u8, args: anytype) u64 {
|
||||
|
||||
pub const AllocPrintError = error{OutOfMemory};
|
||||
|
||||
pub fn allocPrint(allocator: *mem.Allocator, comptime fmt: []const u8, args: anytype) AllocPrintError![]u8 {
|
||||
pub fn allocPrint(allocator: mem.Allocator, comptime fmt: []const u8, args: anytype) AllocPrintError![]u8 {
|
||||
const size = math.cast(usize, count(fmt, args)) catch |err| switch (err) {
|
||||
// Output too long. Can't possibly allocate enough memory to display it.
|
||||
error.Overflow => return error.OutOfMemory,
|
||||
@ -1816,7 +1816,7 @@ pub fn allocPrint(allocator: *mem.Allocator, comptime fmt: []const u8, args: any
|
||||
|
||||
pub const allocPrint0 = @compileError("deprecated; use allocPrintZ");
|
||||
|
||||
pub fn allocPrintZ(allocator: *mem.Allocator, comptime fmt: []const u8, args: anytype) AllocPrintError![:0]u8 {
|
||||
pub fn allocPrintZ(allocator: mem.Allocator, comptime fmt: []const u8, args: anytype) AllocPrintError![:0]u8 {
|
||||
const result = try allocPrint(allocator, fmt ++ "\x00", args);
|
||||
return result[0 .. result.len - 1 :0];
|
||||
}
|
||||
|
||||
@ -64,7 +64,7 @@ pub const need_async_thread = std.io.is_async and switch (builtin.os.tag) {
|
||||
};
|
||||
|
||||
/// TODO remove the allocator requirement from this API
|
||||
pub fn atomicSymLink(allocator: *Allocator, existing_path: []const u8, new_path: []const u8) !void {
|
||||
pub fn atomicSymLink(allocator: Allocator, existing_path: []const u8, new_path: []const u8) !void {
|
||||
if (cwd().symLink(existing_path, new_path, .{})) {
|
||||
return;
|
||||
} else |err| switch (err) {
|
||||
@ -875,7 +875,7 @@ pub const Dir = struct {
|
||||
/// Must call `Walker.deinit` when done.
|
||||
/// The order of returned file system entries is undefined.
|
||||
/// `self` will not be closed after walking it.
|
||||
pub fn walk(self: Dir, allocator: *Allocator) !Walker {
|
||||
pub fn walk(self: Dir, allocator: Allocator) !Walker {
|
||||
var name_buffer = std.ArrayList(u8).init(allocator);
|
||||
errdefer name_buffer.deinit();
|
||||
|
||||
@ -1393,7 +1393,7 @@ pub const Dir = struct {
|
||||
|
||||
/// Same as `Dir.realpath` except caller must free the returned memory.
|
||||
/// See also `Dir.realpath`.
|
||||
pub fn realpathAlloc(self: Dir, allocator: *Allocator, pathname: []const u8) ![]u8 {
|
||||
pub fn realpathAlloc(self: Dir, allocator: Allocator, pathname: []const u8) ![]u8 {
|
||||
// Use of MAX_PATH_BYTES here is valid as the realpath function does not
|
||||
// have a variant that takes an arbitrary-size buffer.
|
||||
// TODO(#4812): Consider reimplementing realpath or using the POSIX.1-2008
|
||||
@ -1804,7 +1804,7 @@ pub const Dir = struct {
|
||||
|
||||
/// On success, caller owns returned buffer.
|
||||
/// If the file is larger than `max_bytes`, returns `error.FileTooBig`.
|
||||
pub fn readFileAlloc(self: Dir, allocator: *mem.Allocator, file_path: []const u8, max_bytes: usize) ![]u8 {
|
||||
pub fn readFileAlloc(self: Dir, allocator: mem.Allocator, file_path: []const u8, max_bytes: usize) ![]u8 {
|
||||
return self.readFileAllocOptions(allocator, file_path, max_bytes, null, @alignOf(u8), null);
|
||||
}
|
||||
|
||||
@ -1815,7 +1815,7 @@ pub const Dir = struct {
|
||||
/// Allows specifying alignment and a sentinel value.
|
||||
pub fn readFileAllocOptions(
|
||||
self: Dir,
|
||||
allocator: *mem.Allocator,
|
||||
allocator: mem.Allocator,
|
||||
file_path: []const u8,
|
||||
max_bytes: usize,
|
||||
size_hint: ?usize,
|
||||
@ -2464,7 +2464,7 @@ pub const SelfExePathError = os.ReadLinkError || os.SysCtlError || os.RealPathEr
|
||||
|
||||
/// `selfExePath` except allocates the result on the heap.
|
||||
/// Caller owns returned memory.
|
||||
pub fn selfExePathAlloc(allocator: *Allocator) ![]u8 {
|
||||
pub fn selfExePathAlloc(allocator: Allocator) ![]u8 {
|
||||
// Use of MAX_PATH_BYTES here is justified as, at least on one tested Linux
|
||||
// system, readlink will completely fail to return a result larger than
|
||||
// PATH_MAX even if given a sufficiently large buffer. This makes it
|
||||
@ -2573,7 +2573,7 @@ pub fn selfExePathW() [:0]const u16 {
|
||||
|
||||
/// `selfExeDirPath` except allocates the result on the heap.
|
||||
/// Caller owns returned memory.
|
||||
pub fn selfExeDirPathAlloc(allocator: *Allocator) ![]u8 {
|
||||
pub fn selfExeDirPathAlloc(allocator: Allocator) ![]u8 {
|
||||
// Use of MAX_PATH_BYTES here is justified as, at least on one tested Linux
|
||||
// system, readlink will completely fail to return a result larger than
|
||||
// PATH_MAX even if given a sufficiently large buffer. This makes it
|
||||
@ -2596,7 +2596,7 @@ pub fn selfExeDirPath(out_buffer: []u8) SelfExePathError![]const u8 {
|
||||
|
||||
/// `realpath`, except caller must free the returned memory.
|
||||
/// See also `Dir.realpath`.
|
||||
pub fn realpathAlloc(allocator: *Allocator, pathname: []const u8) ![]u8 {
|
||||
pub fn realpathAlloc(allocator: Allocator, pathname: []const u8) ![]u8 {
|
||||
// Use of MAX_PATH_BYTES here is valid as the realpath function does not
|
||||
// have a variant that takes an arbitrary-size buffer.
|
||||
// TODO(#4812): Consider reimplementing realpath or using the POSIX.1-2008
|
||||
|
||||
@ -420,7 +420,7 @@ pub const File = struct {
|
||||
/// Reads all the bytes from the current position to the end of the file.
|
||||
/// On success, caller owns returned buffer.
|
||||
/// If the file is larger than `max_bytes`, returns `error.FileTooBig`.
|
||||
pub fn readToEndAlloc(self: File, allocator: *mem.Allocator, max_bytes: usize) ![]u8 {
|
||||
pub fn readToEndAlloc(self: File, allocator: mem.Allocator, max_bytes: usize) ![]u8 {
|
||||
return self.readToEndAllocOptions(allocator, max_bytes, null, @alignOf(u8), null);
|
||||
}
|
||||
|
||||
@ -432,7 +432,7 @@ pub const File = struct {
|
||||
/// Allows specifying alignment and a sentinel value.
|
||||
pub fn readToEndAllocOptions(
|
||||
self: File,
|
||||
allocator: *mem.Allocator,
|
||||
allocator: mem.Allocator,
|
||||
max_bytes: usize,
|
||||
size_hint: ?usize,
|
||||
comptime alignment: u29,
|
||||
|
||||
@ -12,7 +12,7 @@ pub const GetAppDataDirError = error{
|
||||
|
||||
/// Caller owns returned memory.
|
||||
/// TODO determine if we can remove the allocator requirement
|
||||
pub fn getAppDataDir(allocator: *mem.Allocator, appname: []const u8) GetAppDataDirError![]u8 {
|
||||
pub fn getAppDataDir(allocator: mem.Allocator, appname: []const u8) GetAppDataDirError![]u8 {
|
||||
switch (builtin.os.tag) {
|
||||
.windows => {
|
||||
var dir_path_ptr: [*:0]u16 = undefined;
|
||||
|
||||
@ -35,7 +35,7 @@ pub fn isSep(byte: u8) bool {
|
||||
|
||||
/// This is different from mem.join in that the separator will not be repeated if
|
||||
/// it is found at the end or beginning of a pair of consecutive paths.
|
||||
fn joinSepMaybeZ(allocator: *Allocator, separator: u8, sepPredicate: fn (u8) bool, paths: []const []const u8, zero: bool) ![]u8 {
|
||||
fn joinSepMaybeZ(allocator: Allocator, separator: u8, sepPredicate: fn (u8) bool, paths: []const []const u8, zero: bool) ![]u8 {
|
||||
if (paths.len == 0) return if (zero) try allocator.dupe(u8, &[1]u8{0}) else &[0]u8{};
|
||||
|
||||
// Find first non-empty path index.
|
||||
@ -99,13 +99,13 @@ fn joinSepMaybeZ(allocator: *Allocator, separator: u8, sepPredicate: fn (u8) boo
|
||||
|
||||
/// Naively combines a series of paths with the native path separator.
|
||||
/// Allocates memory for the result, which must be freed by the caller.
|
||||
pub fn join(allocator: *Allocator, paths: []const []const u8) ![]u8 {
|
||||
pub fn join(allocator: Allocator, paths: []const []const u8) ![]u8 {
|
||||
return joinSepMaybeZ(allocator, sep, isSep, paths, false);
|
||||
}
|
||||
|
||||
/// Naively combines a series of paths with the native path separator and null terminator.
|
||||
/// Allocates memory for the result, which must be freed by the caller.
|
||||
pub fn joinZ(allocator: *Allocator, paths: []const []const u8) ![:0]u8 {
|
||||
pub fn joinZ(allocator: Allocator, paths: []const []const u8) ![:0]u8 {
|
||||
const out = try joinSepMaybeZ(allocator, sep, isSep, paths, true);
|
||||
return out[0 .. out.len - 1 :0];
|
||||
}
|
||||
@ -445,7 +445,7 @@ fn asciiEqlIgnoreCase(s1: []const u8, s2: []const u8) bool {
|
||||
}
|
||||
|
||||
/// On Windows, this calls `resolveWindows` and on POSIX it calls `resolvePosix`.
|
||||
pub fn resolve(allocator: *Allocator, paths: []const []const u8) ![]u8 {
|
||||
pub fn resolve(allocator: Allocator, paths: []const []const u8) ![]u8 {
|
||||
if (native_os == .windows) {
|
||||
return resolveWindows(allocator, paths);
|
||||
} else {
|
||||
@ -461,7 +461,7 @@ pub fn resolve(allocator: *Allocator, paths: []const []const u8) ![]u8 {
|
||||
/// Path separators are canonicalized to '\\' and drives are canonicalized to capital letters.
|
||||
/// Note: all usage of this function should be audited due to the existence of symlinks.
|
||||
/// Without performing actual syscalls, resolving `..` could be incorrect.
|
||||
pub fn resolveWindows(allocator: *Allocator, paths: []const []const u8) ![]u8 {
|
||||
pub fn resolveWindows(allocator: Allocator, paths: []const []const u8) ![]u8 {
|
||||
if (paths.len == 0) {
|
||||
assert(native_os == .windows); // resolveWindows called on non windows can't use getCwd
|
||||
return process.getCwdAlloc(allocator);
|
||||
@ -647,7 +647,7 @@ pub fn resolveWindows(allocator: *Allocator, paths: []const []const u8) ![]u8 {
|
||||
/// If all paths are relative it uses the current working directory as a starting point.
|
||||
/// Note: all usage of this function should be audited due to the existence of symlinks.
|
||||
/// Without performing actual syscalls, resolving `..` could be incorrect.
|
||||
pub fn resolvePosix(allocator: *Allocator, paths: []const []const u8) ![]u8 {
|
||||
pub fn resolvePosix(allocator: Allocator, paths: []const []const u8) ![]u8 {
|
||||
if (paths.len == 0) {
|
||||
assert(native_os != .windows); // resolvePosix called on windows can't use getCwd
|
||||
return process.getCwdAlloc(allocator);
|
||||
@ -1058,7 +1058,7 @@ fn testBasenameWindows(input: []const u8, expected_output: []const u8) !void {
|
||||
/// resolve to the same path (after calling `resolve` on each), a zero-length
|
||||
/// string is returned.
|
||||
/// On Windows this canonicalizes the drive to a capital letter and paths to `\\`.
|
||||
pub fn relative(allocator: *Allocator, from: []const u8, to: []const u8) ![]u8 {
|
||||
pub fn relative(allocator: Allocator, from: []const u8, to: []const u8) ![]u8 {
|
||||
if (native_os == .windows) {
|
||||
return relativeWindows(allocator, from, to);
|
||||
} else {
|
||||
@ -1066,7 +1066,7 @@ pub fn relative(allocator: *Allocator, from: []const u8, to: []const u8) ![]u8 {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn relativeWindows(allocator: *Allocator, from: []const u8, to: []const u8) ![]u8 {
|
||||
pub fn relativeWindows(allocator: Allocator, from: []const u8, to: []const u8) ![]u8 {
|
||||
const resolved_from = try resolveWindows(allocator, &[_][]const u8{from});
|
||||
defer allocator.free(resolved_from);
|
||||
|
||||
@ -1139,7 +1139,7 @@ pub fn relativeWindows(allocator: *Allocator, from: []const u8, to: []const u8)
|
||||
return [_]u8{};
|
||||
}
|
||||
|
||||
pub fn relativePosix(allocator: *Allocator, from: []const u8, to: []const u8) ![]u8 {
|
||||
pub fn relativePosix(allocator: Allocator, from: []const u8, to: []const u8) ![]u8 {
|
||||
const resolved_from = try resolvePosix(allocator, &[_][]const u8{from});
|
||||
defer allocator.free(resolved_from);
|
||||
|
||||
|
||||
@ -52,9 +52,11 @@ test "accessAbsolute" {

var arena = ArenaAllocator.init(testing.allocator);
defer arena.deinit();
const allocator = arena.getAllocator();

const base_path = blk: {
const relative_path = try fs.path.join(&arena.allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..] });
break :blk try fs.realpathAlloc(&arena.allocator, relative_path);
const relative_path = try fs.path.join(allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..] });
break :blk try fs.realpathAlloc(allocator, relative_path);
};

try fs.accessAbsolute(base_path, .{});
@ -69,9 +71,11 @@ test "openDirAbsolute" {
|
||||
try tmp.dir.makeDir("subdir");
|
||||
var arena = ArenaAllocator.init(testing.allocator);
|
||||
defer arena.deinit();
|
||||
const allocator = arena.getAllocator();
|
||||
|
||||
const base_path = blk: {
|
||||
const relative_path = try fs.path.join(&arena.allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..], "subdir" });
|
||||
break :blk try fs.realpathAlloc(&arena.allocator, relative_path);
|
||||
const relative_path = try fs.path.join(allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..], "subdir" });
|
||||
break :blk try fs.realpathAlloc(allocator, relative_path);
|
||||
};
|
||||
|
||||
{
|
||||
@ -80,8 +84,8 @@ test "openDirAbsolute" {
|
||||
}
|
||||
|
||||
for ([_][]const u8{ ".", ".." }) |sub_path| {
|
||||
const dir_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, sub_path });
|
||||
defer arena.allocator.free(dir_path);
|
||||
const dir_path = try fs.path.join(allocator, &[_][]const u8{ base_path, sub_path });
|
||||
defer allocator.free(dir_path);
|
||||
var dir = try fs.openDirAbsolute(dir_path, .{});
|
||||
defer dir.close();
|
||||
}
|
||||
@ -107,12 +111,12 @@ test "readLinkAbsolute" {
|
||||
// Get base abs path
|
||||
var arena = ArenaAllocator.init(testing.allocator);
|
||||
defer arena.deinit();
|
||||
const allocator = arena.getAllocator();
|
||||
|
||||
const base_path = blk: {
|
||||
const relative_path = try fs.path.join(&arena.allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..] });
|
||||
break :blk try fs.realpathAlloc(&arena.allocator, relative_path);
|
||||
const relative_path = try fs.path.join(allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..] });
|
||||
break :blk try fs.realpathAlloc(allocator, relative_path);
|
||||
};
|
||||
const allocator = &arena.allocator;
|
||||
|
||||
{
|
||||
const target_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "file.txt" });
|
||||
@ -158,15 +162,16 @@ test "Dir.Iterator" {
|
||||
|
||||
var arena = ArenaAllocator.init(testing.allocator);
|
||||
defer arena.deinit();
|
||||
const allocator = arena.getAllocator();
|
||||
|
||||
var entries = std.ArrayList(Dir.Entry).init(&arena.allocator);
|
||||
var entries = std.ArrayList(Dir.Entry).init(allocator);
|
||||
|
||||
// Create iterator.
|
||||
var iter = tmp_dir.dir.iterate();
|
||||
while (try iter.next()) |entry| {
|
||||
// We cannot just store `entry` as on Windows, we're re-using the name buffer
|
||||
// which means we'll actually share the `name` pointer between entries!
|
||||
const name = try arena.allocator.dupe(u8, entry.name);
|
||||
const name = try allocator.dupe(u8, entry.name);
|
||||
try entries.append(Dir.Entry{ .name = name, .kind = entry.kind });
|
||||
}
|
||||
|
||||
@ -202,25 +207,26 @@ test "Dir.realpath smoke test" {
|
||||
|
||||
var arena = ArenaAllocator.init(testing.allocator);
|
||||
defer arena.deinit();
|
||||
const allocator = arena.getAllocator();
|
||||
|
||||
const base_path = blk: {
|
||||
const relative_path = try fs.path.join(&arena.allocator, &[_][]const u8{ "zig-cache", "tmp", tmp_dir.sub_path[0..] });
|
||||
break :blk try fs.realpathAlloc(&arena.allocator, relative_path);
|
||||
const relative_path = try fs.path.join(allocator, &[_][]const u8{ "zig-cache", "tmp", tmp_dir.sub_path[0..] });
|
||||
break :blk try fs.realpathAlloc(allocator, relative_path);
|
||||
};
|
||||
|
||||
// First, test non-alloc version
|
||||
{
|
||||
var buf1: [fs.MAX_PATH_BYTES]u8 = undefined;
|
||||
const file_path = try tmp_dir.dir.realpath("test_file", buf1[0..]);
|
||||
const expected_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "test_file" });
|
||||
const expected_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "test_file" });
|
||||
|
||||
try testing.expect(mem.eql(u8, file_path, expected_path));
|
||||
}
|
||||
|
||||
// Next, test alloc version
|
||||
{
|
||||
const file_path = try tmp_dir.dir.realpathAlloc(&arena.allocator, "test_file");
|
||||
const expected_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "test_file" });
|
||||
const file_path = try tmp_dir.dir.realpathAlloc(allocator, "test_file");
|
||||
const expected_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "test_file" });
|
||||
|
||||
try testing.expect(mem.eql(u8, file_path, expected_path));
|
||||
}
|
||||
@ -476,11 +482,11 @@ test "renameAbsolute" {
|
||||
// Get base abs path
|
||||
var arena = ArenaAllocator.init(testing.allocator);
|
||||
defer arena.deinit();
|
||||
const allocator = &arena.allocator;
|
||||
const allocator = arena.getAllocator();
|
||||
|
||||
const base_path = blk: {
|
||||
const relative_path = try fs.path.join(&arena.allocator, &[_][]const u8{ "zig-cache", "tmp", tmp_dir.sub_path[0..] });
|
||||
break :blk try fs.realpathAlloc(&arena.allocator, relative_path);
|
||||
const relative_path = try fs.path.join(allocator, &[_][]const u8{ "zig-cache", "tmp", tmp_dir.sub_path[0..] });
|
||||
break :blk try fs.realpathAlloc(allocator, relative_path);
|
||||
};
|
||||
|
||||
try testing.expectError(error.FileNotFound, fs.renameAbsolute(
|
||||
@ -987,11 +993,11 @@ test ". and .. in absolute functions" {
|
||||
|
||||
var arena = ArenaAllocator.init(testing.allocator);
|
||||
defer arena.deinit();
|
||||
const allocator = &arena.allocator;
|
||||
const allocator = arena.getAllocator();
|
||||
|
||||
const base_path = blk: {
|
||||
const relative_path = try fs.path.join(&arena.allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..] });
|
||||
break :blk try fs.realpathAlloc(&arena.allocator, relative_path);
|
||||
const relative_path = try fs.path.join(allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..] });
|
||||
break :blk try fs.realpathAlloc(allocator, relative_path);
|
||||
};
|
||||
|
||||
const subdir_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "./subdir" });
|
||||
|
||||
@ -80,7 +80,7 @@ pub const PreopenList = struct {
|
||||
pub const Error = error{ OutOfMemory, Overflow } || os.UnexpectedError;
|
||||
|
||||
/// Deinitialize with `deinit`.
|
||||
pub fn init(allocator: *Allocator) Self {
|
||||
pub fn init(allocator: Allocator) Self {
|
||||
return Self{ .buffer = InnerList.init(allocator) };
|
||||
}
|
||||
|
||||
|
||||
@ -30,7 +30,7 @@ pub fn Watch(comptime V: type) type {
|
||||
return struct {
|
||||
channel: event.Channel(Event.Error!Event),
|
||||
os_data: OsData,
|
||||
allocator: *Allocator,
|
||||
allocator: Allocator,
|
||||
|
||||
const OsData = switch (builtin.os.tag) {
|
||||
// TODO https://github.com/ziglang/zig/issues/3778
|
||||
@ -96,7 +96,7 @@ pub fn Watch(comptime V: type) type {
|
||||
pub const Error = WatchEventError;
|
||||
};
|
||||
|
||||
pub fn init(allocator: *Allocator, event_buf_count: usize) !*Self {
|
||||
pub fn init(allocator: Allocator, event_buf_count: usize) !*Self {
|
||||
const self = try allocator.create(Self);
|
||||
errdefer allocator.destroy(self);
|
||||
|
||||
@ -648,7 +648,7 @@ test "write a file, watch it, write it again, delete it" {
|
||||
return testWriteWatchWriteDelete(std.testing.allocator);
|
||||
}
|
||||
|
||||
fn testWriteWatchWriteDelete(allocator: *Allocator) !void {
|
||||
fn testWriteWatchWriteDelete(allocator: Allocator) !void {
|
||||
const file_path = try std.fs.path.join(allocator, &[_][]const u8{ test_tmp_dir, "file.txt" });
|
||||
defer allocator.free(file_path);
|
||||
|
||||
|
||||
@ -309,7 +309,7 @@ test "hash struct deep" {
|
||||
|
||||
const Self = @This();
|
||||
|
||||
pub fn init(allocator: *mem.Allocator, a_: u32, b_: u16, c_: bool) !Self {
|
||||
pub fn init(allocator: mem.Allocator, a_: u32, b_: u16, c_: bool) !Self {
|
||||
const ptr = try allocator.create(bool);
|
||||
ptr.* = c_;
|
||||
return Self{ .a = a_, .b = b_, .c = ptr };
|
||||
|
||||
@ -165,7 +165,7 @@ pub fn main() !void {
|
||||
|
||||
var buffer: [1024]u8 = undefined;
|
||||
var fixed = std.heap.FixedBufferAllocator.init(buffer[0..]);
|
||||
const args = try std.process.argsAlloc(&fixed.allocator);
|
||||
const args = try std.process.argsAlloc(fixed.getAllocator());
|
||||
|
||||
var filter: ?[]u8 = "";
|
||||
var count: usize = mode(128 * MiB);
|
||||
|
||||
@ -363,7 +363,7 @@ pub fn HashMap(
|
||||
comptime verifyContext(Context, K, K, u64);
|
||||
return struct {
|
||||
unmanaged: Unmanaged,
|
||||
allocator: *Allocator,
|
||||
allocator: Allocator,
|
||||
ctx: Context,
|
||||
|
||||
/// The type of the unmanaged hash map underlying this wrapper
|
||||
@ -390,7 +390,7 @@ pub fn HashMap(
|
||||
/// Create a managed hash map with an empty context.
|
||||
/// If the context is not zero-sized, you must use
|
||||
/// initContext(allocator, ctx) instead.
|
||||
pub fn init(allocator: *Allocator) Self {
|
||||
pub fn init(allocator: Allocator) Self {
|
||||
if (@sizeOf(Context) != 0) {
|
||||
@compileError("Context must be specified! Call initContext(allocator, ctx) instead.");
|
||||
}
|
||||
@ -402,7 +402,7 @@ pub fn HashMap(
|
||||
}
|
||||
|
||||
/// Create a managed hash map with a context
|
||||
pub fn initContext(allocator: *Allocator, ctx: Context) Self {
|
||||
pub fn initContext(allocator: Allocator, ctx: Context) Self {
|
||||
return .{
|
||||
.unmanaged = .{},
|
||||
.allocator = allocator,
|
||||
@ -636,7 +636,7 @@ pub fn HashMap(
|
||||
}
|
||||
|
||||
/// Creates a copy of this map, using a specified allocator
|
||||
pub fn cloneWithAllocator(self: Self, new_allocator: *Allocator) !Self {
|
||||
pub fn cloneWithAllocator(self: Self, new_allocator: Allocator) !Self {
|
||||
var other = try self.unmanaged.cloneContext(new_allocator, self.ctx);
|
||||
return other.promoteContext(new_allocator, self.ctx);
|
||||
}
|
||||
@ -650,7 +650,7 @@ pub fn HashMap(
|
||||
/// Creates a copy of this map, using a specified allocator and context.
|
||||
pub fn cloneWithAllocatorAndContext(
|
||||
self: Self,
|
||||
new_allocator: *Allocator,
|
||||
new_allocator: Allocator,
|
||||
new_ctx: anytype,
|
||||
) !HashMap(K, V, @TypeOf(new_ctx), max_load_percentage) {
|
||||
var other = try self.unmanaged.cloneContext(new_allocator, new_ctx);
|
||||
@ -841,13 +841,13 @@ pub fn HashMapUnmanaged(
|
||||
|
||||
pub const Managed = HashMap(K, V, Context, max_load_percentage);
|
||||
|
||||
pub fn promote(self: Self, allocator: *Allocator) Managed {
|
||||
pub fn promote(self: Self, allocator: Allocator) Managed {
|
||||
if (@sizeOf(Context) != 0)
|
||||
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call promoteContext instead.");
|
||||
return promoteContext(self, allocator, undefined);
|
||||
}
|
||||
|
||||
pub fn promoteContext(self: Self, allocator: *Allocator, ctx: Context) Managed {
|
||||
pub fn promoteContext(self: Self, allocator: Allocator, ctx: Context) Managed {
|
||||
return .{
|
||||
.unmanaged = self,
|
||||
.allocator = allocator,
|
||||
@ -859,7 +859,7 @@ pub fn HashMapUnmanaged(
|
||||
return size * 100 < max_load_percentage * cap;
|
||||
}
|
||||
|
||||
pub fn deinit(self: *Self, allocator: *Allocator) void {
|
||||
pub fn deinit(self: *Self, allocator: Allocator) void {
|
||||
self.deallocate(allocator);
|
||||
self.* = undefined;
|
||||
}
|
||||
@ -872,20 +872,20 @@ pub fn HashMapUnmanaged(
|
||||
|
||||
pub const ensureCapacity = @compileError("deprecated; call `ensureUnusedCapacity` or `ensureTotalCapacity`");
|
||||
|
||||
pub fn ensureTotalCapacity(self: *Self, allocator: *Allocator, new_size: Size) !void {
|
||||
pub fn ensureTotalCapacity(self: *Self, allocator: Allocator, new_size: Size) !void {
|
||||
if (@sizeOf(Context) != 0)
|
||||
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call ensureTotalCapacityContext instead.");
|
||||
return ensureTotalCapacityContext(self, allocator, new_size, undefined);
|
||||
}
|
||||
pub fn ensureTotalCapacityContext(self: *Self, allocator: *Allocator, new_size: Size, ctx: Context) !void {
|
||||
pub fn ensureTotalCapacityContext(self: *Self, allocator: Allocator, new_size: Size, ctx: Context) !void {
|
||||
if (new_size > self.size)
|
||||
try self.growIfNeeded(allocator, new_size - self.size, ctx);
|
||||
}
|
||||
|
||||
pub fn ensureUnusedCapacity(self: *Self, allocator: *Allocator, additional_size: Size) !void {
|
||||
pub fn ensureUnusedCapacity(self: *Self, allocator: Allocator, additional_size: Size) !void {
|
||||
return ensureUnusedCapacityContext(self, allocator, additional_size, undefined);
|
||||
}
|
||||
pub fn ensureUnusedCapacityContext(self: *Self, allocator: *Allocator, additional_size: Size, ctx: Context) !void {
|
||||
pub fn ensureUnusedCapacityContext(self: *Self, allocator: Allocator, additional_size: Size, ctx: Context) !void {
|
||||
return ensureTotalCapacityContext(self, allocator, self.count() + additional_size, ctx);
|
||||
}
|
||||
|
||||
@ -897,7 +897,7 @@ pub fn HashMapUnmanaged(
|
||||
}
|
||||
}
|
||||
|
||||
pub fn clearAndFree(self: *Self, allocator: *Allocator) void {
|
||||
pub fn clearAndFree(self: *Self, allocator: Allocator) void {
|
||||
self.deallocate(allocator);
|
||||
self.size = 0;
|
||||
self.available = 0;
|
||||
@ -962,12 +962,12 @@ pub fn HashMapUnmanaged(
|
||||
}
|
||||
|
||||
/// Insert an entry in the map. Assumes it is not already present.
|
||||
pub fn putNoClobber(self: *Self, allocator: *Allocator, key: K, value: V) !void {
|
||||
pub fn putNoClobber(self: *Self, allocator: Allocator, key: K, value: V) !void {
|
||||
if (@sizeOf(Context) != 0)
|
||||
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call putNoClobberContext instead.");
|
||||
return self.putNoClobberContext(allocator, key, value, undefined);
|
||||
}
|
||||
pub fn putNoClobberContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !void {
|
||||
pub fn putNoClobberContext(self: *Self, allocator: Allocator, key: K, value: V, ctx: Context) !void {
|
||||
assert(!self.containsContext(key, ctx));
|
||||
try self.growIfNeeded(allocator, 1, ctx);
|
||||
|
||||
@ -1021,12 +1021,12 @@ pub fn HashMapUnmanaged(
|
||||
}
|
||||
|
||||
/// Inserts a new `Entry` into the hash map, returning the previous one, if any.
|
||||
pub fn fetchPut(self: *Self, allocator: *Allocator, key: K, value: V) !?KV {
|
||||
pub fn fetchPut(self: *Self, allocator: Allocator, key: K, value: V) !?KV {
|
||||
if (@sizeOf(Context) != 0)
|
||||
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call fetchPutContext instead.");
|
||||
return self.fetchPutContext(allocator, key, value, undefined);
|
||||
}
|
||||
pub fn fetchPutContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !?KV {
|
||||
pub fn fetchPutContext(self: *Self, allocator: Allocator, key: K, value: V, ctx: Context) !?KV {
|
||||
const gop = try self.getOrPutContext(allocator, key, ctx);
|
||||
var result: ?KV = null;
|
||||
if (gop.found_existing) {
|
||||
@ -1157,12 +1157,12 @@ pub fn HashMapUnmanaged(
|
||||
}
|
||||
|
||||
/// Insert an entry if the associated key is not already present, otherwise update preexisting value.
|
||||
pub fn put(self: *Self, allocator: *Allocator, key: K, value: V) !void {
|
||||
pub fn put(self: *Self, allocator: Allocator, key: K, value: V) !void {
|
||||
if (@sizeOf(Context) != 0)
|
||||
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call putContext instead.");
|
||||
return self.putContext(allocator, key, value, undefined);
|
||||
}
|
||||
pub fn putContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !void {
|
||||
pub fn putContext(self: *Self, allocator: Allocator, key: K, value: V, ctx: Context) !void {
|
||||
const result = try self.getOrPutContext(allocator, key, ctx);
|
||||
result.value_ptr.* = value;
|
||||
}
|
||||
@ -1231,24 +1231,24 @@ pub fn HashMapUnmanaged(
|
||||
return null;
|
||||
}
|
||||
|
||||
pub fn getOrPut(self: *Self, allocator: *Allocator, key: K) !GetOrPutResult {
|
||||
pub fn getOrPut(self: *Self, allocator: Allocator, key: K) !GetOrPutResult {
|
||||
if (@sizeOf(Context) != 0)
|
||||
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutContext instead.");
|
||||
return self.getOrPutContext(allocator, key, undefined);
|
||||
}
|
||||
pub fn getOrPutContext(self: *Self, allocator: *Allocator, key: K, ctx: Context) !GetOrPutResult {
|
||||
pub fn getOrPutContext(self: *Self, allocator: Allocator, key: K, ctx: Context) !GetOrPutResult {
|
||||
const gop = try self.getOrPutContextAdapted(allocator, key, ctx, ctx);
|
||||
if (!gop.found_existing) {
|
||||
gop.key_ptr.* = key;
|
||||
}
|
||||
return gop;
|
||||
}
|
||||
pub fn getOrPutAdapted(self: *Self, allocator: *Allocator, key: anytype, key_ctx: anytype) !GetOrPutResult {
|
||||
pub fn getOrPutAdapted(self: *Self, allocator: Allocator, key: anytype, key_ctx: anytype) !GetOrPutResult {
|
||||
if (@sizeOf(Context) != 0)
|
||||
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutContextAdapted instead.");
|
||||
return self.getOrPutContextAdapted(allocator, key, key_ctx, undefined);
|
||||
}
|
||||
pub fn getOrPutContextAdapted(self: *Self, allocator: *Allocator, key: anytype, key_ctx: anytype, ctx: Context) !GetOrPutResult {
|
||||
pub fn getOrPutContextAdapted(self: *Self, allocator: Allocator, key: anytype, key_ctx: anytype, ctx: Context) !GetOrPutResult {
|
||||
self.growIfNeeded(allocator, 1, ctx) catch |err| {
|
||||
// If allocation fails, try to do the lookup anyway.
|
||||
// If we find an existing item, we can return it.
|
||||
@ -1341,12 +1341,12 @@ pub fn HashMapUnmanaged(
|
||||
};
|
||||
}
|
||||
|
||||
pub fn getOrPutValue(self: *Self, allocator: *Allocator, key: K, value: V) !Entry {
|
||||
pub fn getOrPutValue(self: *Self, allocator: Allocator, key: K, value: V) !Entry {
|
||||
if (@sizeOf(Context) != 0)
|
||||
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutValueContext instead.");
|
||||
return self.getOrPutValueContext(allocator, key, value, undefined);
|
||||
}
|
||||
pub fn getOrPutValueContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !Entry {
|
||||
pub fn getOrPutValueContext(self: *Self, allocator: Allocator, key: K, value: V, ctx: Context) !Entry {
|
||||
const res = try self.getOrPutAdapted(allocator, key, ctx);
|
||||
if (!res.found_existing) {
|
||||
res.key_ptr.* = key;
|
||||
@ -1403,18 +1403,18 @@ pub fn HashMapUnmanaged(
|
||||
return @truncate(Size, max_load - self.available);
|
||||
}
|
||||
|
||||
fn growIfNeeded(self: *Self, allocator: *Allocator, new_count: Size, ctx: Context) !void {
|
||||
fn growIfNeeded(self: *Self, allocator: Allocator, new_count: Size, ctx: Context) !void {
|
||||
if (new_count > self.available) {
|
||||
try self.grow(allocator, capacityForSize(self.load() + new_count), ctx);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn clone(self: Self, allocator: *Allocator) !Self {
|
||||
pub fn clone(self: Self, allocator: Allocator) !Self {
|
||||
if (@sizeOf(Context) != 0)
|
||||
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call cloneContext instead.");
|
||||
return self.cloneContext(allocator, @as(Context, undefined));
|
||||
}
|
||||
pub fn cloneContext(self: Self, allocator: *Allocator, new_ctx: anytype) !HashMapUnmanaged(K, V, @TypeOf(new_ctx), max_load_percentage) {
|
||||
pub fn cloneContext(self: Self, allocator: Allocator, new_ctx: anytype) !HashMapUnmanaged(K, V, @TypeOf(new_ctx), max_load_percentage) {
|
||||
var other = HashMapUnmanaged(K, V, @TypeOf(new_ctx), max_load_percentage){};
|
||||
if (self.size == 0)
|
||||
return other;
|
||||
@ -1439,7 +1439,7 @@ pub fn HashMapUnmanaged(
|
||||
return other;
|
||||
}
|
||||
|
||||
fn grow(self: *Self, allocator: *Allocator, new_capacity: Size, ctx: Context) !void {
|
||||
fn grow(self: *Self, allocator: Allocator, new_capacity: Size, ctx: Context) !void {
|
||||
@setCold(true);
|
||||
const new_cap = std.math.max(new_capacity, minimal_capacity);
|
||||
assert(new_cap > self.capacity());
|
||||
@ -1470,7 +1470,7 @@ pub fn HashMapUnmanaged(
|
||||
std.mem.swap(Self, self, &map);
|
||||
}
|
||||
|
||||
fn allocate(self: *Self, allocator: *Allocator, new_capacity: Size) !void {
|
||||
fn allocate(self: *Self, allocator: Allocator, new_capacity: Size) !void {
|
||||
const header_align = @alignOf(Header);
|
||||
const key_align = if (@sizeOf(K) == 0) 1 else @alignOf(K);
|
||||
const val_align = if (@sizeOf(V) == 0) 1 else @alignOf(V);
|
||||
@ -1503,7 +1503,7 @@ pub fn HashMapUnmanaged(
|
||||
self.metadata = @intToPtr([*]Metadata, metadata);
|
||||
}
|
||||
|
||||
fn deallocate(self: *Self, allocator: *Allocator) void {
|
||||
fn deallocate(self: *Self, allocator: Allocator) void {
|
||||
if (self.metadata == null) return;
|
||||
|
||||
const header_align = @alignOf(Header);
|
||||
|
||||
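The unmanaged hash map stores no allocator and takes the std.mem.Allocator interface by value on every allocating call, as the new signatures above show. A small usage sketch; the test name is illustrative, and std.AutoHashMapUnmanaged is assumed to follow the HashMapUnmanaged signatures changed in this diff:

const std = @import("std");

test "unmanaged map takes the allocator per call" {
    const allocator = std.testing.allocator;

    var map = std.AutoHashMapUnmanaged(u32, []const u8){};
    defer map.deinit(allocator); // the map holds no allocator; pass it again here

    try map.put(allocator, 1, "one");
    try map.put(allocator, 2, "two");
    try std.testing.expect(map.get(1) != null);
}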
236
lib/std/heap.zig
@ -97,13 +97,12 @@ const CAllocator = struct {

}

fn alloc(
allocator: *Allocator,
_: *u1,
len: usize,
alignment: u29,
len_align: u29,
return_address: usize,
) error{OutOfMemory}![]u8 {
_ = allocator;
_ = return_address;
assert(len > 0);
assert(std.math.isPowerOfTwo(alignment));

@ -124,14 +123,13 @@ const CAllocator = struct {

}

fn resize(
allocator: *Allocator,
_: *u1,
buf: []u8,
buf_align: u29,
new_len: usize,
len_align: u29,
return_address: usize,
) Allocator.Error!usize {
_ = allocator;
_ = buf_align;
_ = return_address;
if (new_len == 0) {

@ -154,10 +152,11 @@ const CAllocator = struct {

/// Supports the full Allocator interface, including alignment, and exploiting
/// `malloc_usable_size` if available. For an allocator that directly calls
/// `malloc`/`free`, see `raw_c_allocator`.
pub const c_allocator = &c_allocator_state;
var c_allocator_state = Allocator{
.allocFn = CAllocator.alloc,
.resizeFn = CAllocator.resize,
pub const c_allocator = blk: {
// TODO: This is an ugly hack, it could be improved once https://github.com/ziglang/zig/issues/6706 is implemented
// allowing the use of `*void` but it would still be ugly
var tmp: u1 = 0;
break :blk Allocator.init(&tmp, CAllocator.alloc, CAllocator.resize);
};
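With this change c_allocator (and raw_c_allocator below) become plain Allocator values built with Allocator.init, so callers use them directly rather than taking the address of a global state struct. A minimal usage sketch, assuming a build that links libc; the main function is illustrative:

const std = @import("std");

pub fn main() !void {
    const allocator = std.heap.c_allocator; // an Allocator value, no `&` needed

    const buf = try allocator.alloc(u8, 256);
    defer allocator.free(buf);

    std.mem.set(u8, buf, 0);
}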
/// Asserts allocations are within `@alignOf(std.c.max_align_t)` and directly calls
|
||||
@ -165,20 +164,20 @@ var c_allocator_state = Allocator{
|
||||
/// This allocator is safe to use as the backing allocator with
|
||||
/// `ArenaAllocator` for example and is more optimal in such a case
|
||||
/// than `c_allocator`.
|
||||
pub const raw_c_allocator = &raw_c_allocator_state;
|
||||
var raw_c_allocator_state = Allocator{
|
||||
.allocFn = rawCAlloc,
|
||||
.resizeFn = rawCResize,
|
||||
pub const raw_c_allocator = blk: {
|
||||
// TODO: This is an ugly hack, it could be improved once https://github.com/ziglang/zig/issues/6706 is implemented
|
||||
// allowing the use of `*void` but it would still be ugly
|
||||
var tmp: u1 = 0;
|
||||
break :blk Allocator.init(&tmp, rawCAlloc, rawCResize);
|
||||
};
|
||||
|
||||
fn rawCAlloc(
|
||||
self: *Allocator,
|
||||
_: *u1,
|
||||
len: usize,
|
||||
ptr_align: u29,
|
||||
len_align: u29,
|
||||
ret_addr: usize,
|
||||
) Allocator.Error![]u8 {
|
||||
_ = self;
|
||||
_ = len_align;
|
||||
_ = ret_addr;
|
||||
assert(ptr_align <= @alignOf(std.c.max_align_t));
|
||||
@ -187,14 +186,13 @@ fn rawCAlloc(
|
||||
}
|
||||
|
||||
fn rawCResize(
|
||||
self: *Allocator,
|
||||
_: *u1,
|
||||
buf: []u8,
|
||||
old_align: u29,
|
||||
new_len: usize,
|
||||
len_align: u29,
|
||||
ret_addr: usize,
|
||||
) Allocator.Error!usize {
|
||||
_ = self;
|
||||
_ = old_align;
|
||||
_ = ret_addr;
|
||||
if (new_len == 0) {
|
||||
@ -210,19 +208,18 @@ fn rawCResize(
|
||||
/// This allocator makes a syscall directly for every allocation and free.
|
||||
/// Thread-safe and lock-free.
|
||||
pub const page_allocator = if (builtin.target.isWasm())
|
||||
&wasm_page_allocator_state
|
||||
else if (builtin.target.os.tag == .freestanding)
|
||||
blk: {
|
||||
// TODO: This is an ugly hack, it could be improved once https://github.com/ziglang/zig/issues/6706 is implemented
|
||||
// allowing the use of `*void` but it would still be ugly
|
||||
var tmp: u1 = 0;
|
||||
break :blk Allocator.init(&tmp, WasmPageAllocator.alloc, WasmPageAllocator.resize);
|
||||
} else if (builtin.target.os.tag == .freestanding)
|
||||
root.os.heap.page_allocator
|
||||
else
|
||||
&page_allocator_state;
|
||||
|
||||
var page_allocator_state = Allocator{
|
||||
.allocFn = PageAllocator.alloc,
|
||||
.resizeFn = PageAllocator.resize,
|
||||
};
|
||||
var wasm_page_allocator_state = Allocator{
|
||||
.allocFn = WasmPageAllocator.alloc,
|
||||
.resizeFn = WasmPageAllocator.resize,
|
||||
else blk: {
|
||||
// TODO: This is an ugly hack, it could be improved once https://github.com/ziglang/zig/issues/6706 is implemented
|
||||
// allowing the use of `*void` but it would still be ugly
|
||||
var tmp: u1 = 0;
|
||||
break :blk Allocator.init(&tmp, PageAllocator.alloc, PageAllocator.resize);
|
||||
};
|
||||
|
||||
/// Verifies that the adjusted length will still map to the full length
|
||||
@ -236,8 +233,7 @@ pub fn alignPageAllocLen(full_len: usize, len: usize, len_align: u29) usize {
|
||||
pub var next_mmap_addr_hint: ?[*]align(mem.page_size) u8 = null;
|
||||
|
||||
const PageAllocator = struct {
|
||||
fn alloc(allocator: *Allocator, n: usize, alignment: u29, len_align: u29, ra: usize) error{OutOfMemory}![]u8 {
|
||||
_ = allocator;
|
||||
fn alloc(_: *u1, n: usize, alignment: u29, len_align: u29, ra: usize) error{OutOfMemory}![]u8 {
|
||||
_ = ra;
|
||||
assert(n > 0);
|
||||
const aligned_len = mem.alignForward(n, mem.page_size);
|
||||
@ -335,14 +331,13 @@ const PageAllocator = struct {
|
||||
}
|
||||
|
||||
fn resize(
|
||||
allocator: *Allocator,
|
||||
_: *u1,
|
||||
buf_unaligned: []u8,
|
||||
buf_align: u29,
|
||||
new_size: usize,
|
||||
len_align: u29,
|
||||
return_address: usize,
|
||||
) Allocator.Error!usize {
|
||||
_ = allocator;
|
||||
_ = buf_align;
|
||||
_ = return_address;
|
||||
const new_size_aligned = mem.alignForward(new_size, mem.page_size);
|
||||
@ -492,8 +487,7 @@ const WasmPageAllocator = struct {
|
||||
return mem.alignForward(memsize, mem.page_size) / mem.page_size;
|
||||
}
|
||||
|
||||
fn alloc(allocator: *Allocator, len: usize, alignment: u29, len_align: u29, ra: usize) error{OutOfMemory}![]u8 {
|
||||
_ = allocator;
|
||||
fn alloc(_: *u1, len: usize, alignment: u29, len_align: u29, ra: usize) error{OutOfMemory}![]u8 {
|
||||
_ = ra;
|
||||
const page_count = nPages(len);
|
||||
const page_idx = try allocPages(page_count, alignment);
|
||||
@ -548,14 +542,13 @@ const WasmPageAllocator = struct {
|
||||
}
|
||||
|
||||
fn resize(
|
||||
allocator: *Allocator,
|
||||
_: *u1,
|
||||
buf: []u8,
|
||||
buf_align: u29,
|
||||
new_len: usize,
|
||||
len_align: u29,
|
||||
return_address: usize,
|
||||
) error{OutOfMemory}!usize {
|
||||
_ = allocator;
|
||||
_ = buf_align;
|
||||
_ = return_address;
|
||||
const aligned_len = mem.alignForward(buf.len, mem.page_size);
|
||||
@ -572,21 +565,20 @@ const WasmPageAllocator = struct {
|
||||
|
||||
pub const HeapAllocator = switch (builtin.os.tag) {
|
||||
.windows => struct {
|
||||
allocator: Allocator,
|
||||
heap_handle: ?HeapHandle,
|
||||
|
||||
const HeapHandle = os.windows.HANDLE;
|
||||
|
||||
pub fn init() HeapAllocator {
|
||||
return HeapAllocator{
|
||||
.allocator = Allocator{
|
||||
.allocFn = alloc,
|
||||
.resizeFn = resize,
|
||||
},
|
||||
.heap_handle = null,
|
||||
};
|
||||
}
|
||||
|
||||
pub fn getAllocator(self: *HeapAllocator) Allocator {
|
||||
return Allocator.init(self, alloc, resize);
|
||||
}
|
||||
|
||||
pub fn deinit(self: *HeapAllocator) void {
|
||||
if (self.heap_handle) |heap_handle| {
|
||||
os.windows.HeapDestroy(heap_handle);
|
||||
@ -598,14 +590,13 @@ pub const HeapAllocator = switch (builtin.os.tag) {
|
||||
}
|
||||
|
||||
fn alloc(
|
||||
allocator: *Allocator,
|
||||
self: *HeapAllocator,
|
||||
n: usize,
|
||||
ptr_align: u29,
|
||||
len_align: u29,
|
||||
return_address: usize,
|
||||
) error{OutOfMemory}![]u8 {
|
||||
_ = return_address;
|
||||
const self = @fieldParentPtr(HeapAllocator, "allocator", allocator);
|
||||
|
||||
const amt = n + ptr_align - 1 + @sizeOf(usize);
|
||||
const optional_heap_handle = @atomicLoad(?HeapHandle, &self.heap_handle, .SeqCst);
|
||||
@ -632,7 +623,7 @@ pub const HeapAllocator = switch (builtin.os.tag) {
|
||||
}
|
||||
|
||||
fn resize(
|
||||
allocator: *Allocator,
|
||||
self: *HeapAllocator,
|
||||
buf: []u8,
|
||||
buf_align: u29,
|
||||
new_size: usize,
|
||||
@ -641,7 +632,6 @@ pub const HeapAllocator = switch (builtin.os.tag) {
|
||||
) error{OutOfMemory}!usize {
|
||||
_ = buf_align;
|
||||
_ = return_address;
|
||||
const self = @fieldParentPtr(HeapAllocator, "allocator", allocator);
|
||||
if (new_size == 0) {
|
||||
os.windows.HeapFree(self.heap_handle.?, 0, @intToPtr(*c_void, getRecordPtr(buf).*));
|
||||
return 0;
|
||||
@ -682,21 +672,27 @@ fn sliceContainsSlice(container: []u8, slice: []u8) bool {

}

pub const FixedBufferAllocator = struct {
allocator: Allocator,
end_index: usize,
buffer: []u8,

pub fn init(buffer: []u8) FixedBufferAllocator {
return FixedBufferAllocator{
.allocator = Allocator{
.allocFn = alloc,
.resizeFn = resize,
},
.buffer = buffer,
.end_index = 0,
};
}

/// *WARNING* using this at the same time as the interface returned by `getThreadSafeAllocator` is not thread safe
pub fn getAllocator(self: *FixedBufferAllocator) Allocator {
return Allocator.init(self, alloc, resize);
}

/// Provides a lock free thread safe `Allocator` interface to the underlying `FixedBufferAllocator`
/// *WARNING* using this at the same time as the interface returned by `getAllocator` is not thread safe
pub fn getThreadSafeAllocator(self: *FixedBufferAllocator) Allocator {
return Allocator.init(self, threadSafeAlloc, Allocator.NoResize(FixedBufferAllocator).noResize);
}

pub fn ownsPtr(self: *FixedBufferAllocator, ptr: [*]u8) bool {
return sliceContainsPtr(self.buffer, ptr);
}
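getAllocator and getThreadSafeAllocator wrap the same FixedBufferAllocator state, so only one of the two interfaces should be in use at a time, as the warnings above note. A short sketch of the thread-safe variant under that assumption; the main function and buffer size are illustrative:

const std = @import("std");

pub fn main() !void {
    var buffer: [4096]u8 = undefined;
    var fba = std.heap.FixedBufferAllocator.init(buffer[0..]);

    // Lock-free interface that may be shared between threads
    // (do not mix with fba.getAllocator() at the same time).
    const allocator = fba.getThreadSafeAllocator();

    const a = try allocator.alloc(u8, 16);
    const b = try allocator.alloc(u8, 16);
    _ = a;
    _ = b;

    fba.reset(); // reclaim the whole buffer at once
}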
@ -712,10 +708,9 @@ pub const FixedBufferAllocator = struct {
return buf.ptr + buf.len == self.buffer.ptr + self.end_index;
}

fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 {
fn alloc(self: *FixedBufferAllocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 {
_ = len_align;
_ = ra;
const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
const adjust_off = mem.alignPointerOffset(self.buffer.ptr + self.end_index, ptr_align) orelse
return error.OutOfMemory;
const adjusted_index = self.end_index + adjust_off;
@ -730,7 +725,7 @@ pub const FixedBufferAllocator = struct {
}

fn resize(
allocator: *Allocator,
self: *FixedBufferAllocator,
buf: []u8,
buf_align: u29,
new_size: usize,
@ -739,7 +734,6 @@ pub const FixedBufferAllocator = struct {
) Allocator.Error!usize {
_ = buf_align;
_ = return_address;
const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
assert(self.ownsSlice(buf)); // sanity check

if (!self.isLastAllocation(buf)) {
@ -762,36 +756,9 @@ pub const FixedBufferAllocator = struct {
return new_size;
}

pub fn reset(self: *FixedBufferAllocator) void {
self.end_index = 0;
}
};

pub const ThreadSafeFixedBufferAllocator = blk: {
if (builtin.single_threaded) {
break :blk FixedBufferAllocator;
} else {
// lock free
break :blk struct {
allocator: Allocator,
end_index: usize,
buffer: []u8,

pub fn init(buffer: []u8) ThreadSafeFixedBufferAllocator {
return ThreadSafeFixedBufferAllocator{
.allocator = Allocator{
.allocFn = alloc,
.resizeFn = Allocator.noResize,
},
.buffer = buffer,
.end_index = 0,
};
}

fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 {
fn threadSafeAlloc(self: *FixedBufferAllocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 {
_ = len_align;
_ = ra;
const self = @fieldParentPtr(ThreadSafeFixedBufferAllocator, "allocator", allocator);
var end_index = @atomicLoad(usize, &self.end_index, .SeqCst);
while (true) {
const adjust_off = mem.alignPointerOffset(self.buffer.ptr + end_index, ptr_align) orelse
@ -805,22 +772,18 @@ pub const ThreadSafeFixedBufferAllocator = blk: {
}
}

pub fn reset(self: *ThreadSafeFixedBufferAllocator) void {
pub fn reset(self: *FixedBufferAllocator) void {
self.end_index = 0;
}
};
}
};

pub fn stackFallback(comptime size: usize, fallback_allocator: *Allocator) StackFallbackAllocator(size) {
pub const ThreadSafeFixedBufferAllocator = @compileError("ThreadSafeFixedBufferAllocator has been replaced with `getThreadSafeAllocator` on FixedBufferAllocator");

pub fn stackFallback(comptime size: usize, fallback_allocator: Allocator) StackFallbackAllocator(size) {
return StackFallbackAllocator(size){
.buffer = undefined,
.fallback_allocator = fallback_allocator,
.fixed_buffer_allocator = undefined,
.allocator = Allocator{
.allocFn = StackFallbackAllocator(size).alloc,
.resizeFn = StackFallbackAllocator(size).resize,
},
};
}

@ -829,40 +792,38 @@ pub fn StackFallbackAllocator(comptime size: usize) type {
const Self = @This();

buffer: [size]u8,
allocator: Allocator,
fallback_allocator: *Allocator,
fallback_allocator: Allocator,
fixed_buffer_allocator: FixedBufferAllocator,

pub fn get(self: *Self) *Allocator {
/// WARNING: This functions both fetches a `std.mem.Allocator` interface to this allocator *and* resets the internal buffer allocator
pub fn get(self: *Self) Allocator {
self.fixed_buffer_allocator = FixedBufferAllocator.init(self.buffer[0..]);
return &self.allocator;
return Allocator.init(self, alloc, resize);
}

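Since `get()` now returns the interface by value and resets the internal buffer, it is fetched once before a batch of allocations. A hedged sketch (the buffer size and names are illustrative assumptions):

const std = @import("std");
test "stackFallback under allocgate (illustrative)" {
    var stack_alloc = std.heap.stackFallback(256, std.heap.page_allocator);
    const allocator = stack_alloc.get(); // fetches the interface and resets the buffer
    const small = try allocator.alloc(u8, 16); // likely served from the stack buffer
    try std.testing.expect(small.len == 16);
}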
fn alloc(
allocator: *Allocator,
self: *Self,
len: usize,
ptr_align: u29,
len_align: u29,
return_address: usize,
) error{OutOfMemory}![]u8 {
const self = @fieldParentPtr(Self, "allocator", allocator);
return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator.allocator, len, ptr_align, len_align, return_address) catch
return self.fallback_allocator.allocFn(self.fallback_allocator, len, ptr_align, len_align, return_address);
return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator, len, ptr_align, len_align, return_address) catch
return self.fallback_allocator.allocFn(self.fallback_allocator.ptr, len, ptr_align, len_align, return_address);
}

fn resize(
allocator: *Allocator,
self: *Self,
buf: []u8,
buf_align: u29,
new_len: usize,
len_align: u29,
return_address: usize,
) error{OutOfMemory}!usize {
const self = @fieldParentPtr(Self, "allocator", allocator);
if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) {
return FixedBufferAllocator.resize(&self.fixed_buffer_allocator.allocator, buf, buf_align, new_len, len_align, return_address);
return FixedBufferAllocator.resize(&self.fixed_buffer_allocator, buf, buf_align, new_len, len_align, return_address);
} else {
return self.fallback_allocator.resizeFn(self.fallback_allocator, buf, buf_align, new_len, len_align, return_address);
return self.fallback_allocator.resizeFn(self.fallback_allocator.ptr, buf, buf_align, new_len, len_align, return_address);
}
}
};
@ -950,8 +911,8 @@ test "HeapAllocator" {
if (builtin.os.tag == .windows) {
var heap_allocator = HeapAllocator.init();
defer heap_allocator.deinit();
const allocator = heap_allocator.getAllocator();

const allocator = &heap_allocator.allocator;
try testAllocator(allocator);
try testAllocatorAligned(allocator);
try testAllocatorLargeAlignment(allocator);
@ -962,36 +923,39 @@ test "HeapAllocator" {
test "ArenaAllocator" {
var arena_allocator = ArenaAllocator.init(page_allocator);
defer arena_allocator.deinit();
const allocator = arena_allocator.getAllocator();

try testAllocator(&arena_allocator.allocator);
try testAllocatorAligned(&arena_allocator.allocator);
try testAllocatorLargeAlignment(&arena_allocator.allocator);
try testAllocatorAlignedShrink(&arena_allocator.allocator);
try testAllocator(allocator);
try testAllocatorAligned(allocator);
try testAllocatorLargeAlignment(allocator);
try testAllocatorAlignedShrink(allocator);
}

var test_fixed_buffer_allocator_memory: [800000 * @sizeOf(u64)]u8 = undefined;
test "FixedBufferAllocator" {
var fixed_buffer_allocator = mem.validationWrap(FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]));
const allocator = fixed_buffer_allocator.getAllocator();

try testAllocator(&fixed_buffer_allocator.allocator);
try testAllocatorAligned(&fixed_buffer_allocator.allocator);
try testAllocatorLargeAlignment(&fixed_buffer_allocator.allocator);
try testAllocatorAlignedShrink(&fixed_buffer_allocator.allocator);
try testAllocator(allocator);
try testAllocatorAligned(allocator);
try testAllocatorLargeAlignment(allocator);
try testAllocatorAlignedShrink(allocator);
}

test "FixedBufferAllocator.reset" {
var buf: [8]u8 align(@alignOf(u64)) = undefined;
var fba = FixedBufferAllocator.init(buf[0..]);
const allocator = fba.getAllocator();

const X = 0xeeeeeeeeeeeeeeee;
const Y = 0xffffffffffffffff;

var x = try fba.allocator.create(u64);
var x = try allocator.create(u64);
x.* = X;
try testing.expectError(error.OutOfMemory, fba.allocator.create(u64));
try testing.expectError(error.OutOfMemory, allocator.create(u64));

fba.reset();
var y = try fba.allocator.create(u64);
var y = try allocator.create(u64);
y.* = Y;

// we expect Y to have overwritten X.
@ -1014,23 +978,25 @@ test "FixedBufferAllocator Reuse memory on realloc" {
// check if we re-use the memory
{
var fixed_buffer_allocator = FixedBufferAllocator.init(small_fixed_buffer[0..]);
const allocator = fixed_buffer_allocator.getAllocator();

var slice0 = try fixed_buffer_allocator.allocator.alloc(u8, 5);
var slice0 = try allocator.alloc(u8, 5);
try testing.expect(slice0.len == 5);
var slice1 = try fixed_buffer_allocator.allocator.realloc(slice0, 10);
var slice1 = try allocator.realloc(slice0, 10);
try testing.expect(slice1.ptr == slice0.ptr);
try testing.expect(slice1.len == 10);
try testing.expectError(error.OutOfMemory, fixed_buffer_allocator.allocator.realloc(slice1, 11));
try testing.expectError(error.OutOfMemory, allocator.realloc(slice1, 11));
}
// check that we don't re-use the memory if it's not the most recent block
{
var fixed_buffer_allocator = FixedBufferAllocator.init(small_fixed_buffer[0..]);
const allocator = fixed_buffer_allocator.getAllocator();

var slice0 = try fixed_buffer_allocator.allocator.alloc(u8, 2);
var slice0 = try allocator.alloc(u8, 2);
slice0[0] = 1;
slice0[1] = 2;
var slice1 = try fixed_buffer_allocator.allocator.alloc(u8, 2);
var slice2 = try fixed_buffer_allocator.allocator.realloc(slice0, 4);
var slice1 = try allocator.alloc(u8, 2);
var slice2 = try allocator.realloc(slice0, 4);
try testing.expect(slice0.ptr != slice2.ptr);
try testing.expect(slice1.ptr != slice2.ptr);
try testing.expect(slice2[0] == 1);
@ -1038,19 +1004,19 @@ test "FixedBufferAllocator Reuse memory on realloc" {
}
}

test "ThreadSafeFixedBufferAllocator" {
var fixed_buffer_allocator = ThreadSafeFixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]);
test "Thread safe FixedBufferAllocator" {
var fixed_buffer_allocator = FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]);

try testAllocator(&fixed_buffer_allocator.allocator);
try testAllocatorAligned(&fixed_buffer_allocator.allocator);
try testAllocatorLargeAlignment(&fixed_buffer_allocator.allocator);
try testAllocatorAlignedShrink(&fixed_buffer_allocator.allocator);
try testAllocator(fixed_buffer_allocator.getThreadSafeAllocator());
try testAllocatorAligned(fixed_buffer_allocator.getThreadSafeAllocator());
try testAllocatorLargeAlignment(fixed_buffer_allocator.getThreadSafeAllocator());
try testAllocatorAlignedShrink(fixed_buffer_allocator.getThreadSafeAllocator());
}

/// This one should not try alignments that exceed what C malloc can handle.
pub fn testAllocator(base_allocator: *mem.Allocator) !void {
pub fn testAllocator(base_allocator: mem.Allocator) !void {
var validationAllocator = mem.validationWrap(base_allocator);
const allocator = &validationAllocator.allocator;
const allocator = validationAllocator.getAllocator();

var slice = try allocator.alloc(*i32, 100);
try testing.expect(slice.len == 100);
@ -1094,9 +1060,9 @@ pub fn testAllocator(base_allocator: *mem.Allocator) !void {
allocator.free(oversize);
}

pub fn testAllocatorAligned(base_allocator: *mem.Allocator) !void {
pub fn testAllocatorAligned(base_allocator: mem.Allocator) !void {
var validationAllocator = mem.validationWrap(base_allocator);
const allocator = &validationAllocator.allocator;
const allocator = validationAllocator.getAllocator();

// Test a few alignment values, smaller and bigger than the type's one
inline for ([_]u29{ 1, 2, 4, 8, 16, 32, 64 }) |alignment| {
@ -1124,9 +1090,9 @@ pub fn testAllocatorAligned(base_allocator: *mem.Allocator) !void {
}
}

pub fn testAllocatorLargeAlignment(base_allocator: *mem.Allocator) !void {
pub fn testAllocatorLargeAlignment(base_allocator: mem.Allocator) !void {
var validationAllocator = mem.validationWrap(base_allocator);
const allocator = &validationAllocator.allocator;
const allocator = validationAllocator.getAllocator();

//Maybe a platform's page_size is actually the same as or
// very near usize?
@ -1156,12 +1122,12 @@ pub fn testAllocatorLargeAlignment(base_allocator: *mem.Allocator) !void {
allocator.free(slice);
}

pub fn testAllocatorAlignedShrink(base_allocator: *mem.Allocator) !void {
pub fn testAllocatorAlignedShrink(base_allocator: mem.Allocator) !void {
var validationAllocator = mem.validationWrap(base_allocator);
const allocator = &validationAllocator.allocator;
const allocator = validationAllocator.getAllocator();

var debug_buffer: [1000]u8 = undefined;
const debug_allocator = &FixedBufferAllocator.init(&debug_buffer).allocator;
const debug_allocator = FixedBufferAllocator.init(&debug_buffer).getAllocator();

const alloc_size = mem.page_size * 2 + 50;
var slice = try allocator.alignedAlloc(u8, 16, alloc_size);

@ -6,9 +6,7 @@ const Allocator = std.mem.Allocator;
/// This allocator takes an existing allocator, wraps it, and provides an interface
/// where you can allocate without freeing, and then free it all together.
pub const ArenaAllocator = struct {
allocator: Allocator,

child_allocator: *Allocator,
child_allocator: Allocator,
state: State,

/// Inner state of ArenaAllocator. Can be stored rather than the entire ArenaAllocator
@ -17,21 +15,21 @@ pub const ArenaAllocator = struct {
buffer_list: std.SinglyLinkedList([]u8) = @as(std.SinglyLinkedList([]u8), .{}),
end_index: usize = 0,

pub fn promote(self: State, child_allocator: *Allocator) ArenaAllocator {
pub fn promote(self: State, child_allocator: Allocator) ArenaAllocator {
return .{
.allocator = Allocator{
.allocFn = alloc,
.resizeFn = resize,
},
.child_allocator = child_allocator,
.state = self,
};
}
};

pub fn getAllocator(self: *ArenaAllocator) Allocator {
return Allocator.init(self, alloc, resize);
}

const BufNode = std.SinglyLinkedList([]u8).Node;

pub fn init(child_allocator: *Allocator) ArenaAllocator {
pub fn init(child_allocator: Allocator) ArenaAllocator {
return (State{}).promote(child_allocator);
}

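Under the new interface an arena is built from an `Allocator` value and hands out its own interface through `getAllocator`; everything is released by a single `deinit`. A minimal sketch (the allocation size and names are illustrative):

const std = @import("std");
test "ArenaAllocator via getAllocator (illustrative)" {
    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena.deinit(); // frees every allocation made through the arena
    const allocator = arena.getAllocator();
    const nums = try allocator.alloc(u32, 16);
    try std.testing.expect(nums.len == 16);
}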
@ -49,7 +47,7 @@ pub const ArenaAllocator = struct {
const actual_min_size = minimum_size + (@sizeOf(BufNode) + 16);
const big_enough_len = prev_len + actual_min_size;
const len = big_enough_len + big_enough_len / 2;
const buf = try self.child_allocator.allocFn(self.child_allocator, len, @alignOf(BufNode), 1, @returnAddress());
const buf = try self.child_allocator.allocFn(self.child_allocator.ptr, len, @alignOf(BufNode), 1, @returnAddress());
const buf_node = @ptrCast(*BufNode, @alignCast(@alignOf(BufNode), buf.ptr));
buf_node.* = BufNode{
.data = buf,
@ -60,10 +58,9 @@ pub const ArenaAllocator = struct {
return buf_node;
}

fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 {
fn alloc(self: *ArenaAllocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 {
_ = len_align;
_ = ra;
const self = @fieldParentPtr(ArenaAllocator, "allocator", allocator);

var cur_node = if (self.state.buffer_list.first) |first_node| first_node else try self.createNode(0, n + ptr_align);
while (true) {
@ -91,11 +88,10 @@ pub const ArenaAllocator = struct {
}
}

fn resize(allocator: *Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Allocator.Error!usize {
fn resize(self: *ArenaAllocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Allocator.Error!usize {
_ = buf_align;
_ = len_align;
_ = ret_addr;
const self = @fieldParentPtr(ArenaAllocator, "allocator", allocator);

const cur_node = self.state.buffer_list.first orelse return error.OutOfMemory;
const cur_buf = cur_node.data[@sizeOf(BufNode)..];

@ -172,11 +172,7 @@ pub const Config = struct {

pub fn GeneralPurposeAllocator(comptime config: Config) type {
return struct {
allocator: Allocator = Allocator{
.allocFn = alloc,
.resizeFn = resize,
},
backing_allocator: *Allocator = std.heap.page_allocator,
backing_allocator: Allocator = std.heap.page_allocator,
buckets: [small_bucket_count]?*BucketHeader = [1]?*BucketHeader{null} ** small_bucket_count,
large_allocations: LargeAllocTable = .{},
empty_buckets: if (config.retain_metadata) ?*BucketHeader else void =
@ -284,6 +280,10 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
}
};

pub fn getAllocator(self: *Self) Allocator {
return Allocator.init(self, alloc, resize);
}

fn bucketStackTrace(
bucket: *BucketHeader,
size_class: usize,
@ -388,7 +388,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
var it = self.large_allocations.iterator();
while (it.next()) |large| {
if (large.value_ptr.freed) {
_ = self.backing_allocator.resizeFn(self.backing_allocator, large.value_ptr.bytes, large.value_ptr.ptr_align, 0, 0, @returnAddress()) catch unreachable;
_ = self.backing_allocator.resizeFn(self.backing_allocator.ptr, large.value_ptr.bytes, large.value_ptr.ptr_align, 0, 0, @returnAddress()) catch unreachable;
}
}
}
@ -571,7 +571,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
const result_len = if (config.never_unmap and new_size == 0)
0
else
try self.backing_allocator.resizeFn(self.backing_allocator, old_mem, old_align, new_size, len_align, ret_addr);
try self.backing_allocator.resizeFn(self.backing_allocator.ptr, old_mem, old_align, new_size, len_align, ret_addr);

if (config.enable_memory_limit) {
entry.value_ptr.requested_size = new_size;
@ -606,15 +606,13 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
}

fn resize(
allocator: *Allocator,
self: *Self,
old_mem: []u8,
old_align: u29,
new_size: usize,
len_align: u29,
ret_addr: usize,
) Error!usize {
const self = @fieldParentPtr(Self, "allocator", allocator);

self.mutex.lock();
defer self.mutex.unlock();

@ -755,9 +753,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
return true;
}

fn alloc(allocator: *Allocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8 {
const self = @fieldParentPtr(Self, "allocator", allocator);

fn alloc(self: Allocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8 {
self.mutex.lock();
defer self.mutex.unlock();

@ -768,7 +764,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
const new_aligned_size = math.max(len, ptr_align);
if (new_aligned_size > largest_bucket_object_size) {
try self.large_allocations.ensureUnusedCapacity(self.backing_allocator, 1);
const slice = try self.backing_allocator.allocFn(self.backing_allocator, len, ptr_align, len_align, ret_addr);
const slice = try self.backing_allocator.allocFn(self.backing_allocator.ptr, len, ptr_align, len_align, ret_addr);

const gop = self.large_allocations.getOrPutAssumeCapacity(@ptrToInt(slice.ptr));
if (config.retain_metadata and !config.never_unmap) {
@ -834,7 +830,7 @@ const test_config = Config{};
test "small allocations - free in same order" {
var gpa = GeneralPurposeAllocator(test_config){};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
const allocator = &gpa.allocator;
const allocator = gpa.getAllocator();

var list = std.ArrayList(*u64).init(std.testing.allocator);
defer list.deinit();
@ -853,7 +849,7 @@ test "small allocations - free in same order" {
test "small allocations - free in reverse order" {
var gpa = GeneralPurposeAllocator(test_config){};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
const allocator = &gpa.allocator;
const allocator = gpa.getAllocator();

var list = std.ArrayList(*u64).init(std.testing.allocator);
defer list.deinit();
@ -872,7 +868,7 @@ test "small allocations - free in reverse order" {
test "large allocations" {
var gpa = GeneralPurposeAllocator(test_config){};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
const allocator = &gpa.allocator;
const allocator = gpa.getAllocator();

const ptr1 = try allocator.alloc(u64, 42768);
const ptr2 = try allocator.alloc(u64, 52768);
@ -885,7 +881,7 @@ test "large allocations" {
test "realloc" {
var gpa = GeneralPurposeAllocator(test_config){};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
const allocator = &gpa.allocator;
const allocator = gpa.getAllocator();

var slice = try allocator.alignedAlloc(u8, @alignOf(u32), 1);
defer allocator.free(slice);
@ -907,7 +903,7 @@ test "realloc" {
test "shrink" {
var gpa = GeneralPurposeAllocator(test_config){};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
const allocator = &gpa.allocator;
const allocator = gpa.getAllocator();

var slice = try allocator.alloc(u8, 20);
defer allocator.free(slice);
@ -930,7 +926,7 @@ test "shrink" {
test "large object - grow" {
var gpa = GeneralPurposeAllocator(test_config){};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
const allocator = &gpa.allocator;
const allocator = gpa.getAllocator();

var slice1 = try allocator.alloc(u8, page_size * 2 - 20);
defer allocator.free(slice1);
@ -948,7 +944,7 @@ test "large object - grow" {
test "realloc small object to large object" {
var gpa = GeneralPurposeAllocator(test_config){};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
const allocator = &gpa.allocator;
const allocator = gpa.getAllocator();

var slice = try allocator.alloc(u8, 70);
defer allocator.free(slice);
@ -965,7 +961,7 @@ test "realloc small object to large object" {
test "shrink large object to large object" {
var gpa = GeneralPurposeAllocator(test_config){};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
const allocator = &gpa.allocator;
const allocator = gpa.getAllocator();

var slice = try allocator.alloc(u8, page_size * 2 + 50);
defer allocator.free(slice);
@ -988,10 +984,10 @@ test "shrink large object to large object" {
test "shrink large object to large object with larger alignment" {
var gpa = GeneralPurposeAllocator(test_config){};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
const allocator = &gpa.allocator;
const allocator = gpa.getAllocator();

var debug_buffer: [1000]u8 = undefined;
const debug_allocator = &std.heap.FixedBufferAllocator.init(&debug_buffer).allocator;
const debug_allocator = std.heap.FixedBufferAllocator.init(&debug_buffer).getAllocator();

const alloc_size = page_size * 2 + 50;
var slice = try allocator.alignedAlloc(u8, 16, alloc_size);
@ -1023,7 +1019,7 @@ test "shrink large object to large object with larger alignment" {
test "realloc large object to small object" {
var gpa = GeneralPurposeAllocator(test_config){};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
const allocator = &gpa.allocator;
const allocator = gpa.getAllocator();

var slice = try allocator.alloc(u8, page_size * 2 + 50);
defer allocator.free(slice);
@ -1041,7 +1037,7 @@ test "overrideable mutexes" {
.mutex = std.Thread.Mutex{},
};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
const allocator = &gpa.allocator;
const allocator = gpa.getAllocator();

const ptr = try allocator.create(i32);
defer allocator.destroy(ptr);
@ -1050,7 +1046,7 @@ test "overrideable mutexes" {
test "non-page-allocator backing allocator" {
var gpa = GeneralPurposeAllocator(.{}){ .backing_allocator = std.testing.allocator };
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
const allocator = &gpa.allocator;
const allocator = gpa.getAllocator();

const ptr = try allocator.create(i32);
defer allocator.destroy(ptr);
@ -1059,10 +1055,10 @@ test "non-page-allocator backing allocator" {
test "realloc large object to larger alignment" {
var gpa = GeneralPurposeAllocator(test_config){};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
const allocator = &gpa.allocator;
const allocator = gpa.getAllocator();

var debug_buffer: [1000]u8 = undefined;
const debug_allocator = &std.heap.FixedBufferAllocator.init(&debug_buffer).allocator;
const debug_allocator = std.heap.FixedBufferAllocator.init(&debug_buffer).getAllocator();

var slice = try allocator.alignedAlloc(u8, 16, page_size * 2 + 50);
defer allocator.free(slice);
@ -1098,9 +1094,9 @@ test "realloc large object to larger alignment" {

test "large object shrinks to small but allocation fails during shrink" {
var failing_allocator = std.testing.FailingAllocator.init(std.heap.page_allocator, 3);
var gpa = GeneralPurposeAllocator(.{}){ .backing_allocator = &failing_allocator.allocator };
var gpa = GeneralPurposeAllocator(.{}){ .backing_allocator = failing_allocator.getAllocator() };
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
const allocator = &gpa.allocator;
const allocator = gpa.getAllocator();

var slice = try allocator.alloc(u8, page_size * 2 + 50);
defer allocator.free(slice);
@ -1117,7 +1113,7 @@ test "large object shrinks to small but allocation fails during shrink" {
test "objects of size 1024 and 2048" {
var gpa = GeneralPurposeAllocator(test_config){};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
const allocator = &gpa.allocator;
const allocator = gpa.getAllocator();

const slice = try allocator.alloc(u8, 1025);
const slice2 = try allocator.alloc(u8, 3000);
@ -1129,7 +1125,7 @@ test "objects of size 1024 and 2048" {
test "setting a memory cap" {
var gpa = GeneralPurposeAllocator(.{ .enable_memory_limit = true }){};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
const allocator = &gpa.allocator;
const allocator = gpa.getAllocator();

gpa.setRequestedMemoryLimit(1010);

@ -1158,9 +1154,9 @@ test "double frees" {
defer std.testing.expect(!backing_gpa.deinit()) catch @panic("leak");

const GPA = GeneralPurposeAllocator(.{ .safety = true, .never_unmap = true, .retain_metadata = true });
var gpa = GPA{ .backing_allocator = &backing_gpa.allocator };
var gpa = GPA{ .backing_allocator = backing_gpa.getAllocator() };
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
const allocator = &gpa.allocator;
const allocator = gpa.getAllocator();

// detect a small allocation double free, even though bucket is emptied
const index: usize = 6;

@ -5,33 +5,31 @@ const Allocator = std.mem.Allocator;
/// on every call to the allocator. Writer errors are ignored.
pub fn LogToWriterAllocator(comptime Writer: type) type {
return struct {
allocator: Allocator,
parent_allocator: *Allocator,
parent_allocator: Allocator,
writer: Writer,

const Self = @This();

pub fn init(parent_allocator: *Allocator, writer: Writer) Self {
pub fn init(parent_allocator: Allocator, writer: Writer) Self {
return Self{
.allocator = Allocator{
.allocFn = alloc,
.resizeFn = resize,
},
.parent_allocator = parent_allocator,
.writer = writer,
};
}

pub fn getAllocator(self: *Self) Allocator {
return Allocator.init(self, alloc, resize);
}

fn alloc(
allocator: *Allocator,
self: *Self,
len: usize,
ptr_align: u29,
len_align: u29,
ra: usize,
) error{OutOfMemory}![]u8 {
const self = @fieldParentPtr(Self, "allocator", allocator);
self.writer.print("alloc : {}", .{len}) catch {};
const result = self.parent_allocator.allocFn(self.parent_allocator, len, ptr_align, len_align, ra);
const result = self.parent_allocator.allocFn(self.parent_allocator.ptr, len, ptr_align, len_align, ra);
if (result) |_| {
self.writer.print(" success!\n", .{}) catch {};
} else |_| {
@ -41,14 +39,13 @@ pub fn LogToWriterAllocator(comptime Writer: type) type {
}

fn resize(
allocator: *Allocator,
self: *Self,
buf: []u8,
buf_align: u29,
new_len: usize,
len_align: u29,
ra: usize,
) error{OutOfMemory}!usize {
const self = @fieldParentPtr(Self, "allocator", allocator);
if (new_len == 0) {
self.writer.print("free : {}\n", .{buf.len}) catch {};
} else if (new_len <= buf.len) {
@ -56,7 +53,7 @@ pub fn LogToWriterAllocator(comptime Writer: type) type {
} else {
self.writer.print("expand: {} to {}", .{ buf.len, new_len }) catch {};
}
if (self.parent_allocator.resizeFn(self.parent_allocator, buf, buf_align, new_len, len_align, ra)) |resized_len| {
if (self.parent_allocator.resizeFn(self.parent_allocator.ptr, buf, buf_align, new_len, len_align, ra)) |resized_len| {
if (new_len > buf.len) {
self.writer.print(" success!\n", .{}) catch {};
}
@ -73,7 +70,7 @@ pub fn LogToWriterAllocator(comptime Writer: type) type {
/// This allocator is used in front of another allocator and logs to the provided writer
/// on every call to the allocator. Writer errors are ignored.
pub fn logToWriterAllocator(
parent_allocator: *Allocator,
parent_allocator: Allocator,
writer: anytype,
) LogToWriterAllocator(@TypeOf(writer)) {
return LogToWriterAllocator(@TypeOf(writer)).init(parent_allocator, writer);
@ -85,7 +82,7 @@ test "LogToWriterAllocator" {

var allocator_buf: [10]u8 = undefined;
var fixedBufferAllocator = std.mem.validationWrap(std.heap.FixedBufferAllocator.init(&allocator_buf));
const allocator = &logToWriterAllocator(&fixedBufferAllocator.allocator, fbs.writer()).allocator;
const allocator = logToWriterAllocator(fixedBufferAllocator.getAllocator(), fbs.writer()).getAllocator();

var a = try allocator.alloc(u8, 10);
a = allocator.shrink(a, 5);

@ -22,21 +22,20 @@ pub fn ScopedLoggingAllocator(
const log = std.log.scoped(scope);

return struct {
allocator: Allocator,
parent_allocator: *Allocator,
parent_allocator: Allocator,

const Self = @This();

pub fn init(parent_allocator: *Allocator) Self {
pub fn init(parent_allocator: Allocator) Self {
return .{
.allocator = Allocator{
.allocFn = alloc,
.resizeFn = resize,
},
.parent_allocator = parent_allocator,
};
}

pub fn getAllocator(self: *Self) Allocator {
return Allocator.init(self, alloc, resize);
}

// This function is required as the `std.log.log` function is not public
inline fn logHelper(comptime log_level: std.log.Level, comptime format: []const u8, args: anytype) void {
switch (log_level) {
@ -48,13 +47,12 @@ pub fn ScopedLoggingAllocator(
}

fn alloc(
allocator: *Allocator,
self: *Self,
len: usize,
ptr_align: u29,
len_align: u29,
ra: usize,
) error{OutOfMemory}![]u8 {
const self = @fieldParentPtr(Self, "allocator", allocator);
const result = self.parent_allocator.allocFn(self.parent_allocator, len, ptr_align, len_align, ra);
if (result) |_| {
logHelper(
@ -73,15 +71,13 @@ pub fn ScopedLoggingAllocator(
}

fn resize(
allocator: *Allocator,
self: *Self,
buf: []u8,
buf_align: u29,
new_len: usize,
len_align: u29,
ra: usize,
) error{OutOfMemory}!usize {
const self = @fieldParentPtr(Self, "allocator", allocator);

if (self.parent_allocator.resizeFn(self.parent_allocator, buf, buf_align, new_len, len_align, ra)) |resized_len| {
if (new_len == 0) {
logHelper(success_log_level, "free - success - len: {}", .{buf.len});
@ -116,6 +112,6 @@ pub fn ScopedLoggingAllocator(
/// This allocator is used in front of another allocator and logs to `std.log`
/// on every call to the allocator.
/// For logging to a `std.io.Writer` see `std.heap.LogToWriterAllocator`
pub fn loggingAllocator(parent_allocator: *Allocator) LoggingAllocator(.debug, .err) {
pub fn loggingAllocator(parent_allocator: Allocator) LoggingAllocator(.debug, .err) {
return LoggingAllocator(.debug, .err).init(parent_allocator);
}

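A caller-side sketch of the logging wrapper after the refactor; it assumes `loggingAllocator` is reachable as `std.heap.loggingAllocator` (as the neighbouring doc comments suggest) and that the returned wrapper exposes the same `getAllocator` accessor shown above:

const std = @import("std");
test "loggingAllocator wrapping a GPA (illustrative)" {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    var logged = std.heap.loggingAllocator(gpa.getAllocator());
    const allocator = logged.getAllocator();
    const buf = try allocator.alloc(u8, 32);
    allocator.free(buf);
}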
@ -7,7 +7,7 @@ pub const BufferedAtomicFile = struct {
atomic_file: fs.AtomicFile,
file_writer: File.Writer,
buffered_writer: BufferedWriter,
allocator: *mem.Allocator,
allocator: mem.Allocator,

pub const buffer_size = 4096;
pub const BufferedWriter = std.io.BufferedWriter(buffer_size, File.Writer);
@ -16,7 +16,7 @@ pub const BufferedAtomicFile = struct {
/// TODO when https://github.com/ziglang/zig/issues/2761 is solved
/// this API will not need an allocator
pub fn create(
allocator: *mem.Allocator,
allocator: mem.Allocator,
dir: fs.Dir,
dest_path: []const u8,
atomic_file_options: fs.Dir.AtomicFileOptions,

@ -38,7 +38,7 @@ pub fn PeekStream(
}
},
.Dynamic => struct {
pub fn init(base: ReaderType, allocator: *mem.Allocator) Self {
pub fn init(base: ReaderType, allocator: mem.Allocator) Self {
return .{
.unbuffered_reader = base,
.fifo = FifoType.init(allocator),

@ -88,7 +88,7 @@ pub fn Reader(
/// memory would be greater than `max_size`, returns `error.StreamTooLong`.
/// Caller owns returned memory.
/// If this function returns an error, the contents from the stream read so far are lost.
pub fn readAllAlloc(self: Self, allocator: *mem.Allocator, max_size: usize) ![]u8 {
pub fn readAllAlloc(self: Self, allocator: mem.Allocator, max_size: usize) ![]u8 {
var array_list = std.ArrayList(u8).init(allocator);
defer array_list.deinit();
try self.readAllArrayList(&array_list, max_size);
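With the by-value `Allocator`, reading a whole stream looks like the sketch below; the file name and size cap are assumptions for illustration only:

const std = @import("std");
test "readAllAlloc with a value Allocator (illustrative)" {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    const allocator = gpa.getAllocator();
    const file = try std.fs.cwd().openFile("example.txt", .{});
    defer file.close();
    const contents = try file.reader().readAllAlloc(allocator, 1024 * 1024);
    defer allocator.free(contents);
    try std.testing.expect(contents.len <= 1024 * 1024);
}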
@ -127,7 +127,7 @@ pub fn Reader(
/// If this function returns an error, the contents from the stream read so far are lost.
pub fn readUntilDelimiterAlloc(
self: Self,
allocator: *mem.Allocator,
allocator: mem.Allocator,
delimiter: u8,
max_size: usize,
) ![]u8 {
@ -163,7 +163,7 @@ pub fn Reader(
/// If this function returns an error, the contents from the stream read so far are lost.
pub fn readUntilDelimiterOrEofAlloc(
self: Self,
allocator: *mem.Allocator,
allocator: mem.Allocator,
delimiter: u8,
max_size: usize,
) !?[]u8 {

@ -1476,7 +1476,7 @@ fn parsedEqual(a: anytype, b: @TypeOf(a)) bool {
}

pub const ParseOptions = struct {
allocator: ?*Allocator = null,
allocator: ?Allocator = null,

/// Behaviour when a duplicate field is encountered.
duplicate_field_behavior: enum {
@ -2033,7 +2033,7 @@ test "parse into tagged union" {

{ // failing allocations should be bubbled up instantly without trying next member
var fail_alloc = testing.FailingAllocator.init(testing.allocator, 0);
const options = ParseOptions{ .allocator = &fail_alloc.allocator };
const options = ParseOptions{ .allocator = fail_alloc.getAllocator() };
const T = union(enum) {
// both fields here match the input
string: []const u8,
@ -2081,7 +2081,7 @@ test "parse union bubbles up AllocatorRequired" {

test "parseFree descends into tagged union" {
var fail_alloc = testing.FailingAllocator.init(testing.allocator, 1);
const options = ParseOptions{ .allocator = &fail_alloc.allocator };
const options = ParseOptions{ .allocator = fail_alloc.getAllocator() };
const T = union(enum) {
int: i32,
float: f64,
@ -2328,7 +2328,7 @@ test "parse into double recursive union definition" {

/// A non-stream JSON parser which constructs a tree of Value's.
pub const Parser = struct {
allocator: *Allocator,
allocator: Allocator,
state: State,
copy_strings: bool,
// Stores parent nodes and un-combined Values.
@ -2341,7 +2341,7 @@ pub const Parser = struct {
Simple,
};

pub fn init(allocator: *Allocator, copy_strings: bool) Parser {
pub fn init(allocator: Allocator, copy_strings: bool) Parser {
return Parser{
.allocator = allocator,
.state = .Simple,
@ -2364,9 +2364,10 @@ pub const Parser = struct {

var arena = ArenaAllocator.init(p.allocator);
errdefer arena.deinit();
const allocator = arena.getAllocator();

while (try s.next()) |token| {
try p.transition(&arena.allocator, input, s.i - 1, token);
try p.transition(allocator, input, s.i - 1, token);
}

debug.assert(p.stack.items.len == 1);
@ -2379,7 +2380,7 @@ pub const Parser = struct {

// Even though p.allocator exists, we take an explicit allocator so that allocation state
// can be cleaned up on error correctly during a `parse` on call.
fn transition(p: *Parser, allocator: *Allocator, input: []const u8, i: usize, token: Token) !void {
fn transition(p: *Parser, allocator: Allocator, input: []const u8, i: usize, token: Token) !void {
switch (p.state) {
.ObjectKey => switch (token) {
.ObjectEnd => {
@ -2536,7 +2537,7 @@ pub const Parser = struct {
}
}

fn parseString(p: *Parser, allocator: *Allocator, s: std.meta.TagPayload(Token, Token.String), input: []const u8, i: usize) !Value {
fn parseString(p: *Parser, allocator: Allocator, s: std.meta.TagPayload(Token, Token.String), input: []const u8, i: usize) !Value {
const slice = s.slice(input, i);
switch (s.escapes) {
.None => return Value{ .String = if (p.copy_strings) try allocator.dupe(u8, slice) else slice },
@ -2737,7 +2738,7 @@ test "write json then parse it" {
try testing.expect(mem.eql(u8, tree.root.Object.get("str").?.String, "hello"));
}

fn testParse(arena_allocator: *std.mem.Allocator, json_str: []const u8) !Value {
fn testParse(arena_allocator: std.mem.Allocator, json_str: []const u8) !Value {
var p = Parser.init(arena_allocator, false);
return (try p.parse(json_str)).root;
}
@ -2745,13 +2746,13 @@ fn testParse(arena_allocator: *std.mem.Allocator, json_str: []const u8) !Value {
test "parsing empty string gives appropriate error" {
var arena_allocator = std.heap.ArenaAllocator.init(std.testing.allocator);
defer arena_allocator.deinit();
try testing.expectError(error.UnexpectedEndOfJson, testParse(&arena_allocator.allocator, ""));
try testing.expectError(error.UnexpectedEndOfJson, testParse(arena_allocator.getAllocator(), ""));
}

test "integer after float has proper type" {
var arena_allocator = std.heap.ArenaAllocator.init(std.testing.allocator);
defer arena_allocator.deinit();
const json = try testParse(&arena_allocator.allocator,
const json = try testParse(arena_allocator.getAllocator(),
\\{
\\  "float": 3.14,
\\  "ints": [1, 2, 3]
@ -2786,7 +2787,7 @@ test "escaped characters" {
\\}
;

const obj = (try testParse(&arena_allocator.allocator, input)).Object;
const obj = (try testParse(arena_allocator.getAllocator(), input)).Object;

try testing.expectEqualSlices(u8, obj.get("backslash").?.String, "\\");
try testing.expectEqualSlices(u8, obj.get("forwardslash").?.String, "/");
@ -2812,11 +2813,12 @@ test "string copy option" {

var arena_allocator = std.heap.ArenaAllocator.init(std.testing.allocator);
defer arena_allocator.deinit();
const allocator = arena_allocator.getAllocator();

const tree_nocopy = try Parser.init(&arena_allocator.allocator, false).parse(input);
const tree_nocopy = try Parser.init(allocator, false).parse(input);
const obj_nocopy = tree_nocopy.root.Object;

const tree_copy = try Parser.init(&arena_allocator.allocator, true).parse(input);
const tree_copy = try Parser.init(allocator, true).parse(input);
const obj_copy = tree_copy.root.Object;

for ([_][]const u8{ "noescape", "simple", "unicode", "surrogatepair" }) |field_name| {

@ -243,7 +243,7 @@ test "json write stream" {
try w.beginObject();

try w.objectField("object");
try w.emitJson(try getJsonObject(&arena_allocator.allocator));
try w.emitJson(try getJsonObject(arena_allocator.getAllocator()));

try w.objectField("string");
try w.emitString("This is a string");
@ -286,7 +286,7 @@ test "json write stream" {
try std.testing.expect(std.mem.eql(u8, expected, result));
}

fn getJsonObject(allocator: *std.mem.Allocator) !std.json.Value {
fn getJsonObject(allocator: std.mem.Allocator) !std.json.Value {
var value = std.json.Value{ .Object = std.json.ObjectMap.init(allocator) };
try value.Object.put("one", std.json.Value{ .Integer = @intCast(i64, 1) });
try value.Object.put("two", std.json.Value{ .Float = 2.0 });

@ -142,7 +142,7 @@ pub const Mutable = struct {

/// Asserts that the allocator owns the limbs memory. If this is not the case,
/// use `toConst().toManaged()`.
pub fn toManaged(self: Mutable, allocator: *Allocator) Managed {
pub fn toManaged(self: Mutable, allocator: Allocator) Managed {
return .{
.allocator = allocator,
.limbs = self.limbs,
@ -283,7 +283,7 @@ pub const Mutable = struct {
base: u8,
value: []const u8,
limbs_buffer: []Limb,
allocator: ?*Allocator,
allocator: ?Allocator,
) error{InvalidCharacter}!void {
assert(base >= 2 and base <= 16);

@ -608,7 +608,7 @@ pub const Mutable = struct {
/// rma is given by `a.limbs.len + b.limbs.len`.
///
/// `limbs_buffer` is used for temporary storage. The amount required is given by `calcMulLimbsBufferLen`.
pub fn mul(rma: *Mutable, a: Const, b: Const, limbs_buffer: []Limb, allocator: ?*Allocator) void {
pub fn mul(rma: *Mutable, a: Const, b: Const, limbs_buffer: []Limb, allocator: ?Allocator) void {
var buf_index: usize = 0;

const a_copy = if (rma.limbs.ptr == a.limbs.ptr) blk: {
@ -638,7 +638,7 @@ pub const Mutable = struct {
///
/// If `allocator` is provided, it will be used for temporary storage to improve
/// multiplication performance. `error.OutOfMemory` is handled with a fallback algorithm.
pub fn mulNoAlias(rma: *Mutable, a: Const, b: Const, allocator: ?*Allocator) void {
pub fn mulNoAlias(rma: *Mutable, a: Const, b: Const, allocator: ?Allocator) void {
assert(rma.limbs.ptr != a.limbs.ptr); // illegal aliasing
assert(rma.limbs.ptr != b.limbs.ptr); // illegal aliasing

@ -674,7 +674,7 @@ pub const Mutable = struct {
signedness: Signedness,
bit_count: usize,
limbs_buffer: []Limb,
allocator: ?*Allocator,
allocator: ?Allocator,
) void {
var buf_index: usize = 0;
const req_limbs = calcTwosCompLimbCount(bit_count);
@ -714,7 +714,7 @@ pub const Mutable = struct {
b: Const,
signedness: Signedness,
bit_count: usize,
allocator: ?*Allocator,
allocator: ?Allocator,
) void {
assert(rma.limbs.ptr != a.limbs.ptr); // illegal aliasing
assert(rma.limbs.ptr != b.limbs.ptr); // illegal aliasing
@ -763,7 +763,7 @@ pub const Mutable = struct {
///
/// If `allocator` is provided, it will be used for temporary storage to improve
/// multiplication performance. `error.OutOfMemory` is handled with a fallback algorithm.
pub fn sqrNoAlias(rma: *Mutable, a: Const, opt_allocator: ?*Allocator) void {
pub fn sqrNoAlias(rma: *Mutable, a: Const, opt_allocator: ?Allocator) void {
_ = opt_allocator;
assert(rma.limbs.ptr != a.limbs.ptr); // illegal aliasing

@ -1660,7 +1660,7 @@ pub const Const = struct {
positive: bool,

/// The result is an independent resource which is managed by the caller.
pub fn toManaged(self: Const, allocator: *Allocator) Allocator.Error!Managed {
pub fn toManaged(self: Const, allocator: Allocator) Allocator.Error!Managed {
const limbs = try allocator.alloc(Limb, math.max(Managed.default_capacity, self.limbs.len));
mem.copy(Limb, limbs, self.limbs);
return Managed{
@ -1873,7 +1873,7 @@ pub const Const = struct {
/// Caller owns returned memory.
/// Asserts that `base` is in the range [2, 16].
/// See also `toString`, a lower level function than this.
pub fn toStringAlloc(self: Const, allocator: *Allocator, base: u8, case: std.fmt.Case) Allocator.Error![]u8 {
pub fn toStringAlloc(self: Const, allocator: Allocator, base: u8, case: std.fmt.Case) Allocator.Error![]u8 {
assert(base >= 2);
assert(base <= 16);

@ -2092,7 +2092,7 @@ pub const Managed = struct {
pub const default_capacity = 4;

/// Allocator used by the Managed when requesting memory.
allocator: *Allocator,
allocator: Allocator,

/// Raw digits. These are:
///
@ -2109,7 +2109,7 @@ pub const Managed = struct {

/// Creates a new `Managed`. `default_capacity` limbs will be allocated immediately.
/// The integer value after initializing is `0`.
pub fn init(allocator: *Allocator) !Managed {
pub fn init(allocator: Allocator) !Managed {
return initCapacity(allocator, default_capacity);
}

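The big-integer API now takes the `Allocator` by value as well. A minimal sketch (the allocator source and the numeric value are illustrative assumptions):

const std = @import("std");
test "big.int.Managed with a value Allocator (illustrative)" {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    const allocator = gpa.getAllocator();
    var a = try std.math.big.int.Managed.initSet(allocator, 1234567890);
    defer a.deinit();
    try std.testing.expect(a.toConst().positive);
}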
@ -2131,7 +2131,7 @@ pub const Managed = struct {
/// Creates a new `Managed` with value `value`.
///
/// This is identical to an `init`, followed by a `set`.
pub fn initSet(allocator: *Allocator, value: anytype) !Managed {
pub fn initSet(allocator: Allocator, value: anytype) !Managed {
var s = try Managed.init(allocator);
try s.set(value);
return s;
@ -2140,7 +2140,7 @@ pub const Managed = struct {
/// Creates a new Managed with a specific capacity. If capacity < default_capacity then the
/// default capacity will be used instead.
/// The integer value after initializing is `0`.
pub fn initCapacity(allocator: *Allocator, capacity: usize) !Managed {
pub fn initCapacity(allocator: Allocator, capacity: usize) !Managed {
return Managed{
.allocator = allocator,
.metadata = 1,
@ -2206,7 +2206,7 @@ pub const Managed = struct {
return other.cloneWithDifferentAllocator(other.allocator);
}

pub fn cloneWithDifferentAllocator(other: Managed, allocator: *Allocator) !Managed {
pub fn cloneWithDifferentAllocator(other: Managed, allocator: Allocator) !Managed {
return Managed{
.allocator = allocator,
.metadata = other.metadata,
@ -2347,7 +2347,7 @@ pub const Managed = struct {

/// Converts self to a string in the requested base. Memory is allocated from the provided
/// allocator and not the one present in self.
pub fn toString(self: Managed, allocator: *Allocator, base: u8, case: std.fmt.Case) ![]u8 {
pub fn toString(self: Managed, allocator: Allocator, base: u8, case: std.fmt.Case) ![]u8 {
_ = allocator;
if (base < 2 or base > 16) return error.InvalidBase;
return self.toConst().toStringAlloc(self.allocator, base, case);
@ -2784,7 +2784,7 @@ const AccOp = enum {
/// r MUST NOT alias any of a or b.
///
/// The result is computed modulo `r.len`. When `r.len >= a.len + b.len`, no overflow occurs.
fn llmulacc(comptime op: AccOp, opt_allocator: ?*Allocator, r: []Limb, a: []const Limb, b: []const Limb) void {
fn llmulacc(comptime op: AccOp, opt_allocator: ?Allocator, r: []Limb, a: []const Limb, b: []const Limb) void {
@setRuntimeSafety(debug_safety);
assert(r.len >= a.len);
assert(r.len >= b.len);
@ -2819,7 +2819,7 @@ fn llmulacc(comptime op: AccOp, opt_allocator: ?*Allocator, r: []Limb, a: []cons
/// The result is computed modulo `r.len`. When `r.len >= a.len + b.len`, no overflow occurs.
fn llmulaccKaratsuba(
comptime op: AccOp,
allocator: *Allocator,
allocator: Allocator,
r: []Limb,
a: []const Limb,
b: []const Limb,

@ -29,7 +29,7 @@ pub const Rational = struct {

/// Create a new Rational. A small amount of memory will be allocated on initialization.
/// This will be 2 * Int.default_capacity.
pub fn init(a: *Allocator) !Rational {
pub fn init(a: Allocator) !Rational {
return Rational{
.p = try Int.init(a),
.q = try Int.initSet(a, 1),

@ -37,24 +37,26 @@ pub const Allocator = @import("mem/Allocator.zig");
pub fn ValidationAllocator(comptime T: type) type {
return struct {
const Self = @This();
allocator: Allocator,

underlying_allocator: T,

pub fn init(allocator: T) @This() {
return .{
.allocator = .{
.allocFn = alloc,
.resizeFn = resize,
},
.underlying_allocator = allocator,
};
}
fn getUnderlyingAllocatorPtr(self: *@This()) *Allocator {
if (T == *Allocator) return self.underlying_allocator;
if (*T == *Allocator) return &self.underlying_allocator;
return &self.underlying_allocator.allocator;

pub fn getAllocator(self: *Self) Allocator {
return Allocator.init(self, alloc, resize);
}

fn getUnderlyingAllocatorPtr(self: *Self) Allocator {
if (T == Allocator) return self.underlying_allocator;
return self.underlying_allocator.getAllocator();
}

pub fn alloc(
allocator: *Allocator,
self: *Self,
n: usize,
ptr_align: u29,
len_align: u29,
@ -67,9 +69,8 @@ pub fn ValidationAllocator(comptime T: type) type {
assert(n >= len_align);
}

const self = @fieldParentPtr(@This(), "allocator", allocator);
const underlying = self.getUnderlyingAllocatorPtr();
const result = try underlying.allocFn(underlying, n, ptr_align, len_align, ret_addr);
const result = try underlying.allocFn(underlying.ptr, n, ptr_align, len_align, ret_addr);
assert(mem.isAligned(@ptrToInt(result.ptr), ptr_align));
if (len_align == 0) {
assert(result.len == n);
@ -79,8 +80,9 @@ pub fn ValidationAllocator(comptime T: type) type {
}
return result;
}

pub fn resize(
allocator: *Allocator,
self: *Self,
buf: []u8,
buf_align: u29,
new_len: usize,
@ -92,9 +94,8 @@ pub fn ValidationAllocator(comptime T: type) type {
assert(mem.isAlignedAnyAlign(new_len, len_align));
assert(new_len >= len_align);
}
const self = @fieldParentPtr(@This(), "allocator", allocator);
const underlying = self.getUnderlyingAllocatorPtr();
const result = try underlying.resizeFn(underlying, buf, buf_align, new_len, len_align, ret_addr);
const result = try underlying.resizeFn(underlying.ptr, buf, buf_align, new_len, len_align, ret_addr);
if (len_align == 0) {
assert(result == new_len);
} else {
@ -103,7 +104,7 @@ pub fn ValidationAllocator(comptime T: type) type {
}
return result;
}
pub usingnamespace if (T == *Allocator or !@hasDecl(T, "reset")) struct {} else struct {
pub usingnamespace if (T == Allocator or !@hasDecl(T, "reset")) struct {} else struct {
pub fn reset(self: *Self) void {
self.underlying_allocator.reset();
}
@ -130,12 +131,14 @@ pub fn alignAllocLen(full_len: usize, alloc_len: usize, len_align: u29) usize {
return adjusted;
}

var failAllocator = Allocator{
.allocFn = failAllocatorAlloc,
.resizeFn = Allocator.noResize,
const failAllocator = blk: {
// TODO: This is an ugly hack, it could be improved once https://github.com/ziglang/zig/issues/6706 is implemented
// allowing the use of `*void` but it would still be ugly
var tmp: u1 = 0;
break :blk Allocator.init(&tmp, failAllocatorAlloc, Allocator.NoResize(u1).noResize);
};
fn failAllocatorAlloc(self: *Allocator, n: usize, alignment: u29, len_align: u29, ra: usize) Allocator.Error![]u8 {
_ = self;

fn failAllocatorAlloc(_: *u1, n: usize, alignment: u29, len_align: u29, ra: usize) Allocator.Error![]u8 {
_ = n;
_ = alignment;
_ = len_align;
@ -1786,18 +1789,18 @@ pub fn SplitIterator(comptime T: type) type {
|
||||
|
||||
/// Naively combines a series of slices with a separator.
|
||||
/// Allocates memory for the result, which must be freed by the caller.
|
||||
pub fn join(allocator: *Allocator, separator: []const u8, slices: []const []const u8) ![]u8 {
|
||||
pub fn join(allocator: Allocator, separator: []const u8, slices: []const []const u8) ![]u8 {
|
||||
return joinMaybeZ(allocator, separator, slices, false);
|
||||
}
|
||||
|
||||
/// Naively combines a series of slices with a separator and null terminator.
|
||||
/// Allocates memory for the result, which must be freed by the caller.
|
||||
pub fn joinZ(allocator: *Allocator, separator: []const u8, slices: []const []const u8) ![:0]u8 {
|
||||
pub fn joinZ(allocator: Allocator, separator: []const u8, slices: []const []const u8) ![:0]u8 {
|
||||
const out = try joinMaybeZ(allocator, separator, slices, true);
|
||||
return out[0 .. out.len - 1 :0];
|
||||
}
|
||||
|
||||
fn joinMaybeZ(allocator: *Allocator, separator: []const u8, slices: []const []const u8, zero: bool) ![]u8 {
|
||||
fn joinMaybeZ(allocator: Allocator, separator: []const u8, slices: []const []const u8, zero: bool) ![]u8 {
|
||||
if (slices.len == 0) return if (zero) try allocator.dupe(u8, &[1]u8{0}) else &[0]u8{};
|
||||
|
||||
const total_len = blk: {
|
||||
@ -1876,7 +1879,7 @@ test "mem.joinZ" {
|
||||
}
|
||||
|
||||
/// Copies each T from slices into a new slice that exactly holds all the elements.
|
||||
pub fn concat(allocator: *Allocator, comptime T: type, slices: []const []const T) ![]T {
|
||||
pub fn concat(allocator: Allocator, comptime T: type, slices: []const []const T) ![]T {
|
||||
if (slices.len == 0) return &[0]T{};
|
||||
|
||||
const total_len = blk: {
|
||||
@ -2318,7 +2321,7 @@ test "replacementSize" {
|
||||
}
|
||||
|
||||
/// Perform a replacement on an allocated buffer of pre-determined size. Caller must free returned memory.
|
||||
pub fn replaceOwned(comptime T: type, allocator: *Allocator, input: []const T, needle: []const T, replacement: []const T) Allocator.Error![]T {
|
||||
pub fn replaceOwned(comptime T: type, allocator: Allocator, input: []const T, needle: []const T, replacement: []const T) Allocator.Error![]T {
|
||||
var output = try allocator.alloc(T, replacementSize(T, input, needle, replacement));
|
||||
_ = replace(T, input, needle, replacement, output);
|
||||
return output;
|
||||
|
||||
@ -8,6 +8,9 @@ const Allocator = @This();

pub const Error = error{OutOfMemory};

// The type erased pointer to the allocator implementation
ptr: *c_void,

/// Attempt to allocate at least `len` bytes aligned to `ptr_align`.
///
/// If `len_align` is `0`, then the length returned MUST be exactly `len` bytes,
@ -17,7 +20,7 @@ pub const Error = error{OutOfMemory};
///
/// `ret_addr` is optionally provided as the first return address of the allocation call stack.
/// If the value is `0` it means no return address has been provided.
allocFn: fn (self: *Allocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8,
allocFn: fn (ptr: *c_void, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8,

/// Attempt to expand or shrink memory in place. `buf.len` must equal the most recent
/// length returned by `allocFn` or `resizeFn`. `buf_align` must equal the same value
@ -39,11 +42,41 @@ allocFn: fn (self: *Allocator, len: usize, ptr_align: u29, len_align: u29, ret_a
///
/// `ret_addr` is optionally provided as the first return address of the allocation call stack.
/// If the value is `0` it means no return address has been provided.
resizeFn: fn (self: *Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Error!usize,
resizeFn: fn (ptr: *c_void, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Error!usize,

/// Set to resizeFn if in-place resize is not supported.
pub fn init(
pointer: anytype,
comptime allocFn: fn (ptr: @TypeOf(pointer), len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8,
comptime resizeFn: fn (ptr: @TypeOf(pointer), buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Error!usize,
) Allocator {
const Ptr = @TypeOf(pointer);
assert(@typeInfo(Ptr) == .Pointer); // Must be a pointer
assert(@typeInfo(Ptr).Pointer.size == .One); // Must be a single-item pointer
const gen = struct {
fn alloc(ptr: *c_void, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8 {
const alignment = @typeInfo(Ptr).Pointer.alignment;
const self = @ptrCast(Ptr, @alignCast(alignment, ptr));
return allocFn(self, len, ptr_align, len_align, ret_addr);
}
fn resize(ptr: *c_void, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Error!usize {
const alignment = @typeInfo(Ptr).Pointer.alignment;
const self = @ptrCast(Ptr, @alignCast(alignment, ptr));
return resizeFn(self, buf, buf_align, new_len, len_align, ret_addr);
}
};

return .{
.ptr = pointer,
.allocFn = gen.alloc,
.resizeFn = gen.resize,
};
}

/// Set resizeFn to `NoResize(AllocatorType).noResize` if in-place resize is not supported.
pub fn NoResize(comptime AllocatorType: type) type {
return struct {
pub fn noResize(
self: *Allocator,
self: *AllocatorType,
buf: []u8,
buf_align: u29,
new_len: usize,
@ -58,6 +91,8 @@ pub fn noResize(
return error.OutOfMemory;
return new_len;
}
};
}

/// Realloc is used to modify the size or alignment of an existing allocation,
/// as well as to provide the allocator with an opportunity to move an allocation
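Taken together, the new fields and the `init` helper above define the adaptation recipe applied to every allocator in this commit: keep concrete state in your own struct, write `alloc`/`resize` functions that take `*Self`, and let `Allocator.init` generate the type-erasing thunks that cast the `*c_void` back to your type. A rough sketch of a wrapper written against this interface (the `CountingAllocator` type is hypothetical, not part of the diff):

const std = @import("std");
const Allocator = std.mem.Allocator;

const CountingAllocator = struct {
    backing: Allocator,
    count: usize = 0,

    // Implementation functions take the concrete *Self; forwarding to the backing
    // allocator now goes through its type-erased `ptr` field.
    fn alloc(self: *CountingAllocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Allocator.Error![]u8 {
        self.count += 1;
        return self.backing.allocFn(self.backing.ptr, len, ptr_align, len_align, ret_addr);
    }

    fn resize(self: *CountingAllocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Allocator.Error!usize {
        return self.backing.resizeFn(self.backing.ptr, buf, buf_align, new_len, len_align, ret_addr);
    }

    // The accessor pattern the rest of this diff introduces on std allocators.
    pub fn getAllocator(self: *CountingAllocator) Allocator {
        return Allocator.init(self, alloc, resize);
    }
};

// Usage: the interface value is now passed around by value.
// var counting = CountingAllocator{ .backing = std.testing.allocator };
// const gpa = counting.getAllocator();
// const bytes = try gpa.alloc(u8, 64);
// defer gpa.free(bytes);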
@ -80,8 +115,8 @@ pub fn noResize(
|
||||
/// as `old_mem` was when `reallocFn` is called. The bytes of
|
||||
/// `return_value[old_mem.len..]` have undefined values.
|
||||
/// The returned slice must have its pointer aligned at least to `new_alignment` bytes.
|
||||
pub fn reallocBytes(
|
||||
self: *Allocator,
|
||||
fn reallocBytes(
|
||||
self: Allocator,
|
||||
/// Guaranteed to be the same as what was returned from most recent call to
|
||||
/// `allocFn` or `resizeFn`.
|
||||
/// If `old_mem.len == 0` then this is a new allocation and `new_byte_count`
|
||||
@ -106,7 +141,7 @@ pub fn reallocBytes(
|
||||
return_address: usize,
|
||||
) Error![]u8 {
|
||||
if (old_mem.len == 0) {
|
||||
const new_mem = try self.allocFn(self, new_byte_count, new_alignment, len_align, return_address);
|
||||
const new_mem = try self.allocFn(self.ptr, new_byte_count, new_alignment, len_align, return_address);
|
||||
// TODO: https://github.com/ziglang/zig/issues/4298
|
||||
@memset(new_mem.ptr, undefined, new_byte_count);
|
||||
return new_mem;
|
||||
@ -117,7 +152,7 @@ pub fn reallocBytes(
|
||||
const shrunk_len = self.shrinkBytes(old_mem, old_alignment, new_byte_count, len_align, return_address);
|
||||
return old_mem.ptr[0..shrunk_len];
|
||||
}
|
||||
if (self.resizeFn(self, old_mem, old_alignment, new_byte_count, len_align, return_address)) |resized_len| {
|
||||
if (self.resizeFn(self.ptr, old_mem, old_alignment, new_byte_count, len_align, return_address)) |resized_len| {
|
||||
assert(resized_len >= new_byte_count);
|
||||
// TODO: https://github.com/ziglang/zig/issues/4298
|
||||
@memset(old_mem.ptr + new_byte_count, undefined, resized_len - new_byte_count);
|
||||
@ -133,7 +168,7 @@ pub fn reallocBytes(
|
||||
/// Move the given memory to a new location in the given allocator to accommodate a new
|
||||
/// size and alignment.
|
||||
fn moveBytes(
|
||||
self: *Allocator,
|
||||
self: Allocator,
|
||||
old_mem: []u8,
|
||||
old_align: u29,
|
||||
new_len: usize,
|
||||
@ -143,7 +178,7 @@ fn moveBytes(
|
||||
) Error![]u8 {
|
||||
assert(old_mem.len > 0);
|
||||
assert(new_len > 0);
|
||||
const new_mem = try self.allocFn(self, new_len, new_alignment, len_align, return_address);
|
||||
const new_mem = try self.allocFn(self.ptr, new_len, new_alignment, len_align, return_address);
|
||||
@memcpy(new_mem.ptr, old_mem.ptr, math.min(new_len, old_mem.len));
|
||||
// TODO https://github.com/ziglang/zig/issues/4298
|
||||
@memset(old_mem.ptr, undefined, old_mem.len);
|
||||
@ -153,7 +188,7 @@ fn moveBytes(
|
||||
|
||||
/// Returns a pointer to undefined memory.
|
||||
/// Call `destroy` with the result to free the memory.
|
||||
pub fn create(self: *Allocator, comptime T: type) Error!*T {
|
||||
pub fn create(self: Allocator, comptime T: type) Error!*T {
|
||||
if (@sizeOf(T) == 0) return @as(*T, undefined);
|
||||
const slice = try self.allocAdvancedWithRetAddr(T, null, 1, .exact, @returnAddress());
|
||||
return &slice[0];
|
||||
@ -161,7 +196,7 @@ pub fn create(self: *Allocator, comptime T: type) Error!*T {
|
||||
|
||||
/// `ptr` should be the return value of `create`, or otherwise
|
||||
/// have the same address and alignment property.
|
||||
pub fn destroy(self: *Allocator, ptr: anytype) void {
|
||||
pub fn destroy(self: Allocator, ptr: anytype) void {
|
||||
const info = @typeInfo(@TypeOf(ptr)).Pointer;
|
||||
const T = info.child;
|
||||
if (@sizeOf(T) == 0) return;
|
||||
@ -177,12 +212,12 @@ pub fn destroy(self: *Allocator, ptr: anytype) void {
|
||||
/// call `free` when done.
|
||||
///
|
||||
/// For allocating a single item, see `create`.
|
||||
pub fn alloc(self: *Allocator, comptime T: type, n: usize) Error![]T {
|
||||
pub fn alloc(self: Allocator, comptime T: type, n: usize) Error![]T {
|
||||
return self.allocAdvancedWithRetAddr(T, null, n, .exact, @returnAddress());
|
||||
}
|
||||
|
||||
pub fn allocWithOptions(
|
||||
self: *Allocator,
|
||||
self: Allocator,
|
||||
comptime Elem: type,
|
||||
n: usize,
|
||||
/// null means naturally aligned
|
||||
@ -193,7 +228,7 @@ pub fn allocWithOptions(
|
||||
}
|
||||
|
||||
pub fn allocWithOptionsRetAddr(
|
||||
self: *Allocator,
|
||||
self: Allocator,
|
||||
comptime Elem: type,
|
||||
n: usize,
|
||||
/// null means naturally aligned
|
||||
@ -227,7 +262,7 @@ fn AllocWithOptionsPayload(comptime Elem: type, comptime alignment: ?u29, compti
|
||||
///
|
||||
/// For allocating a single item, see `create`.
|
||||
pub fn allocSentinel(
|
||||
self: *Allocator,
|
||||
self: Allocator,
|
||||
comptime Elem: type,
|
||||
n: usize,
|
||||
comptime sentinel: Elem,
|
||||
@ -236,7 +271,7 @@ pub fn allocSentinel(
|
||||
}
|
||||
|
||||
pub fn alignedAlloc(
|
||||
self: *Allocator,
|
||||
self: Allocator,
|
||||
comptime T: type,
|
||||
/// null means naturally aligned
|
||||
comptime alignment: ?u29,
|
||||
@ -246,7 +281,7 @@ pub fn alignedAlloc(
|
||||
}
|
||||
|
||||
pub fn allocAdvanced(
|
||||
self: *Allocator,
|
||||
self: Allocator,
|
||||
comptime T: type,
|
||||
/// null means naturally aligned
|
||||
comptime alignment: ?u29,
|
||||
@ -259,7 +294,7 @@ pub fn allocAdvanced(
|
||||
pub const Exact = enum { exact, at_least };
|
||||
|
||||
pub fn allocAdvancedWithRetAddr(
|
||||
self: *Allocator,
|
||||
self: Allocator,
|
||||
comptime T: type,
|
||||
/// null means naturally aligned
|
||||
comptime alignment: ?u29,
|
||||
@ -285,7 +320,7 @@ pub fn allocAdvancedWithRetAddr(
|
||||
.exact => 0,
|
||||
.at_least => size_of_T,
|
||||
};
|
||||
const byte_slice = try self.allocFn(self, byte_count, a, len_align, return_address);
|
||||
const byte_slice = try self.allocFn(self.ptr, byte_count, a, len_align, return_address);
|
||||
switch (exact) {
|
||||
.exact => assert(byte_slice.len == byte_count),
|
||||
.at_least => assert(byte_slice.len >= byte_count),
|
||||
@ -301,7 +336,7 @@ pub fn allocAdvancedWithRetAddr(
|
||||
}
|
||||
|
||||
/// Increases or decreases the size of an allocation. It is guaranteed to not move the pointer.
|
||||
pub fn resize(self: *Allocator, old_mem: anytype, new_n: usize) Error!@TypeOf(old_mem) {
|
||||
pub fn resize(self: Allocator, old_mem: anytype, new_n: usize) Error!@TypeOf(old_mem) {
|
||||
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
|
||||
const T = Slice.child;
|
||||
if (new_n == 0) {
|
||||
@ -310,7 +345,7 @@ pub fn resize(self: *Allocator, old_mem: anytype, new_n: usize) Error!@TypeOf(ol
|
||||
}
|
||||
const old_byte_slice = mem.sliceAsBytes(old_mem);
|
||||
const new_byte_count = math.mul(usize, @sizeOf(T), new_n) catch return Error.OutOfMemory;
|
||||
const rc = try self.resizeFn(self, old_byte_slice, Slice.alignment, new_byte_count, 0, @returnAddress());
|
||||
const rc = try self.resizeFn(self.ptr, old_byte_slice, Slice.alignment, new_byte_count, 0, @returnAddress());
|
||||
assert(rc == new_byte_count);
|
||||
const new_byte_slice = old_byte_slice.ptr[0..new_byte_count];
|
||||
return mem.bytesAsSlice(T, new_byte_slice);
|
||||
@ -326,7 +361,7 @@ pub fn resize(self: *Allocator, old_mem: anytype, new_n: usize) Error!@TypeOf(ol
|
||||
/// in `std.ArrayList.shrink`.
|
||||
/// If you need guaranteed success, call `shrink`.
|
||||
/// If `new_n` is 0, this is the same as `free` and it always succeeds.
|
||||
pub fn realloc(self: *Allocator, old_mem: anytype, new_n: usize) t: {
|
||||
pub fn realloc(self: Allocator, old_mem: anytype, new_n: usize) t: {
|
||||
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
|
||||
break :t Error![]align(Slice.alignment) Slice.child;
|
||||
} {
|
||||
@ -334,7 +369,7 @@ pub fn realloc(self: *Allocator, old_mem: anytype, new_n: usize) t: {
|
||||
return self.reallocAdvancedWithRetAddr(old_mem, old_alignment, new_n, .exact, @returnAddress());
|
||||
}
|
||||
|
||||
pub fn reallocAtLeast(self: *Allocator, old_mem: anytype, new_n: usize) t: {
|
||||
pub fn reallocAtLeast(self: Allocator, old_mem: anytype, new_n: usize) t: {
|
||||
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
|
||||
break :t Error![]align(Slice.alignment) Slice.child;
|
||||
} {
|
||||
@ -346,7 +381,7 @@ pub fn reallocAtLeast(self: *Allocator, old_mem: anytype, new_n: usize) t: {
|
||||
/// a new alignment, which can be larger, smaller, or the same as the old
|
||||
/// allocation.
|
||||
pub fn reallocAdvanced(
|
||||
self: *Allocator,
|
||||
self: Allocator,
|
||||
old_mem: anytype,
|
||||
comptime new_alignment: u29,
|
||||
new_n: usize,
|
||||
@ -356,7 +391,7 @@ pub fn reallocAdvanced(
|
||||
}
|
||||
|
||||
pub fn reallocAdvancedWithRetAddr(
|
||||
self: *Allocator,
|
||||
self: Allocator,
|
||||
old_mem: anytype,
|
||||
comptime new_alignment: u29,
|
||||
new_n: usize,
|
||||
@ -389,7 +424,7 @@ pub fn reallocAdvancedWithRetAddr(
|
||||
/// Shrink always succeeds, and `new_n` must be <= `old_mem.len`.
|
||||
/// Returned slice has same alignment as old_mem.
|
||||
/// Shrinking to 0 is the same as calling `free`.
|
||||
pub fn shrink(self: *Allocator, old_mem: anytype, new_n: usize) t: {
|
||||
pub fn shrink(self: Allocator, old_mem: anytype, new_n: usize) t: {
|
||||
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
|
||||
break :t []align(Slice.alignment) Slice.child;
|
||||
} {
|
||||
@ -401,7 +436,7 @@ pub fn shrink(self: *Allocator, old_mem: anytype, new_n: usize) t: {
|
||||
/// a new alignment, which must be smaller or the same as the old
|
||||
/// allocation.
|
||||
pub fn alignedShrink(
|
||||
self: *Allocator,
|
||||
self: Allocator,
|
||||
old_mem: anytype,
|
||||
comptime new_alignment: u29,
|
||||
new_n: usize,
|
||||
@ -413,7 +448,7 @@ pub fn alignedShrink(
|
||||
/// the return address of the first stack frame, which may be relevant for
|
||||
/// allocators which collect stack traces.
|
||||
pub fn alignedShrinkWithRetAddr(
|
||||
self: *Allocator,
|
||||
self: Allocator,
|
||||
old_mem: anytype,
|
||||
comptime new_alignment: u29,
|
||||
new_n: usize,
|
||||
@ -440,7 +475,7 @@ pub fn alignedShrinkWithRetAddr(
|
||||
|
||||
/// Free an array allocated with `alloc`. To free a single item,
|
||||
/// see `destroy`.
|
||||
pub fn free(self: *Allocator, memory: anytype) void {
|
||||
pub fn free(self: Allocator, memory: anytype) void {
|
||||
const Slice = @typeInfo(@TypeOf(memory)).Pointer;
|
||||
const bytes = mem.sliceAsBytes(memory);
|
||||
const bytes_len = bytes.len + if (Slice.sentinel != null) @sizeOf(Slice.child) else 0;
|
||||
@ -452,14 +487,14 @@ pub fn free(self: *Allocator, memory: anytype) void {
|
||||
}
|
||||
|
||||
/// Copies `m` to newly allocated memory. Caller owns the memory.
|
||||
pub fn dupe(allocator: *Allocator, comptime T: type, m: []const T) ![]T {
|
||||
pub fn dupe(allocator: Allocator, comptime T: type, m: []const T) ![]T {
|
||||
const new_buf = try allocator.alloc(T, m.len);
|
||||
mem.copy(T, new_buf, m);
|
||||
return new_buf;
|
||||
}
|
||||
|
||||
/// Copies `m` to newly allocated memory, with a null-terminated element. Caller owns the memory.
|
||||
pub fn dupeZ(allocator: *Allocator, comptime T: type, m: []const T) ![:0]T {
|
||||
pub fn dupeZ(allocator: Allocator, comptime T: type, m: []const T) ![:0]T {
|
||||
const new_buf = try allocator.alloc(T, m.len + 1);
|
||||
mem.copy(T, new_buf, m);
|
||||
new_buf[m.len] = 0;
|
||||
@ -471,7 +506,7 @@ pub fn dupeZ(allocator: *Allocator, comptime T: type, m: []const T) ![:0]T {
|
||||
/// This function allows a runtime `buf_align` value. Callers should generally prefer
|
||||
/// to call `shrink` directly.
|
||||
pub fn shrinkBytes(
|
||||
self: *Allocator,
|
||||
self: Allocator,
|
||||
buf: []u8,
|
||||
buf_align: u29,
|
||||
new_len: usize,
|
||||
@ -479,5 +514,5 @@ pub fn shrinkBytes(
|
||||
return_address: usize,
|
||||
) usize {
|
||||
assert(new_len <= buf.len);
|
||||
return self.resizeFn(self, buf, buf_align, new_len, len_align, return_address) catch unreachable;
|
||||
return self.resizeFn(self.ptr, buf, buf_align, new_len, len_align, return_address) catch unreachable;
|
||||
}
|
||||
|
||||
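The remaining Allocator method changes in this file are mechanical: every method now takes `self: Allocator` by value and reaches the implementation through `self.ptr` plus the two function pointers, so the interface struct stays cheap to copy. For downstream code the visible change is confined to signatures and call sites; a hedged before/after sketch (hypothetical helper, not from the diff):

const std = @import("std");
const Allocator = std.mem.Allocator;

// Before allocgate: fn joinLines(allocator: *std.mem.Allocator, lines: []const []const u8) ![]u8
// After allocgate: the interface value is passed directly.
fn joinLines(allocator: Allocator, lines: []const []const u8) ![]u8 {
    // std.mem.join itself is converted in this diff to take Allocator by value.
    return std.mem.join(allocator, "\n", lines);
}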
@ -59,7 +59,7 @@ pub fn MultiArrayList(comptime S: type) type {
|
||||
};
|
||||
}
|
||||
|
||||
pub fn deinit(self: *Slice, gpa: *Allocator) void {
|
||||
pub fn deinit(self: *Slice, gpa: Allocator) void {
|
||||
var other = self.toMultiArrayList();
|
||||
other.deinit(gpa);
|
||||
self.* = undefined;
|
||||
@ -106,7 +106,7 @@ pub fn MultiArrayList(comptime S: type) type {
|
||||
};
|
||||
|
||||
/// Release all allocated memory.
|
||||
pub fn deinit(self: *Self, gpa: *Allocator) void {
|
||||
pub fn deinit(self: *Self, gpa: Allocator) void {
|
||||
gpa.free(self.allocatedBytes());
|
||||
self.* = undefined;
|
||||
}
|
||||
@ -161,7 +161,7 @@ pub fn MultiArrayList(comptime S: type) type {
|
||||
}
|
||||
|
||||
/// Extend the list by 1 element. Allocates more memory as necessary.
|
||||
pub fn append(self: *Self, gpa: *Allocator, elem: S) !void {
|
||||
pub fn append(self: *Self, gpa: Allocator, elem: S) !void {
|
||||
try self.ensureUnusedCapacity(gpa, 1);
|
||||
self.appendAssumeCapacity(elem);
|
||||
}
|
||||
@ -188,7 +188,7 @@ pub fn MultiArrayList(comptime S: type) type {
|
||||
/// after and including the specified index back by one and
|
||||
/// sets the given index to the specified element. May reallocate
|
||||
/// and invalidate iterators.
|
||||
pub fn insert(self: *Self, gpa: *Allocator, index: usize, elem: S) void {
|
||||
pub fn insert(self: *Self, gpa: Allocator, index: usize, elem: S) void {
|
||||
try self.ensureUnusedCapacity(gpa, 1);
|
||||
self.insertAssumeCapacity(index, elem);
|
||||
}
|
||||
@ -242,7 +242,7 @@ pub fn MultiArrayList(comptime S: type) type {
|
||||
|
||||
/// Adjust the list's length to `new_len`.
|
||||
/// Does not initialize added items, if any.
|
||||
pub fn resize(self: *Self, gpa: *Allocator, new_len: usize) !void {
|
||||
pub fn resize(self: *Self, gpa: Allocator, new_len: usize) !void {
|
||||
try self.ensureTotalCapacity(gpa, new_len);
|
||||
self.len = new_len;
|
||||
}
|
||||
@ -250,7 +250,7 @@ pub fn MultiArrayList(comptime S: type) type {
|
||||
/// Attempt to reduce allocated capacity to `new_len`.
|
||||
/// If `new_len` is greater than zero, this may fail to reduce the capacity,
|
||||
/// but the data remains intact and the length is updated to new_len.
|
||||
pub fn shrinkAndFree(self: *Self, gpa: *Allocator, new_len: usize) void {
|
||||
pub fn shrinkAndFree(self: *Self, gpa: Allocator, new_len: usize) void {
|
||||
if (new_len == 0) {
|
||||
gpa.free(self.allocatedBytes());
|
||||
self.* = .{};
|
||||
@ -314,7 +314,7 @@ pub fn MultiArrayList(comptime S: type) type {
|
||||
/// Modify the array so that it can hold at least `new_capacity` items.
|
||||
/// Implements super-linear growth to achieve amortized O(1) append operations.
|
||||
/// Invalidates pointers if additional memory is needed.
|
||||
pub fn ensureTotalCapacity(self: *Self, gpa: *Allocator, new_capacity: usize) !void {
|
||||
pub fn ensureTotalCapacity(self: *Self, gpa: Allocator, new_capacity: usize) !void {
|
||||
var better_capacity = self.capacity;
|
||||
if (better_capacity >= new_capacity) return;
|
||||
|
||||
@ -328,14 +328,14 @@ pub fn MultiArrayList(comptime S: type) type {
|
||||
|
||||
/// Modify the array so that it can hold at least `additional_count` **more** items.
|
||||
/// Invalidates pointers if additional memory is needed.
|
||||
pub fn ensureUnusedCapacity(self: *Self, gpa: *Allocator, additional_count: usize) !void {
|
||||
pub fn ensureUnusedCapacity(self: *Self, gpa: Allocator, additional_count: usize) !void {
|
||||
return self.ensureTotalCapacity(gpa, self.len + additional_count);
|
||||
}
|
||||
|
||||
/// Modify the array so that it can hold exactly `new_capacity` items.
|
||||
/// Invalidates pointers if additional memory is needed.
|
||||
/// `new_capacity` must be greater or equal to `len`.
|
||||
pub fn setCapacity(self: *Self, gpa: *Allocator, new_capacity: usize) !void {
|
||||
pub fn setCapacity(self: *Self, gpa: Allocator, new_capacity: usize) !void {
|
||||
assert(new_capacity >= self.len);
|
||||
const new_bytes = try gpa.allocAdvanced(
|
||||
u8,
|
||||
@ -372,7 +372,7 @@ pub fn MultiArrayList(comptime S: type) type {
|
||||
|
||||
/// Create a copy of this list with a new backing store,
|
||||
/// using the specified allocator.
|
||||
pub fn clone(self: Self, gpa: *Allocator) !Self {
|
||||
pub fn clone(self: Self, gpa: Allocator) !Self {
|
||||
var result = Self{};
|
||||
errdefer result.deinit(gpa);
|
||||
try result.ensureTotalCapacity(gpa, self.len);
|
||||
|
||||
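MultiArrayList stays unmanaged: it stores no allocator and instead threads `gpa` (now an `Allocator` value) through every call that may allocate. A small usage sketch under the signatures shown above (the `Monster` element type is made up):

const std = @import("std");

const Monster = struct {
    hp: u32,
    mana: u32,
};

fn demo(gpa: std.mem.Allocator) !void {
    var list = std.MultiArrayList(Monster){};
    defer list.deinit(gpa); // the allocator value is supplied per call, not stored

    try list.append(gpa, .{ .hp = 100, .mana = 50 });
    try list.ensureUnusedCapacity(gpa, 8);
    list.appendAssumeCapacity(.{ .hp = 5, .mana = 0 });
}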
@ -664,7 +664,7 @@ pub const AddressList = struct {
|
||||
};
|
||||
|
||||
/// All memory allocated with `allocator` will be freed before this function returns.
|
||||
pub fn tcpConnectToHost(allocator: *mem.Allocator, name: []const u8, port: u16) !Stream {
|
||||
pub fn tcpConnectToHost(allocator: mem.Allocator, name: []const u8, port: u16) !Stream {
|
||||
const list = try getAddressList(allocator, name, port);
|
||||
defer list.deinit();
|
||||
|
||||
@ -699,12 +699,12 @@ pub fn tcpConnectToAddress(address: Address) !Stream {
|
||||
}
|
||||
|
||||
/// Call `AddressList.deinit` on the result.
|
||||
pub fn getAddressList(allocator: *mem.Allocator, name: []const u8, port: u16) !*AddressList {
|
||||
pub fn getAddressList(allocator: mem.Allocator, name: []const u8, port: u16) !*AddressList {
|
||||
const result = blk: {
|
||||
var arena = std.heap.ArenaAllocator.init(allocator);
|
||||
errdefer arena.deinit();
|
||||
|
||||
const result = try arena.allocator.create(AddressList);
|
||||
const result = try arena.getAllocator().create(AddressList);
|
||||
result.* = AddressList{
|
||||
.arena = arena,
|
||||
.addrs = undefined,
|
||||
@ -712,7 +712,7 @@ pub fn getAddressList(allocator: *mem.Allocator, name: []const u8, port: u16) !*
|
||||
};
|
||||
break :blk result;
|
||||
};
|
||||
const arena = &result.arena.allocator;
|
||||
const arena = result.arena.getAllocator();
|
||||
errdefer result.arena.deinit();
|
||||
|
||||
if (builtin.target.os.tag == .windows or builtin.link_libc) {
|
||||
@ -1303,7 +1303,7 @@ const ResolvConf = struct {
|
||||
|
||||
/// Ignores lines longer than 512 bytes.
|
||||
/// TODO: https://github.com/ziglang/zig/issues/2765 and https://github.com/ziglang/zig/issues/2761
|
||||
fn getResolvConf(allocator: *mem.Allocator, rc: *ResolvConf) !void {
|
||||
fn getResolvConf(allocator: mem.Allocator, rc: *ResolvConf) !void {
|
||||
rc.* = ResolvConf{
|
||||
.ns = std.ArrayList(LookupAddr).init(allocator),
|
||||
.search = std.ArrayList(u8).init(allocator),
|
||||
|
||||
@ -230,7 +230,7 @@ test "listen on ipv4 try connect on ipv6 then ipv4" {
|
||||
try await client_frame;
|
||||
}
|
||||
|
||||
fn testClientToHost(allocator: *mem.Allocator, name: []const u8, port: u16) anyerror!void {
|
||||
fn testClientToHost(allocator: mem.Allocator, name: []const u8, port: u16) anyerror!void {
|
||||
if (builtin.os.tag == .wasi) return error.SkipZigTest;
|
||||
|
||||
const connection = try net.tcpConnectToHost(allocator, name, port);
|
||||
|
||||
@ -58,10 +58,11 @@ test "open smoke test" {
|
||||
// Get base abs path
|
||||
var arena = ArenaAllocator.init(testing.allocator);
|
||||
defer arena.deinit();
|
||||
const allocator = arena.getAllocator();
|
||||
|
||||
const base_path = blk: {
|
||||
const relative_path = try fs.path.join(&arena.allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..] });
|
||||
break :blk try fs.realpathAlloc(&arena.allocator, relative_path);
|
||||
const relative_path = try fs.path.join(allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..] });
|
||||
break :blk try fs.realpathAlloc(allocator, relative_path);
|
||||
};
|
||||
|
||||
var file_path: []u8 = undefined;
|
||||
@ -69,34 +70,34 @@ test "open smoke test" {
|
||||
const mode: os.mode_t = if (native_os == .windows) 0 else 0o666;
|
||||
|
||||
// Create some file using `open`.
|
||||
file_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "some_file" });
|
||||
file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_file" });
|
||||
fd = try os.open(file_path, os.O.RDWR | os.O.CREAT | os.O.EXCL, mode);
|
||||
os.close(fd);
|
||||
|
||||
// Try this again with the same flags. This op should fail with error.PathAlreadyExists.
|
||||
file_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "some_file" });
|
||||
file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_file" });
|
||||
try expectError(error.PathAlreadyExists, os.open(file_path, os.O.RDWR | os.O.CREAT | os.O.EXCL, mode));
|
||||
|
||||
// Try opening without `O.EXCL` flag.
|
||||
file_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "some_file" });
|
||||
file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_file" });
|
||||
fd = try os.open(file_path, os.O.RDWR | os.O.CREAT, mode);
|
||||
os.close(fd);
|
||||
|
||||
// Try opening as a directory which should fail.
|
||||
file_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "some_file" });
|
||||
file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_file" });
|
||||
try expectError(error.NotDir, os.open(file_path, os.O.RDWR | os.O.DIRECTORY, mode));
|
||||
|
||||
// Create some directory
|
||||
file_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "some_dir" });
|
||||
file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_dir" });
|
||||
try os.mkdir(file_path, mode);
|
||||
|
||||
// Open dir using `open`
|
||||
file_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "some_dir" });
|
||||
file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_dir" });
|
||||
fd = try os.open(file_path, os.O.RDONLY | os.O.DIRECTORY, mode);
|
||||
os.close(fd);
|
||||
|
||||
// Try opening as file which should fail.
|
||||
file_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "some_dir" });
|
||||
file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_dir" });
|
||||
try expectError(error.IsDir, os.open(file_path, os.O.RDWR, mode));
|
||||
}
|
||||
|
||||
|
||||
@ -460,7 +460,7 @@ pub const PDBStringTableHeader = packed struct {
|
||||
ByteSize: u32,
|
||||
};
|
||||
|
||||
fn readSparseBitVector(stream: anytype, allocator: *mem.Allocator) ![]u32 {
|
||||
fn readSparseBitVector(stream: anytype, allocator: mem.Allocator) ![]u32 {
|
||||
const num_words = try stream.readIntLittle(u32);
|
||||
var list = ArrayList(u32).init(allocator);
|
||||
errdefer list.deinit();
|
||||
@ -481,7 +481,7 @@ fn readSparseBitVector(stream: anytype, allocator: *mem.Allocator) ![]u32 {
|
||||
pub const Pdb = struct {
|
||||
in_file: File,
|
||||
msf: Msf,
|
||||
allocator: *mem.Allocator,
|
||||
allocator: mem.Allocator,
|
||||
string_table: ?*MsfStream,
|
||||
dbi: ?*MsfStream,
|
||||
modules: []Module,
|
||||
@ -500,7 +500,7 @@ pub const Pdb = struct {
|
||||
checksum_offset: ?usize,
|
||||
};
|
||||
|
||||
pub fn init(allocator: *mem.Allocator, path: []const u8) !Pdb {
|
||||
pub fn init(allocator: mem.Allocator, path: []const u8) !Pdb {
|
||||
const file = try fs.cwd().openFile(path, .{ .intended_io_mode = .blocking });
|
||||
errdefer file.close();
|
||||
|
||||
@ -858,7 +858,7 @@ const Msf = struct {
|
||||
directory: MsfStream,
|
||||
streams: []MsfStream,
|
||||
|
||||
fn init(allocator: *mem.Allocator, file: File) !Msf {
|
||||
fn init(allocator: mem.Allocator, file: File) !Msf {
|
||||
const in = file.reader();
|
||||
|
||||
const superblock = try in.readStruct(SuperBlock);
|
||||
|
||||
@ -21,10 +21,10 @@ pub fn PriorityDequeue(comptime T: type, comptime compareFn: fn (T, T) Order) ty
|
||||
|
||||
items: []T,
|
||||
len: usize,
|
||||
allocator: *Allocator,
|
||||
allocator: Allocator,
|
||||
|
||||
/// Initialize and return a new priority dequeue.
|
||||
pub fn init(allocator: *Allocator) Self {
|
||||
pub fn init(allocator: Allocator) Self {
|
||||
return Self{
|
||||
.items = &[_]T{},
|
||||
.len = 0,
|
||||
@ -336,7 +336,7 @@ pub fn PriorityDequeue(comptime T: type, comptime compareFn: fn (T, T) Order) ty
|
||||
/// Dequeue takes ownership of the passed in slice. The slice must have been
|
||||
/// allocated with `allocator`.
|
||||
/// De-initialize with `deinit`.
|
||||
pub fn fromOwnedSlice(allocator: *Allocator, items: []T) Self {
|
||||
pub fn fromOwnedSlice(allocator: Allocator, items: []T) Self {
|
||||
var queue = Self{
|
||||
.items = items,
|
||||
.len = items.len,
|
||||
@ -945,7 +945,7 @@ fn fuzzTestMinMax(rng: std.rand.Random, queue_size: usize) !void {
|
||||
}
|
||||
}
|
||||
|
||||
fn generateRandomSlice(allocator: *std.mem.Allocator, rng: std.rand.Random, size: usize) ![]u32 {
|
||||
fn generateRandomSlice(allocator: std.mem.Allocator, rng: std.rand.Random, size: usize) ![]u32 {
|
||||
var array = std.ArrayList(u32).init(allocator);
|
||||
try array.ensureTotalCapacity(size);
|
||||
|
||||
|
||||
@ -20,10 +20,10 @@ pub fn PriorityQueue(comptime T: type, comptime compareFn: fn (a: T, b: T) Order
|
||||
|
||||
items: []T,
|
||||
len: usize,
|
||||
allocator: *Allocator,
|
||||
allocator: Allocator,
|
||||
|
||||
/// Initialize and return a priority queue.
|
||||
pub fn init(allocator: *Allocator) Self {
|
||||
pub fn init(allocator: Allocator) Self {
|
||||
return Self{
|
||||
.items = &[_]T{},
|
||||
.len = 0,
|
||||
@ -153,7 +153,7 @@ pub fn PriorityQueue(comptime T: type, comptime compareFn: fn (a: T, b: T) Order
|
||||
/// PriorityQueue takes ownership of the passed in slice. The slice must have been
|
||||
/// allocated with `allocator`.
|
||||
/// Deinitialize with `deinit`.
|
||||
pub fn fromOwnedSlice(allocator: *Allocator, items: []T) Self {
|
||||
pub fn fromOwnedSlice(allocator: Allocator, items: []T) Self {
|
||||
var queue = Self{
|
||||
.items = items,
|
||||
.len = items.len,
|
||||
|
||||
@ -21,7 +21,7 @@ pub fn getCwd(out_buffer: []u8) ![]u8 {
|
||||
}
|
||||
|
||||
/// Caller must free the returned memory.
|
||||
pub fn getCwdAlloc(allocator: *Allocator) ![]u8 {
|
||||
pub fn getCwdAlloc(allocator: Allocator) ![]u8 {
|
||||
// The use of MAX_PATH_BYTES here is just a heuristic: most paths will fit
|
||||
// in stack_buf, avoiding an extra allocation in the common case.
|
||||
var stack_buf: [fs.MAX_PATH_BYTES]u8 = undefined;
|
||||
@ -54,7 +54,7 @@ test "getCwdAlloc" {
|
||||
}
|
||||
|
||||
/// Caller owns resulting `BufMap`.
|
||||
pub fn getEnvMap(allocator: *Allocator) !BufMap {
|
||||
pub fn getEnvMap(allocator: Allocator) !BufMap {
|
||||
var result = BufMap.init(allocator);
|
||||
errdefer result.deinit();
|
||||
|
||||
@ -154,7 +154,7 @@ pub const GetEnvVarOwnedError = error{
|
||||
};
|
||||
|
||||
/// Caller must free returned memory.
|
||||
pub fn getEnvVarOwned(allocator: *mem.Allocator, key: []const u8) GetEnvVarOwnedError![]u8 {
|
||||
pub fn getEnvVarOwned(allocator: mem.Allocator, key: []const u8) GetEnvVarOwnedError![]u8 {
|
||||
if (builtin.os.tag == .windows) {
|
||||
const result_w = blk: {
|
||||
const key_w = try std.unicode.utf8ToUtf16LeWithNull(allocator, key);
|
||||
@ -183,10 +183,10 @@ pub fn hasEnvVarConstant(comptime key: []const u8) bool {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn hasEnvVar(allocator: *Allocator, key: []const u8) error{OutOfMemory}!bool {
|
||||
pub fn hasEnvVar(allocator: Allocator, key: []const u8) error{OutOfMemory}!bool {
|
||||
if (builtin.os.tag == .windows) {
|
||||
var stack_alloc = std.heap.stackFallback(256 * @sizeOf(u16), allocator);
|
||||
const key_w = try std.unicode.utf8ToUtf16LeWithNull(&stack_alloc.allocator, key);
|
||||
const key_w = try std.unicode.utf8ToUtf16LeWithNull(stack_alloc.get(), key);
|
||||
defer stack_alloc.allocator.free(key_w);
|
||||
return std.os.getenvW(key_w) != null;
|
||||
} else {
|
||||
@ -227,7 +227,7 @@ pub const ArgIteratorPosix = struct {
|
||||
};
|
||||
|
||||
pub const ArgIteratorWasi = struct {
|
||||
allocator: *mem.Allocator,
|
||||
allocator: mem.Allocator,
|
||||
index: usize,
|
||||
args: [][:0]u8,
|
||||
|
||||
@ -235,7 +235,7 @@ pub const ArgIteratorWasi = struct {
|
||||
|
||||
/// You must call deinit to free the internal buffer of the
|
||||
/// iterator after you are done.
|
||||
pub fn init(allocator: *mem.Allocator) InitError!ArgIteratorWasi {
|
||||
pub fn init(allocator: mem.Allocator) InitError!ArgIteratorWasi {
|
||||
const fetched_args = try ArgIteratorWasi.internalInit(allocator);
|
||||
return ArgIteratorWasi{
|
||||
.allocator = allocator,
|
||||
@ -244,7 +244,7 @@ pub const ArgIteratorWasi = struct {
|
||||
};
|
||||
}
|
||||
|
||||
fn internalInit(allocator: *mem.Allocator) InitError![][:0]u8 {
|
||||
fn internalInit(allocator: mem.Allocator) InitError![][:0]u8 {
|
||||
const w = os.wasi;
|
||||
var count: usize = undefined;
|
||||
var buf_size: usize = undefined;
|
||||
@ -325,7 +325,7 @@ pub const ArgIteratorWindows = struct {
|
||||
}
|
||||
|
||||
/// You must free the returned memory when done.
|
||||
pub fn next(self: *ArgIteratorWindows, allocator: *Allocator) ?(NextError![:0]u8) {
|
||||
pub fn next(self: *ArgIteratorWindows, allocator: Allocator) ?(NextError![:0]u8) {
|
||||
// march forward over whitespace
|
||||
while (true) : (self.index += 1) {
|
||||
const character = self.getPointAtIndex();
|
||||
@ -379,7 +379,7 @@ pub const ArgIteratorWindows = struct {
|
||||
}
|
||||
}
|
||||
|
||||
fn internalNext(self: *ArgIteratorWindows, allocator: *Allocator) NextError![:0]u8 {
|
||||
fn internalNext(self: *ArgIteratorWindows, allocator: Allocator) NextError![:0]u8 {
|
||||
var buf = std.ArrayList(u16).init(allocator);
|
||||
defer buf.deinit();
|
||||
|
||||
@ -423,7 +423,7 @@ pub const ArgIteratorWindows = struct {
|
||||
}
|
||||
}
|
||||
|
||||
fn convertFromWindowsCmdLineToUTF8(allocator: *Allocator, buf: []u16) NextError![:0]u8 {
|
||||
fn convertFromWindowsCmdLineToUTF8(allocator: Allocator, buf: []u16) NextError![:0]u8 {
|
||||
return std.unicode.utf16leToUtf8AllocZ(allocator, buf) catch |err| switch (err) {
|
||||
error.ExpectedSecondSurrogateHalf,
|
||||
error.DanglingSurrogateHalf,
|
||||
@ -463,7 +463,7 @@ pub const ArgIterator = struct {
|
||||
pub const InitError = ArgIteratorWasi.InitError;
|
||||
|
||||
/// You must deinitialize iterator's internal buffers by calling `deinit` when done.
|
||||
pub fn initWithAllocator(allocator: *mem.Allocator) InitError!ArgIterator {
|
||||
pub fn initWithAllocator(allocator: mem.Allocator) InitError!ArgIterator {
|
||||
if (builtin.os.tag == .wasi and !builtin.link_libc) {
|
||||
return ArgIterator{ .inner = try InnerType.init(allocator) };
|
||||
}
|
||||
@ -474,7 +474,7 @@ pub const ArgIterator = struct {
|
||||
pub const NextError = ArgIteratorWindows.NextError;
|
||||
|
||||
/// You must free the returned memory when done.
|
||||
pub fn next(self: *ArgIterator, allocator: *Allocator) ?(NextError![:0]u8) {
|
||||
pub fn next(self: *ArgIterator, allocator: Allocator) ?(NextError![:0]u8) {
|
||||
if (builtin.os.tag == .windows) {
|
||||
return self.inner.next(allocator);
|
||||
} else {
|
||||
@ -513,7 +513,7 @@ pub fn args() ArgIterator {
|
||||
}
|
||||
|
||||
/// You must deinitialize iterator's internal buffers by calling `deinit` when done.
|
||||
pub fn argsWithAllocator(allocator: *mem.Allocator) ArgIterator.InitError!ArgIterator {
|
||||
pub fn argsWithAllocator(allocator: mem.Allocator) ArgIterator.InitError!ArgIterator {
|
||||
return ArgIterator.initWithAllocator(allocator);
|
||||
}
|
||||
|
||||
@ -539,7 +539,7 @@ test "args iterator" {
|
||||
}
|
||||
|
||||
/// Caller must call argsFree on result.
|
||||
pub fn argsAlloc(allocator: *mem.Allocator) ![][:0]u8 {
|
||||
pub fn argsAlloc(allocator: mem.Allocator) ![][:0]u8 {
|
||||
// TODO refactor to only make 1 allocation.
|
||||
var it = if (builtin.os.tag == .wasi) try argsWithAllocator(allocator) else args();
|
||||
defer it.deinit();
|
||||
@ -579,7 +579,7 @@ pub fn argsAlloc(allocator: *mem.Allocator) ![][:0]u8 {
|
||||
return result_slice_list;
|
||||
}
|
||||
|
||||
pub fn argsFree(allocator: *mem.Allocator, args_alloc: []const [:0]u8) void {
|
||||
pub fn argsFree(allocator: mem.Allocator, args_alloc: []const [:0]u8) void {
|
||||
var total_bytes: usize = 0;
|
||||
for (args_alloc) |arg| {
|
||||
total_bytes += @sizeOf([]u8) + arg.len + 1;
|
||||
@ -741,7 +741,7 @@ pub fn getBaseAddress() usize {
|
||||
/// requirement from `std.zig.system.NativeTargetInfo.detect`. Most likely this will require
|
||||
/// introducing a new, lower-level function which takes a callback function, and then this
|
||||
/// function which takes an allocator can exist on top of it.
|
||||
pub fn getSelfExeSharedLibPaths(allocator: *Allocator) error{OutOfMemory}![][:0]u8 {
|
||||
pub fn getSelfExeSharedLibPaths(allocator: Allocator) error{OutOfMemory}![][:0]u8 {
|
||||
switch (builtin.link_mode) {
|
||||
.Static => return &[_][:0]u8{},
|
||||
.Dynamic => {},
|
||||
@ -833,7 +833,7 @@ pub const ExecvError = std.os.ExecveError || error{OutOfMemory};
|
||||
/// This function also uses the PATH environment variable to get the full path to the executable.
|
||||
/// Due to the heap-allocation, it is illegal to call this function in a fork() child.
|
||||
/// For that use case, use the `std.os` functions directly.
|
||||
pub fn execv(allocator: *mem.Allocator, argv: []const []const u8) ExecvError {
|
||||
pub fn execv(allocator: mem.Allocator, argv: []const []const u8) ExecvError {
|
||||
return execve(allocator, argv, null);
|
||||
}
|
||||
|
||||
@ -846,7 +846,7 @@ pub fn execv(allocator: *mem.Allocator, argv: []const []const u8) ExecvError {
|
||||
/// Due to the heap-allocation, it is illegal to call this function in a fork() child.
|
||||
/// For that use case, use the `std.os` functions directly.
|
||||
pub fn execve(
|
||||
allocator: *mem.Allocator,
|
||||
allocator: mem.Allocator,
|
||||
argv: []const []const u8,
|
||||
env_map: ?*const std.BufMap,
|
||||
) ExecvError {
|
||||
@ -854,7 +854,7 @@ pub fn execve(
|
||||
|
||||
var arena_allocator = std.heap.ArenaAllocator.init(allocator);
|
||||
defer arena_allocator.deinit();
|
||||
const arena = &arena_allocator.allocator;
|
||||
const arena = arena_allocator.getAllocator();
|
||||
|
||||
const argv_buf = try arena.allocSentinel(?[*:0]u8, argv.len, null);
|
||||
for (argv) |arg, i| argv_buf[i] = (try arena.dupeZ(u8, arg)).ptr;
|
||||
|
||||
@ -16,7 +16,7 @@ pub fn main() !void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();

const allocator = &arena.allocator;
const allocator = arena.getAllocator();
var args = try process.argsAlloc(allocator);
defer process.argsFree(allocator, args);

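The `&instance.allocator` to `instance.getAllocator()` rewrite in `main` above is the same mechanical edit applied to every allocator-bearing struct in the tree. A hedged sketch of the arena pattern under the accessor this diff introduces:

const std = @import("std");

fn run() !void {
    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena.deinit();

    // Old: const allocator = &arena.allocator;  (grabbing the embedded field)
    // New: ask the implementation for its type-erased interface value.
    const allocator = arena.getAllocator();

    const msg = try std.fmt.allocPrint(allocator, "hello {s}", .{"allocgate"});
    _ = msg; // everything is freed at once by arena.deinit()
}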
@ -10,7 +10,7 @@ var args_buffer: [std.fs.MAX_PATH_BYTES + std.mem.page_size]u8 = undefined;
var args_allocator = std.heap.FixedBufferAllocator.init(&args_buffer);

fn processArgs() void {
const args = std.process.argsAlloc(&args_allocator.allocator) catch {
const args = std.process.argsAlloc(args_allocator.getAllocator()) catch {
@panic("Too many bytes passed over the CLI to the test runner");
};
if (args.len != 2) {

@ -1323,15 +1323,15 @@ pub const Target = struct {
|
||||
|
||||
pub const stack_align = 16;
|
||||
|
||||
pub fn zigTriple(self: Target, allocator: *mem.Allocator) ![]u8 {
|
||||
pub fn zigTriple(self: Target, allocator: mem.Allocator) ![]u8 {
|
||||
return std.zig.CrossTarget.fromTarget(self).zigTriple(allocator);
|
||||
}
|
||||
|
||||
pub fn linuxTripleSimple(allocator: *mem.Allocator, cpu_arch: Cpu.Arch, os_tag: Os.Tag, abi: Abi) ![]u8 {
|
||||
pub fn linuxTripleSimple(allocator: mem.Allocator, cpu_arch: Cpu.Arch, os_tag: Os.Tag, abi: Abi) ![]u8 {
|
||||
return std.fmt.allocPrint(allocator, "{s}-{s}-{s}", .{ @tagName(cpu_arch), @tagName(os_tag), @tagName(abi) });
|
||||
}
|
||||
|
||||
pub fn linuxTriple(self: Target, allocator: *mem.Allocator) ![]u8 {
|
||||
pub fn linuxTriple(self: Target, allocator: mem.Allocator) ![]u8 {
|
||||
return linuxTripleSimple(allocator, self.cpu.arch, self.os.tag, self.abi);
|
||||
}
|
||||
|
||||
|
||||
@ -7,11 +7,11 @@ const print = std.debug.print;
pub const FailingAllocator = @import("testing/failing_allocator.zig").FailingAllocator;

/// This should only be used in temporary test programs.
pub const allocator = &allocator_instance.allocator;
pub const allocator = allocator_instance.getAllocator();
pub var allocator_instance = std.heap.GeneralPurposeAllocator(.{}){};

pub const failing_allocator = &failing_allocator_instance.allocator;
pub var failing_allocator_instance = FailingAllocator.init(&base_allocator_instance.allocator, 0);
pub const failing_allocator = failing_allocator_instance.getAllocator();
pub var failing_allocator_instance = FailingAllocator.init(base_allocator_instance.getAllocator(), 0);

pub var base_allocator_instance = std.heap.FixedBufferAllocator.init("");

@ -12,10 +12,9 @@ const mem = std.mem;
|
||||
/// Then use `failing_allocator` anywhere you would have used a
|
||||
/// different allocator.
|
||||
pub const FailingAllocator = struct {
|
||||
allocator: mem.Allocator,
|
||||
index: usize,
|
||||
fail_index: usize,
|
||||
internal_allocator: *mem.Allocator,
|
||||
internal_allocator: mem.Allocator,
|
||||
allocated_bytes: usize,
|
||||
freed_bytes: usize,
|
||||
allocations: usize,
|
||||
@ -29,7 +28,7 @@ pub const FailingAllocator = struct {
|
||||
/// var a = try failing_alloc.create(i32);
|
||||
/// var b = try failing_alloc.create(i32);
|
||||
/// testing.expectError(error.OutOfMemory, failing_alloc.create(i32));
|
||||
pub fn init(allocator: *mem.Allocator, fail_index: usize) FailingAllocator {
|
||||
pub fn init(allocator: mem.Allocator, fail_index: usize) FailingAllocator {
|
||||
return FailingAllocator{
|
||||
.internal_allocator = allocator,
|
||||
.fail_index = fail_index,
|
||||
@ -38,25 +37,24 @@ pub const FailingAllocator = struct {
.freed_bytes = 0,
.allocations = 0,
.deallocations = 0,
.allocator = mem.Allocator{
.allocFn = alloc,
.resizeFn = resize,
},
};
}

pub fn getAllocator(self: *FailingAllocator) mem.Allocator {
return mem.Allocator.init(self, alloc, resize);
}

fn alloc(
|
||||
allocator: *std.mem.Allocator,
|
||||
self: *FailingAllocator,
|
||||
len: usize,
|
||||
ptr_align: u29,
|
||||
len_align: u29,
|
||||
return_address: usize,
|
||||
) error{OutOfMemory}![]u8 {
|
||||
const self = @fieldParentPtr(FailingAllocator, "allocator", allocator);
|
||||
if (self.index == self.fail_index) {
|
||||
return error.OutOfMemory;
|
||||
}
|
||||
const result = try self.internal_allocator.allocFn(self.internal_allocator, len, ptr_align, len_align, return_address);
|
||||
const result = try self.internal_allocator.allocFn(self.internal_allocator.ptr, len, ptr_align, len_align, return_address);
|
||||
self.allocated_bytes += result.len;
|
||||
self.allocations += 1;
|
||||
self.index += 1;
|
||||
@ -64,15 +62,14 @@ pub const FailingAllocator = struct {
|
||||
}
|
||||
|
||||
fn resize(
|
||||
allocator: *std.mem.Allocator,
|
||||
self: *FailingAllocator,
|
||||
old_mem: []u8,
|
||||
old_align: u29,
|
||||
new_len: usize,
|
||||
len_align: u29,
|
||||
ra: usize,
|
||||
) error{OutOfMemory}!usize {
|
||||
const self = @fieldParentPtr(FailingAllocator, "allocator", allocator);
|
||||
const r = self.internal_allocator.resizeFn(self.internal_allocator, old_mem, old_align, new_len, len_align, ra) catch |e| {
|
||||
const r = self.internal_allocator.resizeFn(self.internal_allocator.ptr, old_mem, old_align, new_len, len_align, ra) catch |e| {
|
||||
std.debug.assert(new_len > old_mem.len);
|
||||
return e;
|
||||
};
|
||||
|
||||
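With the embedded `allocator` field gone, FailingAllocator is built from a backing `Allocator` value and exposes the interface through `getAllocator()`, which is how the parser tests later in this diff use it. A small hedged sketch of the induced-failure pattern (the test name is illustrative):

const std = @import("std");

test "first allocation fails" {
    // fail_index = 0: the very first allocation request returns error.OutOfMemory.
    var failing = std.testing.FailingAllocator.init(std.testing.allocator, 0);
    const allocator = failing.getAllocator();

    try std.testing.expectError(error.OutOfMemory, allocator.create(i32));
}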
@ -550,7 +550,7 @@ fn testDecode(bytes: []const u8) !u21 {
|
||||
}
|
||||
|
||||
/// Caller must free returned memory.
|
||||
pub fn utf16leToUtf8Alloc(allocator: *mem.Allocator, utf16le: []const u16) ![]u8 {
|
||||
pub fn utf16leToUtf8Alloc(allocator: mem.Allocator, utf16le: []const u16) ![]u8 {
|
||||
// optimistically guess that it will all be ascii.
|
||||
var result = try std.ArrayList(u8).initCapacity(allocator, utf16le.len);
|
||||
errdefer result.deinit();
|
||||
@ -567,7 +567,7 @@ pub fn utf16leToUtf8Alloc(allocator: *mem.Allocator, utf16le: []const u16) ![]u8
|
||||
}
|
||||
|
||||
/// Caller must free returned memory.
|
||||
pub fn utf16leToUtf8AllocZ(allocator: *mem.Allocator, utf16le: []const u16) ![:0]u8 {
|
||||
pub fn utf16leToUtf8AllocZ(allocator: mem.Allocator, utf16le: []const u16) ![:0]u8 {
|
||||
// optimistically guess that it will all be ascii.
|
||||
var result = try std.ArrayList(u8).initCapacity(allocator, utf16le.len);
|
||||
errdefer result.deinit();
|
||||
@ -661,7 +661,7 @@ test "utf16leToUtf8" {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn utf8ToUtf16LeWithNull(allocator: *mem.Allocator, utf8: []const u8) ![:0]u16 {
|
||||
pub fn utf8ToUtf16LeWithNull(allocator: mem.Allocator, utf8: []const u8) ![:0]u16 {
|
||||
// optimistically guess that it will not require surrogate pairs
|
||||
var result = try std.ArrayList(u16).initCapacity(allocator, utf8.len + 1);
|
||||
errdefer result.deinit();
|
||||
|
||||
@ -361,7 +361,7 @@ pub const Type = struct {
|
||||
std.mem.eql(Valtype, self.returns, other.returns);
|
||||
}
|
||||
|
||||
pub fn deinit(self: *Type, gpa: *std.mem.Allocator) void {
|
||||
pub fn deinit(self: *Type, gpa: std.mem.Allocator) void {
|
||||
gpa.free(self.params);
|
||||
gpa.free(self.returns);
|
||||
self.* = undefined;
|
||||
|
||||
@ -100,7 +100,7 @@ pub const BinNameOptions = struct {
|
||||
};
|
||||
|
||||
/// Returns the standard file system basename of a binary generated by the Zig compiler.
|
||||
pub fn binNameAlloc(allocator: *std.mem.Allocator, options: BinNameOptions) error{OutOfMemory}![]u8 {
|
||||
pub fn binNameAlloc(allocator: std.mem.Allocator, options: BinNameOptions) error{OutOfMemory}![]u8 {
|
||||
const root_name = options.root_name;
|
||||
const target = options.target;
|
||||
const ofmt = options.object_format orelse target.getObjectFormat();
|
||||
|
||||
@ -34,7 +34,7 @@ pub const Location = struct {
|
||||
line_end: usize,
|
||||
};
|
||||
|
||||
pub fn deinit(tree: *Tree, gpa: *mem.Allocator) void {
|
||||
pub fn deinit(tree: *Tree, gpa: mem.Allocator) void {
|
||||
tree.tokens.deinit(gpa);
|
||||
tree.nodes.deinit(gpa);
|
||||
gpa.free(tree.extra_data);
|
||||
@ -52,7 +52,7 @@ pub const RenderError = error{
|
||||
/// for allocating extra stack memory if needed, because this function utilizes recursion.
|
||||
/// Note: that's not actually true yet, see https://github.com/ziglang/zig/issues/1006.
|
||||
/// Caller owns the returned slice of bytes, allocated with `gpa`.
|
||||
pub fn render(tree: Tree, gpa: *mem.Allocator) RenderError![]u8 {
|
||||
pub fn render(tree: Tree, gpa: mem.Allocator) RenderError![]u8 {
|
||||
var buffer = std.ArrayList(u8).init(gpa);
|
||||
defer buffer.deinit();
|
||||
|
||||
|
||||
@ -520,7 +520,7 @@ pub fn isNative(self: CrossTarget) bool {
|
||||
return self.isNativeCpu() and self.isNativeOs() and self.isNativeAbi();
|
||||
}
|
||||
|
||||
pub fn zigTriple(self: CrossTarget, allocator: *mem.Allocator) error{OutOfMemory}![]u8 {
|
||||
pub fn zigTriple(self: CrossTarget, allocator: mem.Allocator) error{OutOfMemory}![]u8 {
|
||||
if (self.isNative()) {
|
||||
return allocator.dupe(u8, "native");
|
||||
}
|
||||
@ -559,13 +559,13 @@ pub fn zigTriple(self: CrossTarget, allocator: *mem.Allocator) error{OutOfMemory
|
||||
return result.toOwnedSlice();
|
||||
}
|
||||
|
||||
pub fn allocDescription(self: CrossTarget, allocator: *mem.Allocator) ![]u8 {
|
||||
pub fn allocDescription(self: CrossTarget, allocator: mem.Allocator) ![]u8 {
|
||||
// TODO is there anything else worthy of the description that is not
|
||||
// already captured in the triple?
|
||||
return self.zigTriple(allocator);
|
||||
}
|
||||
|
||||
pub fn linuxTriple(self: CrossTarget, allocator: *mem.Allocator) ![]u8 {
|
||||
pub fn linuxTriple(self: CrossTarget, allocator: mem.Allocator) ![]u8 {
|
||||
return Target.linuxTripleSimple(allocator, self.getCpuArch(), self.getOsTag(), self.getAbi());
|
||||
}
|
||||
|
||||
@ -576,7 +576,7 @@ pub fn wantSharedLibSymLinks(self: CrossTarget) bool {
|
||||
pub const VcpkgLinkage = std.builtin.LinkMode;
|
||||
|
||||
/// Returned slice must be freed by the caller.
|
||||
pub fn vcpkgTriplet(self: CrossTarget, allocator: *mem.Allocator, linkage: VcpkgLinkage) ![]u8 {
|
||||
pub fn vcpkgTriplet(self: CrossTarget, allocator: mem.Allocator, linkage: VcpkgLinkage) ![]u8 {
|
||||
const arch = switch (self.getCpuArch()) {
|
||||
.i386 => "x86",
|
||||
.x86_64 => "x64",
|
||||
|
||||
@ -11,7 +11,7 @@ pub const Error = error{ParseError} || Allocator.Error;
|
||||
|
||||
/// Result should be freed with tree.deinit() when there are
|
||||
/// no more references to any of the tokens or nodes.
|
||||
pub fn parse(gpa: *Allocator, source: [:0]const u8) Allocator.Error!Ast {
|
||||
pub fn parse(gpa: Allocator, source: [:0]const u8) Allocator.Error!Ast {
|
||||
var tokens = Ast.TokenList{};
|
||||
defer tokens.deinit(gpa);
|
||||
|
||||
@ -81,7 +81,7 @@ const null_node: Node.Index = 0;
|
||||
|
||||
/// Represents in-progress parsing, will be converted to an Ast after completion.
|
||||
const Parser = struct {
|
||||
gpa: *Allocator,
|
||||
gpa: Allocator,
|
||||
source: []const u8,
|
||||
token_tags: []const Token.Tag,
|
||||
token_starts: []const Ast.ByteOffset,
|
||||
|
||||
@ -1220,7 +1220,7 @@ test "zig fmt: doc comments on param decl" {
|
||||
try testCanonical(
|
||||
\\pub const Allocator = struct {
|
||||
\\ shrinkFn: fn (
|
||||
\\ self: *Allocator,
|
||||
\\ self: Allocator,
|
||||
\\ /// Guaranteed to be the same as what was returned from most recent call to
|
||||
\\ /// `allocFn`, `reallocFn`, or `shrinkFn`.
|
||||
\\ old_mem: []u8,
|
||||
@ -4250,7 +4250,7 @@ test "zig fmt: Only indent multiline string literals in function calls" {
|
||||
|
||||
test "zig fmt: Don't add extra newline after if" {
|
||||
try testCanonical(
|
||||
\\pub fn atomicSymLink(allocator: *Allocator, existing_path: []const u8, new_path: []const u8) !void {
|
||||
\\pub fn atomicSymLink(allocator: Allocator, existing_path: []const u8, new_path: []const u8) !void {
|
||||
\\ if (cwd().symLink(existing_path, new_path, .{})) {
|
||||
\\ return;
|
||||
\\ }
|
||||
@ -5319,7 +5319,7 @@ const maxInt = std.math.maxInt;
|
||||
|
||||
var fixed_buffer_mem: [100 * 1024]u8 = undefined;
|
||||
|
||||
fn testParse(source: [:0]const u8, allocator: *mem.Allocator, anything_changed: *bool) ![]u8 {
|
||||
fn testParse(source: [:0]const u8, allocator: mem.Allocator, anything_changed: *bool) ![]u8 {
|
||||
const stderr = io.getStdErr().writer();
|
||||
|
||||
var tree = try std.zig.parse(allocator, source);
|
||||
@ -5351,9 +5351,10 @@ fn testTransform(source: [:0]const u8, expected_source: []const u8) !void {
|
||||
const needed_alloc_count = x: {
|
||||
// Try it once with unlimited memory, make sure it works
|
||||
var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
|
||||
var failing_allocator = std.testing.FailingAllocator.init(&fixed_allocator.allocator, maxInt(usize));
|
||||
var failing_allocator = std.testing.FailingAllocator.init(fixed_allocator.getAllocator(), maxInt(usize));
|
||||
const allocator = failing_allocator.getAllocator();
|
||||
var anything_changed: bool = undefined;
|
||||
const result_source = try testParse(source, &failing_allocator.allocator, &anything_changed);
|
||||
const result_source = try testParse(source, allocator, &anything_changed);
|
||||
try std.testing.expectEqualStrings(expected_source, result_source);
|
||||
const changes_expected = source.ptr != expected_source.ptr;
|
||||
if (anything_changed != changes_expected) {
|
||||
@ -5361,16 +5362,16 @@ fn testTransform(source: [:0]const u8, expected_source: []const u8) !void {
|
||||
return error.TestFailed;
|
||||
}
|
||||
try std.testing.expect(anything_changed == changes_expected);
|
||||
failing_allocator.allocator.free(result_source);
|
||||
allocator.free(result_source);
|
||||
break :x failing_allocator.index;
|
||||
};
|
||||
|
||||
var fail_index: usize = 0;
|
||||
while (fail_index < needed_alloc_count) : (fail_index += 1) {
|
||||
var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
|
||||
var failing_allocator = std.testing.FailingAllocator.init(&fixed_allocator.allocator, fail_index);
|
||||
var failing_allocator = std.testing.FailingAllocator.init(fixed_allocator.getAllocator(), fail_index);
var anything_changed: bool = undefined;
if (testParse(source, &failing_allocator.allocator, &anything_changed)) |_| {
if (testParse(source, failing_allocator.getAllocator(), &anything_changed)) |_| {
return error.NondeterministicMemoryUsage;
} else |err| switch (err) {
error.OutOfMemory => {

@@ -33,7 +33,7 @@ pub fn main() !void {

fn testOnce() usize {
var fixed_buf_alloc = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
var allocator = &fixed_buf_alloc.allocator;
var allocator = fixed_buf_alloc.getAllocator();
_ = std.zig.parse(allocator, source) catch @panic("parse failure");
return fixed_buf_alloc.end_index;
}

@@ -37,7 +37,7 @@ pub fn renderTree(buffer: *std.ArrayList(u8), tree: Ast) Error!void {
}

/// Render all members in the given slice, keeping empty lines where appropriate
fn renderMembers(gpa: *Allocator, ais: *Ais, tree: Ast, members: []const Ast.Node.Index) Error!void {
fn renderMembers(gpa: Allocator, ais: *Ais, tree: Ast, members: []const Ast.Node.Index) Error!void {
if (members.len == 0) return;
try renderMember(gpa, ais, tree, members[0], .newline);
for (members[1..]) |member| {
@@ -46,7 +46,7 @@ fn renderMembers(gpa: *Allocator, ais: *Ais, tree: Ast, members: []const Ast.Nod
}
}

fn renderMember(gpa: *Allocator, ais: *Ais, tree: Ast, decl: Ast.Node.Index, space: Space) Error!void {
fn renderMember(gpa: Allocator, ais: *Ais, tree: Ast, decl: Ast.Node.Index, space: Space) Error!void {
const token_tags = tree.tokens.items(.tag);
const main_tokens = tree.nodes.items(.main_token);
const datas = tree.nodes.items(.data);
@@ -168,7 +168,7 @@ fn renderMember(gpa: *Allocator, ais: *Ais, tree: Ast, decl: Ast.Node.Index, spa
}

/// Render all expressions in the slice, keeping empty lines where appropriate
fn renderExpressions(gpa: *Allocator, ais: *Ais, tree: Ast, expressions: []const Ast.Node.Index, space: Space) Error!void {
fn renderExpressions(gpa: Allocator, ais: *Ais, tree: Ast, expressions: []const Ast.Node.Index, space: Space) Error!void {
if (expressions.len == 0) return;
try renderExpression(gpa, ais, tree, expressions[0], space);
for (expressions[1..]) |expression| {
@@ -177,7 +177,7 @@ fn renderExpressions(gpa: *Allocator, ais: *Ais, tree: Ast, expressions: []const
}
}

fn renderExpression(gpa: *Allocator, ais: *Ais, tree: Ast, node: Ast.Node.Index, space: Space) Error!void {
fn renderExpression(gpa: Allocator, ais: *Ais, tree: Ast, node: Ast.Node.Index, space: Space) Error!void {
const token_tags = tree.tokens.items(.tag);
const main_tokens = tree.nodes.items(.main_token);
const node_tags = tree.nodes.items(.tag);
@@ -710,7 +710,7 @@ fn renderExpression(gpa: *Allocator, ais: *Ais, tree: Ast, node: Ast.Node.Index,
}

fn renderArrayType(
gpa: *Allocator,
gpa: Allocator,
ais: *Ais,
tree: Ast,
array_type: Ast.full.ArrayType,
@@ -732,7 +732,7 @@ fn renderArrayType(
}

fn renderPtrType(
gpa: *Allocator,
gpa: Allocator,
ais: *Ais,
tree: Ast,
ptr_type: Ast.full.PtrType,
@@ -825,7 +825,7 @@ fn renderPtrType(
}

fn renderSlice(
gpa: *Allocator,
gpa: Allocator,
ais: *Ais,
tree: Ast,
slice_node: Ast.Node.Index,
@@ -861,7 +861,7 @@ fn renderSlice(
}

fn renderAsmOutput(
gpa: *Allocator,
gpa: Allocator,
ais: *Ais,
tree: Ast,
asm_output: Ast.Node.Index,
@@ -891,7 +891,7 @@ fn renderAsmOutput(
}

fn renderAsmInput(
gpa: *Allocator,
gpa: Allocator,
ais: *Ais,
tree: Ast,
asm_input: Ast.Node.Index,
@@ -912,7 +912,7 @@ fn renderAsmInput(
return renderToken(ais, tree, datas[asm_input].rhs, space); // rparen
}

fn renderVarDecl(gpa: *Allocator, ais: *Ais, tree: Ast, var_decl: Ast.full.VarDecl) Error!void {
fn renderVarDecl(gpa: Allocator, ais: *Ais, tree: Ast, var_decl: Ast.full.VarDecl) Error!void {
if (var_decl.visib_token) |visib_token| {
try renderToken(ais, tree, visib_token, Space.space); // pub
}
@@ -1019,7 +1019,7 @@ fn renderVarDecl(gpa: *Allocator, ais: *Ais, tree: Ast, var_decl: Ast.full.VarDe
return renderToken(ais, tree, var_decl.ast.mut_token + 2, .newline); // ;
}

fn renderIf(gpa: *Allocator, ais: *Ais, tree: Ast, if_node: Ast.full.If, space: Space) Error!void {
fn renderIf(gpa: Allocator, ais: *Ais, tree: Ast, if_node: Ast.full.If, space: Space) Error!void {
return renderWhile(gpa, ais, tree, .{
.ast = .{
.while_token = if_node.ast.if_token,
@@ -1038,7 +1038,7 @@ fn renderIf(gpa: *Allocator, ais: *Ais, tree: Ast, if_node: Ast.full.If, space:

/// Note that this function is additionally used to render if and for expressions, with
/// respective values set to null.
fn renderWhile(gpa: *Allocator, ais: *Ais, tree: Ast, while_node: Ast.full.While, space: Space) Error!void {
fn renderWhile(gpa: Allocator, ais: *Ais, tree: Ast, while_node: Ast.full.While, space: Space) Error!void {
const node_tags = tree.nodes.items(.tag);
const token_tags = tree.tokens.items(.tag);

@@ -1141,7 +1141,7 @@ fn renderWhile(gpa: *Allocator, ais: *Ais, tree: Ast, while_node: Ast.full.While
}

fn renderContainerField(
gpa: *Allocator,
gpa: Allocator,
ais: *Ais,
tree: Ast,
field: Ast.full.ContainerField,
@@ -1215,7 +1215,7 @@ fn renderContainerField(
}

fn renderBuiltinCall(
gpa: *Allocator,
gpa: Allocator,
ais: *Ais,
tree: Ast,
builtin_token: Ast.TokenIndex,
@@ -1272,7 +1272,7 @@ fn renderBuiltinCall(
}
}

fn renderFnProto(gpa: *Allocator, ais: *Ais, tree: Ast, fn_proto: Ast.full.FnProto, space: Space) Error!void {
fn renderFnProto(gpa: Allocator, ais: *Ais, tree: Ast, fn_proto: Ast.full.FnProto, space: Space) Error!void {
const token_tags = tree.tokens.items(.tag);
const token_starts = tree.tokens.items(.start);

@@ -1488,7 +1488,7 @@ fn renderFnProto(gpa: *Allocator, ais: *Ais, tree: Ast, fn_proto: Ast.full.FnPro
}

fn renderSwitchCase(
gpa: *Allocator,
gpa: Allocator,
ais: *Ais,
tree: Ast,
switch_case: Ast.full.SwitchCase,
@@ -1541,7 +1541,7 @@ fn renderSwitchCase(
}

fn renderBlock(
gpa: *Allocator,
gpa: Allocator,
ais: *Ais,
tree: Ast,
block_node: Ast.Node.Index,
@@ -1581,7 +1581,7 @@ fn renderBlock(
}

fn renderStructInit(
gpa: *Allocator,
gpa: Allocator,
ais: *Ais,
tree: Ast,
struct_node: Ast.Node.Index,
@@ -1640,7 +1640,7 @@ fn renderStructInit(
}

fn renderArrayInit(
gpa: *Allocator,
gpa: Allocator,
ais: *Ais,
tree: Ast,
array_init: Ast.full.ArrayInit,
@@ -1859,7 +1859,7 @@ fn renderArrayInit(
}

fn renderContainerDecl(
gpa: *Allocator,
gpa: Allocator,
ais: *Ais,
tree: Ast,
container_decl_node: Ast.Node.Index,
@@ -1956,7 +1956,7 @@ fn renderContainerDecl(
}

fn renderAsm(
gpa: *Allocator,
gpa: Allocator,
ais: *Ais,
tree: Ast,
asm_node: Ast.full.Asm,
@@ -2105,7 +2105,7 @@ fn renderAsm(
}

fn renderCall(
gpa: *Allocator,
gpa: Allocator,
ais: *Ais,
tree: Ast,
call: Ast.full.Call,
@@ -2180,7 +2180,7 @@ fn renderCall(

/// Renders the given expression indented, popping the indent before rendering
/// any following line comments
fn renderExpressionIndented(gpa: *Allocator, ais: *Ais, tree: Ast, node: Ast.Node.Index, space: Space) Error!void {
fn renderExpressionIndented(gpa: Allocator, ais: *Ais, tree: Ast, node: Ast.Node.Index, space: Space) Error!void {
const token_starts = tree.tokens.items(.start);
const token_tags = tree.tokens.items(.tag);

@@ -2238,7 +2238,7 @@ fn renderExpressionIndented(gpa: *Allocator, ais: *Ais, tree: Ast, node: Ast.Nod

/// Render an expression, and the comma that follows it, if it is present in the source.
/// If a comma is present, and `space` is `Space.comma`, render only a single comma.
fn renderExpressionComma(gpa: *Allocator, ais: *Ais, tree: Ast, node: Ast.Node.Index, space: Space) Error!void {
fn renderExpressionComma(gpa: Allocator, ais: *Ais, tree: Ast, node: Ast.Node.Index, space: Space) Error!void {
const token_tags = tree.tokens.items(.tag);
const maybe_comma = tree.lastToken(node) + 1;
if (token_tags[maybe_comma] == .comma and space != .comma) {

@@ -131,7 +131,7 @@ pub fn parseAppend(buf: *std.ArrayList(u8), bytes: []const u8) error{OutOfMemory

/// Higher level API. Does not return extra info about parse errors.
/// Caller owns returned memory.
pub fn parseAlloc(allocator: *std.mem.Allocator, bytes: []const u8) ParseError![]u8 {
pub fn parseAlloc(allocator: std.mem.Allocator, bytes: []const u8) ParseError![]u8 {
var buf = std.ArrayList(u8).init(allocator);
defer buf.deinit();

@@ -147,7 +147,7 @@ test "parse" {

var fixed_buf_mem: [32]u8 = undefined;
var fixed_buf_alloc = std.heap.FixedBufferAllocator.init(fixed_buf_mem[0..]);
var alloc = &fixed_buf_alloc.allocator;
var alloc = fixed_buf_alloc.getAllocator();

try expect(eql(u8, "foo", try parseAlloc(alloc, "\"foo\"")));
try expect(eql(u8, "foo", try parseAlloc(alloc, "\"f\x6f\x6f\"")));

@@ -21,7 +21,7 @@ pub const NativePaths = struct {
rpaths: ArrayList([:0]u8),
warnings: ArrayList([:0]u8),

pub fn detect(allocator: *Allocator, native_info: NativeTargetInfo) !NativePaths {
pub fn detect(allocator: Allocator, native_info: NativeTargetInfo) !NativePaths {
const native_target = native_info.target;

var self: NativePaths = .{
@@ -237,7 +237,7 @@ pub const NativeTargetInfo = struct {
/// Any resources this function allocates are released before returning, and so there is no
/// deinitialization method.
/// TODO Remove the Allocator requirement from this function.
pub fn detect(allocator: *Allocator, cross_target: CrossTarget) DetectError!NativeTargetInfo {
pub fn detect(allocator: Allocator, cross_target: CrossTarget) DetectError!NativeTargetInfo {
var os = cross_target.getOsTag().defaultVersionRange(cross_target.getCpuArch());
if (cross_target.os_tag == null) {
switch (builtin.target.os.tag) {
@@ -441,7 +441,7 @@ pub const NativeTargetInfo = struct {
/// we fall back to the defaults.
/// TODO Remove the Allocator requirement from this function.
fn detectAbiAndDynamicLinker(
allocator: *Allocator,
allocator: Allocator,
cpu: Target.Cpu,
os: Target.Os,
cross_target: CrossTarget,

@@ -11,7 +11,7 @@ pub const macos = @import("darwin/macos.zig");
/// Therefore, we resort to the same tool used by Homebrew, namely, invoking `xcode-select --print-path`
/// and checking if the status is nonzero or the returned string in nonempty.
/// https://github.com/Homebrew/brew/blob/e119bdc571dcb000305411bc1e26678b132afb98/Library/Homebrew/brew.sh#L630
pub fn isDarwinSDKInstalled(allocator: *Allocator) bool {
pub fn isDarwinSDKInstalled(allocator: Allocator) bool {
const argv = &[_][]const u8{ "/usr/bin/xcode-select", "--print-path" };
const result = std.ChildProcess.exec(.{ .allocator = allocator, .argv = argv }) catch return false;
defer {
@@ -29,7 +29,7 @@ pub fn isDarwinSDKInstalled(allocator: *Allocator) bool {
/// Calls `xcrun --sdk <target_sdk> --show-sdk-path` which fetches the path to the SDK sysroot (if any).
/// Subsequently calls `xcrun --sdk <target_sdk> --show-sdk-version` which fetches version of the SDK.
/// The caller needs to deinit the resulting struct.
pub fn getDarwinSDK(allocator: *Allocator, target: Target) ?DarwinSDK {
pub fn getDarwinSDK(allocator: Allocator, target: Target) ?DarwinSDK {
const is_simulator_abi = target.abi == .simulator;
const sdk = switch (target.os.tag) {
.macos => "macosx",
@@ -82,7 +82,7 @@ pub const DarwinSDK = struct {
path: []const u8,
version: Version,

pub fn deinit(self: DarwinSDK, allocator: *Allocator) void {
pub fn deinit(self: DarwinSDK, allocator: Allocator) void {
allocator.free(self.path);
}
};

@ -841,7 +841,7 @@ pub fn extraData(air: Air, comptime T: type, index: usize) struct { data: T, end
|
||||
};
|
||||
}
|
||||
|
||||
pub fn deinit(air: *Air, gpa: *std.mem.Allocator) void {
|
||||
pub fn deinit(air: *Air, gpa: std.mem.Allocator) void {
|
||||
air.instructions.deinit(gpa);
|
||||
gpa.free(air.extra);
|
||||
gpa.free(air.values);
|
||||
|
||||
@ -16,7 +16,7 @@ const indexToRef = Zir.indexToRef;
|
||||
const trace = @import("tracy.zig").trace;
|
||||
const BuiltinFn = @import("BuiltinFn.zig");
|
||||
|
||||
gpa: *Allocator,
|
||||
gpa: Allocator,
|
||||
tree: *const Ast,
|
||||
instructions: std.MultiArrayList(Zir.Inst) = .{},
|
||||
extra: ArrayListUnmanaged(u32) = .{},
|
||||
@ -33,7 +33,7 @@ source_line: u32 = 0,
|
||||
source_column: u32 = 0,
|
||||
/// Used for temporary allocations; freed after AstGen is complete.
|
||||
/// The resulting ZIR code has no references to anything in this arena.
|
||||
arena: *Allocator,
|
||||
arena: Allocator,
|
||||
string_table: std.HashMapUnmanaged(u32, void, StringIndexContext, std.hash_map.default_max_load_percentage) = .{},
|
||||
compile_errors: ArrayListUnmanaged(Zir.Inst.CompileErrors.Item) = .{},
|
||||
/// The topmost block of the current function.
|
||||
@ -92,7 +92,7 @@ fn appendRefsAssumeCapacity(astgen: *AstGen, refs: []const Zir.Inst.Ref) void {
|
||||
astgen.extra.appendSliceAssumeCapacity(coerced);
|
||||
}
|
||||
|
||||
pub fn generate(gpa: *Allocator, tree: Ast) Allocator.Error!Zir {
|
||||
pub fn generate(gpa: Allocator, tree: Ast) Allocator.Error!Zir {
|
||||
var arena = std.heap.ArenaAllocator.init(gpa);
|
||||
defer arena.deinit();
|
||||
|
||||
@ -196,7 +196,7 @@ pub fn generate(gpa: *Allocator, tree: Ast) Allocator.Error!Zir {
|
||||
};
|
||||
}
|
||||
|
||||
pub fn deinit(astgen: *AstGen, gpa: *Allocator) void {
|
||||
pub fn deinit(astgen: *AstGen, gpa: Allocator) void {
|
||||
astgen.instructions.deinit(gpa);
|
||||
astgen.extra.deinit(gpa);
|
||||
astgen.string_table.deinit(gpa);
|
||||
@ -2460,7 +2460,7 @@ fn makeDeferScope(
|
||||
astgen: *AstGen,
|
||||
scope: *Scope,
|
||||
node: Ast.Node.Index,
|
||||
block_arena: *Allocator,
|
||||
block_arena: Allocator,
|
||||
scope_tag: Scope.Tag,
|
||||
) InnerError!*Scope {
|
||||
const tree = astgen.tree;
|
||||
@ -2486,7 +2486,7 @@ fn varDecl(
|
||||
gz: *GenZir,
|
||||
scope: *Scope,
|
||||
node: Ast.Node.Index,
|
||||
block_arena: *Allocator,
|
||||
block_arena: Allocator,
|
||||
var_decl: Ast.full.VarDecl,
|
||||
) InnerError!*Scope {
|
||||
try emitDbgNode(gz, node);
|
||||
@ -3030,7 +3030,7 @@ const WipMembers = struct {
|
||||
/// (4 for src_hash + line + name + value + align + link_section + address_space)
|
||||
const max_decl_size = 10;
|
||||
|
||||
pub fn init(gpa: *Allocator, payload: *ArrayListUnmanaged(u32), decl_count: u32, field_count: u32, comptime bits_per_field: u32, comptime max_field_size: u32) Allocator.Error!Self {
|
||||
pub fn init(gpa: Allocator, payload: *ArrayListUnmanaged(u32), decl_count: u32, field_count: u32, comptime bits_per_field: u32, comptime max_field_size: u32) Allocator.Error!Self {
|
||||
const payload_top = @intCast(u32, payload.items.len);
|
||||
const decls_start = payload_top + (decl_count + decls_per_u32 - 1) / decls_per_u32;
|
||||
const field_bits_start = decls_start + decl_count * max_decl_size;
|
||||
@ -6178,7 +6178,7 @@ fn tunnelThroughClosure(
|
||||
ns: ?*Scope.Namespace,
|
||||
value: Zir.Inst.Ref,
|
||||
token: Ast.TokenIndex,
|
||||
gpa: *Allocator,
|
||||
gpa: Allocator,
|
||||
) !Zir.Inst.Ref {
|
||||
// For trivial values, we don't need a tunnel.
|
||||
// Just return the ref.
|
||||
@ -8806,7 +8806,7 @@ const Scope = struct {
|
||||
/// ref of the capture for decls in this namespace
|
||||
captures: std.AutoArrayHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index) = .{},
|
||||
|
||||
pub fn deinit(self: *Namespace, gpa: *Allocator) void {
|
||||
pub fn deinit(self: *Namespace, gpa: Allocator) void {
|
||||
self.decls.deinit(gpa);
|
||||
self.captures.deinit(gpa);
|
||||
self.* = undefined;
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
gpa: *Allocator,
|
||||
gpa: Allocator,
|
||||
manifest_dir: fs.Dir,
|
||||
hash: HashHelper = .{},
|
||||
|
||||
@ -48,7 +48,7 @@ pub const File = struct {
|
||||
bin_digest: BinDigest,
|
||||
contents: ?[]const u8,
|
||||
|
||||
pub fn deinit(self: *File, allocator: *Allocator) void {
|
||||
pub fn deinit(self: *File, allocator: Allocator) void {
|
||||
if (self.path) |owned_slice| {
|
||||
allocator.free(owned_slice);
|
||||
self.path = null;
|
||||
|
||||
@ -36,7 +36,7 @@ const libtsan = @import("libtsan.zig");
|
||||
const Zir = @import("Zir.zig");
|
||||
|
||||
/// General-purpose allocator. Used for both temporary and long-term storage.
|
||||
gpa: *Allocator,
|
||||
gpa: Allocator,
|
||||
/// Arena-allocated memory used during initialization. Should be untouched until deinit.
|
||||
arena_state: std.heap.ArenaAllocator.State,
|
||||
bin_file: *link.File,
|
||||
@ -164,7 +164,7 @@ pub const CRTFile = struct {
|
||||
lock: Cache.Lock,
|
||||
full_object_path: []const u8,
|
||||
|
||||
fn deinit(self: *CRTFile, gpa: *Allocator) void {
|
||||
fn deinit(self: *CRTFile, gpa: Allocator) void {
|
||||
self.lock.release();
|
||||
gpa.free(self.full_object_path);
|
||||
self.* = undefined;
|
||||
@ -253,14 +253,14 @@ pub const CObject = struct {
|
||||
line: u32,
|
||||
column: u32,
|
||||
|
||||
pub fn destroy(em: *ErrorMsg, gpa: *Allocator) void {
|
||||
pub fn destroy(em: *ErrorMsg, gpa: Allocator) void {
|
||||
gpa.free(em.msg);
|
||||
gpa.destroy(em);
|
||||
}
|
||||
};
|
||||
|
||||
/// Returns if there was failure.
|
||||
pub fn clearStatus(self: *CObject, gpa: *Allocator) bool {
|
||||
pub fn clearStatus(self: *CObject, gpa: Allocator) bool {
|
||||
switch (self.status) {
|
||||
.new => return false,
|
||||
.failure, .failure_retryable => {
|
||||
@ -276,7 +276,7 @@ pub const CObject = struct {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn destroy(self: *CObject, gpa: *Allocator) void {
|
||||
pub fn destroy(self: *CObject, gpa: Allocator) void {
|
||||
_ = self.clearStatus(gpa);
|
||||
gpa.destroy(self);
|
||||
}
|
||||
@ -305,7 +305,7 @@ pub const MiscError = struct {
|
||||
msg: []u8,
|
||||
children: ?AllErrors = null,
|
||||
|
||||
pub fn deinit(misc_err: *MiscError, gpa: *Allocator) void {
|
||||
pub fn deinit(misc_err: *MiscError, gpa: Allocator) void {
|
||||
gpa.free(misc_err.msg);
|
||||
if (misc_err.children) |*children| {
|
||||
children.deinit(gpa);
|
||||
@ -402,7 +402,7 @@ pub const AllErrors = struct {
|
||||
}
|
||||
};
|
||||
|
||||
pub fn deinit(self: *AllErrors, gpa: *Allocator) void {
|
||||
pub fn deinit(self: *AllErrors, gpa: Allocator) void {
|
||||
self.arena.promote(gpa).deinit();
|
||||
}
|
||||
|
||||
@ -456,7 +456,7 @@ pub const AllErrors = struct {
|
||||
}
|
||||
|
||||
pub fn addZir(
|
||||
arena: *Allocator,
|
||||
arena: Allocator,
|
||||
errors: *std.ArrayList(Message),
|
||||
file: *Module.File,
|
||||
) !void {
|
||||
@ -559,7 +559,7 @@ pub const AllErrors = struct {
|
||||
}
|
||||
}
|
||||
|
||||
fn dupeList(list: []const Message, arena: *Allocator) Allocator.Error![]Message {
|
||||
fn dupeList(list: []const Message, arena: Allocator) Allocator.Error![]Message {
|
||||
const duped_list = try arena.alloc(Message, list.len);
|
||||
for (list) |item, i| {
|
||||
duped_list[i] = switch (item) {
|
||||
@ -589,7 +589,7 @@ pub const Directory = struct {
|
||||
path: ?[]const u8,
|
||||
handle: std.fs.Dir,
|
||||
|
||||
pub fn join(self: Directory, allocator: *Allocator, paths: []const []const u8) ![]u8 {
|
||||
pub fn join(self: Directory, allocator: Allocator, paths: []const []const u8) ![]u8 {
|
||||
if (self.path) |p| {
|
||||
// TODO clean way to do this with only 1 allocation
|
||||
const part2 = try std.fs.path.join(allocator, paths);
|
||||
@ -600,7 +600,7 @@ pub const Directory = struct {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn joinZ(self: Directory, allocator: *Allocator, paths: []const []const u8) ![:0]u8 {
|
||||
pub fn joinZ(self: Directory, allocator: Allocator, paths: []const []const u8) ![:0]u8 {
|
||||
if (self.path) |p| {
|
||||
// TODO clean way to do this with only 1 allocation
|
||||
const part2 = try std.fs.path.join(allocator, paths);
|
||||
@ -829,7 +829,7 @@ fn addPackageTableToCacheHash(
|
||||
}
|
||||
}
|
||||
|
||||
pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
|
||||
pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
|
||||
const is_dyn_lib = switch (options.output_mode) {
|
||||
.Obj, .Exe => false,
|
||||
.Lib => (options.link_mode orelse .Static) == .Dynamic,
|
||||
@ -3263,7 +3263,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: *std.P
|
||||
};
|
||||
}
|
||||
|
||||
pub fn tmpFilePath(comp: *Compilation, arena: *Allocator, suffix: []const u8) error{OutOfMemory}![]const u8 {
|
||||
pub fn tmpFilePath(comp: *Compilation, arena: Allocator, suffix: []const u8) error{OutOfMemory}![]const u8 {
|
||||
const s = std.fs.path.sep_str;
|
||||
const rand_int = std.crypto.random.int(u64);
|
||||
if (comp.local_cache_directory.path) |p| {
|
||||
@ -3275,7 +3275,7 @@ pub fn tmpFilePath(comp: *Compilation, arena: *Allocator, suffix: []const u8) er
|
||||
|
||||
pub fn addTranslateCCArgs(
|
||||
comp: *Compilation,
|
||||
arena: *Allocator,
|
||||
arena: Allocator,
|
||||
argv: *std.ArrayList([]const u8),
|
||||
ext: FileExt,
|
||||
out_dep_path: ?[]const u8,
|
||||
@ -3289,7 +3289,7 @@ pub fn addTranslateCCArgs(
|
||||
/// Add common C compiler args between translate-c and C object compilation.
|
||||
pub fn addCCArgs(
|
||||
comp: *const Compilation,
|
||||
arena: *Allocator,
|
||||
arena: Allocator,
|
||||
argv: *std.ArrayList([]const u8),
|
||||
ext: FileExt,
|
||||
out_dep_path: ?[]const u8,
|
||||
@ -3776,7 +3776,7 @@ const LibCDirs = struct {
|
||||
libc_installation: ?*const LibCInstallation,
|
||||
};
|
||||
|
||||
fn getZigShippedLibCIncludeDirsDarwin(arena: *Allocator, zig_lib_dir: []const u8, target: Target) !LibCDirs {
|
||||
fn getZigShippedLibCIncludeDirsDarwin(arena: Allocator, zig_lib_dir: []const u8, target: Target) !LibCDirs {
|
||||
const arch_name = @tagName(target.cpu.arch);
|
||||
const os_name = try std.fmt.allocPrint(arena, "{s}.{d}", .{
|
||||
@tagName(target.os.tag),
|
||||
@ -3808,7 +3808,7 @@ fn getZigShippedLibCIncludeDirsDarwin(arena: *Allocator, zig_lib_dir: []const u8
|
||||
}
|
||||
|
||||
fn detectLibCIncludeDirs(
|
||||
arena: *Allocator,
|
||||
arena: Allocator,
|
||||
zig_lib_dir: []const u8,
|
||||
target: Target,
|
||||
is_native_abi: bool,
|
||||
@ -3933,7 +3933,7 @@ fn detectLibCIncludeDirs(
|
||||
};
|
||||
}
|
||||
|
||||
fn detectLibCFromLibCInstallation(arena: *Allocator, target: Target, lci: *const LibCInstallation) !LibCDirs {
|
||||
fn detectLibCFromLibCInstallation(arena: Allocator, target: Target, lci: *const LibCInstallation) !LibCDirs {
|
||||
var list = try std.ArrayList([]const u8).initCapacity(arena, 4);
|
||||
|
||||
list.appendAssumeCapacity(lci.include_dir.?);
|
||||
@ -3965,7 +3965,7 @@ fn detectLibCFromLibCInstallation(arena: *Allocator, target: Target, lci: *const
|
||||
};
|
||||
}
|
||||
|
||||
pub fn get_libc_crt_file(comp: *Compilation, arena: *Allocator, basename: []const u8) ![]const u8 {
|
||||
pub fn get_libc_crt_file(comp: *Compilation, arena: Allocator, basename: []const u8) ![]const u8 {
|
||||
if (comp.wantBuildGLibCFromSource() or
|
||||
comp.wantBuildMuslFromSource() or
|
||||
comp.wantBuildMinGWFromSource() or
|
||||
@ -4066,7 +4066,7 @@ pub fn dump_argv(argv: []const []const u8) void {
|
||||
std.debug.print("{s}\n", .{argv[argv.len - 1]});
|
||||
}
|
||||
|
||||
pub fn generateBuiltinZigSource(comp: *Compilation, allocator: *Allocator) Allocator.Error![]u8 {
|
||||
pub fn generateBuiltinZigSource(comp: *Compilation, allocator: Allocator) Allocator.Error![]u8 {
|
||||
const t = trace(@src());
|
||||
defer t.end();
|
||||
|
||||
@ -4717,14 +4717,14 @@ fn updateStage1Module(comp: *Compilation, main_progress_node: *std.Progress.Node
|
||||
comp.stage1_lock = man.toOwnedLock();
|
||||
}
|
||||
|
||||
fn stage1LocPath(arena: *Allocator, opt_loc: ?EmitLoc, cache_directory: Directory) ![]const u8 {
|
||||
fn stage1LocPath(arena: Allocator, opt_loc: ?EmitLoc, cache_directory: Directory) ![]const u8 {
|
||||
const loc = opt_loc orelse return "";
|
||||
const directory = loc.directory orelse cache_directory;
|
||||
return directory.join(arena, &[_][]const u8{loc.basename});
|
||||
}
|
||||
|
||||
fn createStage1Pkg(
|
||||
arena: *Allocator,
|
||||
arena: Allocator,
|
||||
name: []const u8,
|
||||
pkg: *Package,
|
||||
parent_pkg: ?*stage1.Pkg,
|
||||
|
||||
@ -51,7 +51,7 @@ pub const SwitchBr = struct {
|
||||
else_death_count: u32,
|
||||
};
|
||||
|
||||
pub fn analyze(gpa: *Allocator, air: Air, zir: Zir) Allocator.Error!Liveness {
|
||||
pub fn analyze(gpa: Allocator, air: Air, zir: Zir) Allocator.Error!Liveness {
|
||||
const tracy = trace(@src());
|
||||
defer tracy.end();
|
||||
|
||||
@ -136,7 +136,7 @@ pub fn getCondBr(l: Liveness, inst: Air.Inst.Index) CondBrSlices {
|
||||
};
|
||||
}
|
||||
|
||||
pub fn deinit(l: *Liveness, gpa: *Allocator) void {
|
||||
pub fn deinit(l: *Liveness, gpa: Allocator) void {
|
||||
gpa.free(l.tomb_bits);
|
||||
gpa.free(l.extra);
|
||||
l.special.deinit(gpa);
|
||||
@ -150,7 +150,7 @@ pub const OperandInt = std.math.Log2Int(Bpi);
|
||||
|
||||
/// In-progress data; on successful analysis converted into `Liveness`.
|
||||
const Analysis = struct {
|
||||
gpa: *Allocator,
|
||||
gpa: Allocator,
|
||||
air: Air,
|
||||
table: std.AutoHashMapUnmanaged(Air.Inst.Index, void),
|
||||
tomb_bits: []usize,
|
||||
|
||||
@ -30,7 +30,7 @@ const target_util = @import("target.zig");
|
||||
const build_options = @import("build_options");
|
||||
|
||||
/// General-purpose allocator. Used for both temporary and long-term storage.
|
||||
gpa: *Allocator,
|
||||
gpa: Allocator,
|
||||
comp: *Compilation,
|
||||
|
||||
/// Where our incremental compilation metadata serialization will go.
|
||||
@ -299,10 +299,10 @@ pub const CaptureScope = struct {
|
||||
pub const WipCaptureScope = struct {
|
||||
scope: *CaptureScope,
|
||||
finalized: bool,
|
||||
gpa: *Allocator,
|
||||
perm_arena: *Allocator,
|
||||
gpa: Allocator,
|
||||
perm_arena: Allocator,
|
||||
|
||||
pub fn init(gpa: *Allocator, perm_arena: *Allocator, parent: ?*CaptureScope) !@This() {
|
||||
pub fn init(gpa: Allocator, perm_arena: Allocator, parent: ?*CaptureScope) !@This() {
|
||||
const scope = try perm_arena.create(CaptureScope);
|
||||
scope.* = .{ .parent = parent };
|
||||
return @This(){
|
||||
@ -469,7 +469,7 @@ pub const Decl = struct {
|
||||
|
||||
pub const DepsTable = std.AutoArrayHashMapUnmanaged(*Decl, void);
|
||||
|
||||
pub fn clearName(decl: *Decl, gpa: *Allocator) void {
|
||||
pub fn clearName(decl: *Decl, gpa: Allocator) void {
|
||||
gpa.free(mem.sliceTo(decl.name, 0));
|
||||
decl.name = undefined;
|
||||
}
|
||||
@ -499,7 +499,7 @@ pub const Decl = struct {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn clearValues(decl: *Decl, gpa: *Allocator) void {
|
||||
pub fn clearValues(decl: *Decl, gpa: Allocator) void {
|
||||
if (decl.getFunction()) |func| {
|
||||
func.deinit(gpa);
|
||||
gpa.destroy(func);
|
||||
@ -636,7 +636,7 @@ pub const Decl = struct {
|
||||
return decl.src_namespace.renderFullyQualifiedDebugName(unqualified_name, writer);
|
||||
}
|
||||
|
||||
pub fn getFullyQualifiedName(decl: Decl, gpa: *Allocator) ![:0]u8 {
|
||||
pub fn getFullyQualifiedName(decl: Decl, gpa: Allocator) ![:0]u8 {
|
||||
var buffer = std.ArrayList(u8).init(gpa);
|
||||
defer buffer.deinit();
|
||||
try decl.renderFullyQualifiedName(buffer.writer());
|
||||
@ -855,7 +855,7 @@ pub const Struct = struct {
|
||||
is_comptime: bool,
|
||||
};
|
||||
|
||||
pub fn getFullyQualifiedName(s: *Struct, gpa: *Allocator) ![:0]u8 {
|
||||
pub fn getFullyQualifiedName(s: *Struct, gpa: Allocator) ![:0]u8 {
|
||||
return s.owner_decl.getFullyQualifiedName(gpa);
|
||||
}
|
||||
|
||||
@ -999,7 +999,7 @@ pub const Union = struct {
|
||||
|
||||
pub const Fields = std.StringArrayHashMapUnmanaged(Field);
|
||||
|
||||
pub fn getFullyQualifiedName(s: *Union, gpa: *Allocator) ![:0]u8 {
|
||||
pub fn getFullyQualifiedName(s: *Union, gpa: Allocator) ![:0]u8 {
|
||||
return s.owner_decl.getFullyQualifiedName(gpa);
|
||||
}
|
||||
|
||||
@ -1178,7 +1178,7 @@ pub const Opaque = struct {
|
||||
};
|
||||
}
|
||||
|
||||
pub fn getFullyQualifiedName(s: *Opaque, gpa: *Allocator) ![:0]u8 {
|
||||
pub fn getFullyQualifiedName(s: *Opaque, gpa: Allocator) ![:0]u8 {
|
||||
return s.owner_decl.getFullyQualifiedName(gpa);
|
||||
}
|
||||
};
|
||||
@ -1225,7 +1225,7 @@ pub const Fn = struct {
|
||||
success,
|
||||
};
|
||||
|
||||
pub fn deinit(func: *Fn, gpa: *Allocator) void {
|
||||
pub fn deinit(func: *Fn, gpa: Allocator) void {
|
||||
if (func.getInferredErrorSet()) |map| {
|
||||
map.deinit(gpa);
|
||||
}
|
||||
@ -1422,27 +1422,27 @@ pub const File = struct {
|
||||
/// successful, this field is unloaded.
|
||||
prev_zir: ?*Zir = null,
|
||||
|
||||
pub fn unload(file: *File, gpa: *Allocator) void {
|
||||
pub fn unload(file: *File, gpa: Allocator) void {
|
||||
file.unloadTree(gpa);
|
||||
file.unloadSource(gpa);
|
||||
file.unloadZir(gpa);
|
||||
}
|
||||
|
||||
pub fn unloadTree(file: *File, gpa: *Allocator) void {
|
||||
pub fn unloadTree(file: *File, gpa: Allocator) void {
|
||||
if (file.tree_loaded) {
|
||||
file.tree_loaded = false;
|
||||
file.tree.deinit(gpa);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn unloadSource(file: *File, gpa: *Allocator) void {
|
||||
pub fn unloadSource(file: *File, gpa: Allocator) void {
|
||||
if (file.source_loaded) {
|
||||
file.source_loaded = false;
|
||||
gpa.free(file.source);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn unloadZir(file: *File, gpa: *Allocator) void {
|
||||
pub fn unloadZir(file: *File, gpa: Allocator) void {
|
||||
if (file.zir_loaded) {
|
||||
file.zir_loaded = false;
|
||||
file.zir.deinit(gpa);
|
||||
@ -1466,7 +1466,7 @@ pub const File = struct {
|
||||
file.* = undefined;
|
||||
}
|
||||
|
||||
pub fn getSource(file: *File, gpa: *Allocator) ![:0]const u8 {
|
||||
pub fn getSource(file: *File, gpa: Allocator) ![:0]const u8 {
|
||||
if (file.source_loaded) return file.source;
|
||||
|
||||
const root_dir_path = file.pkg.root_src_directory.path orelse ".";
|
||||
@ -1499,7 +1499,7 @@ pub const File = struct {
|
||||
return source;
|
||||
}
|
||||
|
||||
pub fn getTree(file: *File, gpa: *Allocator) !*const Ast {
|
||||
pub fn getTree(file: *File, gpa: Allocator) !*const Ast {
|
||||
if (file.tree_loaded) return &file.tree;
|
||||
|
||||
const source = try file.getSource(gpa);
|
||||
@ -1531,7 +1531,7 @@ pub const File = struct {
|
||||
};
|
||||
}
|
||||
|
||||
pub fn fullyQualifiedNameZ(file: File, gpa: *Allocator) ![:0]u8 {
|
||||
pub fn fullyQualifiedNameZ(file: File, gpa: Allocator) ![:0]u8 {
|
||||
var buf = std.ArrayList(u8).init(gpa);
|
||||
defer buf.deinit();
|
||||
try file.renderFullyQualifiedName(buf.writer());
|
||||
@ -1539,7 +1539,7 @@ pub const File = struct {
|
||||
}
|
||||
|
||||
/// Returns the full path to this file relative to its package.
|
||||
pub fn fullPath(file: File, ally: *Allocator) ![]u8 {
|
||||
pub fn fullPath(file: File, ally: Allocator) ![]u8 {
|
||||
return file.pkg.root_src_directory.join(ally, &[_][]const u8{file.sub_file_path});
|
||||
}
|
||||
|
||||
@ -1594,7 +1594,7 @@ pub const ErrorMsg = struct {
|
||||
notes: []ErrorMsg = &.{},
|
||||
|
||||
pub fn create(
|
||||
gpa: *Allocator,
|
||||
gpa: Allocator,
|
||||
src_loc: SrcLoc,
|
||||
comptime format: []const u8,
|
||||
args: anytype,
|
||||
@ -1607,13 +1607,13 @@ pub const ErrorMsg = struct {
|
||||
|
||||
/// Assumes the ErrorMsg struct and msg were both allocated with `gpa`,
|
||||
/// as well as all notes.
|
||||
pub fn destroy(err_msg: *ErrorMsg, gpa: *Allocator) void {
|
||||
pub fn destroy(err_msg: *ErrorMsg, gpa: Allocator) void {
|
||||
err_msg.deinit(gpa);
|
||||
gpa.destroy(err_msg);
|
||||
}
|
||||
|
||||
pub fn init(
|
||||
gpa: *Allocator,
|
||||
gpa: Allocator,
|
||||
src_loc: SrcLoc,
|
||||
comptime format: []const u8,
|
||||
args: anytype,
|
||||
@ -1624,7 +1624,7 @@ pub const ErrorMsg = struct {
|
||||
};
|
||||
}
|
||||
|
||||
pub fn deinit(err_msg: *ErrorMsg, gpa: *Allocator) void {
|
||||
pub fn deinit(err_msg: *ErrorMsg, gpa: Allocator) void {
|
||||
for (err_msg.notes) |*note| {
|
||||
note.deinit(gpa);
|
||||
}
|
||||
@ -1651,7 +1651,7 @@ pub const SrcLoc = struct {
|
||||
return @bitCast(Ast.Node.Index, offset + @bitCast(i32, src_loc.parent_decl_node));
|
||||
}
|
||||
|
||||
pub fn byteOffset(src_loc: SrcLoc, gpa: *Allocator) !u32 {
|
||||
pub fn byteOffset(src_loc: SrcLoc, gpa: Allocator) !u32 {
|
||||
switch (src_loc.lazy) {
|
||||
.unneeded => unreachable,
|
||||
.entire_file => return 0,
|
||||
@ -2066,7 +2066,7 @@ pub const SrcLoc = struct {
|
||||
|
||||
pub fn byteOffsetBuiltinCallArg(
|
||||
src_loc: SrcLoc,
|
||||
gpa: *Allocator,
|
||||
gpa: Allocator,
|
||||
node_off: i32,
|
||||
arg_index: u32,
|
||||
) !u32 {
|
||||
@ -2464,7 +2464,7 @@ pub fn deinit(mod: *Module) void {
|
||||
}
|
||||
}
|
||||
|
||||
fn freeExportList(gpa: *Allocator, export_list: []*Export) void {
|
||||
fn freeExportList(gpa: Allocator, export_list: []*Export) void {
|
||||
for (export_list) |exp| {
|
||||
gpa.free(exp.options.name);
|
||||
if (exp.options.section) |s| gpa.free(s);
|
||||
@ -2871,7 +2871,7 @@ pub fn astGenFile(mod: *Module, file: *File) !void {
|
||||
/// * Decl.zir_index
|
||||
/// * Fn.zir_body_inst
|
||||
/// * Decl.zir_decl_index
|
||||
fn updateZirRefs(gpa: *Allocator, file: *File, old_zir: Zir) !void {
|
||||
fn updateZirRefs(gpa: Allocator, file: *File, old_zir: Zir) !void {
|
||||
const new_zir = file.zir;
|
||||
|
||||
// Maps from old ZIR to new ZIR, struct_decl, enum_decl, etc. Any instruction which
|
||||
@ -2965,7 +2965,7 @@ fn updateZirRefs(gpa: *Allocator, file: *File, old_zir: Zir) !void {
|
||||
}
|
||||
|
||||
pub fn mapOldZirToNew(
|
||||
gpa: *Allocator,
|
||||
gpa: Allocator,
|
||||
old_zir: Zir,
|
||||
new_zir: Zir,
|
||||
inst_map: *std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index),
|
||||
@ -4119,7 +4119,7 @@ fn deleteDeclExports(mod: *Module, decl: *Decl) void {
|
||||
mod.gpa.free(kv.value);
|
||||
}
|
||||
|
||||
pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn, arena: *Allocator) SemaError!Air {
|
||||
pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn, arena: Allocator) SemaError!Air {
|
||||
const tracy = trace(@src());
|
||||
defer tracy.end();
|
||||
|
||||
@ -4427,7 +4427,7 @@ pub fn getNextAnonNameIndex(mod: *Module) usize {
|
||||
return @atomicRmw(usize, &mod.next_anon_name_index, .Add, 1, .Monotonic);
|
||||
}
|
||||
|
||||
pub fn makeIntType(arena: *Allocator, signedness: std.builtin.Signedness, bits: u16) !Type {
|
||||
pub fn makeIntType(arena: Allocator, signedness: std.builtin.Signedness, bits: u16) !Type {
|
||||
const int_payload = try arena.create(Type.Payload.Bits);
|
||||
int_payload.* = .{
|
||||
.base = .{
|
||||
@ -4459,7 +4459,7 @@ pub fn errNoteNonLazy(
|
||||
}
|
||||
|
||||
pub fn errorUnionType(
|
||||
arena: *Allocator,
|
||||
arena: Allocator,
|
||||
error_set: Type,
|
||||
payload: Type,
|
||||
) Allocator.Error!Type {
|
||||
@ -4511,7 +4511,7 @@ pub const SwitchProngSrc = union(enum) {
|
||||
/// the LazySrcLoc in order to emit a compile error.
|
||||
pub fn resolve(
|
||||
prong_src: SwitchProngSrc,
|
||||
gpa: *Allocator,
|
||||
gpa: Allocator,
|
||||
decl: *Decl,
|
||||
switch_node_offset: i32,
|
||||
range_expand: RangeExpand,
|
||||
@ -4605,7 +4605,7 @@ pub const PeerTypeCandidateSrc = union(enum) {
|
||||
|
||||
pub fn resolve(
|
||||
self: PeerTypeCandidateSrc,
|
||||
gpa: *Allocator,
|
||||
gpa: Allocator,
|
||||
decl: *Decl,
|
||||
candidate_i: usize,
|
||||
) ?LazySrcLoc {
|
||||
|
||||
@ -21,7 +21,7 @@ root_src_directory_owned: bool = false,
|
||||
|
||||
/// Allocate a Package. No references to the slices passed are kept.
|
||||
pub fn create(
|
||||
gpa: *Allocator,
|
||||
gpa: Allocator,
|
||||
/// Null indicates the current working directory
|
||||
root_src_dir_path: ?[]const u8,
|
||||
/// Relative to root_src_dir_path
|
||||
@ -49,7 +49,7 @@ pub fn create(
|
||||
}
|
||||
|
||||
pub fn createWithDir(
|
||||
gpa: *Allocator,
|
||||
gpa: Allocator,
|
||||
directory: Compilation.Directory,
|
||||
/// Relative to `directory`. If null, means `directory` is the root src dir
|
||||
/// and is owned externally.
|
||||
@ -87,7 +87,7 @@ pub fn createWithDir(
|
||||
|
||||
/// Free all memory associated with this package. It does not destroy any packages
|
||||
/// inside its table; the caller is responsible for calling destroy() on them.
|
||||
pub fn destroy(pkg: *Package, gpa: *Allocator) void {
|
||||
pub fn destroy(pkg: *Package, gpa: Allocator) void {
|
||||
gpa.free(pkg.root_src_path);
|
||||
|
||||
if (pkg.root_src_directory_owned) {
|
||||
@ -104,7 +104,7 @@ pub fn destroy(pkg: *Package, gpa: *Allocator) void {
|
||||
}
|
||||
|
||||
/// Only frees memory associated with the table.
|
||||
pub fn deinitTable(pkg: *Package, gpa: *Allocator) void {
|
||||
pub fn deinitTable(pkg: *Package, gpa: Allocator) void {
|
||||
var it = pkg.table.keyIterator();
|
||||
while (it.next()) |key| {
|
||||
gpa.free(key.*);
|
||||
@ -113,13 +113,13 @@ pub fn deinitTable(pkg: *Package, gpa: *Allocator) void {
|
||||
pkg.table.deinit(gpa);
|
||||
}
|
||||
|
||||
pub fn add(pkg: *Package, gpa: *Allocator, name: []const u8, package: *Package) !void {
|
||||
pub fn add(pkg: *Package, gpa: Allocator, name: []const u8, package: *Package) !void {
|
||||
try pkg.table.ensureUnusedCapacity(gpa, 1);
|
||||
const name_dupe = try gpa.dupe(u8, name);
|
||||
pkg.table.putAssumeCapacityNoClobber(name_dupe, package);
|
||||
}
|
||||
|
||||
pub fn addAndAdopt(parent: *Package, gpa: *Allocator, name: []const u8, child: *Package) !void {
|
||||
pub fn addAndAdopt(parent: *Package, gpa: Allocator, name: []const u8, child: *Package) !void {
|
||||
assert(child.parent == null); // make up your mind, who is the parent??
|
||||
child.parent = parent;
|
||||
return parent.add(gpa, name, child);
|
||||
|
||||
@ -13,7 +13,7 @@ pub const Range = struct {
|
||||
src: SwitchProngSrc,
|
||||
};
|
||||
|
||||
pub fn init(allocator: *std.mem.Allocator) RangeSet {
|
||||
pub fn init(allocator: std.mem.Allocator) RangeSet {
|
||||
return .{
|
||||
.ranges = std.ArrayList(Range).init(allocator),
|
||||
};

src/Sema.zig
@ -7,13 +7,13 @@
|
||||
|
||||
mod: *Module,
|
||||
/// Alias to `mod.gpa`.
|
||||
gpa: *Allocator,
|
||||
gpa: Allocator,
|
||||
/// Points to the temporary arena allocator of the Sema.
|
||||
/// This arena will be cleared when the sema is destroyed.
|
||||
arena: *Allocator,
|
||||
arena: Allocator,
|
||||
/// Points to the arena allocator for the owner_decl.
|
||||
/// This arena will persist until the decl is invalidated.
|
||||
perm_arena: *Allocator,
|
||||
perm_arena: Allocator,
|
||||
code: Zir,
|
||||
air_instructions: std.MultiArrayList(Air.Inst) = .{},
|
||||
air_extra: std.ArrayListUnmanaged(u32) = .{},
|
||||
@ -417,7 +417,7 @@ pub const Block = struct {
|
||||
new_decl_arena: std.heap.ArenaAllocator,
|
||||
finished: bool,
|
||||
|
||||
pub fn arena(wad: *WipAnonDecl) *Allocator {
|
||||
pub fn arena(wad: *WipAnonDecl) Allocator {
|
||||
return &wad.new_decl_arena.allocator;
|
||||
}
|
||||
|
||||
@ -12793,7 +12793,7 @@ const ComptimePtrMutationKit = struct {
|
||||
ty: Type,
|
||||
decl_arena: std.heap.ArenaAllocator = undefined,
|
||||
|
||||
fn beginArena(self: *ComptimePtrMutationKit, gpa: *Allocator) *Allocator {
|
||||
fn beginArena(self: *ComptimePtrMutationKit, gpa: Allocator) Allocator {
|
||||
self.decl_arena = self.decl_ref_mut.decl.value_arena.?.promote(gpa);
|
||||
return &self.decl_arena.allocator;
|
||||
}
|
||||
|
||||
@ -9,7 +9,7 @@ const ThreadPool = @This();
|
||||
|
||||
mutex: std.Thread.Mutex = .{},
|
||||
is_running: bool = true,
|
||||
allocator: *std.mem.Allocator,
|
||||
allocator: std.mem.Allocator,
|
||||
workers: []Worker,
|
||||
run_queue: RunQueue = .{},
|
||||
idle_queue: IdleQueue = .{},
|
||||
@ -55,7 +55,7 @@ const Worker = struct {
|
||||
}
|
||||
};
|
||||
|
||||
pub fn init(self: *ThreadPool, allocator: *std.mem.Allocator) !void {
|
||||
pub fn init(self: *ThreadPool, allocator: std.mem.Allocator) !void {
|
||||
self.* = .{
|
||||
.allocator = allocator,
|
||||
.workers = &[_]Worker{},
|
||||
|
||||
@ -16,14 +16,14 @@ pub const Managed = struct {
|
||||
/// If this is `null` then there is no memory management needed.
|
||||
arena: ?*std.heap.ArenaAllocator.State = null,
|
||||
|
||||
pub fn deinit(self: *Managed, allocator: *Allocator) void {
|
||||
pub fn deinit(self: *Managed, allocator: Allocator) void {
|
||||
if (self.arena) |a| a.promote(allocator).deinit();
|
||||
self.* = undefined;
|
||||
}
|
||||
};
|
||||
|
||||
/// Assumes arena allocation. Does a recursive copy.
|
||||
pub fn copy(self: TypedValue, arena: *Allocator) error{OutOfMemory}!TypedValue {
|
||||
pub fn copy(self: TypedValue, arena: Allocator) error{OutOfMemory}!TypedValue {
|
||||
return TypedValue{
|
||||
.ty = try self.ty.copy(arena),
|
||||
.val = try self.val.copy(arena),
|
||||
|
||||
@ -101,7 +101,7 @@ pub fn hasCompileErrors(code: Zir) bool {
|
||||
return code.extra[@enumToInt(ExtraIndex.compile_errors)] != 0;
|
||||
}
|
||||
|
||||
pub fn deinit(code: *Zir, gpa: *Allocator) void {
|
||||
pub fn deinit(code: *Zir, gpa: Allocator) void {
|
||||
code.instructions.deinit(gpa);
|
||||
gpa.free(code.string_bytes);
|
||||
gpa.free(code.extra);
|
||||
|
||||
@ -33,7 +33,7 @@ const InnerError = error{
|
||||
CodegenFail,
|
||||
};
|
||||
|
||||
gpa: *Allocator,
|
||||
gpa: Allocator,
|
||||
air: Air,
|
||||
liveness: Liveness,
|
||||
bin_file: *link.File,
|
||||
@ -164,7 +164,7 @@ const MCValue = union(enum) {
|
||||
const Branch = struct {
|
||||
inst_table: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, MCValue) = .{},
|
||||
|
||||
fn deinit(self: *Branch, gpa: *Allocator) void {
|
||||
fn deinit(self: *Branch, gpa: Allocator) void {
|
||||
self.inst_table.deinit(gpa);
|
||||
self.* = undefined;
|
||||
}
|
||||
|
||||
@ -229,7 +229,7 @@ pub const Inst = struct {
|
||||
// }
|
||||
};
|
||||
|
||||
pub fn deinit(mir: *Mir, gpa: *std.mem.Allocator) void {
|
||||
pub fn deinit(mir: *Mir, gpa: std.mem.Allocator) void {
|
||||
mir.instructions.deinit(gpa);
|
||||
gpa.free(mir.extra);
|
||||
mir.* = undefined;
|
||||
|
||||
@ -33,7 +33,7 @@ const InnerError = error{
|
||||
CodegenFail,
|
||||
};
|
||||
|
||||
gpa: *Allocator,
|
||||
gpa: Allocator,
|
||||
air: Air,
|
||||
liveness: Liveness,
|
||||
bin_file: *link.File,
|
||||
@ -164,7 +164,7 @@ const MCValue = union(enum) {
|
||||
const Branch = struct {
|
||||
inst_table: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, MCValue) = .{},
|
||||
|
||||
fn deinit(self: *Branch, gpa: *Allocator) void {
|
||||
fn deinit(self: *Branch, gpa: Allocator) void {
|
||||
self.inst_table.deinit(gpa);
|
||||
self.* = undefined;
|
||||
}
|
||||
|
||||
@ -193,7 +193,7 @@ pub const Inst = struct {
|
||||
// }
|
||||
};
|
||||
|
||||
pub fn deinit(mir: *Mir, gpa: *std.mem.Allocator) void {
|
||||
pub fn deinit(mir: *Mir, gpa: std.mem.Allocator) void {
|
||||
mir.instructions.deinit(gpa);
|
||||
gpa.free(mir.extra);
|
||||
mir.* = undefined;
|
||||
|
||||
@ -33,7 +33,7 @@ const InnerError = error{
|
||||
CodegenFail,
|
||||
};
|
||||
|
||||
gpa: *Allocator,
|
||||
gpa: Allocator,
|
||||
air: Air,
|
||||
liveness: Liveness,
|
||||
bin_file: *link.File,
|
||||
@ -158,7 +158,7 @@ const MCValue = union(enum) {
|
||||
const Branch = struct {
|
||||
inst_table: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, MCValue) = .{},
|
||||
|
||||
fn deinit(self: *Branch, gpa: *Allocator) void {
|
||||
fn deinit(self: *Branch, gpa: Allocator) void {
|
||||
self.inst_table.deinit(gpa);
|
||||
self.* = undefined;
|
||||
}
|
||||
|
||||
@ -101,7 +101,7 @@ pub const Inst = struct {
|
||||
// }
|
||||
};
|
||||
|
||||
pub fn deinit(mir: *Mir, gpa: *std.mem.Allocator) void {
|
||||
pub fn deinit(mir: *Mir, gpa: std.mem.Allocator) void {
|
||||
mir.instructions.deinit(gpa);
|
||||
gpa.free(mir.extra);
|
||||
mir.* = undefined;
|
||||
|
||||
@ -508,7 +508,7 @@ const Self = @This();
|
||||
decl: *Decl,
|
||||
air: Air,
|
||||
liveness: Liveness,
|
||||
gpa: *mem.Allocator,
|
||||
gpa: mem.Allocator,
|
||||
/// Table to save `WValue`'s generated by an `Air.Inst`
|
||||
values: ValueTable,
|
||||
/// Mapping from Air.Inst.Index to block ids
|
||||
@ -983,7 +983,7 @@ const CallWValues = struct {
|
||||
args: []WValue,
|
||||
return_value: WValue,
|
||||
|
||||
fn deinit(self: *CallWValues, gpa: *Allocator) void {
|
||||
fn deinit(self: *CallWValues, gpa: Allocator) void {
|
||||
gpa.free(self.args);
|
||||
self.* = undefined;
|
||||
}
|
||||
|
||||
@ -411,7 +411,7 @@ pub const Inst = struct {
|
||||
};
|
||||
};
|
||||
|
||||
pub fn deinit(self: *Mir, gpa: *std.mem.Allocator) void {
|
||||
pub fn deinit(self: *Mir, gpa: std.mem.Allocator) void {
|
||||
self.instructions.deinit(gpa);
|
||||
gpa.free(self.extra);
|
||||
self.* = undefined;