Mirror of https://github.com/ziglang/zig.git (synced 2026-02-17 23:10:09 +00:00)
Update all ensureCapacity calls to the relevant non-deprecated version
This commit is contained in:
parent feeb25908b
commit 59f5053bed
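
The change is mechanical: the deprecated `ensureCapacity` is split into two explicit variants. `ensureTotalCapacity(n)` reserves capacity for an absolute total of at least n items, while `ensureUnusedCapacity(n)` reserves room for n more items beyond the current length, which is why call sites of the form `ensureCapacity(list.items.len + n)` become `ensureUnusedCapacity(n)` in the hunks below. A minimal sketch of the two variants on ArrayList (assuming the std API as of this commit, where allocators are passed as *Allocator and a GeneralPurposeAllocator exposes an `allocator` field; later Zig versions take the allocator by value via `gpa.allocator()`):

const std = @import("std");

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();

    var list = std.ArrayList(u8).init(&gpa.allocator);
    defer list.deinit();

    // Reserve capacity for an absolute total of at least 100 items.
    try list.ensureTotalCapacity(100);

    // Reserve room for 10 more items beyond list.items.len; this replaces
    // the old `ensureCapacity(list.items.len + 10)` pattern.
    try list.ensureUnusedCapacity(10);
    list.appendAssumeCapacity('a');
}

In the diff below, removed lines are marked with "-" and added lines with "+"; unmarked lines are unchanged context.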
@@ -90,7 +90,7 @@ pub fn ArrayHashMap(
/// Modifying the key is allowed only if it does not change the hash.
/// Modifying the value is allowed.
/// Entry pointers become invalid whenever this ArrayHashMap is modified,
- /// unless `ensureCapacity` was previously used.
+ /// unless `ensureTotalCapacity`/`ensureUnusedCapacity` was previously used.
pub const Entry = Unmanaged.Entry;

/// A KV pair which has been copied out of the backing store
@@ -110,7 +110,7 @@ pub fn ArrayHashMap(
/// Modifying the key is allowed only if it does not change the hash.
/// Modifying the value is allowed.
/// Entry pointers become invalid whenever this ArrayHashMap is modified,
- /// unless `ensureCapacity` was previously used.
+ /// unless `ensureTotalCapacity`/`ensureUnusedCapacity` was previously used.
pub const GetOrPutResult = Unmanaged.GetOrPutResult;

/// An Iterator over Entry pointers.
@@ -478,7 +478,7 @@ pub fn ArrayHashMapUnmanaged(
/// Modifying the key is allowed only if it does not change the hash.
/// Modifying the value is allowed.
/// Entry pointers become invalid whenever this ArrayHashMap is modified,
- /// unless `ensureCapacity` was previously used.
+ /// unless `ensureTotalCapacity`/`ensureUnusedCapacity` was previously used.
pub const Entry = struct {
key_ptr: *K,
value_ptr: *V,
@@ -509,7 +509,7 @@ pub fn ArrayHashMapUnmanaged(
/// Modifying the key is allowed only if it does not change the hash.
/// Modifying the value is allowed.
/// Entry pointers become invalid whenever this ArrayHashMap is modified,
- /// unless `ensureCapacity` was previously used.
+ /// unless `ensureTotalCapacity`/`ensureUnusedCapacity` was previously used.
pub const GetOrPutResult = struct {
key_ptr: *K,
value_ptr: *V,
@@ -759,20 +759,20 @@ pub fn ArrayHashMapUnmanaged(
}
pub fn ensureTotalCapacityContext(self: *Self, allocator: *Allocator, new_capacity: usize, ctx: Context) !void {
if (new_capacity <= linear_scan_max) {
- try self.entries.ensureCapacity(allocator, new_capacity);
+ try self.entries.ensureTotalCapacity(allocator, new_capacity);
return;
}

if (self.index_header) |header| {
if (new_capacity <= header.capacity()) {
- try self.entries.ensureCapacity(allocator, new_capacity);
+ try self.entries.ensureTotalCapacity(allocator, new_capacity);
return;
}
}

const new_bit_index = try IndexHeader.findBitIndex(new_capacity);
const new_header = try IndexHeader.alloc(allocator, new_bit_index);
- try self.entries.ensureCapacity(allocator, new_capacity);
+ try self.entries.ensureTotalCapacity(allocator, new_capacity);

if (self.index_header) |old_header| old_header.free(allocator);
self.insertAllEntriesIntoNewHeader(if (store_hash) {} else ctx, new_header);
@@ -1441,7 +1441,7 @@ pub fn ArrayHashMapUnmanaged(
unreachable;
}

- /// Must ensureCapacity before calling this.
+ /// Must `ensureTotalCapacity`/`ensureUnusedCapacity` before calling this.
fn getOrPutInternal(self: *Self, key: anytype, ctx: anytype, header: *IndexHeader, comptime I: type) GetOrPutResult {
const slice = self.entries.slice();
const hashes_array = if (store_hash) slice.items(.hash) else {};
@@ -1485,7 +1485,7 @@ pub fn ArrayHashMapUnmanaged(
}

// This pointer survives the following append because we call
- // entries.ensureCapacity before getOrPutInternal.
+ // entries.ensureTotalCapacity before getOrPutInternal.
const hash_match = if (store_hash) h == hashes_array[slot_data.entry_index] else true;
if (hash_match and checkedEql(ctx, key, keys_array[slot_data.entry_index])) {
return .{
@@ -1946,7 +1946,7 @@ test "iterator hash map" {
var reset_map = AutoArrayHashMap(i32, i32).init(std.testing.allocator);
defer reset_map.deinit();

- // test ensureCapacity with a 0 parameter
+ // test ensureTotalCapacity with a 0 parameter
try reset_map.ensureTotalCapacity(0);

try reset_map.putNoClobber(0, 11);

@@ -195,7 +195,7 @@ pub const ChildProcess = struct {
};

var dead_fds: usize = 0;
- // We ask for ensureCapacity with this much extra space. This has more of an
+ // We ask for ensureTotalCapacity with this much extra space. This has more of an
// effect on small reads because once the reads start to get larger the amount
// of space an ArrayList will allocate grows exponentially.
const bump_amt = 512;
@@ -215,7 +215,7 @@ pub const ChildProcess = struct {
if (poll_fds[0].revents & os.POLL.IN != 0) {
// stdout is ready.
const new_capacity = std.math.min(stdout.items.len + bump_amt, max_output_bytes);
- try stdout.ensureCapacity(new_capacity);
+ try stdout.ensureTotalCapacity(new_capacity);
const buf = stdout.unusedCapacitySlice();
if (buf.len == 0) return error.StdoutStreamTooLong;
const nread = try os.read(poll_fds[0].fd, buf);
@@ -230,7 +230,7 @@ pub const ChildProcess = struct {
if (poll_fds[1].revents & os.POLL.IN != 0) {
// stderr is ready.
const new_capacity = std.math.min(stderr.items.len + bump_amt, max_output_bytes);
- try stderr.ensureCapacity(new_capacity);
+ try stderr.ensureTotalCapacity(new_capacity);
const buf = stderr.unusedCapacitySlice();
if (buf.len == 0) return error.StderrStreamTooLong;
const nread = try os.read(poll_fds[1].fd, buf);
@@ -276,7 +276,7 @@ pub const ChildProcess = struct {

// Windows Async IO requires an initial call to ReadFile before waiting on the handle
for ([_]u1{ 0, 1 }) |i| {
- try outs[i].ensureCapacity(bump_amt);
+ try outs[i].ensureTotalCapacity(bump_amt);
const buf = outs[i].unusedCapacitySlice();
_ = windows.kernel32.ReadFile(handles[i], buf.ptr, math.cast(u32, buf.len) catch maxInt(u32), null, &overlapped[i]);
wait_objects[wait_object_count] = handles[i];
@@ -318,7 +318,7 @@ pub const ChildProcess = struct {

outs[i].items.len += read_bytes;
const new_capacity = std.math.min(outs[i].items.len + bump_amt, max_output_bytes);
- try outs[i].ensureCapacity(new_capacity);
+ try outs[i].ensureTotalCapacity(new_capacity);
const buf = outs[i].unusedCapacitySlice();
if (buf.len == 0) return if (i == 0) error.StdoutStreamTooLong else error.StderrStreamTooLong;
_ = windows.kernel32.ReadFile(handles[i], buf.ptr, math.cast(u32, buf.len) catch maxInt(u32), null, &overlapped[i]);

@@ -277,7 +277,7 @@ pub const Coff = struct {
if (self.sections.items.len == self.coff_header.number_of_sections)
return;

- try self.sections.ensureCapacity(self.coff_header.number_of_sections);
+ try self.sections.ensureTotalCapacity(self.coff_header.number_of_sections);

const in = self.in_file.reader();

@@ -1568,11 +1568,11 @@ test "std.hash_map basic usage" {
try expectEqual(total, sum);
}

- test "std.hash_map ensureCapacity" {
+ test "std.hash_map ensureTotalCapacity" {
var map = AutoHashMap(i32, i32).init(std.testing.allocator);
defer map.deinit();

- try map.ensureCapacity(20);
+ try map.ensureTotalCapacity(20);
const initial_capacity = map.capacity();
try testing.expect(initial_capacity >= 20);
var i: i32 = 0;
@@ -1583,13 +1583,13 @@ test "std.hash_map ensureCapacity" {
try testing.expect(initial_capacity == map.capacity());
}

- test "std.hash_map ensureCapacity with tombstones" {
+ test "std.hash_map ensureUnusedCapacity with tombstones" {
var map = AutoHashMap(i32, i32).init(std.testing.allocator);
defer map.deinit();

var i: i32 = 0;
while (i < 100) : (i += 1) {
- try map.ensureCapacity(@intCast(u32, map.count() + 1));
+ try map.ensureUnusedCapacity(1);
map.putAssumeCapacity(i, i);
// Remove to create tombstones that still count as load in the hashmap.
_ = map.remove(i);
@@ -1669,7 +1669,7 @@ test "std.hash_map clone" {
try expectEqual(b.get(3).?, 3);
}

- test "std.hash_map ensureCapacity with existing elements" {
+ test "std.hash_map ensureTotalCapacity with existing elements" {
var map = AutoHashMap(u32, u32).init(std.testing.allocator);
defer map.deinit();

@@ -1677,16 +1677,16 @@ test "std.hash_map ensureCapacity with existing elements" {
try expectEqual(map.count(), 1);
try expectEqual(map.capacity(), @TypeOf(map).Unmanaged.minimal_capacity);

- try map.ensureCapacity(65);
+ try map.ensureTotalCapacity(65);
try expectEqual(map.count(), 1);
try expectEqual(map.capacity(), 128);
}

- test "std.hash_map ensureCapacity satisfies max load factor" {
+ test "std.hash_map ensureTotalCapacity satisfies max load factor" {
var map = AutoHashMap(u32, u32).init(std.testing.allocator);
defer map.deinit();

- try map.ensureCapacity(127);
+ try map.ensureTotalCapacity(127);
try expectEqual(map.capacity(), 256);
}

@@ -1870,7 +1870,7 @@ test "std.hash_map putAssumeCapacity" {
var map = AutoHashMap(u32, u32).init(std.testing.allocator);
defer map.deinit();

- try map.ensureCapacity(20);
+ try map.ensureTotalCapacity(20);
var i: u32 = 0;
while (i < 20) : (i += 1) {
map.putAssumeCapacityNoClobber(i, i);

@@ -746,10 +746,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {

const new_aligned_size = math.max(len, ptr_align);
if (new_aligned_size > largest_bucket_object_size) {
- try self.large_allocations.ensureCapacity(
- self.backing_allocator,
- self.large_allocations.count() + 1,
- );
+ try self.large_allocations.ensureUnusedCapacity(self.backing_allocator, 1);

const slice = try self.backing_allocator.allocFn(self.backing_allocator, len, ptr_align, len_align, ret_addr);


@@ -61,7 +61,7 @@ pub fn Reader(
array_list: *std.ArrayListAligned(u8, alignment),
max_append_size: usize,
) !void {
- try array_list.ensureCapacity(math.min(max_append_size, 4096));
+ try array_list.ensureTotalCapacity(math.min(max_append_size, 4096));
const original_len = array_list.items.len;
var start_index: usize = original_len;
while (true) {
@@ -81,7 +81,7 @@ pub fn Reader(
}

// This will trigger ArrayList to expand superlinearly at whatever its growth rate is.
- try array_list.ensureCapacity(start_index + 1);
+ try array_list.ensureTotalCapacity(start_index + 1);
}
}


@@ -1838,7 +1838,7 @@ fn parseInternal(
else => {},
}

- try arraylist.ensureCapacity(arraylist.items.len + 1);
+ try arraylist.ensureUnusedCapacity(1);
const v = try parseInternal(ptrInfo.child, tok, tokens, options);
arraylist.appendAssumeCapacity(v);
}

@@ -189,7 +189,7 @@ pub fn MultiArrayList(comptime S: type) type {
/// sets the given index to the specified element. May reallocate
/// and invalidate iterators.
pub fn insert(self: *Self, gpa: *Allocator, index: usize, elem: S) void {
- try self.ensureCapacity(gpa, self.len + 1);
+ try self.ensureUnusedCapacity(gpa, 1);
self.insertAssumeCapacity(index, elem);
}

@@ -376,7 +376,7 @@ pub fn MultiArrayList(comptime S: type) type {
pub fn clone(self: Self, gpa: *Allocator) !Self {
var result = Self{};
errdefer result.deinit(gpa);
- try result.ensureCapacity(gpa, self.len);
+ try result.ensureTotalCapacity(gpa, self.len);
result.len = self.len;
const self_slice = self.slice();
const result_slice = result.slice();

@@ -668,7 +668,7 @@ pub fn utf8ToUtf16LeWithNull(allocator: *mem.Allocator, utf8: []const u8) ![:0]u
var result = std.ArrayList(u16).init(allocator);
errdefer result.deinit();
// optimistically guess that it will not require surrogate pairs
- try result.ensureCapacity(utf8.len + 1);
+ try result.ensureTotalCapacity(utf8.len + 1);

const view = try Utf8View.init(utf8);
var it = view.iterator();

@@ -17,7 +17,7 @@ pub fn parse(gpa: *Allocator, source: [:0]const u8) Allocator.Error!Ast {

// Empirically, the zig std lib has an 8:1 ratio of source bytes to token count.
const estimated_token_count = source.len / 8;
- try tokens.ensureCapacity(gpa, estimated_token_count);
+ try tokens.ensureTotalCapacity(gpa, estimated_token_count);

var tokenizer = std.zig.Tokenizer.init(source);
while (true) {
@@ -48,7 +48,7 @@ pub fn parse(gpa: *Allocator, source: [:0]const u8) Allocator.Error!Ast {
// Empirically, Zig source code has a 2:1 ratio of tokens to AST nodes.
// Make sure at least 1 so we can use appendAssumeCapacity on the root node below.
const estimated_node_count = (tokens.len + 2) / 2;
- try parser.nodes.ensureCapacity(gpa, estimated_node_count);
+ try parser.nodes.ensureTotalCapacity(gpa, estimated_node_count);

// Root node must be index 0.
// Root <- skip ContainerMembers eof
@@ -138,7 +138,7 @@ const Parser = struct {

fn addExtra(p: *Parser, extra: anytype) Allocator.Error!Node.Index {
const fields = std.meta.fields(@TypeOf(extra));
- try p.extra_data.ensureCapacity(p.gpa, p.extra_data.items.len + fields.len);
+ try p.extra_data.ensureUnusedCapacity(p.gpa, fields.len);
const result = @intCast(u32, p.extra_data.items.len);
inline for (fields) |field| {
comptime assert(field.field_type == Node.Index);

@@ -29,7 +29,7 @@ pub fn parseAppend(buf: *std.ArrayList(u8), bytes: []const u8) error{OutOfMemory
const slice = bytes[1..];

const prev_len = buf.items.len;
- try buf.ensureCapacity(prev_len + slice.len - 1);
+ try buf.ensureUnusedCapacity(slice.len - 1);
errdefer buf.shrinkRetainingCapacity(prev_len);

const State = enum {

@@ -3515,7 +3515,7 @@ fn structDeclInner(
defer wip_decls.deinit(gpa);

// We don't know which members are fields until we iterate, so cannot do
- // an accurate ensureCapacity yet.
+ // an accurate ensureTotalCapacity yet.
var fields_data = ArrayListUnmanaged(u32){};
defer fields_data.deinit(gpa);

@@ -3791,7 +3791,7 @@ fn unionDeclInner(
defer wip_decls.deinit(gpa);

// We don't know which members are fields until we iterate, so cannot do
- // an accurate ensureCapacity yet.
+ // an accurate ensureTotalCapacity yet.
var fields_data = ArrayListUnmanaged(u32){};
defer fields_data.deinit(gpa);


@@ -210,7 +210,7 @@ pub const Manifest = struct {
pub fn addFile(self: *Manifest, file_path: []const u8, max_file_size: ?usize) !usize {
assert(self.manifest_file == null);

- try self.files.ensureCapacity(self.cache.gpa, self.files.items.len + 1);
+ try self.files.ensureUnusedCapacity(self.cache.gpa, 1);
const resolved_path = try fs.path.resolve(self.cache.gpa, &[_][]const u8{file_path});

const idx = self.files.items.len;

@@ -1097,7 +1097,7 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {

if (feature.llvm_name) |llvm_name| {
const plus_or_minus = "-+"[@boolToInt(is_enabled)];
- try buf.ensureCapacity(buf.items.len + 2 + llvm_name.len);
+ try buf.ensureUnusedCapacity(2 + llvm_name.len);
buf.appendAssumeCapacity(plus_or_minus);
buf.appendSliceAssumeCapacity(llvm_name);
buf.appendSliceAssumeCapacity(",");
@@ -1347,7 +1347,7 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {

var system_libs: std.StringArrayHashMapUnmanaged(void) = .{};
errdefer system_libs.deinit(gpa);
- try system_libs.ensureCapacity(gpa, options.system_libs.len);
+ try system_libs.ensureTotalCapacity(gpa, options.system_libs.len);
for (options.system_libs) |lib_name| {
system_libs.putAssumeCapacity(lib_name, {});
}
@@ -1483,7 +1483,7 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
errdefer comp.astgen_wait_group.deinit();

// Add a `CObject` for each `c_source_files`.
- try comp.c_object_table.ensureCapacity(gpa, options.c_source_files.len);
+ try comp.c_object_table.ensureTotalCapacity(gpa, options.c_source_files.len);
for (options.c_source_files) |c_source_file| {
const c_object = try gpa.create(CObject);
errdefer gpa.destroy(c_object);
@@ -3084,7 +3084,7 @@ pub fn addCCArgs(

// It would be really nice if there was a more compact way to communicate this info to Clang.
const all_features_list = target.cpu.arch.allFeaturesList();
- try argv.ensureCapacity(argv.items.len + all_features_list.len * 4);
+ try argv.ensureUnusedCapacity(all_features_list.len * 4);
for (all_features_list) |feature, index_usize| {
const index = @intCast(std.Target.Cpu.Feature.Set.Index, index_usize);
const is_enabled = target.cpu.features.isEnabled(index);
@@ -3334,7 +3334,7 @@ fn failCObjWithOwnedErrorMsg(
defer lock.release();
{
errdefer err_msg.destroy(comp.gpa);
- try comp.failed_c_objects.ensureCapacity(comp.gpa, comp.failed_c_objects.count() + 1);
+ try comp.failed_c_objects.ensureUnusedCapacity(comp.gpa, 1);
}
comp.failed_c_objects.putAssumeCapacityNoClobber(c_object, err_msg);
}
@@ -3585,7 +3585,7 @@ fn detectLibCIncludeDirs(

fn detectLibCFromLibCInstallation(arena: *Allocator, target: Target, lci: *const LibCInstallation) !LibCDirs {
var list = std.ArrayList([]const u8).init(arena);
- try list.ensureCapacity(4);
+ try list.ensureTotalCapacity(4);

list.appendAssumeCapacity(lci.include_dir.?);

@@ -3692,7 +3692,7 @@ fn setMiscFailure(
comptime format: []const u8,
args: anytype,
) Allocator.Error!void {
- try comp.misc_failures.ensureCapacity(comp.gpa, comp.misc_failures.count() + 1);
+ try comp.misc_failures.ensureUnusedCapacity(comp.gpa, 1);
const msg = try std.fmt.allocPrint(comp.gpa, format, args);
comp.misc_failures.putAssumeCapacityNoClobber(tag, .{ .msg = msg });
}
@@ -4027,7 +4027,7 @@ fn buildOutputFromZig(
defer if (!keep_errors) errors.deinit(sub_compilation.gpa);

if (errors.list.len != 0) {
- try comp.misc_failures.ensureCapacity(comp.gpa, comp.misc_failures.count() + 1);
+ try comp.misc_failures.ensureUnusedCapacity(comp.gpa, 1);
comp.misc_failures.putAssumeCapacityNoClobber(misc_task_tag, .{
.msg = try std.fmt.allocPrint(comp.gpa, "sub-compilation of {s} failed", .{
@tagName(misc_task_tag),
@@ -4459,7 +4459,7 @@ pub fn build_crt_file(

try sub_compilation.updateSubCompilation();

- try comp.crt_files.ensureCapacity(comp.gpa, comp.crt_files.count() + 1);
+ try comp.crt_files.ensureUnusedCapacity(comp.gpa, 1);

comp.crt_files.putAssumeCapacityNoClobber(basename, .{
.full_object_path = try sub_compilation.bin_file.options.emit.?.directory.join(comp.gpa, &[_][]const u8{

@@ -454,7 +454,7 @@ fn analyzeInst(
}
// Now we have to correctly populate new_set.
if (new_set) |ns| {
- try ns.ensureCapacity(gpa, @intCast(u32, ns.count() + then_table.count() + else_table.count()));
+ try ns.ensureUnusedCapacity(gpa, @intCast(u32, then_table.count() + else_table.count()));
var it = then_table.keyIterator();
while (it.next()) |key| {
_ = ns.putAssumeCapacity(key.*, {});

@@ -3504,7 +3504,7 @@ pub fn scanNamespace(
const zir = namespace.file_scope.zir;

try mod.comp.work_queue.ensureUnusedCapacity(decls_len);
- try namespace.decls.ensureCapacity(gpa, decls_len);
+ try namespace.decls.ensureTotalCapacity(gpa, decls_len);

const bit_bags_count = std.math.divCeil(usize, decls_len, 8) catch unreachable;
var extra_index = extra_start + bit_bags_count;
@@ -4071,7 +4071,7 @@ pub fn getErrorValue(mod: *Module, name: []const u8) !std.StringHashMapUnmanaged
}

errdefer assert(mod.global_error_set.remove(name));
- try mod.error_name_list.ensureCapacity(mod.gpa, mod.error_name_list.items.len + 1);
+ try mod.error_name_list.ensureUnusedCapacity(mod.gpa, 1);
gop.key_ptr.* = try mod.gpa.dupe(u8, name);
gop.value_ptr.* = @intCast(ErrorInt, mod.error_name_list.items.len);
mod.error_name_list.appendAssumeCapacity(gop.key_ptr.*);

@@ -111,7 +111,7 @@ pub fn destroy(pkg: *Package, gpa: *Allocator) void {
}

pub fn add(pkg: *Package, gpa: *Allocator, name: []const u8, package: *Package) !void {
- try pkg.table.ensureCapacity(gpa, pkg.table.count() + 1);
+ try pkg.table.ensureUnusedCapacity(gpa, 1);
const name_dupe = try mem.dupe(gpa, u8, name);
pkg.table.putAssumeCapacityNoClobber(name_dupe, package);
}

@@ -1130,7 +1130,7 @@ fn zirEnumDecl(
const body_end = extra_index;
extra_index += bit_bags_count;

- try enum_obj.fields.ensureCapacity(&new_decl_arena.allocator, fields_len);
+ try enum_obj.fields.ensureTotalCapacity(&new_decl_arena.allocator, fields_len);
const any_values = for (sema.code.extra[body_end..][0..bit_bags_count]) |bag| {
if (bag != 0) break true;
} else false;
@@ -3484,7 +3484,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Com
},
.error_set => {
const lhs_set = lhs_ty.castTag(.error_set).?.data;
- try set.ensureCapacity(sema.gpa, set.count() + lhs_set.names_len);
+ try set.ensureUnusedCapacity(sema.gpa, lhs_set.names_len);
for (lhs_set.names_ptr[0..lhs_set.names_len]) |name| {
set.putAssumeCapacityNoClobber(name, {});
}
@@ -3498,7 +3498,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Com
},
.error_set => {
const rhs_set = rhs_ty.castTag(.error_set).?.data;
- try set.ensureCapacity(sema.gpa, set.count() + rhs_set.names_len);
+ try set.ensureUnusedCapacity(sema.gpa, rhs_set.names_len);
for (rhs_set.names_ptr[0..rhs_set.names_len]) |name| {
set.putAssumeCapacity(name, {});
}
@@ -10361,7 +10361,7 @@ fn analyzeUnionFields(
var decl_arena = union_obj.owner_decl.value_arena.?.promote(gpa);
defer union_obj.owner_decl.value_arena.?.* = decl_arena.state;

- try union_obj.fields.ensureCapacity(&decl_arena.allocator, fields_len);
+ try union_obj.fields.ensureTotalCapacity(&decl_arena.allocator, fields_len);

if (body.len != 0) {
_ = try sema.analyzeBody(block, body);

@@ -141,7 +141,7 @@ pub fn generateSymbol(
// TODO populate .debug_info for the array
if (typed_value.val.castTag(.bytes)) |payload| {
if (typed_value.ty.sentinel()) |sentinel| {
- try code.ensureCapacity(code.items.len + payload.data.len + 1);
+ try code.ensureUnusedCapacity(payload.data.len + 1);
code.appendSliceAssumeCapacity(payload.data);
switch (try generateSymbol(bin_file, src_loc, .{
.ty = typed_value.ty.elemType(),
@@ -568,7 +568,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
fn gen(self: *Self) !void {
switch (arch) {
.x86_64 => {
- try self.code.ensureCapacity(self.code.items.len + 11);
+ try self.code.ensureUnusedCapacity(11);

const cc = self.fn_type.fnCallingConvention();
if (cc != .Naked) {
@@ -607,7 +607,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
// Important to be after the possible self.code.items.len -= 5 above.
try self.dbgSetEpilogueBegin();

- try self.code.ensureCapacity(self.code.items.len + 9);
+ try self.code.ensureUnusedCapacity(9);
// add rsp, x
if (aligned_stack_end > math.maxInt(i8)) {
// example: 48 81 c4 ff ff ff 7f add rsp,0x7fffffff
@@ -1960,7 +1960,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
//
// TODO: make this algorithm less bad

- try self.code.ensureCapacity(self.code.items.len + 8);
+ try self.code.ensureUnusedCapacity(8);

const lhs = try self.resolveInst(op_lhs);
const rhs = try self.resolveInst(op_rhs);
@@ -2447,13 +2447,13 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.register => |reg| {
switch (self.debug_output) {
.dwarf => |dbg_out| {
- try dbg_out.dbg_info.ensureCapacity(dbg_out.dbg_info.items.len + 3);
+ try dbg_out.dbg_info.ensureUnusedCapacity(3);
dbg_out.dbg_info.appendAssumeCapacity(link.File.Elf.abbrev_parameter);
dbg_out.dbg_info.appendSliceAssumeCapacity(&[2]u8{ // DW.AT.location, DW.FORM.exprloc
1, // ULEB128 dwarf expression length
reg.dwarfLocOp(),
});
- try dbg_out.dbg_info.ensureCapacity(dbg_out.dbg_info.items.len + 5 + name_with_null.len);
+ try dbg_out.dbg_info.ensureUnusedCapacity(5 + name_with_null.len);
try self.addDbgInfoTypeReloc(ty); // DW.AT.type, DW.FORM.ref4
dbg_out.dbg_info.appendSliceAssumeCapacity(name_with_null); // DW.AT.name, DW.FORM.string
},
@@ -2484,7 +2484,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
try dbg_out.dbg_info.append(DW.OP.breg11);
try leb128.writeILEB128(dbg_out.dbg_info.writer(), adjusted_stack_offset);

- try dbg_out.dbg_info.ensureCapacity(dbg_out.dbg_info.items.len + 5 + name_with_null.len);
+ try dbg_out.dbg_info.ensureUnusedCapacity(5 + name_with_null.len);
try self.addDbgInfoTypeReloc(ty); // DW.AT.type, DW.FORM.ref4
dbg_out.dbg_info.appendSliceAssumeCapacity(name_with_null); // DW.AT.name, DW.FORM.string
},
@@ -2626,7 +2626,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
unreachable;

// ff 14 25 xx xx xx xx call [addr]
- try self.code.ensureCapacity(self.code.items.len + 7);
+ try self.code.ensureUnusedCapacity(7);
self.code.appendSliceAssumeCapacity(&[3]u8{ 0xff, 0x14, 0x25 });
mem.writeIntLittle(u32, self.code.addManyAsArrayAssumeCapacity(4), got_addr);
} else if (func_value.castTag(.extern_fn)) |_| {
@@ -2839,7 +2839,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.memory = func.owner_decl.link.macho.local_sym_index,
});
// callq *%rax
- try self.code.ensureCapacity(self.code.items.len + 2);
+ try self.code.ensureUnusedCapacity(2);
self.code.appendSliceAssumeCapacity(&[2]u8{ 0xff, 0xd0 });
},
.aarch64 => {
@@ -2858,7 +2858,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
switch (arch) {
.x86_64 => {
// callq
- try self.code.ensureCapacity(self.code.items.len + 5);
+ try self.code.ensureUnusedCapacity(5);
self.code.appendSliceAssumeCapacity(&[5]u8{ 0xe8, 0x0, 0x0, 0x0, 0x0 });
break :blk @intCast(u32, self.code.items.len) - 4;
},
@@ -2932,7 +2932,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
const got_addr = p9.bases.data;
const got_index = func_payload.data.owner_decl.link.plan9.got_index.?;
// ff 14 25 xx xx xx xx call [addr]
- try self.code.ensureCapacity(self.code.items.len + 7);
+ try self.code.ensureUnusedCapacity(7);
self.code.appendSliceAssumeCapacity(&[3]u8{ 0xff, 0x14, 0x25 });
const fn_got_addr = got_addr + got_index * ptr_bytes;
mem.writeIntLittle(u32, self.code.addManyAsArrayAssumeCapacity(4), @intCast(u32, fn_got_addr));
@@ -3075,7 +3075,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
const rhs = try self.resolveInst(bin_op.rhs);
const result: MCValue = switch (arch) {
.x86_64 => result: {
- try self.code.ensureCapacity(self.code.items.len + 8);
+ try self.code.ensureUnusedCapacity(8);

// There are 2 operands, destination and source.
// Either one, but not both, can be a memory operand.
@@ -3159,7 +3159,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {

const reloc: Reloc = switch (arch) {
.i386, .x86_64 => reloc: {
- try self.code.ensureCapacity(self.code.items.len + 6);
+ try self.code.ensureUnusedCapacity(6);

const opcode: u8 = switch (cond) {
.compare_flags_signed => |cmp_op| blk: {
@@ -3519,7 +3519,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
fn jump(self: *Self, index: usize) !void {
switch (arch) {
.i386, .x86_64 => {
- try self.code.ensureCapacity(self.code.items.len + 5);
+ try self.code.ensureUnusedCapacity(5);
if (math.cast(i8, @intCast(i32, index) - (@intCast(i32, self.code.items.len + 2)))) |delta| {
self.code.appendAssumeCapacity(0xeb); // jmp rel8
self.code.appendAssumeCapacity(@bitCast(u8, delta));
@@ -3657,7 +3657,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
const block_data = self.blocks.getPtr(block).?;

// Emit a jump with a relocation. It will be patched up after the block ends.
- try block_data.relocs.ensureCapacity(self.gpa, block_data.relocs.items.len + 1);
+ try block_data.relocs.ensureUnusedCapacity(self.gpa, 1);

switch (arch) {
.i386, .x86_64 => {
@@ -4041,7 +4041,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
if (adj_off > 128) {
return self.fail("TODO implement set stack variable with large stack offset", .{});
}
- try self.code.ensureCapacity(self.code.items.len + 8);
+ try self.code.ensureUnusedCapacity(8);
switch (abi_size) {
1 => {
return self.fail("TODO implement set abi_size=1 stack variable with immediate", .{});
@@ -4067,7 +4067,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {

// 64 bit write to memory would take two mov's anyways so we
// insted just use two 32 bit writes to avoid register allocation
- try self.code.ensureCapacity(self.code.items.len + 14);
+ try self.code.ensureUnusedCapacity(14);
var buf: [8]u8 = undefined;
mem.writeIntLittle(u64, &buf, x_big);

@@ -629,7 +629,7 @@ pub const DeclGen = struct {
const params = decl.ty.fnParamLen();
var i: usize = 0;

- try self.args.ensureCapacity(params);
+ try self.args.ensureTotalCapacity(params);
while (i < params) : (i += 1) {
const param_type_id = self.spv.types.get(decl.ty.fnParamType(i)).?;
const arg_result_id = self.spv.allocResultId();

@@ -108,7 +108,7 @@ pub fn buildLibCXX(comp: *Compilation) !void {
const cxxabi_include_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libcxxabi", "include" });
const cxx_include_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libcxx", "include" });
var c_source_files = std.ArrayList(Compilation.CSourceFile).init(arena);
- try c_source_files.ensureCapacity(libcxx_files.len);
+ try c_source_files.ensureTotalCapacity(libcxx_files.len);

for (libcxx_files) |cxx_src| {
var cflags = std.ArrayList([]const u8).init(arena);
@@ -246,7 +246,7 @@ pub fn buildLibCXXABI(comp: *Compilation) !void {
const cxxabi_include_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libcxxabi", "include" });
const cxx_include_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libcxx", "include" });
var c_source_files = std.ArrayList(Compilation.CSourceFile).init(arena);
- try c_source_files.ensureCapacity(libcxxabi_files.len);
+ try c_source_files.ensureTotalCapacity(libcxxabi_files.len);

for (libcxxabi_files) |cxxabi_src| {
var cflags = std.ArrayList([]const u8).init(arena);

@@ -34,7 +34,7 @@ pub fn buildTsan(comp: *Compilation) !void {
};

var c_source_files = std.ArrayList(Compilation.CSourceFile).init(arena);
- try c_source_files.ensureCapacity(c_source_files.items.len + tsan_sources.len);
+ try c_source_files.ensureUnusedCapacity(tsan_sources.len);

const tsan_include_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{"tsan"});
for (tsan_sources) |tsan_src| {
@@ -58,7 +58,7 @@ pub fn buildTsan(comp: *Compilation) !void {
&darwin_tsan_sources
else
&unix_tsan_sources;
- try c_source_files.ensureCapacity(c_source_files.items.len + platform_tsan_sources.len);
+ try c_source_files.ensureUnusedCapacity(platform_tsan_sources.len);
for (platform_tsan_sources) |tsan_src| {
var cflags = std.ArrayList([]const u8).init(arena);

@@ -96,7 +96,7 @@ pub fn buildTsan(comp: *Compilation) !void {
});
}

- try c_source_files.ensureCapacity(c_source_files.items.len + sanitizer_common_sources.len);
+ try c_source_files.ensureUnusedCapacity(sanitizer_common_sources.len);
const sanitizer_common_include_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
"tsan", "sanitizer_common",
});
@@ -123,7 +123,7 @@ pub fn buildTsan(comp: *Compilation) !void {
&sanitizer_libcdep_sources
else
&sanitizer_nolibc_sources;
- try c_source_files.ensureCapacity(c_source_files.items.len + to_c_or_not_to_c_sources.len);
+ try c_source_files.ensureUnusedCapacity(to_c_or_not_to_c_sources.len);
for (to_c_or_not_to_c_sources) |c_src| {
var cflags = std.ArrayList([]const u8).init(arena);

@@ -143,7 +143,7 @@ pub fn buildTsan(comp: *Compilation) !void {
});
}

- try c_source_files.ensureCapacity(c_source_files.items.len + sanitizer_symbolizer_sources.len);
+ try c_source_files.ensureUnusedCapacity(sanitizer_symbolizer_sources.len);
for (sanitizer_symbolizer_sources) |c_src| {
var cflags = std.ArrayList([]const u8).init(arena);

@@ -168,7 +168,7 @@ pub fn buildTsan(comp: *Compilation) !void {
&[_][]const u8{"interception"},
);

- try c_source_files.ensureCapacity(c_source_files.items.len + interception_sources.len);
+ try c_source_files.ensureUnusedCapacity(interception_sources.len);
for (interception_sources) |c_src| {
var cflags = std.ArrayList([]const u8).init(arena);

@@ -635,7 +635,7 @@ pub const File = struct {
var object_files = std.ArrayList([*:0]const u8).init(base.allocator);
defer object_files.deinit();

- try object_files.ensureCapacity(base.options.objects.len + comp.c_object_table.count() + 2);
+ try object_files.ensureTotalCapacity(base.options.objects.len + comp.c_object_table.count() + 2);
for (base.options.objects) |obj_path| {
object_files.appendAssumeCapacity(try arena.dupeZ(u8, obj_path));
}

@@ -197,7 +197,7 @@ pub fn flushModule(self: *C, comp: *Compilation) !void {
defer all_buffers.deinit();

// This is at least enough until we get to the function bodies without error handling.
- try all_buffers.ensureCapacity(self.decl_table.count() + 2);
+ try all_buffers.ensureTotalCapacity(self.decl_table.count() + 2);

var file_size: u64 = zig_h.len;
all_buffers.appendAssumeCapacity(.{
@@ -258,7 +258,7 @@ pub fn flushModule(self: *C, comp: *Compilation) !void {
file_size += err_typedef_buf.items.len;

// Now the function bodies.
- try all_buffers.ensureCapacity(all_buffers.items.len + fn_count);
+ try all_buffers.ensureUnusedCapacity(fn_count);
for (self.decl_table.keys()) |decl| {
if (!decl.has_tv) continue;
if (decl.val.castTag(.function)) |_| {
@@ -286,7 +286,7 @@ pub fn flushEmitH(module: *Module) !void {
var all_buffers = std.ArrayList(std.os.iovec_const).init(module.gpa);
defer all_buffers.deinit();

- try all_buffers.ensureCapacity(emit_h.decl_table.count() + 1);
+ try all_buffers.ensureTotalCapacity(emit_h.decl_table.count() + 1);

var file_size: u64 = zig_h.len;
all_buffers.appendAssumeCapacity(.{

@@ -418,7 +418,7 @@ pub fn createEmpty(gpa: *Allocator, options: link.Options) !*Coff {
pub fn allocateDeclIndexes(self: *Coff, decl: *Module.Decl) !void {
if (self.llvm_object) |_| return;

- try self.offset_table.ensureCapacity(self.base.allocator, self.offset_table.items.len + 1);
+ try self.offset_table.ensureUnusedCapacity(self.base.allocator, 1);

if (self.offset_table_free_list.popOrNull()) |i| {
decl.link.coff.offset_table_index = i;
@@ -793,7 +793,7 @@ pub fn updateDeclExports(
for (exports) |exp| {
if (exp.options.section) |section_name| {
if (!mem.eql(u8, section_name, ".text")) {
- try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.count() + 1);
+ try module.failed_exports.ensureUnusedCapacity(module.gpa, 1);
module.failed_exports.putAssumeCapacityNoClobber(
exp,
try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(), "Unimplemented: ExportOptions.section", .{}),
@@ -804,7 +804,7 @@ pub fn updateDeclExports(
if (mem.eql(u8, exp.options.name, "_start")) {
self.entry_addr = decl.link.coff.getVAddr(self.*) - default_image_base;
} else {
- try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.count() + 1);
+ try module.failed_exports.ensureUnusedCapacity(module.gpa, 1);
module.failed_exports.putAssumeCapacityNoClobber(
exp,
try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(), "Unimplemented: Exports other than '_start'", .{}),

@@ -411,7 +411,7 @@ fn findFreeSpace(self: *Elf, object_size: u64, min_alignment: u16) u64 {

/// TODO Improve this to use a table.
fn makeString(self: *Elf, bytes: []const u8) !u32 {
- try self.shstrtab.ensureCapacity(self.base.allocator, self.shstrtab.items.len + bytes.len + 1);
+ try self.shstrtab.ensureUnusedCapacity(self.base.allocator, bytes.len + 1);
const result = self.shstrtab.items.len;
self.shstrtab.appendSliceAssumeCapacity(bytes);
self.shstrtab.appendAssumeCapacity(0);
@@ -420,7 +420,7 @@ fn makeString(self: *Elf, bytes: []const u8) !u32 {

/// TODO Improve this to use a table.
fn makeDebugString(self: *Elf, bytes: []const u8) !u32 {
- try self.debug_strtab.ensureCapacity(self.base.allocator, self.debug_strtab.items.len + bytes.len + 1);
+ try self.debug_strtab.ensureUnusedCapacity(self.base.allocator, bytes.len + 1);
const result = self.debug_strtab.items.len;
self.debug_strtab.appendSliceAssumeCapacity(bytes);
self.debug_strtab.appendAssumeCapacity(0);
@@ -856,7 +856,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation) !void {

// We have a function to compute the upper bound size, because it's needed
// for determining where to put the offset of the first `LinkBlock`.
- try di_buf.ensureCapacity(self.dbgInfoNeededHeaderBytes());
+ try di_buf.ensureTotalCapacity(self.dbgInfoNeededHeaderBytes());

// initial length - length of the .debug_info contribution for this compilation unit,
// not including the initial length itself.
@@ -925,7 +925,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation) !void {

// Enough for all the data without resizing. When support for more compilation units
// is added, the size of this section will become more variable.
- try di_buf.ensureCapacity(100);
+ try di_buf.ensureTotalCapacity(100);

// initial length - length of the .debug_aranges contribution for this compilation unit,
// not including the initial length itself.
@@ -1004,7 +1004,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation) !void {
// The size of this header is variable, depending on the number of directories,
// files, and padding. We have a function to compute the upper bound size, however,
// because it's needed for determining where to put the offset of the first `SrcFn`.
- try di_buf.ensureCapacity(self.dbgLineNeededHeaderBytes());
+ try di_buf.ensureTotalCapacity(self.dbgLineNeededHeaderBytes());

// initial length - length of the .debug_line contribution for this compilation unit,
// not including the initial length itself.
@@ -1639,7 +1639,7 @@ fn linkWithLLD(self: *Elf, comp: *Compilation) !void {
// Shared libraries.
if (is_exe_or_dyn_lib) {
const system_libs = self.base.options.system_libs.keys();
- try argv.ensureCapacity(argv.items.len + system_libs.len);
+ try argv.ensureUnusedCapacity(system_libs.len);
for (system_libs) |link_lib| {
// By this time, we depend on these libs being dynamically linked libraries and not static libraries
// (the check for that needs to be earlier), but they could be full paths to .so files, in which
@@ -2113,8 +2113,8 @@ pub fn allocateDeclIndexes(self: *Elf, decl: *Module.Decl) !void {

if (decl.link.elf.local_sym_index != 0) return;

- try self.local_symbols.ensureCapacity(self.base.allocator, self.local_symbols.items.len + 1);
- try self.offset_table.ensureCapacity(self.base.allocator, self.offset_table.items.len + 1);
+ try self.local_symbols.ensureUnusedCapacity(self.base.allocator, 1);
+ try self.offset_table.ensureUnusedCapacity(self.base.allocator, 1);

if (self.local_symbol_free_list.popOrNull()) |i| {
log.debug("reusing symbol index {d} for {s}", .{ i, decl.name });
@@ -2316,7 +2316,7 @@ pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liven
defer deinitRelocs(self.base.allocator, &dbg_info_type_relocs);

// For functions we need to add a prologue to the debug line program.
- try dbg_line_buffer.ensureCapacity(26);
+ try dbg_line_buffer.ensureTotalCapacity(26);

const decl = func.owner_decl;
const line_off = @intCast(u28, decl.src_line + func.lbrace_line);
@@ -2351,7 +2351,7 @@ pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liven

// .debug_info subprogram
const decl_name_with_null = decl.name[0 .. mem.lenZ(decl.name) + 1];
- try dbg_info_buffer.ensureCapacity(dbg_info_buffer.items.len + 25 + decl_name_with_null.len);
+ try dbg_info_buffer.ensureUnusedCapacity(25 + decl_name_with_null.len);

const fn_ret_type = decl.ty.fnReturnType();
const fn_ret_has_bits = fn_ret_type.hasCodeGenBits();
@@ -2593,7 +2593,7 @@ fn addDbgInfoType(self: *Elf, ty: Type, dbg_info_buffer: *std.ArrayList(u8)) !vo
},
.Int => {
const info = ty.intInfo(self.base.options.target);
- try dbg_info_buffer.ensureCapacity(dbg_info_buffer.items.len + 12);
+ try dbg_info_buffer.ensureUnusedCapacity(12);
dbg_info_buffer.appendAssumeCapacity(abbrev_base_type);
// DW.AT.encoding, DW.FORM.data1
dbg_info_buffer.appendAssumeCapacity(switch (info.signedness) {
@@ -2607,7 +2607,7 @@ fn addDbgInfoType(self: *Elf, ty: Type, dbg_info_buffer: *std.ArrayList(u8)) !vo
},
.Optional => {
if (ty.isPtrLikeOptional()) {
- try dbg_info_buffer.ensureCapacity(dbg_info_buffer.items.len + 12);
+ try dbg_info_buffer.ensureUnusedCapacity(12);
dbg_info_buffer.appendAssumeCapacity(abbrev_base_type);
// DW.AT.encoding, DW.FORM.data1
dbg_info_buffer.appendAssumeCapacity(DW.ATE.address);
@@ -2747,14 +2747,14 @@ pub fn updateDeclExports(
const tracy = trace(@src());
defer tracy.end();

- try self.global_symbols.ensureCapacity(self.base.allocator, self.global_symbols.items.len + exports.len);
+ try self.global_symbols.ensureUnusedCapacity(self.base.allocator, exports.len);
if (decl.link.elf.local_sym_index == 0) return;
const decl_sym = self.local_symbols.items[decl.link.elf.local_sym_index];

for (exports) |exp| {
if (exp.options.section) |section_name| {
if (!mem.eql(u8, section_name, ".text")) {
- try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.count() + 1);
+ try module.failed_exports.ensureUnusedCapacity(module.gpa, 1);
module.failed_exports.putAssumeCapacityNoClobber(
exp,
try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(), "Unimplemented: ExportOptions.section", .{}),
@@ -2772,7 +2772,7 @@ pub fn updateDeclExports(
},
.Weak => elf.STB_WEAK,
.LinkOnce => {
- try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.count() + 1);
+ try module.failed_exports.ensureUnusedCapacity(module.gpa, 1);
module.failed_exports.putAssumeCapacityNoClobber(
exp,
try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(), "Unimplemented: GlobalLinkage.LinkOnce", .{}),

@@ -102,7 +102,7 @@ pub fn calcAdhocSignature(
var buffer = try allocator.alloc(u8, page_size);
defer allocator.free(buffer);

- try cdir.data.ensureCapacity(allocator, total_pages * hash_size + id.len + 1);
+ try cdir.data.ensureTotalCapacity(allocator, total_pages * hash_size + id.len + 1);

// 1. Save the identifier and update offsets
cdir.inner.identOffset = cdir.inner.length;

@@ -353,7 +353,7 @@ pub fn flushModule(self: *DebugSymbols, allocator: *Allocator, options: link.Opt

// We have a function to compute the upper bound size, because it's needed
// for determining where to put the offset of the first `LinkBlock`.
- try di_buf.ensureCapacity(self.dbgInfoNeededHeaderBytes());
+ try di_buf.ensureTotalCapacity(self.dbgInfoNeededHeaderBytes());

// initial length - length of the .debug_info contribution for this compilation unit,
// not including the initial length itself.
@@ -408,7 +408,7 @@ pub fn flushModule(self: *DebugSymbols, allocator: *Allocator, options: link.Opt

// Enough for all the data without resizing. When support for more compilation units
// is added, the size of this section will become more variable.
- try di_buf.ensureCapacity(100);
+ try di_buf.ensureTotalCapacity(100);

// initial length - length of the .debug_aranges contribution for this compilation unit,
// not including the initial length itself.
@@ -479,7 +479,7 @@ pub fn flushModule(self: *DebugSymbols, allocator: *Allocator, options: link.Opt
// The size of this header is variable, depending on the number of directories,
// files, and padding. We have a function to compute the upper bound size, however,
// because it's needed for determining where to put the offset of the first `SrcFn`.
- try di_buf.ensureCapacity(self.dbgLineNeededHeaderBytes(module));
+ try di_buf.ensureTotalCapacity(self.dbgLineNeededHeaderBytes(module));

// initial length - length of the .debug_line contribution for this compilation unit,
// not including the initial length itself.
@@ -607,7 +607,7 @@ fn copySegmentCommand(self: *DebugSymbols, allocator: *Allocator, base_cmd: Segm
};
mem.copy(u8, &cmd.inner.segname, &base_cmd.inner.segname);

- try cmd.sections.ensureCapacity(allocator, cmd.inner.nsects);
+ try cmd.sections.ensureTotalCapacity(allocator, cmd.inner.nsects);
for (base_cmd.sections.items) |base_sect, i| {
var sect = macho.section_64{
.sectname = undefined,
@@ -855,7 +855,7 @@ pub fn initDeclDebugBuffers(
switch (decl.ty.zigTypeTag()) {
.Fn => {
// For functions we need to add a prologue to the debug line program.
- try dbg_line_buffer.ensureCapacity(26);
+ try dbg_line_buffer.ensureTotalCapacity(26);

const func = decl.val.castTag(.function).?.data;
const line_off = @intCast(u28, decl.src_line + func.lbrace_line);
@@ -889,7 +889,7 @@ pub fn initDeclDebugBuffers(

// .debug_info subprogram
const decl_name_with_null = decl.name[0 .. mem.lenZ(decl.name) + 1];
- try dbg_info_buffer.ensureCapacity(dbg_info_buffer.items.len + 27 + decl_name_with_null.len);
+ try dbg_info_buffer.ensureUnusedCapacity(27 + decl_name_with_null.len);

const fn_ret_type = decl.ty.fnReturnType();
const fn_ret_has_bits = fn_ret_type.hasCodeGenBits();
@@ -1124,7 +1124,7 @@ fn addDbgInfoType(
},
.Int => {
const info = ty.intInfo(target);
- try dbg_info_buffer.ensureCapacity(dbg_info_buffer.items.len + 12);
+ try dbg_info_buffer.ensureUnusedCapacity(12);
dbg_info_buffer.appendAssumeCapacity(abbrev_base_type);
// DW.AT.encoding, DW.FORM.data1
dbg_info_buffer.appendAssumeCapacity(switch (info.signedness) {
@@ -1261,7 +1261,7 @@ fn getDebugLineProgramEnd(self: DebugSymbols) u32 {

/// TODO Improve this to use a table.
fn makeDebugString(self: *DebugSymbols, allocator: *Allocator, bytes: []const u8) !u32 {
- try self.debug_string_table.ensureCapacity(allocator, self.debug_string_table.items.len + bytes.len + 1);
+ try self.debug_string_table.ensureUnusedCapacity(allocator, bytes.len + 1);
const result = self.debug_string_table.items.len;
self.debug_string_table.appendSliceAssumeCapacity(bytes);
self.debug_string_table.appendAssumeCapacity(0);

@@ -180,7 +180,7 @@ pub fn parse(self: *Dylib, allocator: *Allocator, target: std.Target) !void {
fn readLoadCommands(self: *Dylib, allocator: *Allocator, reader: anytype) !void {
const should_lookup_reexports = self.header.?.flags & macho.MH_NO_REEXPORTED_DYLIBS == 0;

- try self.load_commands.ensureCapacity(allocator, self.header.?.ncmds);
+ try self.load_commands.ensureTotalCapacity(allocator, self.header.?.ncmds);

var i: u16 = 0;
while (i < self.header.?.ncmds) : (i += 1) {

@@ -261,7 +261,7 @@ pub fn readLoadCommands(self: *Object, allocator: *Allocator, reader: anytype) !
const header = self.header orelse unreachable; // Unreachable here signifies a fatal unexplored condition.
const offset = self.file_offset orelse 0;

- try self.load_commands.ensureCapacity(allocator, header.ncmds);
+ try self.load_commands.ensureTotalCapacity(allocator, header.ncmds);

var i: u16 = 0;
while (i < header.ncmds) : (i += 1) {

@@ -326,7 +326,7 @@ pub fn finalize(self: *Trie, allocator: *Allocator) !void {
if (!self.trie_dirty) return;

self.ordered_nodes.shrinkRetainingCapacity(0);
- try self.ordered_nodes.ensureCapacity(allocator, self.node_count);
+ try self.ordered_nodes.ensureTotalCapacity(allocator, self.node_count);

var fifo = std.fifo.LinearFifo(*Node, .Dynamic).init(allocator);
defer fifo.deinit();

@@ -223,7 +223,7 @@ pub const SegmentCommand = struct {
var segment = SegmentCommand{
.inner = inner,
};
- try segment.sections.ensureCapacity(alloc, inner.nsects);
+ try segment.sections.ensureTotalCapacity(alloc, inner.nsects);

var i: usize = 0;
while (i < inner.nsects) : (i += 1) {

@@ -172,8 +172,8 @@ pub fn deinit(self: *Wasm) void {
pub fn allocateDeclIndexes(self: *Wasm, decl: *Module.Decl) !void {
if (decl.link.wasm.init) return;

- try self.offset_table.ensureCapacity(self.base.allocator, self.offset_table.items.len + 1);
- try self.symbols.ensureCapacity(self.base.allocator, self.symbols.items.len + 1);
+ try self.offset_table.ensureUnusedCapacity(self.base.allocator, 1);
+ try self.symbols.ensureUnusedCapacity(self.base.allocator, 1);

const block = &decl.link.wasm;
block.init = true;

src/main.zig
@@ -1704,22 +1704,22 @@ fn buildOutputType(
} else true;
if (!should_get_sdk_path) break :outer false;
if (try std.zig.system.darwin.getSDKPath(arena, target_info.target)) |sdk_path| {
- try clang_argv.ensureCapacity(clang_argv.items.len + 2);
+ try clang_argv.ensureUnusedCapacity(2);
clang_argv.appendAssumeCapacity("-isysroot");
clang_argv.appendAssumeCapacity(sdk_path);
break :outer true;
} else break :outer false;
} else false;

- try clang_argv.ensureCapacity(clang_argv.items.len + paths.include_dirs.items.len * 2);
+ try clang_argv.ensureUnusedCapacity(paths.include_dirs.items.len * 2);
const isystem_flag = if (has_sysroot) "-iwithsysroot" else "-isystem";
for (paths.include_dirs.items) |include_dir| {
clang_argv.appendAssumeCapacity(isystem_flag);
clang_argv.appendAssumeCapacity(include_dir);
}

- try clang_argv.ensureCapacity(clang_argv.items.len + paths.framework_dirs.items.len * 2);
- try framework_dirs.ensureCapacity(framework_dirs.items.len + paths.framework_dirs.items.len);
+ try clang_argv.ensureUnusedCapacity(paths.framework_dirs.items.len * 2);
+ try framework_dirs.ensureUnusedCapacity(paths.framework_dirs.items.len);
const iframework_flag = if (has_sysroot) "-iframeworkwithsysroot" else "-iframework";
for (paths.framework_dirs.items) |framework_dir| {
clang_argv.appendAssumeCapacity(iframework_flag);
@@ -2783,7 +2783,7 @@ pub fn cmdInit(
fatal("unable to read template file 'build.zig': {s}", .{@errorName(err)});
};
var modified_build_zig_contents = std.ArrayList(u8).init(arena);
- try modified_build_zig_contents.ensureCapacity(build_zig_contents.len);
+ try modified_build_zig_contents.ensureTotalCapacity(build_zig_contents.len);
for (build_zig_contents) |c| {
if (c == '$') {
try modified_build_zig_contents.appendSlice(cwd_basename);
@@ -3464,7 +3464,7 @@ fn fmtPathFile(

// As a heuristic, we make enough capacity for the same as the input source.
fmt.out_buffer.shrinkRetainingCapacity(0);
- try fmt.out_buffer.ensureCapacity(source_code.len);
+ try fmt.out_buffer.ensureTotalCapacity(source_code.len);

try tree.renderToArrayList(&fmt.out_buffer);
if (mem.eql(u8, fmt.out_buffer.items, source_code))

@@ -312,7 +312,7 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
if (try man.hit()) {
const digest = man.final();

- try comp.crt_files.ensureCapacity(comp.gpa, comp.crt_files.count() + 1);
+ try comp.crt_files.ensureUnusedCapacity(comp.gpa, 1);
comp.crt_files.putAssumeCapacityNoClobber(final_lib_basename, .{
.full_object_path = try comp.global_cache_directory.join(comp.gpa, &[_][]const u8{
"o", &digest, final_lib_basename,

@@ -112,7 +112,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
var source_table = std.StringArrayHashMap(Ext).init(comp.gpa);
defer source_table.deinit();

- try source_table.ensureCapacity(compat_time32_files.len + src_files.len);
+ try source_table.ensureTotalCapacity(compat_time32_files.len + src_files.len);

for (src_files) |src_file| {
try addSrcFile(arena, &source_table, src_file);
@@ -231,7 +231,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {

try sub_compilation.updateSubCompilation();

- try comp.crt_files.ensureCapacity(comp.gpa, comp.crt_files.count() + 1);
+ try comp.crt_files.ensureUnusedCapacity(comp.gpa, 1);

const basename = try comp.gpa.dupe(u8, "libc.so");
errdefer comp.gpa.free(basename);

@@ -4882,7 +4882,7 @@ fn finishTransFnProto(
var fn_params = std.ArrayList(ast.Payload.Param).init(c.gpa);
defer fn_params.deinit();
const param_count: usize = if (fn_proto_ty != null) fn_proto_ty.?.getNumParams() else 0;
- try fn_params.ensureCapacity(param_count);
+ try fn_params.ensureTotalCapacity(param_count);

var i: usize = 0;
while (i < param_count) : (i += 1) {

@@ -728,13 +728,13 @@ pub fn render(gpa: *Allocator, nodes: []const Node) !std.zig.Ast {

// Estimate that each top level node has 10 child nodes.
const estimated_node_count = nodes.len * 10;
- try ctx.nodes.ensureCapacity(gpa, estimated_node_count);
+ try ctx.nodes.ensureTotalCapacity(gpa, estimated_node_count);
// Estimate that each each node has 2 tokens.
const estimated_tokens_count = estimated_node_count * 2;
- try ctx.tokens.ensureCapacity(gpa, estimated_tokens_count);
+ try ctx.tokens.ensureTotalCapacity(gpa, estimated_tokens_count);
// Estimate that each each token is 3 bytes long.
const estimated_buf_len = estimated_tokens_count * 3;
- try ctx.buf.ensureCapacity(estimated_buf_len);
+ try ctx.buf.ensureTotalCapacity(estimated_buf_len);

ctx.nodes.appendAssumeCapacity(.{
.tag = .root,
@@ -839,7 +839,7 @@ const Context = struct {

fn addExtra(c: *Context, extra: anytype) Allocator.Error!NodeIndex {
const fields = std.meta.fields(@TypeOf(extra));
- try c.extra_data.ensureCapacity(c.gpa, c.extra_data.items.len + fields.len);
+ try c.extra_data.ensureUnusedCapacity(c.gpa, fields.len);
const result = @intCast(u32, c.extra_data.items.len);
inline for (fields) |field| {
comptime std.debug.assert(field.field_type == NodeIndex);
@@ -2797,7 +2797,7 @@ fn renderParams(c: *Context, params: []Payload.Param, is_var_args: bool) !std.Ar
_ = try c.addToken(.l_paren, "(");
var rendered = std.ArrayList(NodeIndex).init(c.gpa);
errdefer rendered.deinit();
- try rendered.ensureCapacity(std.math.max(params.len, 1));
+ try rendered.ensureTotalCapacity(std.math.max(params.len, 1));

for (params) |param, i| {
if (i != 0) _ = try c.addToken(.comma, ",");