diff --git a/lib/std/array_hash_map.zig b/lib/std/array_hash_map.zig
index 91e0c4d883..42443f2138 100644
--- a/lib/std/array_hash_map.zig
+++ b/lib/std/array_hash_map.zig
@@ -90,7 +90,7 @@ pub fn ArrayHashMap(
         /// Modifying the key is allowed only if it does not change the hash.
         /// Modifying the value is allowed.
         /// Entry pointers become invalid whenever this ArrayHashMap is modified,
-        /// unless `ensureCapacity` was previously used.
+        /// unless `ensureTotalCapacity`/`ensureUnusedCapacity` was previously used.
         pub const Entry = Unmanaged.Entry;
         /// A KV pair which has been copied out of the backing store
@@ -110,7 +110,7 @@ pub fn ArrayHashMap(
         /// Modifying the key is allowed only if it does not change the hash.
         /// Modifying the value is allowed.
         /// Entry pointers become invalid whenever this ArrayHashMap is modified,
-        /// unless `ensureCapacity` was previously used.
+        /// unless `ensureTotalCapacity`/`ensureUnusedCapacity` was previously used.
         pub const GetOrPutResult = Unmanaged.GetOrPutResult;
         /// An Iterator over Entry pointers.
@@ -478,7 +478,7 @@ pub fn ArrayHashMapUnmanaged(
         /// Modifying the key is allowed only if it does not change the hash.
         /// Modifying the value is allowed.
         /// Entry pointers become invalid whenever this ArrayHashMap is modified,
-        /// unless `ensureCapacity` was previously used.
+        /// unless `ensureTotalCapacity`/`ensureUnusedCapacity` was previously used.
         pub const Entry = struct {
             key_ptr: *K,
             value_ptr: *V,
@@ -509,7 +509,7 @@ pub fn ArrayHashMapUnmanaged(
         /// Modifying the key is allowed only if it does not change the hash.
         /// Modifying the value is allowed.
         /// Entry pointers become invalid whenever this ArrayHashMap is modified,
-        /// unless `ensureCapacity` was previously used.
+        /// unless `ensureTotalCapacity`/`ensureUnusedCapacity` was previously used.
         pub const GetOrPutResult = struct {
             key_ptr: *K,
             value_ptr: *V,
@@ -759,20 +759,20 @@ pub fn ArrayHashMapUnmanaged(
         }
         pub fn ensureTotalCapacityContext(self: *Self, allocator: *Allocator, new_capacity: usize, ctx: Context) !void {
             if (new_capacity <= linear_scan_max) {
-                try self.entries.ensureCapacity(allocator, new_capacity);
+                try self.entries.ensureTotalCapacity(allocator, new_capacity);
                 return;
             }
             if (self.index_header) |header| {
                 if (new_capacity <= header.capacity()) {
-                    try self.entries.ensureCapacity(allocator, new_capacity);
+                    try self.entries.ensureTotalCapacity(allocator, new_capacity);
                     return;
                 }
             }
             const new_bit_index = try IndexHeader.findBitIndex(new_capacity);
             const new_header = try IndexHeader.alloc(allocator, new_bit_index);
-            try self.entries.ensureCapacity(allocator, new_capacity);
+            try self.entries.ensureTotalCapacity(allocator, new_capacity);
             if (self.index_header) |old_header| old_header.free(allocator);
             self.insertAllEntriesIntoNewHeader(if (store_hash) {} else ctx, new_header);
@@ -1441,7 +1441,7 @@ pub fn ArrayHashMapUnmanaged(
             unreachable;
         }
-        /// Must ensureCapacity before calling this.
+        /// Must `ensureTotalCapacity`/`ensureUnusedCapacity` before calling this.
         fn getOrPutInternal(self: *Self, key: anytype, ctx: anytype, header: *IndexHeader, comptime I: type) GetOrPutResult {
            const slice = self.entries.slice();
            const hashes_array = if (store_hash) slice.items(.hash) else {};
@@ -1485,7 +1485,7 @@ pub fn ArrayHashMapUnmanaged(
            }
            // This pointer survives the following append because we call
-            // entries.ensureCapacity before getOrPutInternal.
+            // entries.ensureTotalCapacity before getOrPutInternal.
            const hash_match = if (store_hash) h == hashes_array[slot_data.entry_index] else true;
            if (hash_match and checkedEql(ctx, key, keys_array[slot_data.entry_index])) {
                return .{
@@ -1946,7 +1946,7 @@ test "iterator hash map" {
    var reset_map = AutoArrayHashMap(i32, i32).init(std.testing.allocator);
    defer reset_map.deinit();
-    // test ensureCapacity with a 0 parameter
+    // test ensureTotalCapacity with a 0 parameter
    try reset_map.ensureTotalCapacity(0);
    try reset_map.putNoClobber(0, 11);
diff --git a/lib/std/child_process.zig b/lib/std/child_process.zig
index 530a9b68a6..11b95a6e36 100644
--- a/lib/std/child_process.zig
+++ b/lib/std/child_process.zig
@@ -195,7 +195,7 @@ pub const ChildProcess = struct {
        };
        var dead_fds: usize = 0;
-        // We ask for ensureCapacity with this much extra space. This has more of an
+        // We ask for ensureTotalCapacity with this much extra space. This has more of an
        // effect on small reads because once the reads start to get larger the amount
        // of space an ArrayList will allocate grows exponentially.
        const bump_amt = 512;
@@ -215,7 +215,7 @@ pub const ChildProcess = struct {
            if (poll_fds[0].revents & os.POLL.IN != 0) {
                // stdout is ready.
                const new_capacity = std.math.min(stdout.items.len + bump_amt, max_output_bytes);
-                try stdout.ensureCapacity(new_capacity);
+                try stdout.ensureTotalCapacity(new_capacity);
                const buf = stdout.unusedCapacitySlice();
                if (buf.len == 0) return error.StdoutStreamTooLong;
                const nread = try os.read(poll_fds[0].fd, buf);
@@ -230,7 +230,7 @@ pub const ChildProcess = struct {
            if (poll_fds[1].revents & os.POLL.IN != 0) {
                // stderr is ready.
                const new_capacity = std.math.min(stderr.items.len + bump_amt, max_output_bytes);
-                try stderr.ensureCapacity(new_capacity);
+                try stderr.ensureTotalCapacity(new_capacity);
                const buf = stderr.unusedCapacitySlice();
                if (buf.len == 0) return error.StderrStreamTooLong;
                const nread = try os.read(poll_fds[1].fd, buf);
@@ -276,7 +276,7 @@ pub const ChildProcess = struct {
        // Windows Async IO requires an initial call to ReadFile before waiting on the handle
        for ([_]u1{ 0, 1 }) |i| {
-            try outs[i].ensureCapacity(bump_amt);
+            try outs[i].ensureTotalCapacity(bump_amt);
            const buf = outs[i].unusedCapacitySlice();
            _ = windows.kernel32.ReadFile(handles[i], buf.ptr, math.cast(u32, buf.len) catch maxInt(u32), null, &overlapped[i]);
            wait_objects[wait_object_count] = handles[i];
@@ -318,7 +318,7 @@ pub const ChildProcess = struct {
                outs[i].items.len += read_bytes;
                const new_capacity = std.math.min(outs[i].items.len + bump_amt, max_output_bytes);
-                try outs[i].ensureCapacity(new_capacity);
+                try outs[i].ensureTotalCapacity(new_capacity);
                const buf = outs[i].unusedCapacitySlice();
                if (buf.len == 0) return if (i == 0) error.StdoutStreamTooLong else error.StderrStreamTooLong;
                _ = windows.kernel32.ReadFile(handles[i], buf.ptr, math.cast(u32, buf.len) catch maxInt(u32), null, &overlapped[i]);
diff --git a/lib/std/coff.zig b/lib/std/coff.zig
index 6caf214728..c8b0a44044 100644
--- a/lib/std/coff.zig
+++ b/lib/std/coff.zig
@@ -277,7 +277,7 @@ pub const Coff = struct {
        if (self.sections.items.len == self.coff_header.number_of_sections)
            return;
-        try self.sections.ensureCapacity(self.coff_header.number_of_sections);
+        try self.sections.ensureTotalCapacity(self.coff_header.number_of_sections);
        const in = self.in_file.reader();
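
The child_process.zig hunks above show the shape this rename is meant to clarify: `ensureTotalCapacity` names the desired total size, so capping the request against a maximum composes cleanly with `unusedCapacitySlice`. A minimal sketch of that read loop in isolation; `readAllBounded`, `reader`, and `max_len` are hypothetical names, while the ArrayList calls are the ones used above:

    const std = @import("std");

    /// Reads from `reader` into `list` in bumps of 512 bytes, refusing to
    /// let the buffer grow beyond `max_len` (the shape of the polling loop above).
    fn readAllBounded(list: *std.ArrayList(u8), reader: anytype, max_len: usize) !void {
        const bump_amt = 512;
        while (true) {
            // A request for total capacity, capped at the output limit.
            try list.ensureTotalCapacity(std.math.min(list.items.len + bump_amt, max_len));
            const buf = list.unusedCapacitySlice();
            if (buf.len == 0) return error.StreamTooLong;
            const n = try reader.read(buf);
            if (n == 0) return; // EOF
            list.items.len += n;
        }
    }
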
diff --git a/lib/std/hash_map.zig b/lib/std/hash_map.zig
index 644429f871..a75178d428 100644
--- a/lib/std/hash_map.zig
+++ b/lib/std/hash_map.zig
@@ -1568,11 +1568,11 @@ test "std.hash_map basic usage" {
    try expectEqual(total, sum);
 }
-test "std.hash_map ensureCapacity" {
+test "std.hash_map ensureTotalCapacity" {
    var map = AutoHashMap(i32, i32).init(std.testing.allocator);
    defer map.deinit();
-    try map.ensureCapacity(20);
+    try map.ensureTotalCapacity(20);
    const initial_capacity = map.capacity();
    try testing.expect(initial_capacity >= 20);
    var i: i32 = 0;
@@ -1583,13 +1583,13 @@ test "std.hash_map ensureCapacity" {
    try testing.expect(initial_capacity == map.capacity());
 }
-test "std.hash_map ensureCapacity with tombstones" {
+test "std.hash_map ensureUnusedCapacity with tombstones" {
    var map = AutoHashMap(i32, i32).init(std.testing.allocator);
    defer map.deinit();
    var i: i32 = 0;
    while (i < 100) : (i += 1) {
-        try map.ensureCapacity(@intCast(u32, map.count() + 1));
+        try map.ensureUnusedCapacity(1);
        map.putAssumeCapacity(i, i);
        // Remove to create tombstones that still count as load in the hashmap.
        _ = map.remove(i);
@@ -1669,7 +1669,7 @@ test "std.hash_map clone" {
    try expectEqual(b.get(3).?, 3);
 }
-test "std.hash_map ensureCapacity with existing elements" {
+test "std.hash_map ensureTotalCapacity with existing elements" {
    var map = AutoHashMap(u32, u32).init(std.testing.allocator);
    defer map.deinit();
    try expectEqual(map.count(), 1);
    try expectEqual(map.capacity(), @TypeOf(map).Unmanaged.minimal_capacity);
-    try map.ensureCapacity(65);
+    try map.ensureTotalCapacity(65);
    try expectEqual(map.count(), 1);
    try expectEqual(map.capacity(), 128);
 }
-test "std.hash_map ensureCapacity satisfies max load factor" {
+test "std.hash_map ensureTotalCapacity satisfies max load factor" {
    var map = AutoHashMap(u32, u32).init(std.testing.allocator);
    defer map.deinit();
-    try map.ensureCapacity(127);
+    try map.ensureTotalCapacity(127);
    try expectEqual(map.capacity(), 256);
 }
@@ -1870,7 +1870,7 @@ test "std.hash_map putAssumeCapacity" {
    var map = AutoHashMap(u32, u32).init(std.testing.allocator);
    defer map.deinit();
-    try map.ensureCapacity(20);
+    try map.ensureTotalCapacity(20);
    var i: u32 = 0;
    while (i < 20) : (i += 1) {
        map.putAssumeCapacityNoClobber(i, i);
diff --git a/lib/std/heap/general_purpose_allocator.zig b/lib/std/heap/general_purpose_allocator.zig
index d51c349be2..3dd6b9db3d 100644
--- a/lib/std/heap/general_purpose_allocator.zig
+++ b/lib/std/heap/general_purpose_allocator.zig
@@ -746,10 +746,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
            const new_aligned_size = math.max(len, ptr_align);
            if (new_aligned_size > largest_bucket_object_size) {
-                try self.large_allocations.ensureCapacity(
-                    self.backing_allocator,
-                    self.large_allocations.count() + 1,
-                );
+                try self.large_allocations.ensureUnusedCapacity(self.backing_allocator, 1);
                const slice = try self.backing_allocator.allocFn(self.backing_allocator, len, ptr_align, len_align, ret_addr);
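
The general_purpose_allocator.zig hunk is the idiomatic use of the new API: reserve one unused slot before the fallible backing allocation, so the bookkeeping insert afterwards cannot fail and nothing needs to be unwound. The tombstone test above exercises the same pairing; reduced to its core:

    const std = @import("std");

    test "reserve one slot, then insert infallibly" {
        var map = std.AutoHashMap(u32, u32).init(std.testing.allocator);
        defer map.deinit();
        // Replaces the old `ensureCapacity(map.count() + 1)` arithmetic:
        // ask for one slot beyond what is currently occupied.
        try map.ensureUnusedCapacity(1);
        map.putAssumeCapacity(1, 42); // cannot fail; capacity was guaranteed
    }
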
diff --git a/lib/std/io/reader.zig b/lib/std/io/reader.zig
index 3e8eb8ec24..f8e7650bc4 100644
--- a/lib/std/io/reader.zig
+++ b/lib/std/io/reader.zig
@@ -61,7 +61,7 @@ pub fn Reader(
            array_list: *std.ArrayListAligned(u8, alignment),
            max_append_size: usize,
        ) !void {
-            try array_list.ensureCapacity(math.min(max_append_size, 4096));
+            try array_list.ensureTotalCapacity(math.min(max_append_size, 4096));
            const original_len = array_list.items.len;
            var start_index: usize = original_len;
            while (true) {
@@ -81,7 +81,7 @@ pub fn Reader(
                }
                // This will trigger ArrayList to expand superlinearly at whatever its growth rate is.
-                try array_list.ensureCapacity(start_index + 1);
+                try array_list.ensureTotalCapacity(start_index + 1);
            }
        }
diff --git a/lib/std/json.zig b/lib/std/json.zig
index e8f9d9d395..acae9e7d1f 100644
--- a/lib/std/json.zig
+++ b/lib/std/json.zig
@@ -1838,7 +1838,7 @@ fn parseInternal(
                    else => {},
                }
-                try arraylist.ensureCapacity(arraylist.items.len + 1);
+                try arraylist.ensureUnusedCapacity(1);
                const v = try parseInternal(ptrInfo.child, tok, tokens, options);
                arraylist.appendAssumeCapacity(v);
            }
diff --git a/lib/std/multi_array_list.zig b/lib/std/multi_array_list.zig
index 693937b399..243a7413fa 100644
--- a/lib/std/multi_array_list.zig
+++ b/lib/std/multi_array_list.zig
@@ -189,7 +189,7 @@ pub fn MultiArrayList(comptime S: type) type {
        /// sets the given index to the specified element. May reallocate
        /// and invalidate iterators.
        pub fn insert(self: *Self, gpa: *Allocator, index: usize, elem: S) !void {
-            try self.ensureCapacity(gpa, self.len + 1);
+            try self.ensureUnusedCapacity(gpa, 1);
            self.insertAssumeCapacity(index, elem);
        }
@@ -376,7 +376,7 @@ pub fn MultiArrayList(comptime S: type) type {
        pub fn clone(self: Self, gpa: *Allocator) !Self {
            var result = Self{};
            errdefer result.deinit(gpa);
-            try result.ensureCapacity(gpa, self.len);
+            try result.ensureTotalCapacity(gpa, self.len);
            result.len = self.len;
            const self_slice = self.slice();
            const result_slice = result.slice();
diff --git a/lib/std/unicode.zig b/lib/std/unicode.zig
index 25f1ba1b48..5da7686d66 100644
--- a/lib/std/unicode.zig
+++ b/lib/std/unicode.zig
@@ -668,7 +668,7 @@ pub fn utf8ToUtf16LeWithNull(allocator: *mem.Allocator, utf8: []const u8) ![:0]u
    var result = std.ArrayList(u16).init(allocator);
    errdefer result.deinit();
    // optimistically guess that it will not require surrogate pairs
-    try result.ensureCapacity(utf8.len + 1);
+    try result.ensureTotalCapacity(utf8.len + 1);
    const view = try Utf8View.init(utf8);
    var it = view.iterator();
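
The unicode.zig hunk relies on `ensureTotalCapacity` being a floor rather than a ceiling: the optimistic guess may be wrong, and plain `append` will still grow the list past it. Only the `...AssumeCapacity` variants require the reservation to actually cover the write. A small sketch of that distinction, with hypothetical sizes:

    const std = @import("std");

    test "ensureTotalCapacity is an optimization hint, not a limit" {
        var list = std.ArrayList(u16).init(std.testing.allocator);
        defer list.deinit();
        try list.ensureTotalCapacity(8); // optimistic guess, as in utf8ToUtf16LeWithNull
        var i: u16 = 0;
        while (i < 100) : (i += 1) try list.append(i); // grows past the guess safely
        try std.testing.expect(list.items.len == 100);
    }
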
diff --git a/lib/std/zig/parse.zig b/lib/std/zig/parse.zig
index 842847a295..5bd5a6dfeb 100644
--- a/lib/std/zig/parse.zig
+++ b/lib/std/zig/parse.zig
@@ -17,7 +17,7 @@ pub fn parse(gpa: *Allocator, source: [:0]const u8) Allocator.Error!Ast {
    // Empirically, the zig std lib has an 8:1 ratio of source bytes to token count.
    const estimated_token_count = source.len / 8;
-    try tokens.ensureCapacity(gpa, estimated_token_count);
+    try tokens.ensureTotalCapacity(gpa, estimated_token_count);
    var tokenizer = std.zig.Tokenizer.init(source);
    while (true) {
@@ -48,7 +48,7 @@ pub fn parse(gpa: *Allocator, source: [:0]const u8) Allocator.Error!Ast {
    // Empirically, Zig source code has a 2:1 ratio of tokens to AST nodes.
    // Make sure at least 1 so we can use appendAssumeCapacity on the root node below.
    const estimated_node_count = (tokens.len + 2) / 2;
-    try parser.nodes.ensureCapacity(gpa, estimated_node_count);
+    try parser.nodes.ensureTotalCapacity(gpa, estimated_node_count);
    // Root node must be index 0.
    // Root <- skip ContainerMembers eof
@@ -138,7 +138,7 @@ const Parser = struct {
    fn addExtra(p: *Parser, extra: anytype) Allocator.Error!Node.Index {
        const fields = std.meta.fields(@TypeOf(extra));
-        try p.extra_data.ensureCapacity(p.gpa, p.extra_data.items.len + fields.len);
+        try p.extra_data.ensureUnusedCapacity(p.gpa, fields.len);
        const result = @intCast(u32, p.extra_data.items.len);
        inline for (fields) |field| {
            comptime assert(field.field_type == Node.Index);
diff --git a/lib/std/zig/string_literal.zig b/lib/std/zig/string_literal.zig
index 64242038d3..2a38195b1f 100644
--- a/lib/std/zig/string_literal.zig
+++ b/lib/std/zig/string_literal.zig
@@ -29,7 +29,7 @@ pub fn parseAppend(buf: *std.ArrayList(u8), bytes: []const u8) error{OutOfMemory
    const slice = bytes[1..];
    const prev_len = buf.items.len;
-    try buf.ensureCapacity(prev_len + slice.len - 1);
+    try buf.ensureUnusedCapacity(slice.len - 1);
    errdefer buf.shrinkRetainingCapacity(prev_len);
    const State = enum {
diff --git a/src/AstGen.zig b/src/AstGen.zig
index b176136ba4..4bdfa1811c 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -3515,7 +3515,7 @@ fn structDeclInner(
    defer wip_decls.deinit(gpa);
    // We don't know which members are fields until we iterate, so cannot do
-    // an accurate ensureCapacity yet.
+    // an accurate ensureTotalCapacity yet.
    var fields_data = ArrayListUnmanaged(u32){};
    defer fields_data.deinit(gpa);
@@ -3791,7 +3791,7 @@ fn unionDeclInner(
    defer wip_decls.deinit(gpa);
    // We don't know which members are fields until we iterate, so cannot do
-    // an accurate ensureCapacity yet.
+    // an accurate ensureTotalCapacity yet.
    var fields_data = ArrayListUnmanaged(u32){};
    defer fields_data.deinit(gpa);
diff --git a/src/Cache.zig b/src/Cache.zig
index 28401c3d18..8a3b801e71 100644
--- a/src/Cache.zig
+++ b/src/Cache.zig
@@ -210,7 +210,7 @@ pub const Manifest = struct {
    pub fn addFile(self: *Manifest, file_path: []const u8, max_file_size: ?usize) !usize {
        assert(self.manifest_file == null);
-        try self.files.ensureCapacity(self.cache.gpa, self.files.items.len + 1);
+        try self.files.ensureUnusedCapacity(self.cache.gpa, 1);
        const resolved_path = try fs.path.resolve(self.cache.gpa, &[_][]const u8{file_path});
        const idx = self.files.items.len;
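
Cache.zig's `addFile` is the clearest motivation for taking the capacity guarantee before any other allocation happens: once `resolved_path` is owned, a failing map insert would have to free it, so the slot is secured first. The same ordering in a self-contained sketch; `Registry` and its fields are hypothetical:

    const std = @import("std");

    const Registry = struct {
        gpa: *std.mem.Allocator,
        names: std.ArrayListUnmanaged([]u8) = .{},

        fn add(self: *Registry, name: []const u8) !usize {
            // Secure the slot first; after this point only the dupe can fail,
            // and at that point there is nothing extra to clean up.
            try self.names.ensureUnusedCapacity(self.gpa, 1);
            const owned = try self.gpa.dupe(u8, name);
            self.names.appendAssumeCapacity(owned);
            return self.names.items.len - 1;
        }
    };
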
diff --git a/src/Compilation.zig b/src/Compilation.zig
index 8edbb2dd73..c1dfe91dc2 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -1097,7 +1097,7 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
            if (feature.llvm_name) |llvm_name| {
                const plus_or_minus = "-+"[@boolToInt(is_enabled)];
-                try buf.ensureCapacity(buf.items.len + 2 + llvm_name.len);
+                try buf.ensureUnusedCapacity(2 + llvm_name.len);
                buf.appendAssumeCapacity(plus_or_minus);
                buf.appendSliceAssumeCapacity(llvm_name);
                buf.appendSliceAssumeCapacity(",");
@@ -1347,7 +1347,7 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
        var system_libs: std.StringArrayHashMapUnmanaged(void) = .{};
        errdefer system_libs.deinit(gpa);
-        try system_libs.ensureCapacity(gpa, options.system_libs.len);
+        try system_libs.ensureTotalCapacity(gpa, options.system_libs.len);
        for (options.system_libs) |lib_name| {
            system_libs.putAssumeCapacity(lib_name, {});
        }
@@ -1483,7 +1483,7 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
        errdefer comp.astgen_wait_group.deinit();
        // Add a `CObject` for each `c_source_files`.
-        try comp.c_object_table.ensureCapacity(gpa, options.c_source_files.len);
+        try comp.c_object_table.ensureTotalCapacity(gpa, options.c_source_files.len);
        for (options.c_source_files) |c_source_file| {
            const c_object = try gpa.create(CObject);
            errdefer gpa.destroy(c_object);
@@ -3084,7 +3084,7 @@ pub fn addCCArgs(
            // It would be really nice if there was a more compact way to communicate this info to Clang.
            const all_features_list = target.cpu.arch.allFeaturesList();
-            try argv.ensureCapacity(argv.items.len + all_features_list.len * 4);
+            try argv.ensureUnusedCapacity(all_features_list.len * 4);
            for (all_features_list) |feature, index_usize| {
                const index = @intCast(std.Target.Cpu.Feature.Set.Index, index_usize);
                const is_enabled = target.cpu.features.isEnabled(index);
@@ -3334,7 +3334,7 @@ fn failCObjWithOwnedErrorMsg(
    defer lock.release();
    {
        errdefer err_msg.destroy(comp.gpa);
-        try comp.failed_c_objects.ensureCapacity(comp.gpa, comp.failed_c_objects.count() + 1);
+        try comp.failed_c_objects.ensureUnusedCapacity(comp.gpa, 1);
    }
    comp.failed_c_objects.putAssumeCapacityNoClobber(c_object, err_msg);
@@ -3585,7 +3585,7 @@ fn detectLibCIncludeDirs(
 fn detectLibCFromLibCInstallation(arena: *Allocator, target: Target, lci: *const LibCInstallation) !LibCDirs {
    var list = std.ArrayList([]const u8).init(arena);
-    try list.ensureCapacity(4);
+    try list.ensureTotalCapacity(4);
    list.appendAssumeCapacity(lci.include_dir.?);
@@ -3692,7 +3692,7 @@ fn setMiscFailure(
    comptime format: []const u8,
    args: anytype,
 ) Allocator.Error!void {
-    try comp.misc_failures.ensureCapacity(comp.gpa, comp.misc_failures.count() + 1);
+    try comp.misc_failures.ensureUnusedCapacity(comp.gpa, 1);
    const msg = try std.fmt.allocPrint(comp.gpa, format, args);
    comp.misc_failures.putAssumeCapacityNoClobber(tag, .{ .msg = msg });
 }
@@ -4027,7 +4027,7 @@ fn buildOutputFromZig(
    defer if (!keep_errors) errors.deinit(sub_compilation.gpa);
    if (errors.list.len != 0) {
-        try comp.misc_failures.ensureCapacity(comp.gpa, comp.misc_failures.count() + 1);
+        try comp.misc_failures.ensureUnusedCapacity(comp.gpa, 1);
        comp.misc_failures.putAssumeCapacityNoClobber(misc_task_tag, .{
            .msg = try std.fmt.allocPrint(comp.gpa, "sub-compilation of {s} failed", .{
                @tagName(misc_task_tag),
@@ -4459,7 +4459,7 @@ pub fn build_crt_file(
    try sub_compilation.updateSubCompilation();
-    try comp.crt_files.ensureCapacity(comp.gpa, comp.crt_files.count() + 1);
+    try comp.crt_files.ensureUnusedCapacity(comp.gpa, 1);
    comp.crt_files.putAssumeCapacityNoClobber(basename, .{
        .full_object_path = try sub_compilation.bin_file.options.emit.?.directory.join(comp.gpa, &[_][]const u8{
diff --git a/src/Liveness.zig b/src/Liveness.zig
index 599507500e..6e6a3ccf1f 100644
--- a/src/Liveness.zig
+++ b/src/Liveness.zig
@@ -454,7 +454,7 @@ fn analyzeInst(
            }
            // Now we have to correctly populate new_set.
            if (new_set) |ns| {
-                try ns.ensureCapacity(gpa, @intCast(u32, ns.count() + then_table.count() + else_table.count()));
+                try ns.ensureUnusedCapacity(gpa, @intCast(u32, then_table.count() + else_table.count()));
                var it = then_table.keyIterator();
                while (it.next()) |key| {
                    _ = ns.putAssumeCapacity(key.*, {});
                }
diff --git a/src/Module.zig b/src/Module.zig
index 473c27e338..add0562d93 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -3504,7 +3504,7 @@ pub fn scanNamespace(
    const zir = namespace.file_scope.zir;
    try mod.comp.work_queue.ensureUnusedCapacity(decls_len);
-    try namespace.decls.ensureCapacity(gpa, decls_len);
+    try namespace.decls.ensureTotalCapacity(gpa, decls_len);
    const bit_bags_count = std.math.divCeil(usize, decls_len, 8) catch unreachable;
    var extra_index = extra_start + bit_bags_count;
@@ -4071,7 +4071,7 @@ pub fn getErrorValue(mod: *Module, name: []const u8) !std.StringHashMapUnmanaged
    }
    errdefer assert(mod.global_error_set.remove(name));
-    try mod.error_name_list.ensureCapacity(mod.gpa, mod.error_name_list.items.len + 1);
+    try mod.error_name_list.ensureUnusedCapacity(mod.gpa, 1);
    gop.key_ptr.* = try mod.gpa.dupe(u8, name);
    gop.value_ptr.* = @intCast(ErrorInt, mod.error_name_list.items.len);
    mod.error_name_list.appendAssumeCapacity(gop.key_ptr.*);
diff --git a/src/Package.zig b/src/Package.zig
index 1f19c1d43a..3814f0eb95 100644
--- a/src/Package.zig
+++ b/src/Package.zig
@@ -111,7 +111,7 @@ pub fn destroy(pkg: *Package, gpa: *Allocator) void {
 }
 pub fn add(pkg: *Package, gpa: *Allocator, name: []const u8, package: *Package) !void {
-    try pkg.table.ensureCapacity(gpa, pkg.table.count() + 1);
+    try pkg.table.ensureUnusedCapacity(gpa, 1);
    const name_dupe = try mem.dupe(gpa, u8, name);
    pkg.table.putAssumeCapacityNoClobber(name_dupe, package);
 }
diff --git a/src/Sema.zig b/src/Sema.zig
index 645e68c6ef..80cab6d00f 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -1130,7 +1130,7 @@ fn zirEnumDecl(
    const body_end = extra_index;
    extra_index += bit_bags_count;
-    try enum_obj.fields.ensureCapacity(&new_decl_arena.allocator, fields_len);
+    try enum_obj.fields.ensureTotalCapacity(&new_decl_arena.allocator, fields_len);
    const any_values = for (sema.code.extra[body_end..][0..bit_bags_count]) |bag| {
        if (bag != 0) break true;
    } else false;
@@ -3484,7 +3484,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Com
        },
        .error_set => {
            const lhs_set = lhs_ty.castTag(.error_set).?.data;
-            try set.ensureCapacity(sema.gpa, set.count() + lhs_set.names_len);
+            try set.ensureUnusedCapacity(sema.gpa, lhs_set.names_len);
            for (lhs_set.names_ptr[0..lhs_set.names_len]) |name| {
                set.putAssumeCapacityNoClobber(name, {});
            }
@@ -3498,7 +3498,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Com
        },
        .error_set => {
            const rhs_set = rhs_ty.castTag(.error_set).?.data;
-            try set.ensureCapacity(sema.gpa, set.count() + rhs_set.names_len);
+            try set.ensureUnusedCapacity(sema.gpa, rhs_set.names_len);
            for (rhs_set.names_ptr[0..rhs_set.names_len]) |name| {
                set.putAssumeCapacity(name, {});
            }
@@ -10361,7 +10361,7 @@ fn analyzeUnionFields(
    var decl_arena = union_obj.owner_decl.value_arena.?.promote(gpa);
    defer union_obj.owner_decl.value_arena.?.* = decl_arena.state;
-    try union_obj.fields.ensureCapacity(&decl_arena.allocator, fields_len);
+    try union_obj.fields.ensureTotalCapacity(&decl_arena.allocator, fields_len);
    if (body.len != 0) {
        _ = try sema.analyzeBody(block, body);
    }
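
A detail worth noting in the zirMergeErrorSets hunks: each loop reserves exactly the slice length it is about to insert, and the first set uses `putAssumeCapacityNoClobber` (its names are unique) while the second uses `putAssumeCapacity` (it may repeat names from the first). The same shape in miniature, with hypothetical error names:

    const std = @import("std");

    test "error set union with exact reservations" {
        var set = std.StringArrayHashMap(void).init(std.testing.allocator);
        defer set.deinit();
        const lhs = [_][]const u8{ "OutOfMemory", "AccessDenied" };
        const rhs = [_][]const u8{ "AccessDenied", "Unexpected" };

        try set.ensureUnusedCapacity(lhs.len);
        for (lhs) |name| set.putAssumeCapacityNoClobber(name, {});

        // rhs may overlap lhs, so the clobber-tolerant variant is used.
        try set.ensureUnusedCapacity(rhs.len);
        for (rhs) |name| set.putAssumeCapacity(name, {});

        try std.testing.expect(set.count() == 3);
    }
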
diff --git a/src/codegen.zig b/src/codegen.zig
index 08ee358bff..e0047de1f7 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -141,7 +141,7 @@ pub fn generateSymbol(
            // TODO populate .debug_info for the array
            if (typed_value.val.castTag(.bytes)) |payload| {
                if (typed_value.ty.sentinel()) |sentinel| {
-                    try code.ensureCapacity(code.items.len + payload.data.len + 1);
+                    try code.ensureUnusedCapacity(payload.data.len + 1);
                    code.appendSliceAssumeCapacity(payload.data);
                    switch (try generateSymbol(bin_file, src_loc, .{
                        .ty = typed_value.ty.elemType(),
@@ -568,7 +568,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
        fn gen(self: *Self) !void {
            switch (arch) {
                .x86_64 => {
-                    try self.code.ensureCapacity(self.code.items.len + 11);
+                    try self.code.ensureUnusedCapacity(11);
                    const cc = self.fn_type.fnCallingConvention();
                    if (cc != .Naked) {
@@ -607,7 +607,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                        // Important to be after the possible self.code.items.len -= 5 above.
                        try self.dbgSetEpilogueBegin();
-                        try self.code.ensureCapacity(self.code.items.len + 9);
+                        try self.code.ensureUnusedCapacity(9);
                        // add rsp, x
                        if (aligned_stack_end > math.maxInt(i8)) {
                            // example: 48 81 c4 ff ff ff 7f  add rsp,0x7fffffff
@@ -1960,7 +1960,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
            //
            // TODO: make this algorithm less bad
-            try self.code.ensureCapacity(self.code.items.len + 8);
+            try self.code.ensureUnusedCapacity(8);
            const lhs = try self.resolveInst(op_lhs);
            const rhs = try self.resolveInst(op_rhs);
@@ -2447,13 +2447,13 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                .register => |reg| {
                    switch (self.debug_output) {
                        .dwarf => |dbg_out| {
-                            try dbg_out.dbg_info.ensureCapacity(dbg_out.dbg_info.items.len + 3);
+                            try dbg_out.dbg_info.ensureUnusedCapacity(3);
                            dbg_out.dbg_info.appendAssumeCapacity(link.File.Elf.abbrev_parameter);
                            dbg_out.dbg_info.appendSliceAssumeCapacity(&[2]u8{ // DW.AT.location, DW.FORM.exprloc
                                1, // ULEB128 dwarf expression length
                                reg.dwarfLocOp(),
                            });
-                            try dbg_out.dbg_info.ensureCapacity(dbg_out.dbg_info.items.len + 5 + name_with_null.len);
+                            try dbg_out.dbg_info.ensureUnusedCapacity(5 + name_with_null.len);
                            try self.addDbgInfoTypeReloc(ty); // DW.AT.type, DW.FORM.ref4
                            dbg_out.dbg_info.appendSliceAssumeCapacity(name_with_null); // DW.AT.name, DW.FORM.string
                        },
@@ -2484,7 +2484,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                            try dbg_out.dbg_info.append(DW.OP.breg11);
                            try leb128.writeILEB128(dbg_out.dbg_info.writer(), adjusted_stack_offset);
-                            try dbg_out.dbg_info.ensureCapacity(dbg_out.dbg_info.items.len + 5 + name_with_null.len);
+                            try dbg_out.dbg_info.ensureUnusedCapacity(5 + name_with_null.len);
                            try self.addDbgInfoTypeReloc(ty); // DW.AT.type, DW.FORM.ref4
                            dbg_out.dbg_info.appendSliceAssumeCapacity(name_with_null); // DW.AT.name, DW.FORM.string
                        },
@@ -2626,7 +2626,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                        unreachable;
                    // ff 14 25 xx xx xx xx    call [addr]
-                    try self.code.ensureCapacity(self.code.items.len + 7);
+                    try self.code.ensureUnusedCapacity(7);
                    self.code.appendSliceAssumeCapacity(&[3]u8{ 0xff, 0x14, 0x25 });
                    mem.writeIntLittle(u32, self.code.addManyAsArrayAssumeCapacity(4), got_addr);
                } else if (func_value.castTag(.extern_fn)) |_| {
@@ -2839,7 +2839,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                        .memory = func.owner_decl.link.macho.local_sym_index,
                    });
                    // callq *%rax
-                    try self.code.ensureCapacity(self.code.items.len + 2);
+                    try self.code.ensureUnusedCapacity(2);
                    self.code.appendSliceAssumeCapacity(&[2]u8{ 0xff, 0xd0 });
                },
                .aarch64 => {
@@ -2858,7 +2858,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
            switch (arch) {
                .x86_64 => {
                    // callq
-                    try self.code.ensureCapacity(self.code.items.len + 5);
+                    try self.code.ensureUnusedCapacity(5);
                    self.code.appendSliceAssumeCapacity(&[5]u8{ 0xe8, 0x0, 0x0, 0x0, 0x0 });
                    break :blk @intCast(u32, self.code.items.len) - 4;
                },
@@ -2932,7 +2932,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                    const got_addr = p9.bases.data;
                    const got_index = func_payload.data.owner_decl.link.plan9.got_index.?;
                    // ff 14 25 xx xx xx xx    call [addr]
-                    try self.code.ensureCapacity(self.code.items.len + 7);
+                    try self.code.ensureUnusedCapacity(7);
                    self.code.appendSliceAssumeCapacity(&[3]u8{ 0xff, 0x14, 0x25 });
                    const fn_got_addr = got_addr + got_index * ptr_bytes;
                    mem.writeIntLittle(u32, self.code.addManyAsArrayAssumeCapacity(4), @intCast(u32, fn_got_addr));
@@ -3075,7 +3075,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
        const rhs = try self.resolveInst(bin_op.rhs);
        const result: MCValue = switch (arch) {
            .x86_64 => result: {
-                try self.code.ensureCapacity(self.code.items.len + 8);
+                try self.code.ensureUnusedCapacity(8);
                // There are 2 operands, destination and source.
                // Either one, but not both, can be a memory operand.
@@ -3159,7 +3159,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
        const reloc: Reloc = switch (arch) {
            .i386, .x86_64 => reloc: {
-                try self.code.ensureCapacity(self.code.items.len + 6);
+                try self.code.ensureUnusedCapacity(6);
                const opcode: u8 = switch (cond) {
                    .compare_flags_signed => |cmp_op| blk: {
@@ -3519,7 +3519,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
        fn jump(self: *Self, index: usize) !void {
            switch (arch) {
                .i386, .x86_64 => {
-                    try self.code.ensureCapacity(self.code.items.len + 5);
+                    try self.code.ensureUnusedCapacity(5);
                    if (math.cast(i8, @intCast(i32, index) - (@intCast(i32, self.code.items.len + 2)))) |delta| {
                        self.code.appendAssumeCapacity(0xeb); // jmp rel8
                        self.code.appendAssumeCapacity(@bitCast(u8, delta));
@@ -3657,7 +3657,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
        const block_data = self.blocks.getPtr(block).?;
        // Emit a jump with a relocation. It will be patched up after the block ends.
-        try block_data.relocs.ensureCapacity(self.gpa, block_data.relocs.items.len + 1);
+        try block_data.relocs.ensureUnusedCapacity(self.gpa, 1);
        switch (arch) {
            .i386, .x86_64 => {
@@ -4041,7 +4041,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                    if (adj_off > 128) {
                        return self.fail("TODO implement set stack variable with large stack offset", .{});
                    }
-                    try self.code.ensureCapacity(self.code.items.len + 8);
+                    try self.code.ensureUnusedCapacity(8);
                    switch (abi_size) {
                        1 => {
                            return self.fail("TODO implement set abi_size=1 stack variable with immediate", .{});
@@ -4067,7 +4067,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                            // 64 bit write to memory would take two mov's anyways so we
                            // insted just use two 32 bit writes to avoid register allocation
-                            try self.code.ensureCapacity(self.code.items.len + 14);
+                            try self.code.ensureUnusedCapacity(14);
                            var buf: [8]u8 = undefined;
                            mem.writeIntLittle(u64, &buf, x_big);
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index 7429e3c3b0..f5796b06bc 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -629,7 +629,7 @@ pub const DeclGen = struct {
        const params = decl.ty.fnParamLen();
        var i: usize = 0;
-        try self.args.ensureCapacity(params);
+        try self.args.ensureTotalCapacity(params);
        while (i < params) : (i += 1) {
            const param_type_id = self.spv.types.get(decl.ty.fnParamType(i)).?;
            const arg_result_id = self.spv.allocResultId();
diff --git a/src/libcxx.zig b/src/libcxx.zig
index 0b29eda3ca..5ba6e47ece 100644
--- a/src/libcxx.zig
+++ b/src/libcxx.zig
@@ -108,7 +108,7 @@ pub fn buildLibCXX(comp: *Compilation) !void {
    const cxxabi_include_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libcxxabi", "include" });
    const cxx_include_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libcxx", "include" });
    var c_source_files = std.ArrayList(Compilation.CSourceFile).init(arena);
-    try c_source_files.ensureCapacity(libcxx_files.len);
+    try c_source_files.ensureTotalCapacity(libcxx_files.len);
    for (libcxx_files) |cxx_src| {
        var cflags = std.ArrayList([]const u8).init(arena);
@@ -246,7 +246,7 @@ pub fn buildLibCXXABI(comp: *Compilation) !void {
    const cxxabi_include_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libcxxabi", "include" });
    const cxx_include_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libcxx", "include" });
    var c_source_files = std.ArrayList(Compilation.CSourceFile).init(arena);
-    try c_source_files.ensureCapacity(libcxxabi_files.len);
+    try c_source_files.ensureTotalCapacity(libcxxabi_files.len);
    for (libcxxabi_files) |cxxabi_src| {
        var cflags = std.ArrayList([]const u8).init(arena);
diff --git a/src/libtsan.zig b/src/libtsan.zig
index 5b91dd5a38..2f288df9c2 100644
--- a/src/libtsan.zig
+++ b/src/libtsan.zig
@@ -34,7 +34,7 @@ pub fn buildTsan(comp: *Compilation) !void {
    };
    var c_source_files = std.ArrayList(Compilation.CSourceFile).init(arena);
-    try c_source_files.ensureCapacity(c_source_files.items.len + tsan_sources.len);
+    try c_source_files.ensureUnusedCapacity(tsan_sources.len);
    const tsan_include_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{"tsan"});
    for (tsan_sources) |tsan_src| {
@@ -58,7 +58,7 @@ pub fn buildTsan(comp: *Compilation) !void {
        &darwin_tsan_sources
    else
        &unix_tsan_sources;
-    try c_source_files.ensureCapacity(c_source_files.items.len + platform_tsan_sources.len);
+    try c_source_files.ensureUnusedCapacity(platform_tsan_sources.len);
    for (platform_tsan_sources) |tsan_src| {
        var cflags = std.ArrayList([]const u8).init(arena);
@@ -96,7 +96,7 @@ pub fn buildTsan(comp: *Compilation) !void {
        });
    }
-    try c_source_files.ensureCapacity(c_source_files.items.len + sanitizer_common_sources.len);
+    try c_source_files.ensureUnusedCapacity(sanitizer_common_sources.len);
    const sanitizer_common_include_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
        "tsan", "sanitizer_common",
    });
@@ -123,7 +123,7 @@ pub fn buildTsan(comp: *Compilation) !void {
        &sanitizer_libcdep_sources
    else
        &sanitizer_nolibc_sources;
-    try c_source_files.ensureCapacity(c_source_files.items.len + to_c_or_not_to_c_sources.len);
+    try c_source_files.ensureUnusedCapacity(to_c_or_not_to_c_sources.len);
    for (to_c_or_not_to_c_sources) |c_src| {
        var cflags = std.ArrayList([]const u8).init(arena);
@@ -143,7 +143,7 @@ pub fn buildTsan(comp: *Compilation) !void {
        });
    }
-    try c_source_files.ensureCapacity(c_source_files.items.len + sanitizer_symbolizer_sources.len);
+    try c_source_files.ensureUnusedCapacity(sanitizer_symbolizer_sources.len);
    for (sanitizer_symbolizer_sources) |c_src| {
        var cflags = std.ArrayList([]const u8).init(arena);
@@ -168,7 +168,7 @@ pub fn buildTsan(comp: *Compilation) !void {
        &[_][]const u8{"interception"},
    );
-    try c_source_files.ensureCapacity(c_source_files.items.len + interception_sources.len);
+    try c_source_files.ensureUnusedCapacity(interception_sources.len);
    for (interception_sources) |c_src| {
        var cflags = std.ArrayList([]const u8).init(arena);
diff --git a/src/link.zig b/src/link.zig
index 88159496f4..e649101f08 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -635,7 +635,7 @@ pub const File = struct {
            var object_files = std.ArrayList([*:0]const u8).init(base.allocator);
            defer object_files.deinit();
-            try object_files.ensureCapacity(base.options.objects.len + comp.c_object_table.count() + 2);
+            try object_files.ensureTotalCapacity(base.options.objects.len + comp.c_object_table.count() + 2);
            for (base.options.objects) |obj_path| {
                object_files.appendAssumeCapacity(try arena.dupeZ(u8, obj_path));
            }
diff --git a/src/link/C.zig b/src/link/C.zig
index 09f789f7d1..103cb60901 100644
--- a/src/link/C.zig
+++ b/src/link/C.zig
@@ -197,7 +197,7 @@ pub fn flushModule(self: *C, comp: *Compilation) !void {
    defer all_buffers.deinit();
    // This is at least enough until we get to the function bodies without error handling.
-    try all_buffers.ensureCapacity(self.decl_table.count() + 2);
+    try all_buffers.ensureTotalCapacity(self.decl_table.count() + 2);
    var file_size: u64 = zig_h.len;
    all_buffers.appendAssumeCapacity(.{
@@ -258,7 +258,7 @@ pub fn flushModule(self: *C, comp: *Compilation) !void {
    file_size += err_typedef_buf.items.len;
    // Now the function bodies.
-    try all_buffers.ensureCapacity(all_buffers.items.len + fn_count);
+    try all_buffers.ensureUnusedCapacity(fn_count);
    for (self.decl_table.keys()) |decl| {
        if (!decl.has_tv) continue;
        if (decl.val.castTag(.function)) |_| {
@@ -286,7 +286,7 @@ pub fn flushEmitH(module: *Module) !void {
    var all_buffers = std.ArrayList(std.os.iovec_const).init(module.gpa);
    defer all_buffers.deinit();
-    try all_buffers.ensureCapacity(emit_h.decl_table.count() + 1);
+    try all_buffers.ensureTotalCapacity(emit_h.decl_table.count() + 1);
    var file_size: u64 = zig_h.len;
    all_buffers.appendAssumeCapacity(.{
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index 4f5df73f8d..41b88881c4 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -418,7 +418,7 @@ pub fn createEmpty(gpa: *Allocator, options: link.Options) !*Coff {
 pub fn allocateDeclIndexes(self: *Coff, decl: *Module.Decl) !void {
    if (self.llvm_object) |_| return;
-    try self.offset_table.ensureCapacity(self.base.allocator, self.offset_table.items.len + 1);
+    try self.offset_table.ensureUnusedCapacity(self.base.allocator, 1);
    if (self.offset_table_free_list.popOrNull()) |i| {
        decl.link.coff.offset_table_index = i;
@@ -793,7 +793,7 @@ pub fn updateDeclExports(
    for (exports) |exp| {
        if (exp.options.section) |section_name| {
            if (!mem.eql(u8, section_name, ".text")) {
-                try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.count() + 1);
+                try module.failed_exports.ensureUnusedCapacity(module.gpa, 1);
                module.failed_exports.putAssumeCapacityNoClobber(
                    exp,
                    try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(), "Unimplemented: ExportOptions.section", .{}),
@@ -804,7 +804,7 @@ pub fn updateDeclExports(
            if (mem.eql(u8, exp.options.name, "_start")) {
                self.entry_addr = decl.link.coff.getVAddr(self.*) - default_image_base;
            } else {
-                try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.count() + 1);
+                try module.failed_exports.ensureUnusedCapacity(module.gpa, 1);
                module.failed_exports.putAssumeCapacityNoClobber(
                    exp,
                    try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(), "Unimplemented: Exports other than '_start'", .{}),
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index f8cf70104f..98eb0815a7 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -411,7 +411,7 @@ fn findFreeSpace(self: *Elf, object_size: u64, min_alignment: u16) u64 {
 /// TODO Improve this to use a table.
 fn makeString(self: *Elf, bytes: []const u8) !u32 {
-    try self.shstrtab.ensureCapacity(self.base.allocator, self.shstrtab.items.len + bytes.len + 1);
+    try self.shstrtab.ensureUnusedCapacity(self.base.allocator, bytes.len + 1);
    const result = self.shstrtab.items.len;
    self.shstrtab.appendSliceAssumeCapacity(bytes);
    self.shstrtab.appendAssumeCapacity(0);
@@ -420,7 +420,7 @@ fn makeString(self: *Elf, bytes: []const u8) !u32 {
 /// TODO Improve this to use a table.
 fn makeDebugString(self: *Elf, bytes: []const u8) !u32 {
-    try self.debug_strtab.ensureCapacity(self.base.allocator, self.debug_strtab.items.len + bytes.len + 1);
+    try self.debug_strtab.ensureUnusedCapacity(self.base.allocator, bytes.len + 1);
    const result = self.debug_strtab.items.len;
    self.debug_strtab.appendSliceAssumeCapacity(bytes);
    self.debug_strtab.appendAssumeCapacity(0);
@@ -856,7 +856,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation) !void {
    // We have a function to compute the upper bound size, because it's needed
    // for determining where to put the offset of the first `LinkBlock`.
-    try di_buf.ensureCapacity(self.dbgInfoNeededHeaderBytes());
+    try di_buf.ensureTotalCapacity(self.dbgInfoNeededHeaderBytes());
    // initial length - length of the .debug_info contribution for this compilation unit,
    // not including the initial length itself.
@@ -925,7 +925,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation) !void {
    // Enough for all the data without resizing. When support for more compilation units
    // is added, the size of this section will become more variable.
-    try di_buf.ensureCapacity(100);
+    try di_buf.ensureTotalCapacity(100);
    // initial length - length of the .debug_aranges contribution for this compilation unit,
    // not including the initial length itself.
@@ -1004,7 +1004,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation) !void {
    // The size of this header is variable, depending on the number of directories,
    // files, and padding. We have a function to compute the upper bound size, however,
    // because it's needed for determining where to put the offset of the first `SrcFn`.
-    try di_buf.ensureCapacity(self.dbgLineNeededHeaderBytes());
+    try di_buf.ensureTotalCapacity(self.dbgLineNeededHeaderBytes());
    // initial length - length of the .debug_line contribution for this compilation unit,
    // not including the initial length itself.
@@ -1639,7 +1639,7 @@ fn linkWithLLD(self: *Elf, comp: *Compilation) !void {
    // Shared libraries.
    if (is_exe_or_dyn_lib) {
        const system_libs = self.base.options.system_libs.keys();
-        try argv.ensureCapacity(argv.items.len + system_libs.len);
+        try argv.ensureUnusedCapacity(system_libs.len);
        for (system_libs) |link_lib| {
            // By this time, we depend on these libs being dynamically linked libraries and not static libraries
            // (the check for that needs to be earlier), but they could be full paths to .so files, in which
@@ -2113,8 +2113,8 @@ pub fn allocateDeclIndexes(self: *Elf, decl: *Module.Decl) !void {
    if (decl.link.elf.local_sym_index != 0) return;
-    try self.local_symbols.ensureCapacity(self.base.allocator, self.local_symbols.items.len + 1);
-    try self.offset_table.ensureCapacity(self.base.allocator, self.offset_table.items.len + 1);
+    try self.local_symbols.ensureUnusedCapacity(self.base.allocator, 1);
+    try self.offset_table.ensureUnusedCapacity(self.base.allocator, 1);
    if (self.local_symbol_free_list.popOrNull()) |i| {
        log.debug("reusing symbol index {d} for {s}", .{ i, decl.name });
@@ -2316,7 +2316,7 @@ pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liven
    defer deinitRelocs(self.base.allocator, &dbg_info_type_relocs);
    // For functions we need to add a prologue to the debug line program.
-    try dbg_line_buffer.ensureCapacity(26);
+    try dbg_line_buffer.ensureTotalCapacity(26);
    const decl = func.owner_decl;
    const line_off = @intCast(u28, decl.src_line + func.lbrace_line);
@@ -2351,7 +2351,7 @@ pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liven
    // .debug_info subprogram
    const decl_name_with_null = decl.name[0 .. mem.lenZ(decl.name) + 1];
-    try dbg_info_buffer.ensureCapacity(dbg_info_buffer.items.len + 25 + decl_name_with_null.len);
+    try dbg_info_buffer.ensureUnusedCapacity(25 + decl_name_with_null.len);
    const fn_ret_type = decl.ty.fnReturnType();
    const fn_ret_has_bits = fn_ret_type.hasCodeGenBits();
@@ -2593,7 +2593,7 @@ fn addDbgInfoType(self: *Elf, ty: Type, dbg_info_buffer: *std.ArrayList(u8)) !vo
        },
        .Int => {
            const info = ty.intInfo(self.base.options.target);
-            try dbg_info_buffer.ensureCapacity(dbg_info_buffer.items.len + 12);
+            try dbg_info_buffer.ensureUnusedCapacity(12);
            dbg_info_buffer.appendAssumeCapacity(abbrev_base_type);
            // DW.AT.encoding, DW.FORM.data1
            dbg_info_buffer.appendAssumeCapacity(switch (info.signedness) {
@@ -2607,7 +2607,7 @@ fn addDbgInfoType(self: *Elf, ty: Type, dbg_info_buffer: *std.ArrayList(u8)) !vo
        },
        .Optional => {
            if (ty.isPtrLikeOptional()) {
-                try dbg_info_buffer.ensureCapacity(dbg_info_buffer.items.len + 12);
+                try dbg_info_buffer.ensureUnusedCapacity(12);
                dbg_info_buffer.appendAssumeCapacity(abbrev_base_type);
                // DW.AT.encoding, DW.FORM.data1
                dbg_info_buffer.appendAssumeCapacity(DW.ATE.address);
@@ -2747,14 +2747,14 @@ pub fn updateDeclExports(
    const tracy = trace(@src());
    defer tracy.end();
-    try self.global_symbols.ensureCapacity(self.base.allocator, self.global_symbols.items.len + exports.len);
+    try self.global_symbols.ensureUnusedCapacity(self.base.allocator, exports.len);
    if (decl.link.elf.local_sym_index == 0) return;
    const decl_sym = self.local_symbols.items[decl.link.elf.local_sym_index];
    for (exports) |exp| {
        if (exp.options.section) |section_name| {
            if (!mem.eql(u8, section_name, ".text")) {
-                try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.count() + 1);
+                try module.failed_exports.ensureUnusedCapacity(module.gpa, 1);
                module.failed_exports.putAssumeCapacityNoClobber(
                    exp,
                    try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(), "Unimplemented: ExportOptions.section", .{}),
@@ -2772,7 +2772,7 @@ pub fn updateDeclExports(
            },
            .Weak => elf.STB_WEAK,
            .LinkOnce => {
-                try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.count() + 1);
+                try module.failed_exports.ensureUnusedCapacity(module.gpa, 1);
                module.failed_exports.putAssumeCapacityNoClobber(
                    exp,
                    try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(), "Unimplemented: GlobalLinkage.LinkOnce", .{}),
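
`makeString`/`makeDebugString` above reduce to the canonical "reserve the exact tail, then append infallibly" pattern, where the `+ 1` is the NUL terminator; the old form had to restate `items.len` just to express it. In isolation, under the same assumptions as the Elf code:

    const std = @import("std");

    /// Appends `bytes` plus a NUL terminator to a string table and returns
    /// the offset of the new entry (the shape of Elf.makeString above).
    fn makeString(gpa: *std.mem.Allocator, strtab: *std.ArrayListUnmanaged(u8), bytes: []const u8) !u32 {
        try strtab.ensureUnusedCapacity(gpa, bytes.len + 1);
        const result = @intCast(u32, strtab.items.len);
        strtab.appendSliceAssumeCapacity(bytes);
        strtab.appendAssumeCapacity(0);
        return result;
    }
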
diff --git a/src/link/MachO/CodeSignature.zig b/src/link/MachO/CodeSignature.zig
index 6dd7e556b5..845122f5e3 100644
--- a/src/link/MachO/CodeSignature.zig
+++ b/src/link/MachO/CodeSignature.zig
@@ -102,7 +102,7 @@ pub fn calcAdhocSignature(
    var buffer = try allocator.alloc(u8, page_size);
    defer allocator.free(buffer);
-    try cdir.data.ensureCapacity(allocator, total_pages * hash_size + id.len + 1);
+    try cdir.data.ensureTotalCapacity(allocator, total_pages * hash_size + id.len + 1);
    // 1. Save the identifier and update offsets
    cdir.inner.identOffset = cdir.inner.length;
diff --git a/src/link/MachO/DebugSymbols.zig b/src/link/MachO/DebugSymbols.zig
index a8c0138f60..3e940da85d 100644
--- a/src/link/MachO/DebugSymbols.zig
+++ b/src/link/MachO/DebugSymbols.zig
@@ -353,7 +353,7 @@ pub fn flushModule(self: *DebugSymbols, allocator: *Allocator, options: link.Opt
    // We have a function to compute the upper bound size, because it's needed
    // for determining where to put the offset of the first `LinkBlock`.
-    try di_buf.ensureCapacity(self.dbgInfoNeededHeaderBytes());
+    try di_buf.ensureTotalCapacity(self.dbgInfoNeededHeaderBytes());
    // initial length - length of the .debug_info contribution for this compilation unit,
    // not including the initial length itself.
@@ -408,7 +408,7 @@ pub fn flushModule(self: *DebugSymbols, allocator: *Allocator, options: link.Opt
    // Enough for all the data without resizing. When support for more compilation units
    // is added, the size of this section will become more variable.
-    try di_buf.ensureCapacity(100);
+    try di_buf.ensureTotalCapacity(100);
    // initial length - length of the .debug_aranges contribution for this compilation unit,
    // not including the initial length itself.
@@ -479,7 +479,7 @@ pub fn flushModule(self: *DebugSymbols, allocator: *Allocator, options: link.Opt
    // The size of this header is variable, depending on the number of directories,
    // files, and padding. We have a function to compute the upper bound size, however,
    // because it's needed for determining where to put the offset of the first `SrcFn`.
-    try di_buf.ensureCapacity(self.dbgLineNeededHeaderBytes(module));
+    try di_buf.ensureTotalCapacity(self.dbgLineNeededHeaderBytes(module));
    // initial length - length of the .debug_line contribution for this compilation unit,
    // not including the initial length itself.
@@ -607,7 +607,7 @@ fn copySegmentCommand(self: *DebugSymbols, allocator: *Allocator, base_cmd: Segm
    };
    mem.copy(u8, &cmd.inner.segname, &base_cmd.inner.segname);
-    try cmd.sections.ensureCapacity(allocator, cmd.inner.nsects);
+    try cmd.sections.ensureTotalCapacity(allocator, cmd.inner.nsects);
    for (base_cmd.sections.items) |base_sect, i| {
        var sect = macho.section_64{
            .sectname = undefined,
@@ -855,7 +855,7 @@ pub fn initDeclDebugBuffers(
    switch (decl.ty.zigTypeTag()) {
        .Fn => {
            // For functions we need to add a prologue to the debug line program.
-            try dbg_line_buffer.ensureCapacity(26);
+            try dbg_line_buffer.ensureTotalCapacity(26);
            const func = decl.val.castTag(.function).?.data;
            const line_off = @intCast(u28, decl.src_line + func.lbrace_line);
@@ -889,7 +889,7 @@ pub fn initDeclDebugBuffers(
            // .debug_info subprogram
            const decl_name_with_null = decl.name[0 .. mem.lenZ(decl.name) + 1];
-            try dbg_info_buffer.ensureCapacity(dbg_info_buffer.items.len + 27 + decl_name_with_null.len);
+            try dbg_info_buffer.ensureUnusedCapacity(27 + decl_name_with_null.len);
            const fn_ret_type = decl.ty.fnReturnType();
            const fn_ret_has_bits = fn_ret_type.hasCodeGenBits();
@@ -1124,7 +1124,7 @@ fn addDbgInfoType(
        },
        .Int => {
            const info = ty.intInfo(target);
-            try dbg_info_buffer.ensureCapacity(dbg_info_buffer.items.len + 12);
+            try dbg_info_buffer.ensureUnusedCapacity(12);
            dbg_info_buffer.appendAssumeCapacity(abbrev_base_type);
            // DW.AT.encoding, DW.FORM.data1
            dbg_info_buffer.appendAssumeCapacity(switch (info.signedness) {
@@ -1261,7 +1261,7 @@ fn getDebugLineProgramEnd(self: DebugSymbols) u32 {
 /// TODO Improve this to use a table.
 fn makeDebugString(self: *DebugSymbols, allocator: *Allocator, bytes: []const u8) !u32 {
-    try self.debug_string_table.ensureCapacity(allocator, self.debug_string_table.items.len + bytes.len + 1);
+    try self.debug_string_table.ensureUnusedCapacity(allocator, bytes.len + 1);
    const result = self.debug_string_table.items.len;
    self.debug_string_table.appendSliceAssumeCapacity(bytes);
    self.debug_string_table.appendAssumeCapacity(0);
diff --git a/src/link/MachO/Dylib.zig b/src/link/MachO/Dylib.zig
index 05d44559ce..6a1a74f79a 100644
--- a/src/link/MachO/Dylib.zig
+++ b/src/link/MachO/Dylib.zig
@@ -180,7 +180,7 @@ pub fn parse(self: *Dylib, allocator: *Allocator, target: std.Target) !void {
 fn readLoadCommands(self: *Dylib, allocator: *Allocator, reader: anytype) !void {
    const should_lookup_reexports = self.header.?.flags & macho.MH_NO_REEXPORTED_DYLIBS == 0;
-    try self.load_commands.ensureCapacity(allocator, self.header.?.ncmds);
+    try self.load_commands.ensureTotalCapacity(allocator, self.header.?.ncmds);
    var i: u16 = 0;
    while (i < self.header.?.ncmds) : (i += 1) {
diff --git a/src/link/MachO/Object.zig b/src/link/MachO/Object.zig
index 12c480b0f1..d71c549d77 100644
--- a/src/link/MachO/Object.zig
+++ b/src/link/MachO/Object.zig
@@ -261,7 +261,7 @@ pub fn readLoadCommands(self: *Object, allocator: *Allocator, reader: anytype) !
    const header = self.header orelse unreachable; // Unreachable here signifies a fatal unexplored condition.
    const offset = self.file_offset orelse 0;
-    try self.load_commands.ensureCapacity(allocator, header.ncmds);
+    try self.load_commands.ensureTotalCapacity(allocator, header.ncmds);
    var i: u16 = 0;
    while (i < header.ncmds) : (i += 1) {
diff --git a/src/link/MachO/Trie.zig b/src/link/MachO/Trie.zig
index ab3a97eb33..7bf451f2c8 100644
--- a/src/link/MachO/Trie.zig
+++ b/src/link/MachO/Trie.zig
@@ -326,7 +326,7 @@ pub fn finalize(self: *Trie, allocator: *Allocator) !void {
    if (!self.trie_dirty) return;
    self.ordered_nodes.shrinkRetainingCapacity(0);
-    try self.ordered_nodes.ensureCapacity(allocator, self.node_count);
+    try self.ordered_nodes.ensureTotalCapacity(allocator, self.node_count);
    var fifo = std.fifo.LinearFifo(*Node, .Dynamic).init(allocator);
    defer fifo.deinit();
diff --git a/src/link/MachO/commands.zig b/src/link/MachO/commands.zig
index d9ca056c8e..35512886d4 100644
--- a/src/link/MachO/commands.zig
+++ b/src/link/MachO/commands.zig
@@ -223,7 +223,7 @@ pub const SegmentCommand = struct {
        var segment = SegmentCommand{
            .inner = inner,
        };
-        try segment.sections.ensureCapacity(alloc, inner.nsects);
+        try segment.sections.ensureTotalCapacity(alloc, inner.nsects);
        var i: usize = 0;
        while (i < inner.nsects) : (i += 1) {
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index fc559948c4..e3ec0bf255 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -172,8 +172,8 @@ pub fn deinit(self: *Wasm) void {
 pub fn allocateDeclIndexes(self: *Wasm, decl: *Module.Decl) !void {
    if (decl.link.wasm.init) return;
-    try self.offset_table.ensureCapacity(self.base.allocator, self.offset_table.items.len + 1);
-    try self.symbols.ensureCapacity(self.base.allocator, self.symbols.items.len + 1);
+    try self.offset_table.ensureUnusedCapacity(self.base.allocator, 1);
+    try self.symbols.ensureUnusedCapacity(self.base.allocator, 1);
    const block = &decl.link.wasm;
    block.init = true;
diff --git a/src/main.zig b/src/main.zig
index 5774bf2a67..b7b5d06264 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -1704,22 +1704,22 @@ fn buildOutputType(
                } else true;
                if (!should_get_sdk_path) break :outer false;
                if (try std.zig.system.darwin.getSDKPath(arena, target_info.target)) |sdk_path| {
-                    try clang_argv.ensureCapacity(clang_argv.items.len + 2);
+                    try clang_argv.ensureUnusedCapacity(2);
                    clang_argv.appendAssumeCapacity("-isysroot");
                    clang_argv.appendAssumeCapacity(sdk_path);
                    break :outer true;
                } else break :outer false;
            } else false;
-            try clang_argv.ensureCapacity(clang_argv.items.len + paths.include_dirs.items.len * 2);
+            try clang_argv.ensureUnusedCapacity(paths.include_dirs.items.len * 2);
            const isystem_flag = if (has_sysroot) "-iwithsysroot" else "-isystem";
            for (paths.include_dirs.items) |include_dir| {
                clang_argv.appendAssumeCapacity(isystem_flag);
                clang_argv.appendAssumeCapacity(include_dir);
            }
-            try clang_argv.ensureCapacity(clang_argv.items.len + paths.framework_dirs.items.len * 2);
-            try framework_dirs.ensureCapacity(framework_dirs.items.len + paths.framework_dirs.items.len);
+            try clang_argv.ensureUnusedCapacity(paths.framework_dirs.items.len * 2);
+            try framework_dirs.ensureUnusedCapacity(paths.framework_dirs.items.len);
            const iframework_flag = if (has_sysroot) "-iframeworkwithsysroot" else "-iframework";
            for (paths.framework_dirs.items) |framework_dir| {
                clang_argv.appendAssumeCapacity(iframework_flag);
@@ -2783,7 +2783,7 @@ pub fn cmdInit(
        fatal("unable to read template file 'build.zig': {s}", .{@errorName(err)});
    };
    var modified_build_zig_contents = std.ArrayList(u8).init(arena);
-    try modified_build_zig_contents.ensureCapacity(build_zig_contents.len);
+    try modified_build_zig_contents.ensureTotalCapacity(build_zig_contents.len);
    for (build_zig_contents) |c| {
        if (c == '$') {
            try modified_build_zig_contents.appendSlice(cwd_basename);
@@ -3464,7 +3464,7 @@ fn fmtPathFile(
    // As a heuristic, we make enough capacity for the same as the input source.
    fmt.out_buffer.shrinkRetainingCapacity(0);
-    try fmt.out_buffer.ensureCapacity(source_code.len);
+    try fmt.out_buffer.ensureTotalCapacity(source_code.len);
    try tree.renderToArrayList(&fmt.out_buffer);
    if (mem.eql(u8, fmt.out_buffer.items, source_code))
diff --git a/src/mingw.zig b/src/mingw.zig
index 84857df5b5..7771065a5a 100644
--- a/src/mingw.zig
+++ b/src/mingw.zig
@@ -312,7 +312,7 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
    if (try man.hit()) {
        const digest = man.final();
-        try comp.crt_files.ensureCapacity(comp.gpa, comp.crt_files.count() + 1);
+        try comp.crt_files.ensureUnusedCapacity(comp.gpa, 1);
        comp.crt_files.putAssumeCapacityNoClobber(final_lib_basename, .{
            .full_object_path = try comp.global_cache_directory.join(comp.gpa, &[_][]const u8{
                "o", &digest, final_lib_basename,
diff --git a/src/musl.zig b/src/musl.zig
index 7dbaf3ba3f..3b5915719b 100644
--- a/src/musl.zig
+++ b/src/musl.zig
@@ -112,7 +112,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
    var source_table = std.StringArrayHashMap(Ext).init(comp.gpa);
    defer source_table.deinit();
-    try source_table.ensureCapacity(compat_time32_files.len + src_files.len);
+    try source_table.ensureTotalCapacity(compat_time32_files.len + src_files.len);
    for (src_files) |src_file| {
        try addSrcFile(arena, &source_table, src_file);
@@ -231,7 +231,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
    try sub_compilation.updateSubCompilation();
-    try comp.crt_files.ensureCapacity(comp.gpa, comp.crt_files.count() + 1);
+    try comp.crt_files.ensureUnusedCapacity(comp.gpa, 1);
    const basename = try comp.gpa.dupe(u8, "libc.so");
    errdefer comp.gpa.free(basename);
diff --git a/src/translate_c.zig b/src/translate_c.zig
index d980fa657e..7247ed50a9 100644
--- a/src/translate_c.zig
+++ b/src/translate_c.zig
@@ -4882,7 +4882,7 @@ fn finishTransFnProto(
    var fn_params = std.ArrayList(ast.Payload.Param).init(c.gpa);
    defer fn_params.deinit();
    const param_count: usize = if (fn_proto_ty != null) fn_proto_ty.?.getNumParams() else 0;
-    try fn_params.ensureCapacity(param_count);
+    try fn_params.ensureTotalCapacity(param_count);
    var i: usize = 0;
    while (i < param_count) : (i += 1) {
diff --git a/src/translate_c/ast.zig b/src/translate_c/ast.zig
index 3686b90bda..93acd464f4 100644
--- a/src/translate_c/ast.zig
+++ b/src/translate_c/ast.zig
@@ -728,13 +728,13 @@ pub fn render(gpa: *Allocator, nodes: []const Node) !std.zig.Ast {
    // Estimate that each top level node has 10 child nodes.
    const estimated_node_count = nodes.len * 10;
-    try ctx.nodes.ensureCapacity(gpa, estimated_node_count);
+    try ctx.nodes.ensureTotalCapacity(gpa, estimated_node_count);
    // Estimate that each each node has 2 tokens.
    const estimated_tokens_count = estimated_node_count * 2;
-    try ctx.tokens.ensureCapacity(gpa, estimated_tokens_count);
+    try ctx.tokens.ensureTotalCapacity(gpa, estimated_tokens_count);
    // Estimate that each each token is 3 bytes long.
    const estimated_buf_len = estimated_tokens_count * 3;
-    try ctx.buf.ensureCapacity(estimated_buf_len);
+    try ctx.buf.ensureTotalCapacity(estimated_buf_len);
    ctx.nodes.appendAssumeCapacity(.{
        .tag = .root,
@@ -839,7 +839,7 @@ const Context = struct {
 fn addExtra(c: *Context, extra: anytype) Allocator.Error!NodeIndex {
    const fields = std.meta.fields(@TypeOf(extra));
-    try c.extra_data.ensureCapacity(c.gpa, c.extra_data.items.len + fields.len);
+    try c.extra_data.ensureUnusedCapacity(c.gpa, fields.len);
    const result = @intCast(u32, c.extra_data.items.len);
    inline for (fields) |field| {
        comptime std.debug.assert(field.field_type == NodeIndex);
@@ -2797,7 +2797,7 @@ fn renderParams(c: *Context, params: []Payload.Param, is_var_args: bool) !std.Ar
    _ = try c.addToken(.l_paren, "(");
    var rendered = std.ArrayList(NodeIndex).init(c.gpa);
    errdefer rendered.deinit();
-    try rendered.ensureCapacity(std.math.max(params.len, 1));
+    try rendered.ensureTotalCapacity(std.math.max(params.len, 1));
    for (params) |param, i| {
        if (i != 0) _ = try c.addToken(.comma, ",");
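
One last pattern, shared by parse.zig and translate_c/ast.zig: an estimated `ensureTotalCapacity` only licenses `appendAssumeCapacity` for elements the estimate provably covers; both files clamp their estimate so at least the root element is guaranteed, and everything beyond the estimate goes through fallible `append`. A compressed sketch of that discipline (the 8:1 ratio is the heuristic quoted in parse.zig; the counts are hypothetical):

    const std = @import("std");

    test "estimate capacity, assume it only where guaranteed" {
        const gpa = std.testing.allocator;
        const source_len: usize = 4096;
        var tokens = std.ArrayListUnmanaged(u32){};
        defer tokens.deinit(gpa);
        // ~8 source bytes per token, clamped so the first append below is safe.
        try tokens.ensureTotalCapacity(gpa, std.math.max(source_len / 8, 1));
        tokens.appendAssumeCapacity(0); // covered by the clamp
        var i: u32 = 1;
        while (i < 1000) : (i += 1) try tokens.append(gpa, i); // may grow past the estimate
    }
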