mirror of
https://github.com/ziglang/zig.git
synced 2026-02-21 16:54:52 +00:00
Replace ArrayList.init/ensureTotalCapacity pairs with initCapacity
Because ArrayList.initCapacity uses 'precise' capacity allocation, this should save memory on average, and definitely will save memory in cases where ArrayList is used where a regular allocated slice could also have been used.
This commit is contained in:
parent
d03e9d0b83
commit
e97feb96e4
@ -551,10 +551,9 @@ fn testDecode(bytes: []const u8) !u21 {
|
||||
|
||||
/// Caller must free returned memory.
|
||||
pub fn utf16leToUtf8Alloc(allocator: *mem.Allocator, utf16le: []const u16) ![]u8 {
|
||||
var result = std.ArrayList(u8).init(allocator);
|
||||
errdefer result.deinit();
|
||||
// optimistically guess that it will all be ascii.
|
||||
try result.ensureTotalCapacity(utf16le.len);
|
||||
var result = try std.ArrayList(u8).initCapacity(allocator, utf16le.len);
|
||||
errdefer result.deinit();
|
||||
var out_index: usize = 0;
|
||||
var it = Utf16LeIterator.init(utf16le);
|
||||
while (try it.nextCodepoint()) |codepoint| {
|
||||
@ -569,10 +568,9 @@ pub fn utf16leToUtf8Alloc(allocator: *mem.Allocator, utf16le: []const u16) ![]u8
|
||||
|
||||
/// Caller must free returned memory.
|
||||
pub fn utf16leToUtf8AllocZ(allocator: *mem.Allocator, utf16le: []const u16) ![:0]u8 {
|
||||
var result = std.ArrayList(u8).init(allocator);
|
||||
errdefer result.deinit();
|
||||
// optimistically guess that it will all be ascii.
|
||||
try result.ensureTotalCapacity(utf16le.len);
|
||||
var result = try std.ArrayList(u8).initCapacity(allocator, utf16le.len);
|
||||
errdefer result.deinit();
|
||||
var out_index: usize = 0;
|
||||
var it = Utf16LeIterator.init(utf16le);
|
||||
while (try it.nextCodepoint()) |codepoint| {
|
||||
@ -664,10 +662,9 @@ test "utf16leToUtf8" {
|
||||
}
|
||||
|
||||
pub fn utf8ToUtf16LeWithNull(allocator: *mem.Allocator, utf8: []const u8) ![:0]u16 {
|
||||
var result = std.ArrayList(u16).init(allocator);
|
||||
errdefer result.deinit();
|
||||
// optimistically guess that it will not require surrogate pairs
|
||||
try result.ensureTotalCapacity(utf8.len + 1);
|
||||
var result = try std.ArrayList(u16).initCapacity(allocator, utf8.len + 1);
|
||||
errdefer result.deinit();
|
||||
|
||||
const view = try Utf8View.init(utf8);
|
||||
var it = view.iterator();
|
||||
|
||||
@ -3812,8 +3812,7 @@ fn detectLibCIncludeDirs(
|
||||
}
|
||||
|
||||
fn detectLibCFromLibCInstallation(arena: *Allocator, target: Target, lci: *const LibCInstallation) !LibCDirs {
|
||||
var list = std.ArrayList([]const u8).init(arena);
|
||||
try list.ensureTotalCapacity(4);
|
||||
var list = try std.ArrayList([]const u8).initCapacity(arena, 4);
|
||||
|
||||
list.appendAssumeCapacity(lci.include_dir.?);
|
||||
|
||||
|
||||
@ -6219,12 +6219,11 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
|
||||
|
||||
try sema.requireRuntimeBlock(block, src);
|
||||
|
||||
var cases_extra: std.ArrayListUnmanaged(u32) = .{};
|
||||
const estimated_cases_extra = (scalar_cases_len + multi_cases_len) *
|
||||
@typeInfo(Air.SwitchBr.Case).Struct.fields.len + 2;
|
||||
var cases_extra = try std.ArrayListUnmanaged(u32).initCapacity(gpa, estimated_cases_extra);
|
||||
defer cases_extra.deinit(gpa);
|
||||
|
||||
try cases_extra.ensureTotalCapacity(gpa, (scalar_cases_len + multi_cases_len) *
|
||||
@typeInfo(Air.SwitchBr.Case).Struct.fields.len + 2);
|
||||
|
||||
var case_block = child_block.makeSubBlock();
|
||||
case_block.runtime_loop = null;
|
||||
case_block.runtime_cond = operand_src;
|
||||
|
||||
@ -849,8 +849,7 @@ pub const DeclGen = struct {
|
||||
|
||||
assert(struct_obj.haveFieldTypes());
|
||||
|
||||
var llvm_field_types: std.ArrayListUnmanaged(*const llvm.Type) = .{};
|
||||
try llvm_field_types.ensureTotalCapacity(gpa, struct_obj.fields.count());
|
||||
var llvm_field_types = try std.ArrayListUnmanaged(*const llvm.Type).initCapacity(gpa, struct_obj.fields.count());
|
||||
defer llvm_field_types.deinit(gpa);
|
||||
|
||||
for (struct_obj.fields.values()) |field| {
|
||||
@ -1251,8 +1250,7 @@ pub const DeclGen = struct {
|
||||
const field_vals = tv.val.castTag(.@"struct").?.data;
|
||||
const gpa = self.gpa;
|
||||
|
||||
var llvm_fields: std.ArrayListUnmanaged(*const llvm.Value) = .{};
|
||||
try llvm_fields.ensureTotalCapacity(gpa, field_vals.len);
|
||||
var llvm_fields = try std.ArrayListUnmanaged(*const llvm.Value).initCapacity(gpa, field_vals.len);
|
||||
defer llvm_fields.deinit(gpa);
|
||||
|
||||
for (field_vals) |field_val, i| {
|
||||
|
||||
@ -109,8 +109,7 @@ pub fn buildLibCXX(comp: *Compilation) !void {
|
||||
|
||||
const cxxabi_include_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libcxxabi", "include" });
|
||||
const cxx_include_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libcxx", "include" });
|
||||
var c_source_files = std.ArrayList(Compilation.CSourceFile).init(arena);
|
||||
try c_source_files.ensureTotalCapacity(libcxx_files.len);
|
||||
var c_source_files = try std.ArrayList(Compilation.CSourceFile).initCapacity(arena, libcxx_files.len);
|
||||
|
||||
for (libcxx_files) |cxx_src| {
|
||||
var cflags = std.ArrayList([]const u8).init(arena);
|
||||
@ -256,8 +255,7 @@ pub fn buildLibCXXABI(comp: *Compilation) !void {
|
||||
|
||||
const cxxabi_include_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libcxxabi", "include" });
|
||||
const cxx_include_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libcxx", "include" });
|
||||
var c_source_files = std.ArrayList(Compilation.CSourceFile).init(arena);
|
||||
try c_source_files.ensureTotalCapacity(libcxxabi_files.len);
|
||||
var c_source_files = try std.ArrayList(Compilation.CSourceFile).initCapacity(arena, libcxxabi_files.len);
|
||||
|
||||
for (libcxxabi_files) |cxxabi_src| {
|
||||
var cflags = std.ArrayList([]const u8).init(arena);
|
||||
|
||||
@ -650,10 +650,10 @@ pub const File = struct {
|
||||
};
|
||||
}
|
||||
|
||||
var object_files = std.ArrayList([*:0]const u8).init(base.allocator);
|
||||
const num_object_files = base.options.objects.len + comp.c_object_table.count() + 2;
|
||||
var object_files = try std.ArrayList([*:0]const u8).initCapacity(base.allocator, num_object_files);
|
||||
defer object_files.deinit();
|
||||
|
||||
try object_files.ensureTotalCapacity(base.options.objects.len + comp.c_object_table.count() + 2);
|
||||
for (base.options.objects) |obj_path| {
|
||||
object_files.appendAssumeCapacity(try arena.dupeZ(u8, obj_path));
|
||||
}
|
||||
|
||||
@ -397,11 +397,10 @@ pub fn flushEmitH(module: *Module) !void {
|
||||
const emit_h = module.emit_h orelse return;
|
||||
|
||||
// We collect a list of buffers to write, and write them all at once with pwritev 😎
|
||||
var all_buffers = std.ArrayList(std.os.iovec_const).init(module.gpa);
|
||||
const num_buffers = emit_h.decl_table.count() + 1;
|
||||
var all_buffers = try std.ArrayList(std.os.iovec_const).initCapacity(module.gpa, num_buffers);
|
||||
defer all_buffers.deinit();
|
||||
|
||||
try all_buffers.ensureTotalCapacity(emit_h.decl_table.count() + 1);
|
||||
|
||||
var file_size: u64 = zig_h.len;
|
||||
all_buffers.appendAssumeCapacity(.{
|
||||
.iov_base = zig_h,
|
||||
|
||||
@ -851,12 +851,11 @@ pub fn flushModule(self: *Elf, comp: *Compilation) !void {
|
||||
const last_dbg_info_decl = self.dbg_info_decl_last.?;
|
||||
const debug_info_sect = &self.sections.items[self.debug_info_section_index.?];
|
||||
|
||||
var di_buf = std.ArrayList(u8).init(self.base.allocator);
|
||||
defer di_buf.deinit();
|
||||
|
||||
// We have a function to compute the upper bound size, because it's needed
|
||||
// for determining where to put the offset of the first `LinkBlock`.
|
||||
try di_buf.ensureTotalCapacity(self.dbgInfoNeededHeaderBytes());
|
||||
const needed_bytes = self.dbgInfoNeededHeaderBytes();
|
||||
var di_buf = try std.ArrayList(u8).initCapacity(self.base.allocator, needed_bytes);
|
||||
defer di_buf.deinit();
|
||||
|
||||
// initial length - length of the .debug_info contribution for this compilation unit,
|
||||
// not including the initial length itself.
|
||||
@ -920,12 +919,10 @@ pub fn flushModule(self: *Elf, comp: *Compilation) !void {
|
||||
if (self.debug_aranges_section_dirty) {
|
||||
const debug_aranges_sect = &self.sections.items[self.debug_aranges_section_index.?];
|
||||
|
||||
var di_buf = std.ArrayList(u8).init(self.base.allocator);
|
||||
defer di_buf.deinit();
|
||||
|
||||
// Enough for all the data without resizing. When support for more compilation units
|
||||
// is added, the size of this section will become more variable.
|
||||
try di_buf.ensureTotalCapacity(100);
|
||||
var di_buf = try std.ArrayList(u8).initCapacity(self.base.allocator, 100);
|
||||
defer di_buf.deinit();
|
||||
|
||||
// initial length - length of the .debug_aranges contribution for this compilation unit,
|
||||
// not including the initial length itself.
|
||||
@ -998,13 +995,12 @@ pub fn flushModule(self: *Elf, comp: *Compilation) !void {
|
||||
|
||||
const debug_line_sect = &self.sections.items[self.debug_line_section_index.?];
|
||||
|
||||
var di_buf = std.ArrayList(u8).init(self.base.allocator);
|
||||
defer di_buf.deinit();
|
||||
|
||||
// The size of this header is variable, depending on the number of directories,
|
||||
// files, and padding. We have a function to compute the upper bound size, however,
|
||||
// because it's needed for determining where to put the offset of the first `SrcFn`.
|
||||
try di_buf.ensureTotalCapacity(self.dbgLineNeededHeaderBytes());
|
||||
const needed_bytes = self.dbgLineNeededHeaderBytes();
|
||||
var di_buf = try std.ArrayList(u8).initCapacity(self.base.allocator, needed_bytes);
|
||||
defer di_buf.deinit();
|
||||
|
||||
// initial length - length of the .debug_line contribution for this compilation unit,
|
||||
// not including the initial length itself.
|
||||
@ -2300,7 +2296,8 @@ pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liven
|
||||
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
|
||||
defer code_buffer.deinit();
|
||||
|
||||
var dbg_line_buffer = std.ArrayList(u8).init(self.base.allocator);
|
||||
// For functions we need to add a prologue to the debug line program.
|
||||
var dbg_line_buffer = try std.ArrayList(u8).initCapacity(self.base.allocator, 26);
|
||||
defer dbg_line_buffer.deinit();
|
||||
|
||||
var dbg_info_buffer = std.ArrayList(u8).init(self.base.allocator);
|
||||
@ -2309,9 +2306,6 @@ pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liven
|
||||
var dbg_info_type_relocs: File.DbgInfoTypeRelocsTable = .{};
|
||||
defer deinitRelocs(self.base.allocator, &dbg_info_type_relocs);
|
||||
|
||||
// For functions we need to add a prologue to the debug line program.
|
||||
try dbg_line_buffer.ensureTotalCapacity(26);
|
||||
|
||||
const decl = func.owner_decl;
|
||||
const line_off = @intCast(u28, decl.src_line + func.lbrace_line);
|
||||
|
||||
|
||||
@ -5323,8 +5323,7 @@ fn snapshotState(self: *MachO) !void {
|
||||
node.payload.aliases = aliases.toOwnedSlice();
|
||||
try nodes.append(node);
|
||||
|
||||
var relocs = std.ArrayList(Snapshot.Node).init(arena);
|
||||
try relocs.ensureTotalCapacity(atom.relocs.items.len);
|
||||
var relocs = try std.ArrayList(Snapshot.Node).initCapacity(arena, atom.relocs.items.len);
|
||||
for (atom.relocs.items) |rel| {
|
||||
const arch = self.base.options.target.cpu.arch;
|
||||
const source_addr = blk: {
|
||||
|
||||
@ -348,12 +348,11 @@ pub fn flushModule(self: *DebugSymbols, allocator: *Allocator, options: link.Opt
|
||||
const dwarf_segment = &self.load_commands.items[self.dwarf_segment_cmd_index.?].Segment;
|
||||
const debug_info_sect = &dwarf_segment.sections.items[self.debug_info_section_index.?];
|
||||
|
||||
var di_buf = std.ArrayList(u8).init(allocator);
|
||||
defer di_buf.deinit();
|
||||
|
||||
// We have a function to compute the upper bound size, because it's needed
|
||||
// for determining where to put the offset of the first `LinkBlock`.
|
||||
try di_buf.ensureTotalCapacity(self.dbgInfoNeededHeaderBytes());
|
||||
const needed_bytes = self.dbgInfoNeededHeaderBytes();
|
||||
var di_buf = try std.ArrayList(u8).initCapacity(allocator, needed_bytes);
|
||||
defer di_buf.deinit();
|
||||
|
||||
// initial length - length of the .debug_info contribution for this compilation unit,
|
||||
// not including the initial length itself.
|
||||
@ -403,12 +402,10 @@ pub fn flushModule(self: *DebugSymbols, allocator: *Allocator, options: link.Opt
|
||||
const dwarf_segment = &self.load_commands.items[self.dwarf_segment_cmd_index.?].Segment;
|
||||
const debug_aranges_sect = &dwarf_segment.sections.items[self.debug_aranges_section_index.?];
|
||||
|
||||
var di_buf = std.ArrayList(u8).init(allocator);
|
||||
defer di_buf.deinit();
|
||||
|
||||
// Enough for all the data without resizing. When support for more compilation units
|
||||
// is added, the size of this section will become more variable.
|
||||
try di_buf.ensureTotalCapacity(100);
|
||||
var di_buf = try std.ArrayList(u8).initCapacity(allocator, 100);
|
||||
defer di_buf.deinit();
|
||||
|
||||
// initial length - length of the .debug_aranges contribution for this compilation unit,
|
||||
// not including the initial length itself.
|
||||
@ -473,13 +470,12 @@ pub fn flushModule(self: *DebugSymbols, allocator: *Allocator, options: link.Opt
|
||||
const dwarf_segment = &self.load_commands.items[self.dwarf_segment_cmd_index.?].Segment;
|
||||
const debug_line_sect = &dwarf_segment.sections.items[self.debug_line_section_index.?];
|
||||
|
||||
var di_buf = std.ArrayList(u8).init(allocator);
|
||||
defer di_buf.deinit();
|
||||
|
||||
// The size of this header is variable, depending on the number of directories,
|
||||
// files, and padding. We have a function to compute the upper bound size, however,
|
||||
// because it's needed for determining where to put the offset of the first `SrcFn`.
|
||||
try di_buf.ensureTotalCapacity(self.dbgLineNeededHeaderBytes(module));
|
||||
const needed_bytes = self.dbgLineNeededHeaderBytes(module);
|
||||
var di_buf = try std.ArrayList(u8).initCapacity(allocator, needed_bytes);
|
||||
defer di_buf.deinit();
|
||||
|
||||
// initial length - length of the .debug_line contribution for this compilation unit,
|
||||
// not including the initial length itself.
|
||||
|
||||
@ -393,9 +393,8 @@ pub fn parseIntoAtoms(self: *Object, allocator: *Allocator, macho_file: *MachO)
|
||||
// local < extern defined < undefined. Unfortunately, this is not guaranteed! For instance,
|
||||
// the GO compiler does not necessarily respect that therefore we sort immediately by type
|
||||
// and address within.
|
||||
var sorted_all_nlists = std.ArrayList(NlistWithIndex).init(allocator);
|
||||
var sorted_all_nlists = try std.ArrayList(NlistWithIndex).initCapacity(allocator, self.symtab.items.len);
|
||||
defer sorted_all_nlists.deinit();
|
||||
try sorted_all_nlists.ensureTotalCapacity(self.symtab.items.len);
|
||||
|
||||
for (self.symtab.items) |nlist, index| {
|
||||
sorted_all_nlists.appendAssumeCapacity(.{
|
||||
|
||||
@ -2866,8 +2866,7 @@ pub fn cmdInit(
|
||||
const build_zig_contents = template_dir.readFileAlloc(arena, "build.zig", max_bytes) catch |err| {
|
||||
fatal("unable to read template file 'build.zig': {s}", .{@errorName(err)});
|
||||
};
|
||||
var modified_build_zig_contents = std.ArrayList(u8).init(arena);
|
||||
try modified_build_zig_contents.ensureTotalCapacity(build_zig_contents.len);
|
||||
var modified_build_zig_contents = try std.ArrayList(u8).initCapacity(arena, build_zig_contents.len);
|
||||
for (build_zig_contents) |c| {
|
||||
if (c == '$') {
|
||||
try modified_build_zig_contents.appendSlice(cwd_basename);
|
||||
|
||||
@ -4901,10 +4901,9 @@ fn finishTransFnProto(
|
||||
|
||||
// TODO check for align attribute
|
||||
|
||||
var fn_params = std.ArrayList(ast.Payload.Param).init(c.gpa);
|
||||
defer fn_params.deinit();
|
||||
const param_count: usize = if (fn_proto_ty != null) fn_proto_ty.?.getNumParams() else 0;
|
||||
try fn_params.ensureTotalCapacity(param_count);
|
||||
var fn_params = try std.ArrayList(ast.Payload.Param).initCapacity(c.gpa, param_count);
|
||||
defer fn_params.deinit();
|
||||
|
||||
var i: usize = 0;
|
||||
while (i < param_count) : (i += 1) {
|
||||
|
||||
@ -2787,9 +2787,8 @@ fn renderMacroFunc(c: *Context, node: Node) !NodeIndex {
|
||||
|
||||
fn renderParams(c: *Context, params: []Payload.Param, is_var_args: bool) !std.ArrayList(NodeIndex) {
|
||||
_ = try c.addToken(.l_paren, "(");
|
||||
var rendered = std.ArrayList(NodeIndex).init(c.gpa);
|
||||
var rendered = try std.ArrayList(NodeIndex).initCapacity(c.gpa, std.math.max(params.len, 1));
|
||||
errdefer rendered.deinit();
|
||||
try rendered.ensureTotalCapacity(std.math.max(params.len, 1));
|
||||
|
||||
for (params) |param, i| {
|
||||
if (i != 0) _ = try c.addToken(.comma, ",");
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user