Merge pull request #10077 from squeek502/arraylist-capacity

std.ArrayList: add ensureTotalCapacityPrecise and update doc comments
This commit is contained in:
Andrew Kelley 2021-11-01 14:28:27 -04:00 committed by GitHub
commit 77eefebe65
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
5 changed files with 35 additions and 22 deletions

View File

@ -56,19 +56,11 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
}
/// Initialize with capacity to hold at least `num` elements.
/// The resulting capacity is likely to be equal to `num`.
/// Deinitialize with `deinit` or use `toOwnedSlice`.
pub fn initCapacity(allocator: *Allocator, num: usize) !Self {
    var self = Self.init(allocator);
    // All allocation logic — including the zero-sized-type special case
    // (capacity = maxInt(usize), no heap allocation) — lives in
    // ensureTotalCapacityPrecise; duplicating the allocAdvanced call here
    // would allocate twice.
    try self.ensureTotalCapacityPrecise(num);
    return self;
}
@ -330,8 +322,22 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
if (better_capacity >= new_capacity) break;
}
return self.ensureTotalCapacityPrecise(better_capacity);
} else {
self.capacity = std.math.maxInt(usize);
}
}
/// Modify the array so that it can hold at least `new_capacity` items.
/// Like `ensureTotalCapacity`, but the resulting capacity is much more likely
/// (but not guaranteed) to be equal to `new_capacity`.
/// Invalidates pointers if additional memory is needed.
pub fn ensureTotalCapacityPrecise(self: *Self, new_capacity: usize) !void {
if (@sizeOf(T) > 0) {
if (self.capacity >= new_capacity) return;
// TODO This can be optimized to avoid needlessly copying undefined memory.
const new_memory = try self.allocator.reallocAtLeast(self.allocatedSlice(), better_capacity);
const new_memory = try self.allocator.reallocAtLeast(self.allocatedSlice(), new_capacity);
self.items.ptr = new_memory.ptr;
self.capacity = new_memory.len;
} else {
@ -464,14 +470,11 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
pub const Slice = if (alignment) |a| ([]align(a) T) else []T;
/// Initialize with capacity to hold at least `num` elements.
/// The resulting capacity is likely to be equal to `num`.
/// Deinitialize with `deinit` or use `toOwnedSlice`.
pub fn initCapacity(allocator: *Allocator, num: usize) !Self {
    var self = Self{};
    // Growth (allocation and capacity bookkeeping) is centralized in
    // ensureTotalCapacityPrecise; a direct allocAdvanced call here would
    // be redundant and would leak its first allocation.
    try self.ensureTotalCapacityPrecise(allocator, num);
    return self;
}
@ -685,7 +688,17 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
if (better_capacity >= new_capacity) break;
}
const new_memory = try allocator.reallocAtLeast(self.allocatedSlice(), better_capacity);
return self.ensureTotalCapacityPrecise(allocator, better_capacity);
}
/// Grow the backing allocation so that at least `new_capacity` items fit.
/// Compared to `ensureTotalCapacity`, the resulting capacity is much more
/// likely (though still not guaranteed) to equal `new_capacity` exactly.
/// Invalidates pointers if additional memory is needed.
pub fn ensureTotalCapacityPrecise(self: *Self, allocator: *Allocator, new_capacity: usize) !void {
    // Fast path: nothing to do when the current capacity already suffices.
    if (self.capacity < new_capacity) {
        const grown = try allocator.reallocAtLeast(self.allocatedSlice(), new_capacity);
        self.items.ptr = grown.ptr;
        self.capacity = grown.len;
    }
}

View File

@ -276,7 +276,7 @@ pub const Coff = struct {
if (self.sections.items.len == self.coff_header.number_of_sections)
return;
try self.sections.ensureTotalCapacity(self.coff_header.number_of_sections);
try self.sections.ensureTotalCapacityPrecise(self.coff_header.number_of_sections);
const in = self.in_file.reader();
@ -297,7 +297,7 @@ pub const Coff = struct {
std.mem.set(u8, name[8..], 0);
}
try self.sections.append(Section{
self.sections.appendAssumeCapacity(Section{
.header = SectionHeader{
.name = name,
.misc = SectionHeader.Misc{ .virtual_size = try in.readIntLittle(u32) },

View File

@ -4146,7 +4146,7 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn, arena: *Allocator) Se
// for the runtime ones.
const fn_ty = decl.ty;
const runtime_params_len = @intCast(u32, fn_ty.fnParamLen());
try inner_block.instructions.ensureTotalCapacity(gpa, runtime_params_len);
try inner_block.instructions.ensureTotalCapacityPrecise(gpa, runtime_params_len);
try sema.air_instructions.ensureUnusedCapacity(gpa, fn_info.total_params_len * 2); // * 2 for the `addType`
try sema.inst_map.ensureUnusedCapacity(gpa, fn_info.total_params_len);

View File

@ -102,7 +102,7 @@ pub fn calcAdhocSignature(
var buffer = try allocator.alloc(u8, page_size);
defer allocator.free(buffer);
try cdir.data.ensureTotalCapacity(allocator, total_pages * hash_size + id.len + 1);
try cdir.data.ensureTotalCapacityPrecise(allocator, total_pages * hash_size + id.len + 1);
// 1. Save the identifier and update offsets
cdir.inner.identOffset = cdir.inner.length;

View File

@ -223,7 +223,7 @@ pub const SegmentCommand = struct {
var segment = SegmentCommand{
.inner = inner,
};
try segment.sections.ensureTotalCapacity(alloc, inner.nsects);
try segment.sections.ensureTotalCapacityPrecise(alloc, inner.nsects);
var i: usize = 0;
while (i < inner.nsects) : (i += 1) {