mem: rename align*Generic to mem.align*

Anecdote 1: The generic version is way more popular than the non-generic
one in the Zig codebase:

     git grep -w alignForward | wc -l
    56
     git grep -w alignForwardGeneric | wc -l
    149

     git grep -w alignBackward | wc -l
    6
     git grep -w alignBackwardGeneric | wc -l
    15

Anecdote 2: In my project (turbonss), which does a lot of arithmetic and
alignment, I use the Generic functions exclusively.

Anecdote 3: We used only the Generic versions in the Macho Man's linker
workshop.
Motiejus Jakštys, 2023-06-09 16:02:18 -07:00 (committed by Andrew Kelley)
parent 5baa05664e
commit d41111d7ef
39 changed files with 223 additions and 232 deletions
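
Before diving into the per-file changes, note the shape of the edit that repeats
throughout the diff: every call site now passes the result type explicitly as the
first argument, and the old *Generic names survive only as @compileError stubs
pointing at the new names. A minimal sketch of a migrated call site, not taken
from the commit (the values are made up for illustration):

    const std = @import("std");

    // Before this commit a call site picked one of two functions:
    //     std.mem.alignForward(offset, 8);               // usize-only variant
    //     std.mem.alignForwardGeneric(u64, offset, 512); // explicitly typed variant
    // After this commit both are spelled the same way, with the type first.
    test "call-site shape after the rename" {
        const offset: u64 = 300; // hypothetical value
        try std.testing.expectEqual(@as(u64, 512), std.mem.alignForward(u64, offset, 512));
        try std.testing.expectEqual(@as(usize, 304), std.mem.alignForward(usize, 300, 8));
    }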


@ -931,18 +931,18 @@ const LinuxThreadImpl = struct {
guard_offset = bytes; guard_offset = bytes;
bytes += @max(page_size, config.stack_size); bytes += @max(page_size, config.stack_size);
bytes = std.mem.alignForward(bytes, page_size); bytes = std.mem.alignForward(usize, bytes, page_size);
stack_offset = bytes; stack_offset = bytes;
bytes = std.mem.alignForward(bytes, linux.tls.tls_image.alloc_align); bytes = std.mem.alignForward(usize, bytes, linux.tls.tls_image.alloc_align);
tls_offset = bytes; tls_offset = bytes;
bytes += linux.tls.tls_image.alloc_size; bytes += linux.tls.tls_image.alloc_size;
bytes = std.mem.alignForward(bytes, @alignOf(Instance)); bytes = std.mem.alignForward(usize, bytes, @alignOf(Instance));
instance_offset = bytes; instance_offset = bytes;
bytes += @sizeOf(Instance); bytes += @sizeOf(Instance);
bytes = std.mem.alignForward(bytes, page_size); bytes = std.mem.alignForward(usize, bytes, page_size);
break :blk bytes; break :blk bytes;
}; };


@ -124,7 +124,7 @@ pub const ElfDynLib = struct {
// corresponding to the actual LOAD sections. // corresponding to the actual LOAD sections.
const file_bytes = try os.mmap( const file_bytes = try os.mmap(
null, null,
mem.alignForward(size, mem.page_size), mem.alignForward(usize, size, mem.page_size),
os.PROT.READ, os.PROT.READ,
os.MAP.PRIVATE, os.MAP.PRIVATE,
fd, fd,
@ -187,7 +187,7 @@ pub const ElfDynLib = struct {
// extra nonsense mapped before/after the VirtAddr,MemSiz // extra nonsense mapped before/after the VirtAddr,MemSiz
const aligned_addr = (base + ph.p_vaddr) & ~(@as(usize, mem.page_size) - 1); const aligned_addr = (base + ph.p_vaddr) & ~(@as(usize, mem.page_size) - 1);
const extra_bytes = (base + ph.p_vaddr) - aligned_addr; const extra_bytes = (base + ph.p_vaddr) - aligned_addr;
const extended_memsz = mem.alignForward(ph.p_memsz + extra_bytes, mem.page_size); const extended_memsz = mem.alignForward(usize, ph.p_memsz + extra_bytes, mem.page_size);
const ptr = @intToPtr([*]align(mem.page_size) u8, aligned_addr); const ptr = @intToPtr([*]align(mem.page_size) u8, aligned_addr);
const prot = elfToMmapProt(ph.p_flags); const prot = elfToMmapProt(ph.p_flags);
if ((ph.p_flags & elf.PF_W) == 0) { if ((ph.p_flags & elf.PF_W) == 0) {


@ -1545,13 +1545,13 @@ pub fn HashMapUnmanaged(
const meta_size = @sizeOf(Header) + new_capacity * @sizeOf(Metadata); const meta_size = @sizeOf(Header) + new_capacity * @sizeOf(Metadata);
comptime assert(@alignOf(Metadata) == 1); comptime assert(@alignOf(Metadata) == 1);
const keys_start = std.mem.alignForward(meta_size, key_align); const keys_start = std.mem.alignForward(usize, meta_size, key_align);
const keys_end = keys_start + new_capacity * @sizeOf(K); const keys_end = keys_start + new_capacity * @sizeOf(K);
const vals_start = std.mem.alignForward(keys_end, val_align); const vals_start = std.mem.alignForward(usize, keys_end, val_align);
const vals_end = vals_start + new_capacity * @sizeOf(V); const vals_end = vals_start + new_capacity * @sizeOf(V);
const total_size = std.mem.alignForward(vals_end, max_align); const total_size = std.mem.alignForward(usize, vals_end, max_align);
const slice = try allocator.alignedAlloc(u8, max_align, total_size); const slice = try allocator.alignedAlloc(u8, max_align, total_size);
const ptr = @ptrToInt(slice.ptr); const ptr = @ptrToInt(slice.ptr);
@ -1581,13 +1581,13 @@ pub fn HashMapUnmanaged(
const meta_size = @sizeOf(Header) + cap * @sizeOf(Metadata); const meta_size = @sizeOf(Header) + cap * @sizeOf(Metadata);
comptime assert(@alignOf(Metadata) == 1); comptime assert(@alignOf(Metadata) == 1);
const keys_start = std.mem.alignForward(meta_size, key_align); const keys_start = std.mem.alignForward(usize, meta_size, key_align);
const keys_end = keys_start + cap * @sizeOf(K); const keys_end = keys_start + cap * @sizeOf(K);
const vals_start = std.mem.alignForward(keys_end, val_align); const vals_start = std.mem.alignForward(usize, keys_end, val_align);
const vals_end = vals_start + cap * @sizeOf(V); const vals_end = vals_start + cap * @sizeOf(V);
const total_size = std.mem.alignForward(vals_end, max_align); const total_size = std.mem.alignForward(usize, vals_end, max_align);
const slice = @intToPtr([*]align(max_align) u8, @ptrToInt(self.header()))[0..total_size]; const slice = @intToPtr([*]align(max_align) u8, @ptrToInt(self.header()))[0..total_size];
allocator.free(slice); allocator.free(slice);
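
The hunk above is a typical consumer of these helpers: it packs a header, a
metadata array, the keys, and the values into one allocation, bumping the running
offset with alignForward before each differently-aligned region. A rough sketch of
that layout arithmetic with made-up sizes (not the actual HashMap code; the header
size and alignments are invented for illustration):

    const std = @import("std");

    test "packing differently-aligned arrays into one buffer" {
        const cap: usize = 8;
        const key_align: usize = 8; // pretend keys are u64
        const val_align: usize = 4; // pretend values are u32
        const meta_size: usize = 16 + cap * 1; // 16-byte header + 1 metadata byte per slot
        const keys_start = std.mem.alignForward(usize, meta_size, key_align);
        const keys_end = keys_start + cap * 8;
        const vals_start = std.mem.alignForward(usize, keys_end, val_align);
        const total_size = vals_start + cap * 4;
        try std.testing.expectEqual(@as(usize, 24), keys_start); // 24 is already 8-aligned
        try std.testing.expectEqual(@as(usize, 120), total_size);
    }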


@ -83,7 +83,7 @@ const CAllocator = struct {
// the aligned address. // the aligned address.
var unaligned_ptr = @ptrCast([*]u8, c.malloc(len + alignment - 1 + @sizeOf(usize)) orelse return null); var unaligned_ptr = @ptrCast([*]u8, c.malloc(len + alignment - 1 + @sizeOf(usize)) orelse return null);
const unaligned_addr = @ptrToInt(unaligned_ptr); const unaligned_addr = @ptrToInt(unaligned_ptr);
const aligned_addr = mem.alignForward(unaligned_addr + @sizeOf(usize), alignment); const aligned_addr = mem.alignForward(usize, unaligned_addr + @sizeOf(usize), alignment);
var aligned_ptr = unaligned_ptr + (aligned_addr - unaligned_addr); var aligned_ptr = unaligned_ptr + (aligned_addr - unaligned_addr);
getHeader(aligned_ptr).* = unaligned_ptr; getHeader(aligned_ptr).* = unaligned_ptr;
@ -249,7 +249,7 @@ pub const wasm_allocator = Allocator{
/// Verifies that the adjusted length will still map to the full length /// Verifies that the adjusted length will still map to the full length
pub fn alignPageAllocLen(full_len: usize, len: usize) usize { pub fn alignPageAllocLen(full_len: usize, len: usize) usize {
const aligned_len = mem.alignAllocLen(full_len, len); const aligned_len = mem.alignAllocLen(full_len, len);
assert(mem.alignForward(aligned_len, mem.page_size) == full_len); assert(mem.alignForward(usize, aligned_len, mem.page_size) == full_len);
return aligned_len; return aligned_len;
} }
@ -307,7 +307,7 @@ pub const HeapAllocator = switch (builtin.os.tag) {
}; };
const ptr = os.windows.kernel32.HeapAlloc(heap_handle, 0, amt) orelse return null; const ptr = os.windows.kernel32.HeapAlloc(heap_handle, 0, amt) orelse return null;
const root_addr = @ptrToInt(ptr); const root_addr = @ptrToInt(ptr);
const aligned_addr = mem.alignForward(root_addr, ptr_align); const aligned_addr = mem.alignForward(usize, root_addr, ptr_align);
const buf = @intToPtr([*]u8, aligned_addr)[0..n]; const buf = @intToPtr([*]u8, aligned_addr)[0..n];
getRecordPtr(buf).* = root_addr; getRecordPtr(buf).* = root_addr;
return buf.ptr; return buf.ptr;
@ -840,7 +840,7 @@ pub fn testAllocatorAlignedShrink(base_allocator: mem.Allocator) !void {
// which is 16 pages, hence the 32. This test may require to increase // which is 16 pages, hence the 32. This test may require to increase
// the size of the allocations feeding the `allocator` parameter if they // the size of the allocations feeding the `allocator` parameter if they
// fail, because of this high over-alignment we want to have. // fail, because of this high over-alignment we want to have.
while (@ptrToInt(slice.ptr) == mem.alignForward(@ptrToInt(slice.ptr), mem.page_size * 32)) { while (@ptrToInt(slice.ptr) == mem.alignForward(usize, @ptrToInt(slice.ptr), mem.page_size * 32)) {
try stuff_to_free.append(slice); try stuff_to_free.append(slice);
slice = try allocator.alignedAlloc(u8, 16, alloc_size); slice = try allocator.alignedAlloc(u8, 16, alloc_size);
} }


@ -17,7 +17,7 @@ fn alloc(_: *anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 {
_ = log2_align; _ = log2_align;
assert(n > 0); assert(n > 0);
if (n > maxInt(usize) - (mem.page_size - 1)) return null; if (n > maxInt(usize) - (mem.page_size - 1)) return null;
const aligned_len = mem.alignForward(n, mem.page_size); const aligned_len = mem.alignForward(usize, n, mem.page_size);
if (builtin.os.tag == .windows) { if (builtin.os.tag == .windows) {
const w = os.windows; const w = os.windows;
@ -54,14 +54,14 @@ fn resize(
) bool { ) bool {
_ = log2_buf_align; _ = log2_buf_align;
_ = return_address; _ = return_address;
const new_size_aligned = mem.alignForward(new_size, mem.page_size); const new_size_aligned = mem.alignForward(usize, new_size, mem.page_size);
if (builtin.os.tag == .windows) { if (builtin.os.tag == .windows) {
const w = os.windows; const w = os.windows;
if (new_size <= buf_unaligned.len) { if (new_size <= buf_unaligned.len) {
const base_addr = @ptrToInt(buf_unaligned.ptr); const base_addr = @ptrToInt(buf_unaligned.ptr);
const old_addr_end = base_addr + buf_unaligned.len; const old_addr_end = base_addr + buf_unaligned.len;
const new_addr_end = mem.alignForward(base_addr + new_size, mem.page_size); const new_addr_end = mem.alignForward(usize, base_addr + new_size, mem.page_size);
if (old_addr_end > new_addr_end) { if (old_addr_end > new_addr_end) {
// For shrinking that is not releasing, we will only // For shrinking that is not releasing, we will only
// decommit the pages not needed anymore. // decommit the pages not needed anymore.
@ -73,14 +73,14 @@ fn resize(
} }
return true; return true;
} }
const old_size_aligned = mem.alignForward(buf_unaligned.len, mem.page_size); const old_size_aligned = mem.alignForward(usize, buf_unaligned.len, mem.page_size);
if (new_size_aligned <= old_size_aligned) { if (new_size_aligned <= old_size_aligned) {
return true; return true;
} }
return false; return false;
} }
const buf_aligned_len = mem.alignForward(buf_unaligned.len, mem.page_size); const buf_aligned_len = mem.alignForward(usize, buf_unaligned.len, mem.page_size);
if (new_size_aligned == buf_aligned_len) if (new_size_aligned == buf_aligned_len)
return true; return true;
@ -103,7 +103,7 @@ fn free(_: *anyopaque, slice: []u8, log2_buf_align: u8, return_address: usize) v
if (builtin.os.tag == .windows) { if (builtin.os.tag == .windows) {
os.windows.VirtualFree(slice.ptr, 0, os.windows.MEM_RELEASE); os.windows.VirtualFree(slice.ptr, 0, os.windows.MEM_RELEASE);
} else { } else {
const buf_aligned_len = mem.alignForward(slice.len, mem.page_size); const buf_aligned_len = mem.alignForward(usize, slice.len, mem.page_size);
const ptr = @alignCast(mem.page_size, slice.ptr); const ptr = @alignCast(mem.page_size, slice.ptr);
os.munmap(ptr[0..buf_aligned_len]); os.munmap(ptr[0..buf_aligned_len]);
} }


@ -100,7 +100,7 @@ fn extendedOffset() usize {
} }
fn nPages(memsize: usize) usize { fn nPages(memsize: usize) usize {
return mem.alignForward(memsize, mem.page_size) / mem.page_size; return mem.alignForward(usize, memsize, mem.page_size) / mem.page_size;
} }
fn alloc(ctx: *anyopaque, len: usize, log2_align: u8, ra: usize) ?[*]u8 { fn alloc(ctx: *anyopaque, len: usize, log2_align: u8, ra: usize) ?[*]u8 {
@ -170,7 +170,7 @@ fn resize(
_ = ctx; _ = ctx;
_ = log2_buf_align; _ = log2_buf_align;
_ = return_address; _ = return_address;
const aligned_len = mem.alignForward(buf.len, mem.page_size); const aligned_len = mem.alignForward(usize, buf.len, mem.page_size);
if (new_len > aligned_len) return false; if (new_len > aligned_len) return false;
const current_n = nPages(aligned_len); const current_n = nPages(aligned_len);
const new_n = nPages(new_len); const new_n = nPages(new_len);
@ -190,7 +190,7 @@ fn free(
_ = ctx; _ = ctx;
_ = log2_buf_align; _ = log2_buf_align;
_ = return_address; _ = return_address;
const aligned_len = mem.alignForward(buf.len, mem.page_size); const aligned_len = mem.alignForward(usize, buf.len, mem.page_size);
const current_n = nPages(aligned_len); const current_n = nPages(aligned_len);
const base = nPages(@ptrToInt(buf.ptr)); const base = nPages(@ptrToInt(buf.ptr));
freePages(base, base + current_n); freePages(base, base + current_n);


@ -186,7 +186,7 @@ pub const ArenaAllocator = struct {
const cur_alloc_buf = @ptrCast([*]u8, cur_node)[0..cur_node.data]; const cur_alloc_buf = @ptrCast([*]u8, cur_node)[0..cur_node.data];
const cur_buf = cur_alloc_buf[@sizeOf(BufNode)..]; const cur_buf = cur_alloc_buf[@sizeOf(BufNode)..];
const addr = @ptrToInt(cur_buf.ptr) + self.state.end_index; const addr = @ptrToInt(cur_buf.ptr) + self.state.end_index;
const adjusted_addr = mem.alignForward(addr, ptr_align); const adjusted_addr = mem.alignForward(usize, addr, ptr_align);
const adjusted_index = self.state.end_index + (adjusted_addr - addr); const adjusted_index = self.state.end_index + (adjusted_addr - addr);
const new_end_index = adjusted_index + n; const new_end_index = adjusted_index + n;


@ -309,6 +309,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
fn bucketStackFramesStart(size_class: usize) usize { fn bucketStackFramesStart(size_class: usize) usize {
return mem.alignForward( return mem.alignForward(
usize,
@sizeOf(BucketHeader) + usedBitsCount(size_class), @sizeOf(BucketHeader) + usedBitsCount(size_class),
@alignOf(usize), @alignOf(usize),
); );


@ -4213,23 +4213,17 @@ test "sliceAsBytes preserves pointer attributes" {
/// Round an address up to the next (or current) aligned address. /// Round an address up to the next (or current) aligned address.
/// The alignment must be a power of 2 and greater than 0. /// The alignment must be a power of 2 and greater than 0.
/// Asserts that rounding up the address does not cause integer overflow. /// Asserts that rounding up the address does not cause integer overflow.
pub fn alignForward(addr: usize, alignment: usize) usize { pub fn alignForward(comptime T: type, addr: T, alignment: T) T {
return alignForwardGeneric(usize, addr, alignment); assert(isValidAlignGeneric(T, alignment));
return alignBackward(T, addr + (alignment - 1), alignment);
} }
pub fn alignForwardLog2(addr: usize, log2_alignment: u8) usize { pub fn alignForwardLog2(addr: usize, log2_alignment: u8) usize {
const alignment = @as(usize, 1) << @intCast(math.Log2Int(usize), log2_alignment); const alignment = @as(usize, 1) << @intCast(math.Log2Int(usize), log2_alignment);
return alignForward(addr, alignment); return alignForward(usize, addr, alignment);
} }
/// Round an address up to the next (or current) aligned address. pub const alignForwardGeneric = @compileError("renamed to alignForward");
/// The alignment must be a power of 2 and greater than 0.
/// Asserts that rounding up the address does not cause integer overflow.
pub fn alignForwardGeneric(comptime T: type, addr: T, alignment: T) T {
assert(alignment > 0);
assert(std.math.isPowerOfTwo(alignment));
return alignBackwardGeneric(T, addr + (alignment - 1), alignment);
}
/// Force an evaluation of the expression; this tries to prevent /// Force an evaluation of the expression; this tries to prevent
/// the compiler from optimizing the computation away even if the /// the compiler from optimizing the computation away even if the
@ -4322,38 +4316,32 @@ test "doNotOptimizeAway" {
} }
test "alignForward" { test "alignForward" {
try testing.expect(alignForward(1, 1) == 1); try testing.expect(alignForward(usize, 1, 1) == 1);
try testing.expect(alignForward(2, 1) == 2); try testing.expect(alignForward(usize, 2, 1) == 2);
try testing.expect(alignForward(1, 2) == 2); try testing.expect(alignForward(usize, 1, 2) == 2);
try testing.expect(alignForward(2, 2) == 2); try testing.expect(alignForward(usize, 2, 2) == 2);
try testing.expect(alignForward(3, 2) == 4); try testing.expect(alignForward(usize, 3, 2) == 4);
try testing.expect(alignForward(4, 2) == 4); try testing.expect(alignForward(usize, 4, 2) == 4);
try testing.expect(alignForward(7, 8) == 8); try testing.expect(alignForward(usize, 7, 8) == 8);
try testing.expect(alignForward(8, 8) == 8); try testing.expect(alignForward(usize, 8, 8) == 8);
try testing.expect(alignForward(9, 8) == 16); try testing.expect(alignForward(usize, 9, 8) == 16);
try testing.expect(alignForward(15, 8) == 16); try testing.expect(alignForward(usize, 15, 8) == 16);
try testing.expect(alignForward(16, 8) == 16); try testing.expect(alignForward(usize, 16, 8) == 16);
try testing.expect(alignForward(17, 8) == 24); try testing.expect(alignForward(usize, 17, 8) == 24);
} }
/// Round an address down to the previous (or current) aligned address. /// Round an address down to the previous (or current) aligned address.
/// Unlike `alignBackward`, `alignment` can be any positive number, not just a power of 2. /// Unlike `alignBackward`, `alignment` can be any positive number, not just a power of 2.
pub fn alignBackwardAnyAlign(i: usize, alignment: usize) usize { pub fn alignBackwardAnyAlign(i: usize, alignment: usize) usize {
if (isValidAlign(alignment)) if (isValidAlign(alignment))
return alignBackward(i, alignment); return alignBackward(usize, i, alignment);
assert(alignment != 0); assert(alignment != 0);
return i - @mod(i, alignment); return i - @mod(i, alignment);
} }
/// Round an address down to the previous (or current) aligned address. /// Round an address down to the previous (or current) aligned address.
/// The alignment must be a power of 2 and greater than 0. /// The alignment must be a power of 2 and greater than 0.
pub fn alignBackward(addr: usize, alignment: usize) usize { pub fn alignBackward(comptime T: type, addr: T, alignment: T) T {
return alignBackwardGeneric(usize, addr, alignment);
}
/// Round an address down to the previous (or current) aligned address.
/// The alignment must be a power of 2 and greater than 0.
pub fn alignBackwardGeneric(comptime T: type, addr: T, alignment: T) T {
assert(isValidAlignGeneric(T, alignment)); assert(isValidAlignGeneric(T, alignment));
// 000010000 // example alignment // 000010000 // example alignment
// 000001111 // subtract 1 // 000001111 // subtract 1
@ -4361,6 +4349,8 @@ pub fn alignBackwardGeneric(comptime T: type, addr: T, alignment: T) T {
return addr & ~(alignment - 1); return addr & ~(alignment - 1);
} }
pub const alignBackwardGeneric = @compileError("renamed to alignBackward");
/// Returns whether `alignment` is a valid alignment, meaning it is /// Returns whether `alignment` is a valid alignment, meaning it is
/// a positive power of 2. /// a positive power of 2.
pub fn isValidAlign(alignment: usize) bool { pub fn isValidAlign(alignment: usize) bool {
@ -4391,7 +4381,7 @@ pub fn isAligned(addr: usize, alignment: usize) bool {
} }
pub fn isAlignedGeneric(comptime T: type, addr: T, alignment: T) bool { pub fn isAlignedGeneric(comptime T: type, addr: T, alignment: T) bool {
return alignBackwardGeneric(T, addr, alignment) == addr; return alignBackward(T, addr, alignment) == addr;
} }
test "isAligned" { test "isAligned" {
@ -4439,7 +4429,7 @@ pub fn alignInBytes(bytes: []u8, comptime new_alignment: usize) ?[]align(new_ali
const begin_address = @ptrToInt(bytes.ptr); const begin_address = @ptrToInt(bytes.ptr);
const end_address = begin_address + bytes.len; const end_address = begin_address + bytes.len;
const begin_address_aligned = mem.alignForward(begin_address, new_alignment); const begin_address_aligned = mem.alignForward(usize, begin_address, new_alignment);
const new_length = std.math.sub(usize, end_address, begin_address_aligned) catch |e| switch (e) { const new_length = std.math.sub(usize, end_address, begin_address_aligned) catch |e| switch (e) {
error.Overflow => return null, error.Overflow => return null,
}; };
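
The file above is the heart of the commit: alignForward and alignBackward now take
the integer type as a comptime parameter, assert that the alignment is a positive
power of two, and the old alignForwardGeneric/alignBackwardGeneric names are turned
into @compileError stubs that point at the new names. A small sketch of the
resulting behaviour; the usize cases are lifted from the updated "alignForward"
test in the hunk, the rest is extra illustration:

    const std = @import("std");

    test "alignForward and alignBackward after the rename" {
        // Round up to the next multiple of a power-of-two alignment...
        try std.testing.expect(std.mem.alignForward(usize, 17, 8) == 24);
        try std.testing.expect(std.mem.alignForward(usize, 16, 8) == 16);
        // ...or down to the previous one.
        try std.testing.expect(std.mem.alignBackward(usize, 17, 8) == 16);
        // Any unsigned integer type works now, e.g. u64 object-file offsets.
        try std.testing.expect(std.mem.alignBackward(u64, 1000, 512) == 512);
    }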


@ -208,7 +208,7 @@ pub fn allocAdvancedWithRetAddr(
comptime assert(a <= mem.page_size); comptime assert(a <= mem.page_size);
if (n == 0) { if (n == 0) {
const ptr = comptime std.mem.alignBackward(math.maxInt(usize), a); const ptr = comptime std.mem.alignBackward(usize, math.maxInt(usize), a);
return @intToPtr([*]align(a) T, ptr)[0..0]; return @intToPtr([*]align(a) T, ptr)[0..0];
} }
@ -267,7 +267,7 @@ pub fn reallocAdvanced(
} }
if (new_n == 0) { if (new_n == 0) {
self.free(old_mem); self.free(old_mem);
const ptr = comptime std.mem.alignBackward(math.maxInt(usize), Slice.alignment); const ptr = comptime std.mem.alignBackward(usize, math.maxInt(usize), Slice.alignment);
return @intToPtr([*]align(Slice.alignment) T, ptr)[0..0]; return @intToPtr([*]align(Slice.alignment) T, ptr)[0..0];
} }


@ -105,9 +105,9 @@ pub fn TrailerFlags(comptime Fields: type) type {
const active = (self.bits & (1 << i)) != 0; const active = (self.bits & (1 << i)) != 0;
if (i == @enumToInt(field)) { if (i == @enumToInt(field)) {
assert(active); assert(active);
return mem.alignForwardGeneric(usize, off, @alignOf(field_info.type)); return mem.alignForward(usize, off, @alignOf(field_info.type));
} else if (active) { } else if (active) {
off = mem.alignForwardGeneric(usize, off, @alignOf(field_info.type)); off = mem.alignForward(usize, off, @alignOf(field_info.type));
off += @sizeOf(field_info.type); off += @sizeOf(field_info.type);
} }
} }
@ -123,7 +123,7 @@ pub fn TrailerFlags(comptime Fields: type) type {
if (@sizeOf(field.type) == 0) if (@sizeOf(field.type) == 0)
continue; continue;
if ((self.bits & (1 << i)) != 0) { if ((self.bits & (1 << i)) != 0) {
off = mem.alignForwardGeneric(usize, off, @alignOf(field.type)); off = mem.alignForward(usize, off, @alignOf(field.type));
off += @sizeOf(field.type); off += @sizeOf(field.type);
} }
} }


@ -233,7 +233,7 @@ fn initTLS(phdrs: []elf.Phdr) void {
l += tls_align_factor - delta; l += tls_align_factor - delta;
l += @sizeOf(CustomData); l += @sizeOf(CustomData);
tcb_offset = l; tcb_offset = l;
l += mem.alignForward(tls_tcb_size, tls_align_factor); l += mem.alignForward(usize, tls_tcb_size, tls_align_factor);
data_offset = l; data_offset = l;
l += tls_data_alloc_size; l += tls_data_alloc_size;
break :blk l; break :blk l;
@ -241,14 +241,14 @@ fn initTLS(phdrs: []elf.Phdr) void {
.VariantII => blk: { .VariantII => blk: {
var l: usize = 0; var l: usize = 0;
data_offset = l; data_offset = l;
l += mem.alignForward(tls_data_alloc_size, tls_align_factor); l += mem.alignForward(usize, tls_data_alloc_size, tls_align_factor);
// The thread pointer is aligned to p_align // The thread pointer is aligned to p_align
tcb_offset = l; tcb_offset = l;
l += tls_tcb_size; l += tls_tcb_size;
// The CustomData structure is right after the TCB with no padding // The CustomData structure is right after the TCB with no padding
// in between so it can be easily found // in between so it can be easily found
l += @sizeOf(CustomData); l += @sizeOf(CustomData);
l = mem.alignForward(l, @alignOf(DTV)); l = mem.alignForward(usize, l, @alignOf(DTV));
dtv_offset = l; dtv_offset = l;
l += @sizeOf(DTV); l += @sizeOf(DTV);
break :blk l; break :blk l;
@ -329,7 +329,7 @@ pub fn initStaticTLS(phdrs: []elf.Phdr) void {
// Make sure the slice is correctly aligned. // Make sure the slice is correctly aligned.
const begin_addr = @ptrToInt(alloc_tls_area.ptr); const begin_addr = @ptrToInt(alloc_tls_area.ptr);
const begin_aligned_addr = mem.alignForward(begin_addr, tls_image.alloc_align); const begin_aligned_addr = mem.alignForward(usize, begin_addr, tls_image.alloc_align);
const start = begin_aligned_addr - begin_addr; const start = begin_aligned_addr - begin_addr;
break :blk alloc_tls_area[start .. start + tls_image.alloc_size]; break :blk alloc_tls_area[start .. start + tls_image.alloc_size];
}; };


@ -24,7 +24,7 @@ const UefiPoolAllocator = struct {
const ptr_align = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_ptr_align); const ptr_align = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_ptr_align);
const metadata_len = mem.alignForward(@sizeOf(usize), ptr_align); const metadata_len = mem.alignForward(usize, @sizeOf(usize), ptr_align);
const full_len = metadata_len + len; const full_len = metadata_len + len;
@ -32,7 +32,7 @@ const UefiPoolAllocator = struct {
if (uefi.system_table.boot_services.?.allocatePool(uefi.efi_pool_memory_type, full_len, &unaligned_ptr) != .Success) return null; if (uefi.system_table.boot_services.?.allocatePool(uefi.efi_pool_memory_type, full_len, &unaligned_ptr) != .Success) return null;
const unaligned_addr = @ptrToInt(unaligned_ptr); const unaligned_addr = @ptrToInt(unaligned_ptr);
const aligned_addr = mem.alignForward(unaligned_addr + @sizeOf(usize), ptr_align); const aligned_addr = mem.alignForward(usize, unaligned_addr + @sizeOf(usize), ptr_align);
var aligned_ptr = unaligned_ptr + (aligned_addr - unaligned_addr); var aligned_ptr = unaligned_ptr + (aligned_addr - unaligned_addr);
getHeader(aligned_ptr).* = unaligned_ptr; getHeader(aligned_ptr).* = unaligned_ptr;


@ -116,7 +116,7 @@ pub fn pipeToFileSystem(dir: std.fs.Dir, reader: anytype, options: Options) !voi
const header: Header = .{ .bytes = buffer[start..][0..512] }; const header: Header = .{ .bytes = buffer[start..][0..512] };
start += 512; start += 512;
const file_size = try header.fileSize(); const file_size = try header.fileSize();
const rounded_file_size = std.mem.alignForwardGeneric(u64, file_size, 512); const rounded_file_size = std.mem.alignForward(u64, file_size, 512);
const pad_len = @intCast(usize, rounded_file_size - file_size); const pad_len = @intCast(usize, rounded_file_size - file_size);
const unstripped_file_name = try header.fullFileName(&file_name_buffer); const unstripped_file_name = try header.fullFileName(&file_name_buffer);
switch (header.fileType()) { switch (header.fileType()) {


@ -1944,7 +1944,7 @@ pub const Target = struct {
16 => 2, 16 => 2,
32 => 4, 32 => 4,
64 => 8, 64 => 8,
80 => @intCast(u16, mem.alignForward(10, c_type_alignment(t, .longdouble))), 80 => @intCast(u16, mem.alignForward(usize, 10, c_type_alignment(t, .longdouble))),
128 => 16, 128 => 16,
else => unreachable, else => unreachable,
}, },


@ -305,7 +305,7 @@ pub fn expectEqualSlices(comptime T: type, expected: []const T, actual: []const
var window_start: usize = 0; var window_start: usize = 0;
if (@max(actual.len, expected.len) > max_window_size) { if (@max(actual.len, expected.len) > max_window_size) {
const alignment = if (T == u8) 16 else 2; const alignment = if (T == u8) 16 else 2;
window_start = std.mem.alignBackward(diff_index - @min(diff_index, alignment), alignment); window_start = std.mem.alignBackward(usize, diff_index - @min(diff_index, alignment), alignment);
} }
const expected_window = expected[window_start..@min(expected.len, window_start + max_window_size)]; const expected_window = expected[window_start..@min(expected.len, window_start + max_window_size)];
const expected_truncated = window_start + expected_window.len < expected.len; const expected_truncated = window_start + expected_window.len < expected.len;


@ -1293,7 +1293,7 @@ pub const Union = struct {
payload_align = @max(payload_align, 1); payload_align = @max(payload_align, 1);
if (!have_tag or !u.tag_ty.hasRuntimeBits(mod)) { if (!have_tag or !u.tag_ty.hasRuntimeBits(mod)) {
return .{ return .{
.abi_size = std.mem.alignForwardGeneric(u64, payload_size, payload_align), .abi_size = std.mem.alignForward(u64, payload_size, payload_align),
.abi_align = payload_align, .abi_align = payload_align,
.most_aligned_field = most_aligned_field, .most_aligned_field = most_aligned_field,
.most_aligned_field_size = most_aligned_field_size, .most_aligned_field_size = most_aligned_field_size,
@ -1314,18 +1314,18 @@ pub const Union = struct {
if (tag_align >= payload_align) { if (tag_align >= payload_align) {
// {Tag, Payload} // {Tag, Payload}
size += tag_size; size += tag_size;
size = std.mem.alignForwardGeneric(u64, size, payload_align); size = std.mem.alignForward(u64, size, payload_align);
size += payload_size; size += payload_size;
const prev_size = size; const prev_size = size;
size = std.mem.alignForwardGeneric(u64, size, tag_align); size = std.mem.alignForward(u64, size, tag_align);
padding = @intCast(u32, size - prev_size); padding = @intCast(u32, size - prev_size);
} else { } else {
// {Payload, Tag} // {Payload, Tag}
size += payload_size; size += payload_size;
size = std.mem.alignForwardGeneric(u64, size, tag_align); size = std.mem.alignForward(u64, size, tag_align);
size += tag_size; size += tag_size;
const prev_size = size; const prev_size = size;
size = std.mem.alignForwardGeneric(u64, size, payload_align); size = std.mem.alignForward(u64, size, payload_align);
padding = @intCast(u32, size - prev_size); padding = @intCast(u32, size - prev_size);
} }
return .{ return .{


@ -566,7 +566,7 @@ fn gen(self: *Self) !void {
// Backpatch stack offset // Backpatch stack offset
const total_stack_size = self.max_end_stack + self.saved_regs_stack_space; const total_stack_size = self.max_end_stack + self.saved_regs_stack_space;
const aligned_total_stack_end = mem.alignForwardGeneric(u32, total_stack_size, self.stack_align); const aligned_total_stack_end = mem.alignForward(u32, total_stack_size, self.stack_align);
const stack_size = aligned_total_stack_end - self.saved_regs_stack_space; const stack_size = aligned_total_stack_end - self.saved_regs_stack_space;
self.max_end_stack = stack_size; self.max_end_stack = stack_size;
if (math.cast(u12, stack_size)) |size| { if (math.cast(u12, stack_size)) |size| {
@ -1011,7 +1011,7 @@ fn allocMem(
std.math.ceilPowerOfTwoAssert(u32, abi_size); std.math.ceilPowerOfTwoAssert(u32, abi_size);
// TODO find a free slot instead of always appending // TODO find a free slot instead of always appending
const offset = mem.alignForwardGeneric(u32, self.next_stack_offset, adjusted_align) + abi_size; const offset = mem.alignForward(u32, self.next_stack_offset, adjusted_align) + abi_size;
self.next_stack_offset = offset; self.next_stack_offset = offset;
self.max_end_stack = @max(self.max_end_stack, self.next_stack_offset); self.max_end_stack = @max(self.max_end_stack, self.next_stack_offset);
@ -6328,7 +6328,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
const param_size = @intCast(u32, ty.toType().abiSize(mod)); const param_size = @intCast(u32, ty.toType().abiSize(mod));
const param_alignment = ty.toType().abiAlignment(mod); const param_alignment = ty.toType().abiAlignment(mod);
stack_offset = std.mem.alignForwardGeneric(u32, stack_offset, param_alignment); stack_offset = std.mem.alignForward(u32, stack_offset, param_alignment);
result.args[i] = .{ .stack_argument_offset = stack_offset }; result.args[i] = .{ .stack_argument_offset = stack_offset };
stack_offset += param_size; stack_offset += param_size;
} else { } else {


@ -560,7 +560,7 @@ fn gen(self: *Self) !void {
// Backpatch stack offset // Backpatch stack offset
const total_stack_size = self.max_end_stack + self.saved_regs_stack_space; const total_stack_size = self.max_end_stack + self.saved_regs_stack_space;
const aligned_total_stack_end = mem.alignForwardGeneric(u32, total_stack_size, self.stack_align); const aligned_total_stack_end = mem.alignForward(u32, total_stack_size, self.stack_align);
const stack_size = aligned_total_stack_end - self.saved_regs_stack_space; const stack_size = aligned_total_stack_end - self.saved_regs_stack_space;
self.max_end_stack = stack_size; self.max_end_stack = stack_size;
self.mir_instructions.set(sub_reloc, .{ self.mir_instructions.set(sub_reloc, .{
@ -991,7 +991,7 @@ fn allocMem(
assert(abi_align > 0); assert(abi_align > 0);
// TODO find a free slot instead of always appending // TODO find a free slot instead of always appending
const offset = mem.alignForwardGeneric(u32, self.next_stack_offset, abi_align) + abi_size; const offset = mem.alignForward(u32, self.next_stack_offset, abi_align) + abi_size;
self.next_stack_offset = offset; self.next_stack_offset = offset;
self.max_end_stack = @max(self.max_end_stack, self.next_stack_offset); self.max_end_stack = @max(self.max_end_stack, self.next_stack_offset);
@ -6214,7 +6214,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
for (fn_info.param_types, 0..) |ty, i| { for (fn_info.param_types, 0..) |ty, i| {
if (ty.toType().abiAlignment(mod) == 8) if (ty.toType().abiAlignment(mod) == 8)
ncrn = std.mem.alignForwardGeneric(usize, ncrn, 2); ncrn = std.mem.alignForward(usize, ncrn, 2);
const param_size = @intCast(u32, ty.toType().abiSize(mod)); const param_size = @intCast(u32, ty.toType().abiSize(mod));
if (std.math.divCeil(u32, param_size, 4) catch unreachable <= 4 - ncrn) { if (std.math.divCeil(u32, param_size, 4) catch unreachable <= 4 - ncrn) {
@ -6229,7 +6229,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
} else { } else {
ncrn = 4; ncrn = 4;
if (ty.toType().abiAlignment(mod) == 8) if (ty.toType().abiAlignment(mod) == 8)
nsaa = std.mem.alignForwardGeneric(u32, nsaa, 8); nsaa = std.mem.alignForward(u32, nsaa, 8);
result.args[i] = .{ .stack_argument_offset = nsaa }; result.args[i] = .{ .stack_argument_offset = nsaa };
nsaa += param_size; nsaa += param_size;
@ -6267,7 +6267,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
const param_size = @intCast(u32, ty.toType().abiSize(mod)); const param_size = @intCast(u32, ty.toType().abiSize(mod));
const param_alignment = ty.toType().abiAlignment(mod); const param_alignment = ty.toType().abiAlignment(mod);
stack_offset = std.mem.alignForwardGeneric(u32, stack_offset, param_alignment); stack_offset = std.mem.alignForward(u32, stack_offset, param_alignment);
result.args[i] = .{ .stack_argument_offset = stack_offset }; result.args[i] = .{ .stack_argument_offset = stack_offset };
stack_offset += param_size; stack_offset += param_size;
} else { } else {


@ -13,7 +13,7 @@ pub const Class = union(enum) {
i64_array: u8, i64_array: u8,
fn arrSize(total_size: u64, arr_size: u64) Class { fn arrSize(total_size: u64, arr_size: u64) Class {
const count = @intCast(u8, std.mem.alignForwardGeneric(u64, total_size, arr_size) / arr_size); const count = @intCast(u8, std.mem.alignForward(u64, total_size, arr_size) / arr_size);
if (arr_size == 32) { if (arr_size == 32) {
return .{ .i32_array = count }; return .{ .i32_array = count };
} else { } else {


@ -792,7 +792,7 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u
if (abi_align > self.stack_align) if (abi_align > self.stack_align)
self.stack_align = abi_align; self.stack_align = abi_align;
// TODO find a free slot instead of always appending // TODO find a free slot instead of always appending
const offset = mem.alignForwardGeneric(u32, self.next_stack_offset, abi_align); const offset = mem.alignForward(u32, self.next_stack_offset, abi_align);
self.next_stack_offset = offset + abi_size; self.next_stack_offset = offset + abi_size;
if (self.next_stack_offset > self.max_end_stack) if (self.next_stack_offset > self.max_end_stack)
self.max_end_stack = self.next_stack_offset; self.max_end_stack = self.next_stack_offset;


@ -423,7 +423,7 @@ fn gen(self: *Self) !void {
// Backpatch stack offset // Backpatch stack offset
const total_stack_size = self.max_end_stack + abi.stack_reserved_area; const total_stack_size = self.max_end_stack + abi.stack_reserved_area;
const stack_size = mem.alignForwardGeneric(u32, total_stack_size, self.stack_align); const stack_size = mem.alignForward(u32, total_stack_size, self.stack_align);
if (math.cast(i13, stack_size)) |size| { if (math.cast(i13, stack_size)) |size| {
self.mir_instructions.set(save_inst, .{ self.mir_instructions.set(save_inst, .{
.tag = .save, .tag = .save,
@ -2781,7 +2781,7 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u
if (abi_align > self.stack_align) if (abi_align > self.stack_align)
self.stack_align = abi_align; self.stack_align = abi_align;
// TODO find a free slot instead of always appending // TODO find a free slot instead of always appending
const offset = mem.alignForwardGeneric(u32, self.next_stack_offset, abi_align) + abi_size; const offset = mem.alignForward(u32, self.next_stack_offset, abi_align) + abi_size;
self.next_stack_offset = offset; self.next_stack_offset = offset;
if (self.next_stack_offset > self.max_end_stack) if (self.next_stack_offset > self.max_end_stack)
self.max_end_stack = self.next_stack_offset; self.max_end_stack = self.next_stack_offset;


@ -1286,7 +1286,7 @@ fn genFunc(func: *CodeGen) InnerError!void {
// store stack pointer so we can restore it when we return from the function // store stack pointer so we can restore it when we return from the function
try prologue.append(.{ .tag = .local_tee, .data = .{ .label = func.initial_stack_value.local.value } }); try prologue.append(.{ .tag = .local_tee, .data = .{ .label = func.initial_stack_value.local.value } });
// get the total stack size // get the total stack size
const aligned_stack = std.mem.alignForwardGeneric(u32, func.stack_size, func.stack_alignment); const aligned_stack = std.mem.alignForward(u32, func.stack_size, func.stack_alignment);
try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @intCast(i32, aligned_stack) } }); try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @intCast(i32, aligned_stack) } });
// substract it from the current stack pointer // substract it from the current stack pointer
try prologue.append(.{ .tag = .i32_sub, .data = .{ .tag = {} } }); try prologue.append(.{ .tag = .i32_sub, .data = .{ .tag = {} } });
@ -1531,7 +1531,7 @@ fn allocStack(func: *CodeGen, ty: Type) !WValue {
func.stack_alignment = abi_align; func.stack_alignment = abi_align;
} }
const offset = std.mem.alignForwardGeneric(u32, func.stack_size, abi_align); const offset = std.mem.alignForward(u32, func.stack_size, abi_align);
defer func.stack_size = offset + abi_size; defer func.stack_size = offset + abi_size;
return WValue{ .stack_offset = .{ .value = offset, .references = 1 } }; return WValue{ .stack_offset = .{ .value = offset, .references = 1 } };
@ -1564,7 +1564,7 @@ fn allocStackPtr(func: *CodeGen, inst: Air.Inst.Index) !WValue {
func.stack_alignment = abi_alignment; func.stack_alignment = abi_alignment;
} }
const offset = std.mem.alignForwardGeneric(u32, func.stack_size, abi_alignment); const offset = std.mem.alignForward(u32, func.stack_size, abi_alignment);
defer func.stack_size = offset + abi_size; defer func.stack_size = offset + abi_size;
return WValue{ .stack_offset = .{ .value = offset, .references = 1 } }; return WValue{ .stack_offset = .{ .value = offset, .references = 1 } };
@ -2975,7 +2975,7 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue
if (layout.payload_align > layout.tag_align) break :blk 0; if (layout.payload_align > layout.tag_align) break :blk 0;
// tag is stored first so calculate offset from where payload starts // tag is stored first so calculate offset from where payload starts
break :blk @intCast(u32, std.mem.alignForwardGeneric(u64, layout.tag_size, layout.tag_align)); break :blk @intCast(u32, std.mem.alignForward(u64, layout.tag_size, layout.tag_align));
}, },
}, },
.Pointer => switch (parent_ty.ptrSize(mod)) { .Pointer => switch (parent_ty.ptrSize(mod)) {


@ -2150,7 +2150,7 @@ fn setFrameLoc(
const frame_i = @enumToInt(frame_index); const frame_i = @enumToInt(frame_index);
if (aligned) { if (aligned) {
const alignment = @as(i32, 1) << self.frame_allocs.items(.abi_align)[frame_i]; const alignment = @as(i32, 1) << self.frame_allocs.items(.abi_align)[frame_i];
offset.* = mem.alignForwardGeneric(i32, offset.*, alignment); offset.* = mem.alignForward(i32, offset.*, alignment);
} }
self.frame_locs.set(frame_i, .{ .base = base, .disp = offset.* }); self.frame_locs.set(frame_i, .{ .base = base, .disp = offset.* });
offset.* += self.frame_allocs.items(.abi_size)[frame_i]; offset.* += self.frame_allocs.items(.abi_size)[frame_i];
@ -2207,7 +2207,7 @@ fn computeFrameLayout(self: *Self) !FrameLayout {
self.setFrameLoc(.stack_frame, .rsp, &rsp_offset, true); self.setFrameLoc(.stack_frame, .rsp, &rsp_offset, true);
for (stack_frame_order) |frame_index| self.setFrameLoc(frame_index, .rsp, &rsp_offset, true); for (stack_frame_order) |frame_index| self.setFrameLoc(frame_index, .rsp, &rsp_offset, true);
rsp_offset += stack_frame_align_offset; rsp_offset += stack_frame_align_offset;
rsp_offset = mem.alignForwardGeneric(i32, rsp_offset, @as(i32, 1) << needed_align); rsp_offset = mem.alignForward(i32, rsp_offset, @as(i32, 1) << needed_align);
rsp_offset -= stack_frame_align_offset; rsp_offset -= stack_frame_align_offset;
frame_size[@enumToInt(FrameIndex.call_frame)] = frame_size[@enumToInt(FrameIndex.call_frame)] =
@intCast(u31, rsp_offset - frame_offset[@enumToInt(FrameIndex.stack_frame)]); @intCast(u31, rsp_offset - frame_offset[@enumToInt(FrameIndex.stack_frame)]);
@ -11807,7 +11807,7 @@ fn resolveCallingConventionValues(
const param_size = @intCast(u31, ty.abiSize(mod)); const param_size = @intCast(u31, ty.abiSize(mod));
const param_align = @intCast(u31, ty.abiAlignment(mod)); const param_align = @intCast(u31, ty.abiAlignment(mod));
result.stack_byte_count = result.stack_byte_count =
mem.alignForwardGeneric(u31, result.stack_byte_count, param_align); mem.alignForward(u31, result.stack_byte_count, param_align);
arg.* = .{ .load_frame = .{ arg.* = .{ .load_frame = .{
.index = stack_frame_base, .index = stack_frame_base,
.off = result.stack_byte_count, .off = result.stack_byte_count,
@ -11847,7 +11847,7 @@ fn resolveCallingConventionValues(
const param_size = @intCast(u31, ty.abiSize(mod)); const param_size = @intCast(u31, ty.abiSize(mod));
const param_align = @intCast(u31, ty.abiAlignment(mod)); const param_align = @intCast(u31, ty.abiAlignment(mod));
result.stack_byte_count = result.stack_byte_count =
mem.alignForwardGeneric(u31, result.stack_byte_count, param_align); mem.alignForward(u31, result.stack_byte_count, param_align);
arg.* = .{ .load_frame = .{ arg.* = .{ .load_frame = .{
.index = stack_frame_base, .index = stack_frame_base,
.off = result.stack_byte_count, .off = result.stack_byte_count,
@ -11858,7 +11858,7 @@ fn resolveCallingConventionValues(
else => return self.fail("TODO implement function parameters and return values for {} on x86_64", .{cc}), else => return self.fail("TODO implement function parameters and return values for {} on x86_64", .{cc}),
} }
result.stack_byte_count = mem.alignForwardGeneric(u31, result.stack_byte_count, result.stack_align); result.stack_byte_count = mem.alignForward(u31, result.stack_byte_count, result.stack_align);
return result; return result;
} }


@ -290,7 +290,7 @@ pub fn generateSymbol(
.fail => |em| return .{ .fail = em }, .fail => |em| return .{ .fail = em },
} }
const unpadded_end = code.items.len - begin; const unpadded_end = code.items.len - begin;
const padded_end = mem.alignForwardGeneric(u64, unpadded_end, abi_align); const padded_end = mem.alignForward(u64, unpadded_end, abi_align);
const padding = math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow; const padding = math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow;
if (padding > 0) { if (padding > 0) {
@ -303,7 +303,7 @@ pub fn generateSymbol(
const begin = code.items.len; const begin = code.items.len;
try code.writer().writeInt(u16, err_val, endian); try code.writer().writeInt(u16, err_val, endian);
const unpadded_end = code.items.len - begin; const unpadded_end = code.items.len - begin;
const padded_end = mem.alignForwardGeneric(u64, unpadded_end, abi_align); const padded_end = mem.alignForward(u64, unpadded_end, abi_align);
const padding = math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow; const padding = math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow;
if (padding > 0) { if (padding > 0) {
@ -1020,7 +1020,7 @@ pub fn errUnionPayloadOffset(payload_ty: Type, mod: *Module) u64 {
if (payload_align >= error_align or !payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { if (payload_align >= error_align or !payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return 0; return 0;
} else { } else {
return mem.alignForwardGeneric(u64, Type.anyerror.abiSize(mod), payload_align); return mem.alignForward(u64, Type.anyerror.abiSize(mod), payload_align);
} }
} }
@ -1029,7 +1029,7 @@ pub fn errUnionErrorOffset(payload_ty: Type, mod: *Module) u64 {
const payload_align = payload_ty.abiAlignment(mod); const payload_align = payload_ty.abiAlignment(mod);
const error_align = Type.anyerror.abiAlignment(mod); const error_align = Type.anyerror.abiAlignment(mod);
if (payload_align >= error_align and payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { if (payload_align >= error_align and payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return mem.alignForwardGeneric(u64, payload_ty.abiSize(mod), error_align); return mem.alignForward(u64, payload_ty.abiSize(mod), error_align);
} else { } else {
return 0; return 0;
} }


@ -1633,7 +1633,7 @@ pub const Object = struct {
var offset: u64 = 0; var offset: u64 = 0;
offset += ptr_size; offset += ptr_size;
offset = std.mem.alignForwardGeneric(u64, offset, len_align); offset = std.mem.alignForward(u64, offset, len_align);
const len_offset = offset; const len_offset = offset;
const fields: [2]*llvm.DIType = .{ const fields: [2]*llvm.DIType = .{
@ -1801,7 +1801,7 @@ pub const Object = struct {
var offset: u64 = 0; var offset: u64 = 0;
offset += payload_size; offset += payload_size;
offset = std.mem.alignForwardGeneric(u64, offset, non_null_align); offset = std.mem.alignForward(u64, offset, non_null_align);
const non_null_offset = offset; const non_null_offset = offset;
const fields: [2]*llvm.DIType = .{ const fields: [2]*llvm.DIType = .{
@ -1888,12 +1888,12 @@ pub const Object = struct {
error_index = 0; error_index = 0;
payload_index = 1; payload_index = 1;
error_offset = 0; error_offset = 0;
payload_offset = std.mem.alignForwardGeneric(u64, error_size, payload_align); payload_offset = std.mem.alignForward(u64, error_size, payload_align);
} else { } else {
payload_index = 0; payload_index = 0;
error_index = 1; error_index = 1;
payload_offset = 0; payload_offset = 0;
error_offset = std.mem.alignForwardGeneric(u64, payload_size, error_align); error_offset = std.mem.alignForward(u64, payload_size, error_align);
} }
var fields: [2]*llvm.DIType = undefined; var fields: [2]*llvm.DIType = undefined;
@ -1995,7 +1995,7 @@ pub const Object = struct {
const field_size = field_ty.toType().abiSize(mod); const field_size = field_ty.toType().abiSize(mod);
const field_align = field_ty.toType().abiAlignment(mod); const field_align = field_ty.toType().abiAlignment(mod);
const field_offset = std.mem.alignForwardGeneric(u64, offset, field_align); const field_offset = std.mem.alignForward(u64, offset, field_align);
offset = field_offset + field_size; offset = field_offset + field_size;
const field_name = if (tuple.names.len != 0) const field_name = if (tuple.names.len != 0)
@ -2086,7 +2086,7 @@ pub const Object = struct {
const field = field_and_index.field; const field = field_and_index.field;
const field_size = field.ty.abiSize(mod); const field_size = field.ty.abiSize(mod);
const field_align = field.alignment(mod, layout); const field_align = field.alignment(mod, layout);
const field_offset = std.mem.alignForwardGeneric(u64, offset, field_align); const field_offset = std.mem.alignForward(u64, offset, field_align);
offset = field_offset + field_size; offset = field_offset + field_size;
const field_name = mod.intern_pool.stringToSlice(fields.keys()[field_and_index.index]); const field_name = mod.intern_pool.stringToSlice(fields.keys()[field_and_index.index]);
@ -2242,10 +2242,10 @@ pub const Object = struct {
var payload_offset: u64 = undefined; var payload_offset: u64 = undefined;
if (layout.tag_align >= layout.payload_align) { if (layout.tag_align >= layout.payload_align) {
tag_offset = 0; tag_offset = 0;
payload_offset = std.mem.alignForwardGeneric(u64, layout.tag_size, layout.payload_align); payload_offset = std.mem.alignForward(u64, layout.tag_size, layout.payload_align);
} else { } else {
payload_offset = 0; payload_offset = 0;
tag_offset = std.mem.alignForwardGeneric(u64, layout.payload_size, layout.tag_align); tag_offset = std.mem.alignForward(u64, layout.payload_size, layout.tag_align);
} }
const tag_di = dib.createMemberType( const tag_di = dib.createMemberType(
@ -2861,9 +2861,9 @@ pub const DeclGen = struct {
fields_buf[0] = llvm_error_type; fields_buf[0] = llvm_error_type;
fields_buf[1] = llvm_payload_type; fields_buf[1] = llvm_payload_type;
const payload_end = const payload_end =
std.mem.alignForwardGeneric(u64, error_size, payload_align) + std.mem.alignForward(u64, error_size, payload_align) +
payload_size; payload_size;
const abi_size = std.mem.alignForwardGeneric(u64, payload_end, error_align); const abi_size = std.mem.alignForward(u64, payload_end, error_align);
const padding = @intCast(c_uint, abi_size - payload_end); const padding = @intCast(c_uint, abi_size - payload_end);
if (padding == 0) { if (padding == 0) {
return dg.context.structType(&fields_buf, 2, .False); return dg.context.structType(&fields_buf, 2, .False);
@ -2874,9 +2874,9 @@ pub const DeclGen = struct {
fields_buf[0] = llvm_payload_type; fields_buf[0] = llvm_payload_type;
fields_buf[1] = llvm_error_type; fields_buf[1] = llvm_error_type;
const error_end = const error_end =
std.mem.alignForwardGeneric(u64, payload_size, error_align) + std.mem.alignForward(u64, payload_size, error_align) +
error_size; error_size;
const abi_size = std.mem.alignForwardGeneric(u64, error_end, payload_align); const abi_size = std.mem.alignForward(u64, error_end, payload_align);
const padding = @intCast(c_uint, abi_size - error_end); const padding = @intCast(c_uint, abi_size - error_end);
if (padding == 0) { if (padding == 0) {
return dg.context.structType(&fields_buf, 2, .False); return dg.context.structType(&fields_buf, 2, .False);
@ -2910,7 +2910,7 @@ pub const DeclGen = struct {
const field_align = field_ty.toType().abiAlignment(mod); const field_align = field_ty.toType().abiAlignment(mod);
big_align = @max(big_align, field_align); big_align = @max(big_align, field_align);
const prev_offset = offset; const prev_offset = offset;
offset = std.mem.alignForwardGeneric(u64, offset, field_align); offset = std.mem.alignForward(u64, offset, field_align);
const padding_len = offset - prev_offset; const padding_len = offset - prev_offset;
if (padding_len > 0) { if (padding_len > 0) {
@ -2924,7 +2924,7 @@ pub const DeclGen = struct {
} }
{ {
const prev_offset = offset; const prev_offset = offset;
offset = std.mem.alignForwardGeneric(u64, offset, big_align); offset = std.mem.alignForward(u64, offset, big_align);
const padding_len = offset - prev_offset; const padding_len = offset - prev_offset;
if (padding_len > 0) { if (padding_len > 0) {
const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len));
@ -2979,7 +2979,7 @@ pub const DeclGen = struct {
field_align < field_ty_align; field_align < field_ty_align;
big_align = @max(big_align, field_align); big_align = @max(big_align, field_align);
const prev_offset = offset; const prev_offset = offset;
offset = std.mem.alignForwardGeneric(u64, offset, field_align); offset = std.mem.alignForward(u64, offset, field_align);
const padding_len = offset - prev_offset; const padding_len = offset - prev_offset;
if (padding_len > 0) { if (padding_len > 0) {
@ -2993,7 +2993,7 @@ pub const DeclGen = struct {
} }
{ {
const prev_offset = offset; const prev_offset = offset;
offset = std.mem.alignForwardGeneric(u64, offset, big_align); offset = std.mem.alignForward(u64, offset, big_align);
const padding_len = offset - prev_offset; const padding_len = offset - prev_offset;
if (padding_len > 0) { if (padding_len > 0) {
const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len));
@ -3552,7 +3552,7 @@ pub const DeclGen = struct {
const field_align = field_ty.toType().abiAlignment(mod); const field_align = field_ty.toType().abiAlignment(mod);
big_align = @max(big_align, field_align); big_align = @max(big_align, field_align);
const prev_offset = offset; const prev_offset = offset;
offset = std.mem.alignForwardGeneric(u64, offset, field_align); offset = std.mem.alignForward(u64, offset, field_align);
const padding_len = offset - prev_offset; const padding_len = offset - prev_offset;
if (padding_len > 0) { if (padding_len > 0) {
@ -3575,7 +3575,7 @@ pub const DeclGen = struct {
} }
{ {
const prev_offset = offset; const prev_offset = offset;
offset = std.mem.alignForwardGeneric(u64, offset, big_align); offset = std.mem.alignForward(u64, offset, big_align);
const padding_len = offset - prev_offset; const padding_len = offset - prev_offset;
if (padding_len > 0) { if (padding_len > 0) {
const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len));
@ -3650,7 +3650,7 @@ pub const DeclGen = struct {
const field_align = field.alignment(mod, struct_obj.layout); const field_align = field.alignment(mod, struct_obj.layout);
big_align = @max(big_align, field_align); big_align = @max(big_align, field_align);
const prev_offset = offset; const prev_offset = offset;
offset = std.mem.alignForwardGeneric(u64, offset, field_align); offset = std.mem.alignForward(u64, offset, field_align);
const padding_len = offset - prev_offset; const padding_len = offset - prev_offset;
if (padding_len > 0) { if (padding_len > 0) {
@ -3673,7 +3673,7 @@ pub const DeclGen = struct {
} }
{ {
const prev_offset = offset; const prev_offset = offset;
offset = std.mem.alignForwardGeneric(u64, offset, big_align); offset = std.mem.alignForward(u64, offset, big_align);
const padding_len = offset - prev_offset; const padding_len = offset - prev_offset;
if (padding_len > 0) { if (padding_len > 0) {
const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len));
@ -10274,7 +10274,7 @@ fn llvmField(ty: Type, field_index: usize, mod: *Module) ?LlvmField {
const field_align = field_ty.toType().abiAlignment(mod); const field_align = field_ty.toType().abiAlignment(mod);
big_align = @max(big_align, field_align); big_align = @max(big_align, field_align);
const prev_offset = offset; const prev_offset = offset;
offset = std.mem.alignForwardGeneric(u64, offset, field_align); offset = std.mem.alignForward(u64, offset, field_align);
const padding_len = offset - prev_offset; const padding_len = offset - prev_offset;
if (padding_len > 0) { if (padding_len > 0) {
@ -10308,7 +10308,7 @@ fn llvmField(ty: Type, field_index: usize, mod: *Module) ?LlvmField {
const field_align = field.alignment(mod, layout); const field_align = field.alignment(mod, layout);
big_align = @max(big_align, field_align); big_align = @max(big_align, field_align);
const prev_offset = offset; const prev_offset = offset;
offset = std.mem.alignForwardGeneric(u64, offset, field_align); offset = std.mem.alignForward(u64, offset, field_align);
const padding_len = offset - prev_offset; const padding_len = offset - prev_offset;
if (padding_len > 0) { if (padding_len > 0) {


@ -472,12 +472,12 @@ pub const DeclGen = struct {
try self.initializers.append(result_id); try self.initializers.append(result_id);
self.partial_word.len = 0; self.partial_word.len = 0;
self.size = std.mem.alignForwardGeneric(u32, self.size, @sizeOf(Word)); self.size = std.mem.alignForward(u32, self.size, @sizeOf(Word));
} }
/// Fill the buffer with undefined values until the size is aligned to `align`. /// Fill the buffer with undefined values until the size is aligned to `align`.
fn fillToAlign(self: *@This(), alignment: u32) !void { fn fillToAlign(self: *@This(), alignment: u32) !void {
const target_size = std.mem.alignForwardGeneric(u32, self.size, alignment); const target_size = std.mem.alignForward(u32, self.size, alignment);
try self.addUndef(target_size - self.size); try self.addUndef(target_size - self.size);
} }


@ -437,10 +437,10 @@ fn allocateSection(self: *Coff, name: []const u8, size: u32, flags: coff.Section
const vaddr = blk: { const vaddr = blk: {
if (index == 0) break :blk self.page_size; if (index == 0) break :blk self.page_size;
const prev_header = self.sections.items(.header)[index - 1]; const prev_header = self.sections.items(.header)[index - 1];
break :blk mem.alignForwardGeneric(u32, prev_header.virtual_address + prev_header.virtual_size, self.page_size); break :blk mem.alignForward(u32, prev_header.virtual_address + prev_header.virtual_size, self.page_size);
}; };
// We commit more memory than needed upfront so that we don't have to reallocate too soon. // We commit more memory than needed upfront so that we don't have to reallocate too soon.
const memsz = mem.alignForwardGeneric(u32, size, self.page_size) * 100; const memsz = mem.alignForward(u32, size, self.page_size) * 100;
log.debug("found {s} free space 0x{x} to 0x{x} (0x{x} - 0x{x})", .{ log.debug("found {s} free space 0x{x} to 0x{x} (0x{x} - 0x{x})", .{
name, name,
off, off,
@@ -505,8 +505,8 @@ fn growSection(self: *Coff, sect_id: u32, needed_size: u32) !void {
fn growSectionVirtualMemory(self: *Coff, sect_id: u32, needed_size: u32) !void { fn growSectionVirtualMemory(self: *Coff, sect_id: u32, needed_size: u32) !void {
const header = &self.sections.items(.header)[sect_id]; const header = &self.sections.items(.header)[sect_id];
const increased_size = padToIdeal(needed_size); const increased_size = padToIdeal(needed_size);
const old_aligned_end = header.virtual_address + mem.alignForwardGeneric(u32, header.virtual_size, self.page_size); const old_aligned_end = header.virtual_address + mem.alignForward(u32, header.virtual_size, self.page_size);
const new_aligned_end = header.virtual_address + mem.alignForwardGeneric(u32, increased_size, self.page_size); const new_aligned_end = header.virtual_address + mem.alignForward(u32, increased_size, self.page_size);
const diff = new_aligned_end - old_aligned_end; const diff = new_aligned_end - old_aligned_end;
log.debug("growing {s} in virtual memory by {x}", .{ self.getSectionName(header), diff }); log.debug("growing {s} in virtual memory by {x}", .{ self.getSectionName(header), diff });
@@ -567,7 +567,7 @@ fn allocateAtom(self: *Coff, atom_index: Atom.Index, new_atom_size: u32, alignme
const ideal_capacity_end_vaddr = math.add(u32, sym.value, ideal_capacity) catch ideal_capacity; const ideal_capacity_end_vaddr = math.add(u32, sym.value, ideal_capacity) catch ideal_capacity;
const capacity_end_vaddr = sym.value + capacity; const capacity_end_vaddr = sym.value + capacity;
const new_start_vaddr_unaligned = capacity_end_vaddr - new_atom_ideal_capacity; const new_start_vaddr_unaligned = capacity_end_vaddr - new_atom_ideal_capacity;
const new_start_vaddr = mem.alignBackwardGeneric(u32, new_start_vaddr_unaligned, alignment); const new_start_vaddr = mem.alignBackward(u32, new_start_vaddr_unaligned, alignment);
if (new_start_vaddr < ideal_capacity_end_vaddr) { if (new_start_vaddr < ideal_capacity_end_vaddr) {
// Additional bookkeeping here to notice if this free list node // Additional bookkeeping here to notice if this free list node
// should be deleted because the atom that it points to has grown to take up // should be deleted because the atom that it points to has grown to take up
@@ -596,11 +596,11 @@ fn allocateAtom(self: *Coff, atom_index: Atom.Index, new_atom_size: u32, alignme
const last_symbol = last.getSymbol(self); const last_symbol = last.getSymbol(self);
const ideal_capacity = if (header.isCode()) padToIdeal(last.size) else last.size; const ideal_capacity = if (header.isCode()) padToIdeal(last.size) else last.size;
const ideal_capacity_end_vaddr = last_symbol.value + ideal_capacity; const ideal_capacity_end_vaddr = last_symbol.value + ideal_capacity;
const new_start_vaddr = mem.alignForwardGeneric(u32, ideal_capacity_end_vaddr, alignment); const new_start_vaddr = mem.alignForward(u32, ideal_capacity_end_vaddr, alignment);
atom_placement = last_index; atom_placement = last_index;
break :blk new_start_vaddr; break :blk new_start_vaddr;
} else { } else {
break :blk mem.alignForwardGeneric(u32, header.virtual_address, alignment); break :blk mem.alignForward(u32, header.virtual_address, alignment);
} }
}; };
@@ -722,7 +722,7 @@ pub fn createAtom(self: *Coff) !Atom.Index {
fn growAtom(self: *Coff, atom_index: Atom.Index, new_atom_size: u32, alignment: u32) !u32 { fn growAtom(self: *Coff, atom_index: Atom.Index, new_atom_size: u32, alignment: u32) !u32 {
const atom = self.getAtom(atom_index); const atom = self.getAtom(atom_index);
const sym = atom.getSymbol(self); const sym = atom.getSymbol(self);
const align_ok = mem.alignBackwardGeneric(u32, sym.value, alignment) == sym.value; const align_ok = mem.alignBackward(u32, sym.value, alignment) == sym.value;
const need_realloc = !align_ok or new_atom_size > atom.capacity(self); const need_realloc = !align_ok or new_atom_size > atom.capacity(self);
if (!need_realloc) return sym.value; if (!need_realloc) return sym.value;
return self.allocateAtom(atom_index, new_atom_size, alignment); return self.allocateAtom(atom_index, new_atom_size, alignment);
@@ -1798,7 +1798,7 @@ fn writeBaseRelocations(self: *Coff) !void {
for (offsets.items) |offset| { for (offsets.items) |offset| {
const rva = sym.value + offset; const rva = sym.value + offset;
const page = mem.alignBackwardGeneric(u32, rva, self.page_size); const page = mem.alignBackward(u32, rva, self.page_size);
const gop = try page_table.getOrPut(page); const gop = try page_table.getOrPut(page);
if (!gop.found_existing) { if (!gop.found_existing) {
gop.value_ptr.* = std.ArrayList(coff.BaseRelocation).init(gpa); gop.value_ptr.* = std.ArrayList(coff.BaseRelocation).init(gpa);
@@ -1819,7 +1819,7 @@ fn writeBaseRelocations(self: *Coff) !void {
if (sym.section_number == .UNDEFINED) continue; if (sym.section_number == .UNDEFINED) continue;
const rva = @intCast(u32, header.virtual_address + index * self.ptr_width.size()); const rva = @intCast(u32, header.virtual_address + index * self.ptr_width.size());
const page = mem.alignBackwardGeneric(u32, rva, self.page_size); const page = mem.alignBackward(u32, rva, self.page_size);
const gop = try page_table.getOrPut(page); const gop = try page_table.getOrPut(page);
if (!gop.found_existing) { if (!gop.found_existing) {
gop.value_ptr.* = std.ArrayList(coff.BaseRelocation).init(gpa); gop.value_ptr.* = std.ArrayList(coff.BaseRelocation).init(gpa);
@@ -1907,7 +1907,7 @@ fn writeImportTables(self: *Coff) !void {
lookup_table_size += @intCast(u32, itable.entries.items.len + 1) * @sizeOf(coff.ImportLookupEntry64.ByName); lookup_table_size += @intCast(u32, itable.entries.items.len + 1) * @sizeOf(coff.ImportLookupEntry64.ByName);
for (itable.entries.items) |entry| { for (itable.entries.items) |entry| {
const sym_name = self.getSymbolName(entry); const sym_name = self.getSymbolName(entry);
names_table_size += 2 + mem.alignForwardGeneric(u32, @intCast(u32, sym_name.len + 1), 2); names_table_size += 2 + mem.alignForward(u32, @intCast(u32, sym_name.len + 1), 2);
} }
dll_names_size += @intCast(u32, lib_name.len + ext.len + 1); dll_names_size += @intCast(u32, lib_name.len + ext.len + 1);
} }
@@ -2102,7 +2102,7 @@ fn writeHeader(self: *Coff) !void {
}; };
const subsystem: coff.Subsystem = .WINDOWS_CUI; const subsystem: coff.Subsystem = .WINDOWS_CUI;
const size_of_image: u32 = self.getSizeOfImage(); const size_of_image: u32 = self.getSizeOfImage();
const size_of_headers: u32 = mem.alignForwardGeneric(u32, self.getSizeOfHeaders(), default_file_alignment); const size_of_headers: u32 = mem.alignForward(u32, self.getSizeOfHeaders(), default_file_alignment);
const image_base = self.getImageBase(); const image_base = self.getImageBase();
const base_of_code = self.sections.get(self.text_section_index.?).header.virtual_address; const base_of_code = self.sections.get(self.text_section_index.?).header.virtual_address;
@@ -2247,7 +2247,7 @@ fn allocatedSize(self: *Coff, start: u32) u32 {
fn findFreeSpace(self: *Coff, object_size: u32, min_alignment: u32) u32 { fn findFreeSpace(self: *Coff, object_size: u32, min_alignment: u32) u32 {
var start: u32 = 0; var start: u32 = 0;
while (self.detectAllocCollision(start, object_size)) |item_end| { while (self.detectAllocCollision(start, object_size)) |item_end| {
start = mem.alignForwardGeneric(u32, item_end, min_alignment); start = mem.alignForward(u32, item_end, min_alignment);
} }
return start; return start;
} }
@@ -2294,9 +2294,9 @@ inline fn getSectionHeadersOffset(self: Coff) u32 {
} }
inline fn getSizeOfImage(self: Coff) u32 { inline fn getSizeOfImage(self: Coff) u32 {
var image_size: u32 = mem.alignForwardGeneric(u32, self.getSizeOfHeaders(), self.page_size); var image_size: u32 = mem.alignForward(u32, self.getSizeOfHeaders(), self.page_size);
for (self.sections.items(.header)) |header| { for (self.sections.items(.header)) |header| {
image_size += mem.alignForwardGeneric(u32, header.virtual_size, self.page_size); image_size += mem.alignForward(u32, header.virtual_size, self.page_size);
} }
return image_size; return image_size;
} }
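
The COFF hunks above use alignBackward to find the page containing a relocation's RVA and alignForward to round sizes up to page or file alignment. A small sketch of the page computation, assuming a 4 KiB page size for illustration (the linker reads the real value from self.page_size):

    const std = @import("std");

    test "page base of an RVA via mem.alignBackward" {
        const page_size: u32 = 0x1000; // assumed 4 KiB page, illustration only
        const rva: u32 = 0x3456;
        const page = std.mem.alignBackward(u32, rva, page_size);
        try std.testing.expectEqual(@as(u32, 0x3000), page); // containing page
        try std.testing.expectEqual(@as(u32, 0x456), rva - page); // offset in page
    }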

View File

@@ -2152,7 +2152,7 @@ pub fn writeDbgAranges(self: *Dwarf, addr: u64, size: u64) !void {
di_buf.appendAssumeCapacity(0); // segment_selector_size di_buf.appendAssumeCapacity(0); // segment_selector_size
const end_header_offset = di_buf.items.len; const end_header_offset = di_buf.items.len;
const begin_entries_offset = mem.alignForward(end_header_offset, ptr_width_bytes * 2); const begin_entries_offset = mem.alignForward(usize, end_header_offset, ptr_width_bytes * 2);
di_buf.appendNTimesAssumeCapacity(0, begin_entries_offset - end_header_offset); di_buf.appendNTimesAssumeCapacity(0, begin_entries_offset - end_header_offset);
// Currently only one compilation unit is supported, so the address range is simply // Currently only one compilation unit is supported, so the address range is simply

View File

@@ -439,7 +439,7 @@ pub fn allocatedSize(self: *Elf, start: u64) u64 {
pub fn findFreeSpace(self: *Elf, object_size: u64, min_alignment: u32) u64 { pub fn findFreeSpace(self: *Elf, object_size: u64, min_alignment: u32) u64 {
var start: u64 = 0; var start: u64 = 0;
while (self.detectAllocCollision(start, object_size)) |item_end| { while (self.detectAllocCollision(start, object_size)) |item_end| {
start = mem.alignForwardGeneric(u64, item_end, min_alignment); start = mem.alignForward(u64, item_end, min_alignment);
} }
return start; return start;
} }
@@ -1173,7 +1173,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
phdr_table.p_offset = self.findFreeSpace(needed_size, @intCast(u32, phdr_table.p_align)); phdr_table.p_offset = self.findFreeSpace(needed_size, @intCast(u32, phdr_table.p_align));
} }
phdr_table_load.p_offset = mem.alignBackwardGeneric(u64, phdr_table.p_offset, phdr_table_load.p_align); phdr_table_load.p_offset = mem.alignBackward(u64, phdr_table.p_offset, phdr_table_load.p_align);
const load_align_offset = phdr_table.p_offset - phdr_table_load.p_offset; const load_align_offset = phdr_table.p_offset - phdr_table_load.p_offset;
phdr_table_load.p_filesz = load_align_offset + needed_size; phdr_table_load.p_filesz = load_align_offset + needed_size;
phdr_table_load.p_memsz = load_align_offset + needed_size; phdr_table_load.p_memsz = load_align_offset + needed_size;
@@ -2215,7 +2215,7 @@ fn shrinkAtom(self: *Elf, atom_index: Atom.Index, new_block_size: u64) void {
fn growAtom(self: *Elf, atom_index: Atom.Index, new_block_size: u64, alignment: u64) !u64 { fn growAtom(self: *Elf, atom_index: Atom.Index, new_block_size: u64, alignment: u64) !u64 {
const atom = self.getAtom(atom_index); const atom = self.getAtom(atom_index);
const sym = atom.getSymbol(self); const sym = atom.getSymbol(self);
const align_ok = mem.alignBackwardGeneric(u64, sym.st_value, alignment) == sym.st_value; const align_ok = mem.alignBackward(u64, sym.st_value, alignment) == sym.st_value;
const need_realloc = !align_ok or new_block_size > atom.capacity(self); const need_realloc = !align_ok or new_block_size > atom.capacity(self);
if (!need_realloc) return sym.st_value; if (!need_realloc) return sym.st_value;
return self.allocateAtom(atom_index, new_block_size, alignment); return self.allocateAtom(atom_index, new_block_size, alignment);
@@ -2269,7 +2269,7 @@ fn allocateAtom(self: *Elf, atom_index: Atom.Index, new_block_size: u64, alignme
const ideal_capacity_end_vaddr = std.math.add(u64, big_atom_sym.st_value, ideal_capacity) catch ideal_capacity; const ideal_capacity_end_vaddr = std.math.add(u64, big_atom_sym.st_value, ideal_capacity) catch ideal_capacity;
const capacity_end_vaddr = big_atom_sym.st_value + capacity; const capacity_end_vaddr = big_atom_sym.st_value + capacity;
const new_start_vaddr_unaligned = capacity_end_vaddr - new_atom_ideal_capacity; const new_start_vaddr_unaligned = capacity_end_vaddr - new_atom_ideal_capacity;
const new_start_vaddr = mem.alignBackwardGeneric(u64, new_start_vaddr_unaligned, alignment); const new_start_vaddr = mem.alignBackward(u64, new_start_vaddr_unaligned, alignment);
if (new_start_vaddr < ideal_capacity_end_vaddr) { if (new_start_vaddr < ideal_capacity_end_vaddr) {
// Additional bookkeeping here to notice if this free list node // Additional bookkeeping here to notice if this free list node
// should be deleted because the block that it points to has grown to take up // should be deleted because the block that it points to has grown to take up
@@ -2298,7 +2298,7 @@ fn allocateAtom(self: *Elf, atom_index: Atom.Index, new_block_size: u64, alignme
const last_sym = last.getSymbol(self); const last_sym = last.getSymbol(self);
const ideal_capacity = padToIdeal(last_sym.st_size); const ideal_capacity = padToIdeal(last_sym.st_size);
const ideal_capacity_end_vaddr = last_sym.st_value + ideal_capacity; const ideal_capacity_end_vaddr = last_sym.st_value + ideal_capacity;
const new_start_vaddr = mem.alignForwardGeneric(u64, ideal_capacity_end_vaddr, alignment); const new_start_vaddr = mem.alignForward(u64, ideal_capacity_end_vaddr, alignment);
// Set up the metadata to be updated, after errors are no longer possible. // Set up the metadata to be updated, after errors are no longer possible.
atom_placement = last_index; atom_placement = last_index;
break :blk new_start_vaddr; break :blk new_start_vaddr;
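
Both the COFF and ELF writers above search for free file space with the same loop: on a collision, jump past the colliding allocation and re-align the candidate offset forward. A compact sketch with a hypothetical list of occupied ranges (the real check is detectAllocCollision, which is not shown in this diff):

    const std = @import("std");

    const Range = struct { start: u64, end: u64 };

    // Returns the end of a colliding range, or null if [start, start+size) is free.
    fn detectCollision(occupied: []const Range, start: u64, size: u64) ?u64 {
        for (occupied) |r| {
            if (start < r.end and r.start < start + size) return r.end;
        }
        return null;
    }

    test "free-space search: bump past collisions, then re-align" {
        const occupied = [_]Range{ .{ .start = 0x0, .end = 0x40 }, .{ .start = 0x40, .end = 0x95 } };
        var start: u64 = 0;
        while (detectCollision(&occupied, start, 0x20)) |item_end| {
            start = std.mem.alignForward(u64, item_end, 0x10);
        }
        try std.testing.expectEqual(@as(u64, 0xa0), start);
    }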

View File

@@ -1777,7 +1777,7 @@ fn shrinkAtom(self: *MachO, atom_index: Atom.Index, new_block_size: u64) void {
fn growAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignment: u64) !u64 { fn growAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignment: u64) !u64 {
const atom = self.getAtom(atom_index); const atom = self.getAtom(atom_index);
const sym = atom.getSymbol(self); const sym = atom.getSymbol(self);
const align_ok = mem.alignBackwardGeneric(u64, sym.n_value, alignment) == sym.n_value; const align_ok = mem.alignBackward(u64, sym.n_value, alignment) == sym.n_value;
const need_realloc = !align_ok or new_atom_size > atom.capacity(self); const need_realloc = !align_ok or new_atom_size > atom.capacity(self);
if (!need_realloc) return sym.n_value; if (!need_realloc) return sym.n_value;
return self.allocateAtom(atom_index, new_atom_size, alignment); return self.allocateAtom(atom_index, new_atom_size, alignment);
@@ -2598,7 +2598,7 @@ fn populateMissingMetadata(self: *MachO) !void {
// The first __TEXT segment is immovable and covers MachO header and load commands. // The first __TEXT segment is immovable and covers MachO header and load commands.
self.header_segment_cmd_index = @intCast(u8, self.segments.items.len); self.header_segment_cmd_index = @intCast(u8, self.segments.items.len);
const ideal_size = @max(self.base.options.headerpad_size orelse 0, default_headerpad_size); const ideal_size = @max(self.base.options.headerpad_size orelse 0, default_headerpad_size);
const needed_size = mem.alignForwardGeneric(u64, padToIdeal(ideal_size), self.page_size); const needed_size = mem.alignForward(u64, padToIdeal(ideal_size), self.page_size);
log.debug("found __TEXT segment (header-only) free space 0x{x} to 0x{x}", .{ 0, needed_size }); log.debug("found __TEXT segment (header-only) free space 0x{x} to 0x{x}", .{ 0, needed_size });
@@ -2735,7 +2735,7 @@ fn populateMissingMetadata(self: *MachO) !void {
fn calcPagezeroSize(self: *MachO) u64 { fn calcPagezeroSize(self: *MachO) u64 {
const pagezero_vmsize = self.base.options.pagezero_size orelse default_pagezero_vmsize; const pagezero_vmsize = self.base.options.pagezero_size orelse default_pagezero_vmsize;
const aligned_pagezero_vmsize = mem.alignBackwardGeneric(u64, pagezero_vmsize, self.page_size); const aligned_pagezero_vmsize = mem.alignBackward(u64, pagezero_vmsize, self.page_size);
if (self.base.options.output_mode == .Lib) return 0; if (self.base.options.output_mode == .Lib) return 0;
if (aligned_pagezero_vmsize == 0) return 0; if (aligned_pagezero_vmsize == 0) return 0;
if (aligned_pagezero_vmsize != pagezero_vmsize) { if (aligned_pagezero_vmsize != pagezero_vmsize) {
@@ -2759,10 +2759,10 @@ fn allocateSection(self: *MachO, segname: []const u8, sectname: []const u8, opts
const section_id = @intCast(u8, self.sections.slice().len); const section_id = @intCast(u8, self.sections.slice().len);
const vmaddr = blk: { const vmaddr = blk: {
const prev_segment = self.segments.items[segment_id - 1]; const prev_segment = self.segments.items[segment_id - 1];
break :blk mem.alignForwardGeneric(u64, prev_segment.vmaddr + prev_segment.vmsize, self.page_size); break :blk mem.alignForward(u64, prev_segment.vmaddr + prev_segment.vmsize, self.page_size);
}; };
// We commit more memory than needed upfront so that we don't have to reallocate too soon. // We commit more memory than needed upfront so that we don't have to reallocate too soon.
const vmsize = mem.alignForwardGeneric(u64, opts.size, self.page_size); const vmsize = mem.alignForward(u64, opts.size, self.page_size);
const off = self.findFreeSpace(opts.size, self.page_size); const off = self.findFreeSpace(opts.size, self.page_size);
log.debug("found {s},{s} free space 0x{x} to 0x{x} (0x{x} - 0x{x})", .{ log.debug("found {s},{s} free space 0x{x} to 0x{x} (0x{x} - 0x{x})", .{
@@ -2790,8 +2790,8 @@ fn allocateSection(self: *MachO, segname: []const u8, sectname: []const u8, opts
var section = macho.section_64{ var section = macho.section_64{
.sectname = makeStaticString(sectname), .sectname = makeStaticString(sectname),
.segname = makeStaticString(segname), .segname = makeStaticString(segname),
.addr = mem.alignForwardGeneric(u64, vmaddr, opts.alignment), .addr = mem.alignForward(u64, vmaddr, opts.alignment),
.offset = mem.alignForwardGeneric(u32, @intCast(u32, off), opts.alignment), .offset = mem.alignForward(u32, @intCast(u32, off), opts.alignment),
.size = opts.size, .size = opts.size,
.@"align" = math.log2(opts.alignment), .@"align" = math.log2(opts.alignment),
.flags = opts.flags, .flags = opts.flags,
@@ -2846,8 +2846,8 @@ fn growSection(self: *MachO, sect_id: u8, needed_size: u64) !void {
} }
header.size = needed_size; header.size = needed_size;
segment.filesize = mem.alignForwardGeneric(u64, needed_size, self.page_size); segment.filesize = mem.alignForward(u64, needed_size, self.page_size);
segment.vmsize = mem.alignForwardGeneric(u64, needed_size, self.page_size); segment.vmsize = mem.alignForward(u64, needed_size, self.page_size);
} }
fn growSectionVirtualMemory(self: *MachO, sect_id: u8, needed_size: u64) !void { fn growSectionVirtualMemory(self: *MachO, sect_id: u8, needed_size: u64) !void {
@@ -2855,7 +2855,7 @@ fn growSectionVirtualMemory(self: *MachO, sect_id: u8, needed_size: u64) !void {
const segment = self.getSegmentPtr(sect_id); const segment = self.getSegmentPtr(sect_id);
const increased_size = padToIdeal(needed_size); const increased_size = padToIdeal(needed_size);
const old_aligned_end = segment.vmaddr + segment.vmsize; const old_aligned_end = segment.vmaddr + segment.vmsize;
const new_aligned_end = segment.vmaddr + mem.alignForwardGeneric(u64, increased_size, self.page_size); const new_aligned_end = segment.vmaddr + mem.alignForward(u64, increased_size, self.page_size);
const diff = new_aligned_end - old_aligned_end; const diff = new_aligned_end - old_aligned_end;
log.debug("shifting every segment after {s},{s} in virtual memory by {x}", .{ log.debug("shifting every segment after {s},{s} in virtual memory by {x}", .{
header.segName(), header.segName(),
@@ -2927,7 +2927,7 @@ fn allocateAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignm
const ideal_capacity_end_vaddr = math.add(u64, sym.n_value, ideal_capacity) catch ideal_capacity; const ideal_capacity_end_vaddr = math.add(u64, sym.n_value, ideal_capacity) catch ideal_capacity;
const capacity_end_vaddr = sym.n_value + capacity; const capacity_end_vaddr = sym.n_value + capacity;
const new_start_vaddr_unaligned = capacity_end_vaddr - new_atom_ideal_capacity; const new_start_vaddr_unaligned = capacity_end_vaddr - new_atom_ideal_capacity;
const new_start_vaddr = mem.alignBackwardGeneric(u64, new_start_vaddr_unaligned, alignment); const new_start_vaddr = mem.alignBackward(u64, new_start_vaddr_unaligned, alignment);
if (new_start_vaddr < ideal_capacity_end_vaddr) { if (new_start_vaddr < ideal_capacity_end_vaddr) {
// Additional bookkeeping here to notice if this free list node // Additional bookkeeping here to notice if this free list node
// should be deleted because the atom that it points to has grown to take up // should be deleted because the atom that it points to has grown to take up
@@ -2956,11 +2956,11 @@ fn allocateAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignm
const last_symbol = last.getSymbol(self); const last_symbol = last.getSymbol(self);
const ideal_capacity = if (requires_padding) padToIdeal(last.size) else last.size; const ideal_capacity = if (requires_padding) padToIdeal(last.size) else last.size;
const ideal_capacity_end_vaddr = last_symbol.n_value + ideal_capacity; const ideal_capacity_end_vaddr = last_symbol.n_value + ideal_capacity;
const new_start_vaddr = mem.alignForwardGeneric(u64, ideal_capacity_end_vaddr, alignment); const new_start_vaddr = mem.alignForward(u64, ideal_capacity_end_vaddr, alignment);
atom_placement = last_index; atom_placement = last_index;
break :blk new_start_vaddr; break :blk new_start_vaddr;
} else { } else {
break :blk mem.alignForwardGeneric(u64, segment.vmaddr, alignment); break :blk mem.alignForward(u64, segment.vmaddr, alignment);
} }
}; };
@@ -3034,17 +3034,17 @@ fn writeLinkeditSegmentData(self: *MachO) !void {
for (self.segments.items, 0..) |segment, id| { for (self.segments.items, 0..) |segment, id| {
if (self.linkedit_segment_cmd_index.? == @intCast(u8, id)) continue; if (self.linkedit_segment_cmd_index.? == @intCast(u8, id)) continue;
if (seg.vmaddr < segment.vmaddr + segment.vmsize) { if (seg.vmaddr < segment.vmaddr + segment.vmsize) {
seg.vmaddr = mem.alignForwardGeneric(u64, segment.vmaddr + segment.vmsize, self.page_size); seg.vmaddr = mem.alignForward(u64, segment.vmaddr + segment.vmsize, self.page_size);
} }
if (seg.fileoff < segment.fileoff + segment.filesize) { if (seg.fileoff < segment.fileoff + segment.filesize) {
seg.fileoff = mem.alignForwardGeneric(u64, segment.fileoff + segment.filesize, self.page_size); seg.fileoff = mem.alignForward(u64, segment.fileoff + segment.filesize, self.page_size);
} }
} }
try self.writeDyldInfoData(); try self.writeDyldInfoData();
try self.writeSymtabs(); try self.writeSymtabs();
seg.vmsize = mem.alignForwardGeneric(u64, seg.filesize, self.page_size); seg.vmsize = mem.alignForward(u64, seg.filesize, self.page_size);
} }
fn collectRebaseDataFromTableSection(self: *MachO, sect_id: u8, rebase: *Rebase, table: anytype) !void { fn collectRebaseDataFromTableSection(self: *MachO, sect_id: u8, rebase: *Rebase, table: anytype) !void {
@@ -3236,17 +3236,17 @@ fn writeDyldInfoData(self: *MachO) !void {
assert(mem.isAlignedGeneric(u64, link_seg.fileoff, @alignOf(u64))); assert(mem.isAlignedGeneric(u64, link_seg.fileoff, @alignOf(u64)));
const rebase_off = link_seg.fileoff; const rebase_off = link_seg.fileoff;
const rebase_size = rebase.size(); const rebase_size = rebase.size();
const rebase_size_aligned = mem.alignForwardGeneric(u64, rebase_size, @alignOf(u64)); const rebase_size_aligned = mem.alignForward(u64, rebase_size, @alignOf(u64));
log.debug("writing rebase info from 0x{x} to 0x{x}", .{ rebase_off, rebase_off + rebase_size_aligned }); log.debug("writing rebase info from 0x{x} to 0x{x}", .{ rebase_off, rebase_off + rebase_size_aligned });
const bind_off = rebase_off + rebase_size_aligned; const bind_off = rebase_off + rebase_size_aligned;
const bind_size = bind.size(); const bind_size = bind.size();
const bind_size_aligned = mem.alignForwardGeneric(u64, bind_size, @alignOf(u64)); const bind_size_aligned = mem.alignForward(u64, bind_size, @alignOf(u64));
log.debug("writing bind info from 0x{x} to 0x{x}", .{ bind_off, bind_off + bind_size_aligned }); log.debug("writing bind info from 0x{x} to 0x{x}", .{ bind_off, bind_off + bind_size_aligned });
const lazy_bind_off = bind_off + bind_size_aligned; const lazy_bind_off = bind_off + bind_size_aligned;
const lazy_bind_size = lazy_bind.size(); const lazy_bind_size = lazy_bind.size();
const lazy_bind_size_aligned = mem.alignForwardGeneric(u64, lazy_bind_size, @alignOf(u64)); const lazy_bind_size_aligned = mem.alignForward(u64, lazy_bind_size, @alignOf(u64));
log.debug("writing lazy bind info from 0x{x} to 0x{x}", .{ log.debug("writing lazy bind info from 0x{x} to 0x{x}", .{
lazy_bind_off, lazy_bind_off,
lazy_bind_off + lazy_bind_size_aligned, lazy_bind_off + lazy_bind_size_aligned,
@@ -3254,7 +3254,7 @@ fn writeDyldInfoData(self: *MachO) !void {
const export_off = lazy_bind_off + lazy_bind_size_aligned; const export_off = lazy_bind_off + lazy_bind_size_aligned;
const export_size = trie.size; const export_size = trie.size;
const export_size_aligned = mem.alignForwardGeneric(u64, export_size, @alignOf(u64)); const export_size_aligned = mem.alignForward(u64, export_size, @alignOf(u64));
log.debug("writing export trie from 0x{x} to 0x{x}", .{ export_off, export_off + export_size_aligned }); log.debug("writing export trie from 0x{x} to 0x{x}", .{ export_off, export_off + export_size_aligned });
const needed_size = math.cast(usize, export_off + export_size_aligned - rebase_off) orelse const needed_size = math.cast(usize, export_off + export_size_aligned - rebase_off) orelse
@@ -3412,7 +3412,7 @@ fn writeStrtab(self: *MachO) !void {
const offset = seg.fileoff + seg.filesize; const offset = seg.fileoff + seg.filesize;
assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64))); assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64)));
const needed_size = self.strtab.buffer.items.len; const needed_size = self.strtab.buffer.items.len;
const needed_size_aligned = mem.alignForwardGeneric(u64, needed_size, @alignOf(u64)); const needed_size_aligned = mem.alignForward(u64, needed_size, @alignOf(u64));
seg.filesize = offset + needed_size_aligned - seg.fileoff; seg.filesize = offset + needed_size_aligned - seg.fileoff;
log.debug("writing string table from 0x{x} to 0x{x}", .{ offset, offset + needed_size_aligned }); log.debug("writing string table from 0x{x} to 0x{x}", .{ offset, offset + needed_size_aligned });
@@ -3447,7 +3447,7 @@ fn writeDysymtab(self: *MachO, ctx: SymtabCtx) !void {
const offset = seg.fileoff + seg.filesize; const offset = seg.fileoff + seg.filesize;
assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64))); assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64)));
const needed_size = nindirectsyms * @sizeOf(u32); const needed_size = nindirectsyms * @sizeOf(u32);
const needed_size_aligned = mem.alignForwardGeneric(u64, needed_size, @alignOf(u64)); const needed_size_aligned = mem.alignForward(u64, needed_size, @alignOf(u64));
seg.filesize = offset + needed_size_aligned - seg.fileoff; seg.filesize = offset + needed_size_aligned - seg.fileoff;
log.debug("writing indirect symbol table from 0x{x} to 0x{x}", .{ offset, offset + needed_size_aligned }); log.debug("writing indirect symbol table from 0x{x} to 0x{x}", .{ offset, offset + needed_size_aligned });
@@ -3514,10 +3514,10 @@ fn writeCodeSignaturePadding(self: *MachO, code_sig: *CodeSignature) !void {
const seg = self.getLinkeditSegmentPtr(); const seg = self.getLinkeditSegmentPtr();
// Code signature data has to be 16-bytes aligned for Apple tools to recognize the file // Code signature data has to be 16-bytes aligned for Apple tools to recognize the file
// https://github.com/opensource-apple/cctools/blob/fdb4825f303fd5c0751be524babd32958181b3ed/libstuff/checkout.c#L271 // https://github.com/opensource-apple/cctools/blob/fdb4825f303fd5c0751be524babd32958181b3ed/libstuff/checkout.c#L271
const offset = mem.alignForwardGeneric(u64, seg.fileoff + seg.filesize, 16); const offset = mem.alignForward(u64, seg.fileoff + seg.filesize, 16);
const needed_size = code_sig.estimateSize(offset); const needed_size = code_sig.estimateSize(offset);
seg.filesize = offset + needed_size - seg.fileoff; seg.filesize = offset + needed_size - seg.fileoff;
seg.vmsize = mem.alignForwardGeneric(u64, seg.filesize, self.page_size); seg.vmsize = mem.alignForward(u64, seg.filesize, self.page_size);
log.debug("writing code signature padding from 0x{x} to 0x{x}", .{ offset, offset + needed_size }); log.debug("writing code signature padding from 0x{x} to 0x{x}", .{ offset, offset + needed_size });
// Pad out the space. We need to do this to calculate valid hashes for everything in the file // Pad out the space. We need to do this to calculate valid hashes for everything in the file
// except for code signature data. // except for code signature data.
@@ -3630,7 +3630,7 @@ fn allocatedSize(self: *MachO, start: u64) u64 {
fn findFreeSpace(self: *MachO, object_size: u64, min_alignment: u32) u64 { fn findFreeSpace(self: *MachO, object_size: u64, min_alignment: u32) u64 {
var start: u64 = 0; var start: u64 = 0;
while (self.detectAllocCollision(start, object_size)) |item_end| { while (self.detectAllocCollision(start, object_size)) |item_end| {
start = mem.alignForwardGeneric(u64, item_end, min_alignment); start = mem.alignForward(u64, item_end, min_alignment);
} }
return start; return start;
} }

View File

@@ -282,7 +282,7 @@ pub fn writeAdhocSignature(
self.code_directory.inner.execSegFlags = if (opts.output_mode == .Exe) macho.CS_EXECSEG_MAIN_BINARY else 0; self.code_directory.inner.execSegFlags = if (opts.output_mode == .Exe) macho.CS_EXECSEG_MAIN_BINARY else 0;
self.code_directory.inner.codeLimit = opts.file_size; self.code_directory.inner.codeLimit = opts.file_size;
const total_pages = @intCast(u32, mem.alignForward(opts.file_size, self.page_size) / self.page_size); const total_pages = @intCast(u32, mem.alignForward(usize, opts.file_size, self.page_size) / self.page_size);
try self.code_directory.code_slots.ensureTotalCapacityPrecise(gpa, total_pages); try self.code_directory.code_slots.ensureTotalCapacityPrecise(gpa, total_pages);
self.code_directory.code_slots.items.len = total_pages; self.code_directory.code_slots.items.len = total_pages;
@@ -357,7 +357,7 @@ fn parallelHash(
) !void { ) !void {
var wg: WaitGroup = .{}; var wg: WaitGroup = .{};
const total_num_chunks = mem.alignForward(file_size, self.page_size) / self.page_size; const total_num_chunks = mem.alignForward(usize, file_size, self.page_size) / self.page_size;
assert(self.code_directory.code_slots.items.len >= total_num_chunks); assert(self.code_directory.code_slots.items.len >= total_num_chunks);
const buffer = try gpa.alloc(u8, self.page_size * total_num_chunks); const buffer = try gpa.alloc(u8, self.page_size * total_num_chunks);
@@ -421,7 +421,7 @@ pub fn size(self: CodeSignature) u32 {
pub fn estimateSize(self: CodeSignature, file_size: u64) u32 { pub fn estimateSize(self: CodeSignature, file_size: u64) u32 {
var ssize: u64 = @sizeOf(macho.SuperBlob) + @sizeOf(macho.BlobIndex) + self.code_directory.size(); var ssize: u64 = @sizeOf(macho.SuperBlob) + @sizeOf(macho.BlobIndex) + self.code_directory.size();
// Approx code slots // Approx code slots
const total_pages = mem.alignForwardGeneric(u64, file_size, self.page_size) / self.page_size; const total_pages = mem.alignForward(u64, file_size, self.page_size) / self.page_size;
ssize += total_pages * hash_size; ssize += total_pages * hash_size;
var n_special_slots: u32 = 0; var n_special_slots: u32 = 0;
if (self.requirements) |req| { if (self.requirements) |req| {
@@ -436,7 +436,7 @@ pub fn estimateSize(self: CodeSignature, file_size: u64) u32 {
ssize += @sizeOf(macho.BlobIndex) + sig.size(); ssize += @sizeOf(macho.BlobIndex) + sig.size();
} }
ssize += n_special_slots * hash_size; ssize += n_special_slots * hash_size;
return @intCast(u32, mem.alignForwardGeneric(u64, ssize, @sizeOf(u64))); return @intCast(u32, mem.alignForward(u64, ssize, @sizeOf(u64)));
} }
pub fn clear(self: *CodeSignature, allocator: Allocator) void { pub fn clear(self: *CodeSignature, allocator: Allocator) void {
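
The code-signature hunks above turn a file size into a page count by rounding up to a page boundary and dividing, which is simply a ceiling division. A sketch with illustrative numbers:

    const std = @import("std");

    test "alignForward then divide is a ceiling division" {
        const page_size: u64 = 0x1000; // illustrative page size
        const file_size: u64 = 0x2801; // a bit more than two pages
        const total_pages = std.mem.alignForward(u64, file_size, page_size) / page_size;
        try std.testing.expectEqual(@as(u64, 3), total_pages);
        // Same result via std.math.divCeil:
        try std.testing.expectEqual(total_pages, try std.math.divCeil(u64, file_size, page_size));
    }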

View File

@@ -68,7 +68,7 @@ pub fn populateMissingMetadata(self: *DebugSymbols) !void {
const off = @intCast(u64, self.page_size); const off = @intCast(u64, self.page_size);
const ideal_size: u16 = 200 + 128 + 160 + 250; const ideal_size: u16 = 200 + 128 + 160 + 250;
const needed_size = mem.alignForwardGeneric(u64, padToIdeal(ideal_size), self.page_size); const needed_size = mem.alignForward(u64, padToIdeal(ideal_size), self.page_size);
log.debug("found __DWARF segment free space 0x{x} to 0x{x}", .{ off, off + needed_size }); log.debug("found __DWARF segment free space 0x{x} to 0x{x}", .{ off, off + needed_size });
@@ -213,7 +213,7 @@ fn findFreeSpace(self: *DebugSymbols, object_size: u64, min_alignment: u64) u64
const segment = self.getDwarfSegmentPtr(); const segment = self.getDwarfSegmentPtr();
var offset: u64 = segment.fileoff; var offset: u64 = segment.fileoff;
while (self.detectAllocCollision(offset, object_size)) |item_end| { while (self.detectAllocCollision(offset, object_size)) |item_end| {
offset = mem.alignForwardGeneric(u64, item_end, min_alignment); offset = mem.alignForward(u64, item_end, min_alignment);
} }
return offset; return offset;
} }
@@ -355,18 +355,18 @@ fn finalizeDwarfSegment(self: *DebugSymbols, macho_file: *MachO) void {
file_size = @max(file_size, header.offset + header.size); file_size = @max(file_size, header.offset + header.size);
} }
const aligned_size = mem.alignForwardGeneric(u64, file_size, self.page_size); const aligned_size = mem.alignForward(u64, file_size, self.page_size);
dwarf_segment.vmaddr = base_vmaddr; dwarf_segment.vmaddr = base_vmaddr;
dwarf_segment.filesize = aligned_size; dwarf_segment.filesize = aligned_size;
dwarf_segment.vmsize = aligned_size; dwarf_segment.vmsize = aligned_size;
const linkedit = self.getLinkeditSegmentPtr(); const linkedit = self.getLinkeditSegmentPtr();
linkedit.vmaddr = mem.alignForwardGeneric( linkedit.vmaddr = mem.alignForward(
u64, u64,
dwarf_segment.vmaddr + aligned_size, dwarf_segment.vmaddr + aligned_size,
self.page_size, self.page_size,
); );
linkedit.fileoff = mem.alignForwardGeneric( linkedit.fileoff = mem.alignForward(
u64, u64,
dwarf_segment.fileoff + aligned_size, dwarf_segment.fileoff + aligned_size,
self.page_size, self.page_size,
@@ -458,7 +458,7 @@ fn writeLinkeditSegmentData(self: *DebugSymbols, macho_file: *MachO) !void {
try self.writeStrtab(); try self.writeStrtab();
const seg = &self.segments.items[self.linkedit_segment_cmd_index.?]; const seg = &self.segments.items[self.linkedit_segment_cmd_index.?];
const aligned_size = mem.alignForwardGeneric(u64, seg.filesize, self.page_size); const aligned_size = mem.alignForward(u64, seg.filesize, self.page_size);
seg.vmsize = aligned_size; seg.vmsize = aligned_size;
} }
@@ -497,7 +497,7 @@ fn writeSymtab(self: *DebugSymbols, macho_file: *MachO) !void {
const nsyms = nlocals + nexports; const nsyms = nlocals + nexports;
const seg = &self.segments.items[self.linkedit_segment_cmd_index.?]; const seg = &self.segments.items[self.linkedit_segment_cmd_index.?];
const offset = mem.alignForwardGeneric(u64, seg.fileoff, @alignOf(macho.nlist_64)); const offset = mem.alignForward(u64, seg.fileoff, @alignOf(macho.nlist_64));
const needed_size = nsyms * @sizeOf(macho.nlist_64); const needed_size = nsyms * @sizeOf(macho.nlist_64);
seg.filesize = offset + needed_size - seg.fileoff; seg.filesize = offset + needed_size - seg.fileoff;
@@ -522,8 +522,8 @@ fn writeStrtab(self: *DebugSymbols) !void {
const seg = &self.segments.items[self.linkedit_segment_cmd_index.?]; const seg = &self.segments.items[self.linkedit_segment_cmd_index.?];
const symtab_size = @intCast(u32, self.symtab_cmd.nsyms * @sizeOf(macho.nlist_64)); const symtab_size = @intCast(u32, self.symtab_cmd.nsyms * @sizeOf(macho.nlist_64));
const offset = mem.alignForwardGeneric(u64, self.symtab_cmd.symoff + symtab_size, @alignOf(u64)); const offset = mem.alignForward(u64, self.symtab_cmd.symoff + symtab_size, @alignOf(u64));
const needed_size = mem.alignForwardGeneric(u64, self.strtab.buffer.items.len, @alignOf(u64)); const needed_size = mem.alignForward(u64, self.strtab.buffer.items.len, @alignOf(u64));
seg.filesize = offset + needed_size - seg.fileoff; seg.filesize = offset + needed_size - seg.fileoff;
self.symtab_cmd.stroff = @intCast(u32, offset); self.symtab_cmd.stroff = @intCast(u32, offset);

View File

@@ -17,7 +17,7 @@ pub const default_dyld_path: [*:0]const u8 = "/usr/lib/dyld";
fn calcInstallNameLen(cmd_size: u64, name: []const u8, assume_max_path_len: bool) u64 { fn calcInstallNameLen(cmd_size: u64, name: []const u8, assume_max_path_len: bool) u64 {
const darwin_path_max = 1024; const darwin_path_max = 1024;
const name_len = if (assume_max_path_len) darwin_path_max else name.len + 1; const name_len = if (assume_max_path_len) darwin_path_max else name.len + 1;
return mem.alignForwardGeneric(u64, cmd_size + name_len, @alignOf(u64)); return mem.alignForward(u64, cmd_size + name_len, @alignOf(u64));
} }
const CalcLCsSizeCtx = struct { const CalcLCsSizeCtx = struct {
@@ -149,7 +149,7 @@ pub fn calcNumOfLCs(lc_buffer: []const u8) u32 {
pub fn writeDylinkerLC(lc_writer: anytype) !void { pub fn writeDylinkerLC(lc_writer: anytype) !void {
const name_len = mem.sliceTo(default_dyld_path, 0).len; const name_len = mem.sliceTo(default_dyld_path, 0).len;
const cmdsize = @intCast(u32, mem.alignForwardGeneric( const cmdsize = @intCast(u32, mem.alignForward(
u64, u64,
@sizeOf(macho.dylinker_command) + name_len, @sizeOf(macho.dylinker_command) + name_len,
@sizeOf(u64), @sizeOf(u64),
@@ -176,7 +176,7 @@ const WriteDylibLCCtx = struct {
fn writeDylibLC(ctx: WriteDylibLCCtx, lc_writer: anytype) !void { fn writeDylibLC(ctx: WriteDylibLCCtx, lc_writer: anytype) !void {
const name_len = ctx.name.len + 1; const name_len = ctx.name.len + 1;
const cmdsize = @intCast(u32, mem.alignForwardGeneric( const cmdsize = @intCast(u32, mem.alignForward(
u64, u64,
@sizeOf(macho.dylib_command) + name_len, @sizeOf(macho.dylib_command) + name_len,
@sizeOf(u64), @sizeOf(u64),
@@ -253,7 +253,7 @@ pub fn writeRpathLCs(gpa: Allocator, options: *const link.Options, lc_writer: an
while (try it.next()) |rpath| { while (try it.next()) |rpath| {
const rpath_len = rpath.len + 1; const rpath_len = rpath.len + 1;
const cmdsize = @intCast(u32, mem.alignForwardGeneric( const cmdsize = @intCast(u32, mem.alignForward(
u64, u64,
@sizeOf(macho.rpath_command) + rpath_len, @sizeOf(macho.rpath_command) + rpath_len,
@sizeOf(u64), @sizeOf(u64),

View File

@@ -109,7 +109,7 @@ pub fn createThunks(zld: *Zld, sect_id: u8) !void {
while (true) { while (true) {
const atom = zld.getAtom(group_end); const atom = zld.getAtom(group_end);
offset = mem.alignForwardGeneric(u64, offset, try math.powi(u32, 2, atom.alignment)); offset = mem.alignForward(u64, offset, try math.powi(u32, 2, atom.alignment));
const sym = zld.getSymbolPtr(atom.getSymbolWithLoc()); const sym = zld.getSymbolPtr(atom.getSymbolWithLoc());
sym.n_value = offset; sym.n_value = offset;
@@ -153,7 +153,7 @@ pub fn createThunks(zld: *Zld, sect_id: u8) !void {
} else break; } else break;
} }
offset = mem.alignForwardGeneric(u64, offset, Thunk.getAlignment()); offset = mem.alignForward(u64, offset, Thunk.getAlignment());
allocateThunk(zld, thunk_index, offset, header); allocateThunk(zld, thunk_index, offset, header);
offset += zld.thunks.items[thunk_index].getSize(); offset += zld.thunks.items[thunk_index].getSize();
@@ -193,7 +193,7 @@ fn allocateThunk(
var offset = base_offset; var offset = base_offset;
while (true) { while (true) {
const atom = zld.getAtom(atom_index); const atom = zld.getAtom(atom_index);
offset = mem.alignForwardGeneric(u64, offset, Thunk.getAlignment()); offset = mem.alignForward(u64, offset, Thunk.getAlignment());
const sym = zld.getSymbolPtr(atom.getSymbolWithLoc()); const sym = zld.getSymbolPtr(atom.getSymbolWithLoc());
sym.n_value = offset; sym.n_value = offset;

View File

@@ -1207,7 +1207,7 @@ pub const Zld = struct {
fn createSegments(self: *Zld) !void { fn createSegments(self: *Zld) !void {
const pagezero_vmsize = self.options.pagezero_size orelse MachO.default_pagezero_vmsize; const pagezero_vmsize = self.options.pagezero_size orelse MachO.default_pagezero_vmsize;
const aligned_pagezero_vmsize = mem.alignBackwardGeneric(u64, pagezero_vmsize, self.page_size); const aligned_pagezero_vmsize = mem.alignBackward(u64, pagezero_vmsize, self.page_size);
if (self.options.output_mode != .Lib and aligned_pagezero_vmsize > 0) { if (self.options.output_mode != .Lib and aligned_pagezero_vmsize > 0) {
if (aligned_pagezero_vmsize != pagezero_vmsize) { if (aligned_pagezero_vmsize != pagezero_vmsize) {
log.warn("requested __PAGEZERO size (0x{x}) is not page aligned", .{pagezero_vmsize}); log.warn("requested __PAGEZERO size (0x{x}) is not page aligned", .{pagezero_vmsize});
@@ -1466,7 +1466,7 @@ pub const Zld = struct {
while (true) { while (true) {
const atom = self.getAtom(atom_index); const atom = self.getAtom(atom_index);
const atom_alignment = try math.powi(u32, 2, atom.alignment); const atom_alignment = try math.powi(u32, 2, atom.alignment);
const atom_offset = mem.alignForwardGeneric(u64, header.size, atom_alignment); const atom_offset = mem.alignForward(u64, header.size, atom_alignment);
const padding = atom_offset - header.size; const padding = atom_offset - header.size;
const sym = self.getSymbolPtr(atom.getSymbolWithLoc()); const sym = self.getSymbolPtr(atom.getSymbolWithLoc());
@@ -1534,7 +1534,7 @@ pub const Zld = struct {
const slice = self.sections.slice(); const slice = self.sections.slice();
for (slice.items(.header)[indexes.start..indexes.end], 0..) |*header, sect_id| { for (slice.items(.header)[indexes.start..indexes.end], 0..) |*header, sect_id| {
const alignment = try math.powi(u32, 2, header.@"align"); const alignment = try math.powi(u32, 2, header.@"align");
const start_aligned = mem.alignForwardGeneric(u64, start, alignment); const start_aligned = mem.alignForward(u64, start, alignment);
const n_sect = @intCast(u8, indexes.start + sect_id + 1); const n_sect = @intCast(u8, indexes.start + sect_id + 1);
header.offset = if (header.isZerofill()) header.offset = if (header.isZerofill())
@@ -1598,8 +1598,8 @@ pub const Zld = struct {
segment.vmsize = start; segment.vmsize = start;
} }
segment.filesize = mem.alignForwardGeneric(u64, segment.filesize, self.page_size); segment.filesize = mem.alignForward(u64, segment.filesize, self.page_size);
segment.vmsize = mem.alignForwardGeneric(u64, segment.vmsize, self.page_size); segment.vmsize = mem.alignForward(u64, segment.vmsize, self.page_size);
} }
const InitSectionOpts = struct { const InitSectionOpts = struct {
@@ -1709,7 +1709,7 @@ pub const Zld = struct {
try self.writeSymtabs(); try self.writeSymtabs();
const seg = self.getLinkeditSegmentPtr(); const seg = self.getLinkeditSegmentPtr();
seg.vmsize = mem.alignForwardGeneric(u64, seg.filesize, self.page_size); seg.vmsize = mem.alignForward(u64, seg.filesize, self.page_size);
} }
fn collectRebaseDataFromContainer( fn collectRebaseDataFromContainer(
@@ -2112,17 +2112,17 @@ pub const Zld = struct {
assert(mem.isAlignedGeneric(u64, link_seg.fileoff, @alignOf(u64))); assert(mem.isAlignedGeneric(u64, link_seg.fileoff, @alignOf(u64)));
const rebase_off = link_seg.fileoff; const rebase_off = link_seg.fileoff;
const rebase_size = rebase.size(); const rebase_size = rebase.size();
const rebase_size_aligned = mem.alignForwardGeneric(u64, rebase_size, @alignOf(u64)); const rebase_size_aligned = mem.alignForward(u64, rebase_size, @alignOf(u64));
log.debug("writing rebase info from 0x{x} to 0x{x}", .{ rebase_off, rebase_off + rebase_size_aligned }); log.debug("writing rebase info from 0x{x} to 0x{x}", .{ rebase_off, rebase_off + rebase_size_aligned });
const bind_off = rebase_off + rebase_size_aligned; const bind_off = rebase_off + rebase_size_aligned;
const bind_size = bind.size(); const bind_size = bind.size();
const bind_size_aligned = mem.alignForwardGeneric(u64, bind_size, @alignOf(u64)); const bind_size_aligned = mem.alignForward(u64, bind_size, @alignOf(u64));
log.debug("writing bind info from 0x{x} to 0x{x}", .{ bind_off, bind_off + bind_size_aligned }); log.debug("writing bind info from 0x{x} to 0x{x}", .{ bind_off, bind_off + bind_size_aligned });
const lazy_bind_off = bind_off + bind_size_aligned; const lazy_bind_off = bind_off + bind_size_aligned;
const lazy_bind_size = lazy_bind.size(); const lazy_bind_size = lazy_bind.size();
const lazy_bind_size_aligned = mem.alignForwardGeneric(u64, lazy_bind_size, @alignOf(u64)); const lazy_bind_size_aligned = mem.alignForward(u64, lazy_bind_size, @alignOf(u64));
log.debug("writing lazy bind info from 0x{x} to 0x{x}", .{ log.debug("writing lazy bind info from 0x{x} to 0x{x}", .{
lazy_bind_off, lazy_bind_off,
lazy_bind_off + lazy_bind_size_aligned, lazy_bind_off + lazy_bind_size_aligned,
@@ -2130,7 +2130,7 @@ pub const Zld = struct {
const export_off = lazy_bind_off + lazy_bind_size_aligned; const export_off = lazy_bind_off + lazy_bind_size_aligned;
const export_size = trie.size; const export_size = trie.size;
const export_size_aligned = mem.alignForwardGeneric(u64, export_size, @alignOf(u64)); const export_size_aligned = mem.alignForward(u64, export_size, @alignOf(u64));
log.debug("writing export trie from 0x{x} to 0x{x}", .{ export_off, export_off + export_size_aligned }); log.debug("writing export trie from 0x{x} to 0x{x}", .{ export_off, export_off + export_size_aligned });
const needed_size = math.cast(usize, export_off + export_size_aligned - rebase_off) orelse const needed_size = math.cast(usize, export_off + export_size_aligned - rebase_off) orelse
@@ -2268,7 +2268,7 @@ pub const Zld = struct {
const offset = link_seg.fileoff + link_seg.filesize; const offset = link_seg.fileoff + link_seg.filesize;
assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64))); assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64)));
const needed_size = buffer.items.len; const needed_size = buffer.items.len;
const needed_size_aligned = mem.alignForwardGeneric(u64, needed_size, @alignOf(u64)); const needed_size_aligned = mem.alignForward(u64, needed_size, @alignOf(u64));
const padding = math.cast(usize, needed_size_aligned - needed_size) orelse return error.Overflow; const padding = math.cast(usize, needed_size_aligned - needed_size) orelse return error.Overflow;
if (padding > 0) { if (padding > 0) {
try buffer.ensureUnusedCapacity(padding); try buffer.ensureUnusedCapacity(padding);
@@ -2347,7 +2347,7 @@ pub const Zld = struct {
const offset = seg.fileoff + seg.filesize; const offset = seg.fileoff + seg.filesize;
assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64))); assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64)));
const needed_size = out_dice.items.len * @sizeOf(macho.data_in_code_entry); const needed_size = out_dice.items.len * @sizeOf(macho.data_in_code_entry);
const needed_size_aligned = mem.alignForwardGeneric(u64, needed_size, @alignOf(u64)); const needed_size_aligned = mem.alignForward(u64, needed_size, @alignOf(u64));
seg.filesize = offset + needed_size_aligned - seg.fileoff; seg.filesize = offset + needed_size_aligned - seg.fileoff;
const buffer = try self.gpa.alloc(u8, math.cast(usize, needed_size_aligned) orelse return error.Overflow); const buffer = try self.gpa.alloc(u8, math.cast(usize, needed_size_aligned) orelse return error.Overflow);
@@ -2480,7 +2480,7 @@ pub const Zld = struct {
const offset = seg.fileoff + seg.filesize; const offset = seg.fileoff + seg.filesize;
assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64))); assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64)));
const needed_size = self.strtab.buffer.items.len; const needed_size = self.strtab.buffer.items.len;
const needed_size_aligned = mem.alignForwardGeneric(u64, needed_size, @alignOf(u64)); const needed_size_aligned = mem.alignForward(u64, needed_size, @alignOf(u64));
seg.filesize = offset + needed_size_aligned - seg.fileoff; seg.filesize = offset + needed_size_aligned - seg.fileoff;
log.debug("writing string table from 0x{x} to 0x{x}", .{ offset, offset + needed_size_aligned }); log.debug("writing string table from 0x{x} to 0x{x}", .{ offset, offset + needed_size_aligned });
@@ -2515,7 +2515,7 @@ pub const Zld = struct {
const offset = seg.fileoff + seg.filesize; const offset = seg.fileoff + seg.filesize;
assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64))); assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64)));
const needed_size = nindirectsyms * @sizeOf(u32); const needed_size = nindirectsyms * @sizeOf(u32);
const needed_size_aligned = mem.alignForwardGeneric(u64, needed_size, @alignOf(u64)); const needed_size_aligned = mem.alignForward(u64, needed_size, @alignOf(u64));
seg.filesize = offset + needed_size_aligned - seg.fileoff; seg.filesize = offset + needed_size_aligned - seg.fileoff;
log.debug("writing indirect symbol table from 0x{x} to 0x{x}", .{ offset, offset + needed_size_aligned }); log.debug("writing indirect symbol table from 0x{x} to 0x{x}", .{ offset, offset + needed_size_aligned });
@@ -2690,7 +2690,7 @@ pub const Zld = struct {
for (subsections[0..count]) |cut| { for (subsections[0..count]) |cut| {
const size = cut.end - cut.start; const size = cut.end - cut.start;
const num_chunks = mem.alignForward(size, chunk_size) / chunk_size; const num_chunks = mem.alignForward(usize, size, chunk_size) / chunk_size;
var i: usize = 0; var i: usize = 0;
while (i < num_chunks) : (i += 1) { while (i < num_chunks) : (i += 1) {
@@ -2725,10 +2725,10 @@ pub const Zld = struct {
const seg = self.getLinkeditSegmentPtr(); const seg = self.getLinkeditSegmentPtr();
// Code signature data has to be 16-bytes aligned for Apple tools to recognize the file // Code signature data has to be 16-bytes aligned for Apple tools to recognize the file
// https://github.com/opensource-apple/cctools/blob/fdb4825f303fd5c0751be524babd32958181b3ed/libstuff/checkout.c#L271 // https://github.com/opensource-apple/cctools/blob/fdb4825f303fd5c0751be524babd32958181b3ed/libstuff/checkout.c#L271
const offset = mem.alignForwardGeneric(u64, seg.fileoff + seg.filesize, 16); const offset = mem.alignForward(u64, seg.fileoff + seg.filesize, 16);
const needed_size = code_sig.estimateSize(offset); const needed_size = code_sig.estimateSize(offset);
seg.filesize = offset + needed_size - seg.fileoff; seg.filesize = offset + needed_size - seg.fileoff;
seg.vmsize = mem.alignForwardGeneric(u64, seg.filesize, self.page_size); seg.vmsize = mem.alignForward(u64, seg.filesize, self.page_size);
log.debug("writing code signature padding from 0x{x} to 0x{x}", .{ offset, offset + needed_size }); log.debug("writing code signature padding from 0x{x} to 0x{x}", .{ offset, offset + needed_size });
// Pad out the space. We need to do this to calculate valid hashes for everything in the file // Pad out the space. We need to do this to calculate valid hashes for everything in the file
// except for code signature data. // except for code signature data.

View File

@@ -2118,7 +2118,7 @@ fn allocateAtoms(wasm: *Wasm) !void {
} }
} }
} }
offset = std.mem.alignForwardGeneric(u32, offset, atom.alignment); offset = std.mem.alignForward(u32, offset, atom.alignment);
atom.offset = offset; atom.offset = offset;
log.debug("Atom '{s}' allocated from 0x{x:0>8} to 0x{x:0>8} size={d}", .{ log.debug("Atom '{s}' allocated from 0x{x:0>8} to 0x{x:0>8} size={d}", .{
symbol_loc.getName(wasm), symbol_loc.getName(wasm),
@@ -2129,7 +2129,7 @@ fn allocateAtoms(wasm: *Wasm) !void {
offset += atom.size; offset += atom.size;
atom_index = atom.prev orelse break; atom_index = atom.prev orelse break;
} }
segment.size = std.mem.alignForwardGeneric(u32, offset, segment.alignment); segment.size = std.mem.alignForward(u32, offset, segment.alignment);
} }
} }
@@ -2731,7 +2731,7 @@ fn setupMemory(wasm: *Wasm) !void {
const is_obj = wasm.base.options.output_mode == .Obj; const is_obj = wasm.base.options.output_mode == .Obj;
if (place_stack_first and !is_obj) { if (place_stack_first and !is_obj) {
memory_ptr = std.mem.alignForwardGeneric(u64, memory_ptr, stack_alignment); memory_ptr = std.mem.alignForward(u64, memory_ptr, stack_alignment);
memory_ptr += stack_size; memory_ptr += stack_size;
// We always put the stack pointer global at index 0 // We always put the stack pointer global at index 0
wasm.wasm_globals.items[0].init.i32_const = @bitCast(i32, @intCast(u32, memory_ptr)); wasm.wasm_globals.items[0].init.i32_const = @bitCast(i32, @intCast(u32, memory_ptr));
@@ -2741,7 +2741,7 @@ fn setupMemory(wasm: *Wasm) !void {
var data_seg_it = wasm.data_segments.iterator(); var data_seg_it = wasm.data_segments.iterator();
while (data_seg_it.next()) |entry| { while (data_seg_it.next()) |entry| {
const segment = &wasm.segments.items[entry.value_ptr.*]; const segment = &wasm.segments.items[entry.value_ptr.*];
memory_ptr = std.mem.alignForwardGeneric(u64, memory_ptr, segment.alignment); memory_ptr = std.mem.alignForward(u64, memory_ptr, segment.alignment);
// set TLS-related symbols // set TLS-related symbols
if (mem.eql(u8, entry.key_ptr.*, ".tdata")) { if (mem.eql(u8, entry.key_ptr.*, ".tdata")) {
@@ -2779,7 +2779,7 @@ fn setupMemory(wasm: *Wasm) !void {
// create the memory init flag which is used by the init memory function // create the memory init flag which is used by the init memory function
if (wasm.base.options.shared_memory and wasm.hasPassiveInitializationSegments()) { if (wasm.base.options.shared_memory and wasm.hasPassiveInitializationSegments()) {
// align to pointer size // align to pointer size
memory_ptr = mem.alignForwardGeneric(u64, memory_ptr, 4); memory_ptr = mem.alignForward(u64, memory_ptr, 4);
const loc = try wasm.createSyntheticSymbol("__wasm_init_memory_flag", .data); const loc = try wasm.createSyntheticSymbol("__wasm_init_memory_flag", .data);
const sym = loc.getSymbol(wasm); const sym = loc.getSymbol(wasm);
sym.virtual_address = @intCast(u32, memory_ptr); sym.virtual_address = @intCast(u32, memory_ptr);
@@ -2787,7 +2787,7 @@ fn setupMemory(wasm: *Wasm) !void {
} }
if (!place_stack_first and !is_obj) { if (!place_stack_first and !is_obj) {
memory_ptr = std.mem.alignForwardGeneric(u64, memory_ptr, stack_alignment); memory_ptr = std.mem.alignForward(u64, memory_ptr, stack_alignment);
memory_ptr += stack_size; memory_ptr += stack_size;
wasm.wasm_globals.items[0].init.i32_const = @bitCast(i32, @intCast(u32, memory_ptr)); wasm.wasm_globals.items[0].init.i32_const = @bitCast(i32, @intCast(u32, memory_ptr));
} }
@@ -2796,7 +2796,7 @@ fn setupMemory(wasm: *Wasm) !void {
// We must set its virtual address so it can be used in relocations. // We must set its virtual address so it can be used in relocations.
if (wasm.findGlobalSymbol("__heap_base")) |loc| { if (wasm.findGlobalSymbol("__heap_base")) |loc| {
const symbol = loc.getSymbol(wasm); const symbol = loc.getSymbol(wasm);
symbol.virtual_address = @intCast(u32, mem.alignForwardGeneric(u64, memory_ptr, heap_alignment)); symbol.virtual_address = @intCast(u32, mem.alignForward(u64, memory_ptr, heap_alignment));
} }
// Setup the max amount of pages // Setup the max amount of pages
@@ -2818,7 +2818,7 @@ fn setupMemory(wasm: *Wasm) !void {
} }
memory_ptr = initial_memory; memory_ptr = initial_memory;
} }
memory_ptr = mem.alignForwardGeneric(u64, memory_ptr, std.wasm.page_size); memory_ptr = mem.alignForward(u64, memory_ptr, std.wasm.page_size);
// In case we do not import memory, but define it ourselves, // In case we do not import memory, but define it ourselves,
// set the minimum amount of pages on the memory section. // set the minimum amount of pages on the memory section.
wasm.memories.limits.min = @intCast(u32, memory_ptr / page_size); wasm.memories.limits.min = @intCast(u32, memory_ptr / page_size);
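
The Wasm memory-setup hunks above end by rounding the final memory pointer up to the Wasm page size and dividing to get the minimum page count for the memory section. A sketch of that last step (Wasm pages are 64 KiB; the starting value below is made up):

    const std = @import("std");

    test "minimum memory pages from the final memory pointer" {
        var memory_ptr: u64 = 0x1_8000; // illustrative end of stack + data segments
        memory_ptr = std.mem.alignForward(u64, memory_ptr, std.wasm.page_size); // 0x2_0000
        const min_pages = memory_ptr / std.wasm.page_size;
        try std.testing.expectEqual(@as(u64, 2), min_pages);
    }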

View File

@@ -1024,7 +1024,7 @@ fn ElfFile(comptime is_64: bool) type {
dest.sh_size = @intCast(Elf_OffSize, data.len); dest.sh_size = @intCast(Elf_OffSize, data.len);
const addralign = if (src.sh_addralign == 0 or dest.sh_type == elf.SHT_NOBITS) 1 else src.sh_addralign; const addralign = if (src.sh_addralign == 0 or dest.sh_type == elf.SHT_NOBITS) 1 else src.sh_addralign;
dest.sh_offset = std.mem.alignForwardGeneric(Elf_OffSize, eof_offset, addralign); dest.sh_offset = std.mem.alignForward(Elf_OffSize, eof_offset, addralign);
if (src.sh_offset != dest.sh_offset and section.segment != null and update.action != .empty and dest.sh_type != elf.SHT_NOTE) { if (src.sh_offset != dest.sh_offset and section.segment != null and update.action != .empty and dest.sh_type != elf.SHT_NOTE) {
if (src.sh_offset > dest.sh_offset) { if (src.sh_offset > dest.sh_offset) {
dest.sh_offset = src.sh_offset; // add padding to avoid modifying the program segments dest.sh_offset = src.sh_offset; // add padding to avoid modifying the program segments
@@ -1085,7 +1085,7 @@ fn ElfFile(comptime is_64: bool) type {
// add a ".gnu_debuglink" section // add a ".gnu_debuglink" section
if (options.debuglink) |link| { if (options.debuglink) |link| {
const payload = payload: { const payload = payload: {
const crc_offset = std.mem.alignForward(link.name.len + 1, 4); const crc_offset = std.mem.alignForward(usize, link.name.len + 1, 4);
const buf = try allocator.alignedAlloc(u8, 4, crc_offset + 4); const buf = try allocator.alignedAlloc(u8, 4, crc_offset + 4);
@memcpy(buf[0..link.name.len], link.name); @memcpy(buf[0..link.name.len], link.name);
@memset(buf[link.name.len..crc_offset], 0); @memset(buf[link.name.len..crc_offset], 0);
@@ -1117,7 +1117,7 @@ fn ElfFile(comptime is_64: bool) type {
// write the section header at the tail // write the section header at the tail
{ {
const offset = std.mem.alignForwardGeneric(Elf_OffSize, eof_offset, @alignOf(Elf_Shdr)); const offset = std.mem.alignForward(Elf_OffSize, eof_offset, @alignOf(Elf_Shdr));
const data = std.mem.sliceAsBytes(updated_section_header); const data = std.mem.sliceAsBytes(updated_section_header);
assert(data.len == @as(usize, updated_elf_header.e_shentsize) * new_shnum); assert(data.len == @as(usize, updated_elf_header.e_shentsize) * new_shnum);
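The .gnu_debuglink hunk above lays the payload out as the link name, a terminating NUL, zero padding up to a 4-byte boundary, and a trailing 4-byte CRC. A small sketch of that arithmetic with a hypothetical 9-byte name (not a name from this repository):

    const std = @import("std");

    test "debuglink payload layout" {
        const name = "app.debug"; // hypothetical link name, 9 bytes
        const crc_offset = std.mem.alignForward(usize, name.len + 1, 4); // name + NUL, padded to 4
        try std.testing.expectEqual(@as(usize, 12), crc_offset);
        try std.testing.expectEqual(@as(usize, 16), crc_offset + 4); // total payload including the CRC
    }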

View File

@@ -1339,7 +1339,7 @@ pub const Type = struct {
.storage = .{ .lazy_size = ty.toIntern() }, .storage = .{ .lazy_size = ty.toIntern() },
} })).toValue() }, } })).toValue() },
}; };
const result = std.mem.alignForwardGeneric(u32, total_bytes, alignment); const result = std.mem.alignForward(u32, total_bytes, alignment);
return AbiSizeAdvanced{ .scalar = result }; return AbiSizeAdvanced{ .scalar = result };
}, },
@@ -1380,14 +1380,14 @@ pub const Type = struct {
var size: u64 = 0; var size: u64 = 0;
if (code_align > payload_align) { if (code_align > payload_align) {
size += code_size; size += code_size;
size = std.mem.alignForwardGeneric(u64, size, payload_align); size = std.mem.alignForward(u64, size, payload_align);
size += payload_size; size += payload_size;
size = std.mem.alignForwardGeneric(u64, size, code_align); size = std.mem.alignForward(u64, size, code_align);
} else { } else {
size += payload_size; size += payload_size;
size = std.mem.alignForwardGeneric(u64, size, code_align); size = std.mem.alignForward(u64, size, code_align);
size += code_size; size += code_size;
size = std.mem.alignForwardGeneric(u64, size, payload_align); size = std.mem.alignForward(u64, size, payload_align);
} }
return AbiSizeAdvanced{ .scalar = size }; return AbiSizeAdvanced{ .scalar = size };
}, },
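The error-union sizing above orders the code (error) and payload parts by alignment and pads between and after them. A worked sketch with assumed sizes, a 2-byte error value next to an 8-byte payload, not numbers taken from the compiler:

    const std = @import("std");

    test "error union size when the payload is more aligned than the code" {
        // Assumed layout inputs:
        const code_size: u64 = 2; // e.g. a u16 error value
        const code_align: u64 = 2;
        const payload_size: u64 = 8; // e.g. a u64 payload
        const payload_align: u64 = 8;
        // code_align <= payload_align, so the payload goes first.
        var size: u64 = 0;
        size += payload_size; // 8
        size = std.mem.alignForward(u64, size, code_align); // still 8
        size += code_size; // 10
        size = std.mem.alignForward(u64, size, payload_align); // padded up to 16
        try std.testing.expectEqual(@as(u64, 16), size);
    }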
@@ -1595,7 +1595,7 @@ pub const Type = struct {
fn intAbiSize(bits: u16, target: Target) u64 { fn intAbiSize(bits: u16, target: Target) u64 {
const alignment = intAbiAlignment(bits, target); const alignment = intAbiAlignment(bits, target);
return std.mem.alignForwardGeneric(u64, @intCast(u16, (@as(u17, bits) + 7) / 8), alignment); return std.mem.alignForward(u64, @intCast(u16, (@as(u17, bits) + 7) / 8), alignment);
} }
fn intAbiAlignment(bits: u16, target: Target) u32 { fn intAbiAlignment(bits: u16, target: Target) u32 {
@@ -3194,7 +3194,7 @@ pub const Type = struct {
const field_align = field.alignment(mod, it.struct_obj.layout); const field_align = field.alignment(mod, it.struct_obj.layout);
it.big_align = @max(it.big_align, field_align); it.big_align = @max(it.big_align, field_align);
const field_offset = std.mem.alignForwardGeneric(u64, it.offset, field_align); const field_offset = std.mem.alignForward(u64, it.offset, field_align);
it.offset = field_offset + field.ty.abiSize(mod); it.offset = field_offset + field.ty.abiSize(mod);
return FieldOffset{ .field = i, .offset = field_offset }; return FieldOffset{ .field = i, .offset = field_offset };
} }
@@ -3223,7 +3223,7 @@ pub const Type = struct {
return field_offset.offset; return field_offset.offset;
} }
return std.mem.alignForwardGeneric(u64, it.offset, @max(it.big_align, 1)); return std.mem.alignForward(u64, it.offset, @max(it.big_align, 1));
}, },
.anon_struct_type => |tuple| { .anon_struct_type => |tuple| {
@@ -3239,11 +3239,11 @@ pub const Type = struct {
const field_align = field_ty.toType().abiAlignment(mod); const field_align = field_ty.toType().abiAlignment(mod);
big_align = @max(big_align, field_align); big_align = @max(big_align, field_align);
offset = std.mem.alignForwardGeneric(u64, offset, field_align); offset = std.mem.alignForward(u64, offset, field_align);
if (i == index) return offset; if (i == index) return offset;
offset += field_ty.toType().abiSize(mod); offset += field_ty.toType().abiSize(mod);
} }
offset = std.mem.alignForwardGeneric(u64, offset, @max(big_align, 1)); offset = std.mem.alignForward(u64, offset, @max(big_align, 1));
return offset; return offset;
}, },
@@ -3254,7 +3254,7 @@ pub const Type = struct {
const layout = union_obj.getLayout(mod, true); const layout = union_obj.getLayout(mod, true);
if (layout.tag_align >= layout.payload_align) { if (layout.tag_align >= layout.payload_align) {
// {Tag, Payload} // {Tag, Payload}
return std.mem.alignForwardGeneric(u64, layout.tag_size, layout.payload_align); return std.mem.alignForward(u64, layout.tag_size, layout.payload_align);
} else { } else {
// {Payload, Tag} // {Payload, Tag}
return 0; return 0;
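The union hunk above returns the payload offset for the {Tag, Payload} ordering; with the opposite ordering the payload sits at offset 0. A small sketch with assumed sizes (an 8-byte tag in front of a payload whose alignment does not exceed the tag's), not taken from the compiler:

    const std = @import("std");

    test "union payload offset in the {Tag, Payload} ordering" {
        const tag_size: u64 = 8; // assumed tag size
        const payload_align: u64 = 4; // assumed payload alignment
        const payload_offset = std.mem.alignForward(u64, tag_size, payload_align);
        try std.testing.expectEqual(@as(u64, 8), payload_offset);
    }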