sema: rework type resolution to use Zcu when possible

Author: David Rubin, 2024-08-11 03:14:12 -07:00
parent 849c31a6cc
commit b4bb64ce78
No known key found for this signature in database
GPG Key ID: A4390FEB5F00C0A5
32 changed files with 7329 additions and 7119 deletions
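The recurring change in the hunks below is mechanical: layout queries such as bitSize, abiSize, abiAlignment, and structFieldOffset now take a `*Zcu` directly instead of a `Zcu.PerThread`, while the variants that may perform type resolution during semantic analysis keep `pt` (the new `abiSizeSema(pt)` / `comptimeOnlySema(pt)` calls). A minimal sketch of the call-site pattern, using a hypothetical function name that is not part of this commit:

    // Hypothetical caller; only the parameter-threading pattern is taken from this diff.
    fn exampleQuery(pt: Zcu.PerThread, ty: Type) u64 {
        const zcu = pt.zcu; // unwrap the per-thread handle once
        // before: ty.bitSize(pt)   (pt: Zcu.PerThread)
        // after:  ty.bitSize(zcu)  (zcu: *Zcu)
        return ty.bitSize(zcu);
    }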


@@ -3483,7 +3483,7 @@ pub const LoadedStructType = struct {
         return s.field_aligns.get(ip)[i];
     }
-    pub fn fieldInit(s: LoadedStructType, ip: *InternPool, i: usize) Index {
+    pub fn fieldInit(s: LoadedStructType, ip: *const InternPool, i: usize) Index {
         if (s.field_inits.len == 0) return .none;
         assert(s.haveFieldInits(ip));
         return s.field_inits.get(ip)[i];
@@ -11066,7 +11066,7 @@ pub fn destroyNamespace(
     local.mutate.namespaces.free_list = @intFromEnum(namespace_index);
 }
-pub fn filePtr(ip: *InternPool, file_index: FileIndex) *Zcu.File {
+pub fn filePtr(ip: *const InternPool, file_index: FileIndex) *Zcu.File {
     const file_index_unwrapped = file_index.unwrap(ip);
     const files = ip.getLocalShared(file_index_unwrapped.tid).files.acquire();
     return files.view().items(.file)[file_index_unwrapped.index];


@@ -9,7 +9,7 @@ const Zcu = @import("Zcu.zig");
 const RangeSet = @This();
 const LazySrcLoc = Zcu.LazySrcLoc;
-pt: Zcu.PerThread,
+zcu: *Zcu,
 ranges: std.ArrayList(Range),
 pub const Range = struct {
@@ -18,9 +18,9 @@ pub const Range = struct {
     src: LazySrcLoc,
 };
-pub fn init(allocator: std.mem.Allocator, pt: Zcu.PerThread) RangeSet {
+pub fn init(allocator: std.mem.Allocator, zcu: *Zcu) RangeSet {
     return .{
-        .pt = pt,
+        .zcu = zcu,
         .ranges = std.ArrayList(Range).init(allocator),
     };
 }
@@ -35,8 +35,8 @@ pub fn add(
     last: InternPool.Index,
     src: LazySrcLoc,
 ) !?LazySrcLoc {
-    const pt = self.pt;
-    const ip = &pt.zcu.intern_pool;
+    const zcu = self.zcu;
+    const ip = &zcu.intern_pool;
     const ty = ip.typeOf(first);
     assert(ty == ip.typeOf(last));
@@ -45,8 +45,8 @@ pub fn add(
         assert(ty == ip.typeOf(range.first));
         assert(ty == ip.typeOf(range.last));
-        if (Value.fromInterned(last).compareScalar(.gte, Value.fromInterned(range.first), Type.fromInterned(ty), pt) and
-            Value.fromInterned(first).compareScalar(.lte, Value.fromInterned(range.last), Type.fromInterned(ty), pt))
+        if (Value.fromInterned(last).compareScalar(.gte, Value.fromInterned(range.first), Type.fromInterned(ty), zcu) and
+            Value.fromInterned(first).compareScalar(.lte, Value.fromInterned(range.last), Type.fromInterned(ty), zcu))
         {
            return range.src; // They overlap.
        }
@@ -61,20 +61,20 @@ pub fn add(
 }
 /// Assumes a and b do not overlap
-fn lessThan(pt: Zcu.PerThread, a: Range, b: Range) bool {
-    const ty = Type.fromInterned(pt.zcu.intern_pool.typeOf(a.first));
-    return Value.fromInterned(a.first).compareScalar(.lt, Value.fromInterned(b.first), ty, pt);
+fn lessThan(zcu: *Zcu, a: Range, b: Range) bool {
+    const ty = Type.fromInterned(zcu.intern_pool.typeOf(a.first));
+    return Value.fromInterned(a.first).compareScalar(.lt, Value.fromInterned(b.first), ty, zcu);
 }
 pub fn spans(self: *RangeSet, first: InternPool.Index, last: InternPool.Index) !bool {
-    const pt = self.pt;
-    const ip = &pt.zcu.intern_pool;
+    const zcu = self.zcu;
+    const ip = &zcu.intern_pool;
     assert(ip.typeOf(first) == ip.typeOf(last));
     if (self.ranges.items.len == 0)
         return false;
-    std.mem.sort(Range, self.ranges.items, pt, lessThan);
+    std.mem.sort(Range, self.ranges.items, zcu, lessThan);
     if (self.ranges.items[0].first != first or
         self.ranges.items[self.ranges.items.len - 1].last != last)
@@ -93,10 +93,10 @@ pub fn spans(self: *RangeSet, first: InternPool.Index, last: InternPool.Index) !
         const prev = self.ranges.items[i];
         // prev.last + 1 == cur.first
-        try counter.copy(Value.fromInterned(prev.last).toBigInt(&space, pt));
+        try counter.copy(Value.fromInterned(prev.last).toBigInt(&space, zcu));
         try counter.addScalar(&counter, 1);
-        const cur_start_int = Value.fromInterned(cur.first).toBigInt(&space, pt);
+        const cur_start_int = Value.fromInterned(cur.first).toBigInt(&space, zcu);
         if (!cur_start_int.eql(counter.toConst())) {
             return false;
         }

File diff suppressed because it is too large


@@ -85,23 +85,23 @@ fn bitCastInner(
     assert(val_ty.hasWellDefinedLayout(zcu));
     const abi_pad_bits, const host_pad_bits = if (host_bits > 0)
-        .{ val_ty.abiSize(pt) * 8 - host_bits, host_bits - val_ty.bitSize(pt) }
+        .{ val_ty.abiSize(zcu) * 8 - host_bits, host_bits - val_ty.bitSize(zcu) }
     else
-        .{ val_ty.abiSize(pt) * 8 - val_ty.bitSize(pt), 0 };
+        .{ val_ty.abiSize(zcu) * 8 - val_ty.bitSize(zcu), 0 };
     const skip_bits = switch (endian) {
         .little => bit_offset + byte_offset * 8,
         .big => if (host_bits > 0)
-            val_ty.abiSize(pt) * 8 - byte_offset * 8 - host_bits + bit_offset
+            val_ty.abiSize(zcu) * 8 - byte_offset * 8 - host_bits + bit_offset
         else
-            val_ty.abiSize(pt) * 8 - byte_offset * 8 - dest_ty.bitSize(pt),
+            val_ty.abiSize(zcu) * 8 - byte_offset * 8 - dest_ty.bitSize(zcu),
     };
     var unpack: UnpackValueBits = .{
         .pt = sema.pt,
         .arena = sema.arena,
         .skip_bits = skip_bits,
-        .remaining_bits = dest_ty.bitSize(pt),
+        .remaining_bits = dest_ty.bitSize(zcu),
         .unpacked = std.ArrayList(InternPool.Index).init(sema.arena),
     };
     switch (endian) {
@@ -141,22 +141,22 @@ fn bitCastSpliceInner(
     try val_ty.resolveLayout(pt);
     try splice_val_ty.resolveLayout(pt);
-    const splice_bits = splice_val_ty.bitSize(pt);
+    const splice_bits = splice_val_ty.bitSize(zcu);
     const splice_offset = switch (endian) {
         .little => bit_offset + byte_offset * 8,
         .big => if (host_bits > 0)
-            val_ty.abiSize(pt) * 8 - byte_offset * 8 - host_bits + bit_offset
+            val_ty.abiSize(zcu) * 8 - byte_offset * 8 - host_bits + bit_offset
         else
-            val_ty.abiSize(pt) * 8 - byte_offset * 8 - splice_bits,
+            val_ty.abiSize(zcu) * 8 - byte_offset * 8 - splice_bits,
     };
-    assert(splice_offset + splice_bits <= val_ty.abiSize(pt) * 8);
+    assert(splice_offset + splice_bits <= val_ty.abiSize(zcu) * 8);
     const abi_pad_bits, const host_pad_bits = if (host_bits > 0)
-        .{ val_ty.abiSize(pt) * 8 - host_bits, host_bits - val_ty.bitSize(pt) }
+        .{ val_ty.abiSize(zcu) * 8 - host_bits, host_bits - val_ty.bitSize(zcu) }
     else
-        .{ val_ty.abiSize(pt) * 8 - val_ty.bitSize(pt), 0 };
+        .{ val_ty.abiSize(zcu) * 8 - val_ty.bitSize(zcu), 0 };
     var unpack: UnpackValueBits = .{
         .pt = pt,
@@ -181,7 +181,7 @@ fn bitCastSpliceInner(
     try unpack.add(splice_val);
     unpack.skip_bits = splice_offset + splice_bits;
-    unpack.remaining_bits = val_ty.abiSize(pt) * 8 - splice_offset - splice_bits;
+    unpack.remaining_bits = val_ty.abiSize(zcu) * 8 - splice_offset - splice_bits;
     switch (endian) {
         .little => {
             try unpack.add(val);
@@ -229,7 +229,7 @@ const UnpackValueBits = struct {
         }
         const ty = val.typeOf(zcu);
-        const bit_size = ty.bitSize(pt);
+        const bit_size = ty.bitSize(zcu);
         if (unpack.skip_bits >= bit_size) {
             unpack.skip_bits -= bit_size;
@@ -291,7 +291,7 @@ const UnpackValueBits = struct {
         // The final element does not have trailing padding.
         // Elements are reversed in packed memory on BE targets.
         const elem_ty = ty.childType(zcu);
-        const pad_bits = elem_ty.abiSize(pt) * 8 - elem_ty.bitSize(pt);
+        const pad_bits = elem_ty.abiSize(zcu) * 8 - elem_ty.bitSize(zcu);
         const len = ty.arrayLen(zcu);
         const maybe_sent = ty.sentinel(zcu);
@@ -323,12 +323,12 @@ const UnpackValueBits = struct {
         var cur_bit_off: u64 = 0;
         var it = zcu.typeToStruct(ty).?.iterateRuntimeOrder(ip);
         while (it.next()) |field_idx| {
-            const want_bit_off = ty.structFieldOffset(field_idx, pt) * 8;
+            const want_bit_off = ty.structFieldOffset(field_idx, zcu) * 8;
             const pad_bits = want_bit_off - cur_bit_off;
             const field_val = try val.fieldValue(pt, field_idx);
             try unpack.padding(pad_bits);
             try unpack.add(field_val);
-            cur_bit_off = want_bit_off + field_val.typeOf(zcu).bitSize(pt);
+            cur_bit_off = want_bit_off + field_val.typeOf(zcu).bitSize(zcu);
         }
         // Add trailing padding bits.
         try unpack.padding(bit_size - cur_bit_off);
@@ -339,11 +339,11 @@ const UnpackValueBits = struct {
         while (it.next()) |field_idx| {
             const field_val = try val.fieldValue(pt, field_idx);
             const field_ty = field_val.typeOf(zcu);
-            const want_bit_off = ty.structFieldOffset(field_idx, pt) * 8 + field_ty.bitSize(pt);
+            const want_bit_off = ty.structFieldOffset(field_idx, zcu) * 8 + field_ty.bitSize(zcu);
             const pad_bits = cur_bit_off - want_bit_off;
             try unpack.padding(pad_bits);
             try unpack.add(field_val);
-            cur_bit_off = want_bit_off - field_ty.bitSize(pt);
+            cur_bit_off = want_bit_off - field_ty.bitSize(zcu);
         }
         assert(cur_bit_off == 0);
     },
@@ -366,7 +366,7 @@ const UnpackValueBits = struct {
         // This correctly handles the case where `tag == .none`, since the payload is then
         // either an integer or a byte array, both of which we can unpack.
         const payload_val = Value.fromInterned(un.val);
-        const pad_bits = bit_size - payload_val.typeOf(zcu).bitSize(pt);
+        const pad_bits = bit_size - payload_val.typeOf(zcu).bitSize(zcu);
         if (endian == .little or ty.containerLayout(zcu) == .@"packed") {
             try unpack.add(payload_val);
             try unpack.padding(pad_bits);
@@ -398,13 +398,14 @@ const UnpackValueBits = struct {
     fn primitive(unpack: *UnpackValueBits, val: Value) BitCastError!void {
         const pt = unpack.pt;
+        const zcu = pt.zcu;
         if (unpack.remaining_bits == 0) {
             return;
         }
         const ty = val.typeOf(pt.zcu);
-        const bit_size = ty.bitSize(pt);
+        const bit_size = ty.bitSize(zcu);
         // Note that this skips all zero-bit types.
         if (unpack.skip_bits >= bit_size) {
@@ -429,9 +430,10 @@ const UnpackValueBits = struct {
     fn splitPrimitive(unpack: *UnpackValueBits, val: Value, bit_offset: u64, bit_count: u64) BitCastError!void {
         const pt = unpack.pt;
+        const zcu = pt.zcu;
         const ty = val.typeOf(pt.zcu);
-        const val_bits = ty.bitSize(pt);
+        const val_bits = ty.bitSize(zcu);
         assert(bit_offset + bit_count <= val_bits);
         switch (pt.zcu.intern_pool.indexToKey(val.toIntern())) {
@@ -499,12 +501,12 @@ const PackValueBits = struct {
         const len = ty.arrayLen(zcu);
         const elem_ty = ty.childType(zcu);
         const maybe_sent = ty.sentinel(zcu);
-        const pad_bits = elem_ty.abiSize(pt) * 8 - elem_ty.bitSize(pt);
+        const pad_bits = elem_ty.abiSize(zcu) * 8 - elem_ty.bitSize(zcu);
         const elems = try arena.alloc(InternPool.Index, @intCast(len));
         if (endian == .big and maybe_sent != null) {
             // TODO: validate sentinel was preserved!
-            try pack.padding(elem_ty.bitSize(pt));
+            try pack.padding(elem_ty.bitSize(zcu));
             if (len != 0) try pack.padding(pad_bits);
         }
@@ -520,7 +522,7 @@ const PackValueBits = struct {
         if (endian == .little and maybe_sent != null) {
             // TODO: validate sentinel was preserved!
             if (len != 0) try pack.padding(pad_bits);
-            try pack.padding(elem_ty.bitSize(pt));
+            try pack.padding(elem_ty.bitSize(zcu));
         }
         return Value.fromInterned(try pt.intern(.{ .aggregate = .{
@@ -538,23 +540,23 @@ const PackValueBits = struct {
             var cur_bit_off: u64 = 0;
             var it = zcu.typeToStruct(ty).?.iterateRuntimeOrder(ip);
             while (it.next()) |field_idx| {
-                const want_bit_off = ty.structFieldOffset(field_idx, pt) * 8;
+                const want_bit_off = ty.structFieldOffset(field_idx, zcu) * 8;
                 try pack.padding(want_bit_off - cur_bit_off);
                 const field_ty = ty.structFieldType(field_idx, zcu);
                 elems[field_idx] = (try pack.get(field_ty)).toIntern();
-                cur_bit_off = want_bit_off + field_ty.bitSize(pt);
+                cur_bit_off = want_bit_off + field_ty.bitSize(zcu);
             }
-            try pack.padding(ty.bitSize(pt) - cur_bit_off);
+            try pack.padding(ty.bitSize(zcu) - cur_bit_off);
         },
         .big => {
-            var cur_bit_off: u64 = ty.bitSize(pt);
+            var cur_bit_off: u64 = ty.bitSize(zcu);
             var it = zcu.typeToStruct(ty).?.iterateRuntimeOrderReverse(ip);
             while (it.next()) |field_idx| {
                 const field_ty = ty.structFieldType(field_idx, zcu);
-                const want_bit_off = ty.structFieldOffset(field_idx, pt) * 8 + field_ty.bitSize(pt);
+                const want_bit_off = ty.structFieldOffset(field_idx, zcu) * 8 + field_ty.bitSize(zcu);
                 try pack.padding(cur_bit_off - want_bit_off);
                 elems[field_idx] = (try pack.get(field_ty)).toIntern();
-                cur_bit_off = want_bit_off - field_ty.bitSize(pt);
+                cur_bit_off = want_bit_off - field_ty.bitSize(zcu);
             }
             assert(cur_bit_off == 0);
         },
@@ -622,16 +624,16 @@ const PackValueBits = struct {
         for (field_order, 0..) |*f, i| f.* = @intCast(i);
         // Sort `field_order` to put the fields with the largest bit sizes first.
         const SizeSortCtx = struct {
-            pt: Zcu.PerThread,
+            zcu: *Zcu,
             field_types: []const InternPool.Index,
             fn lessThan(ctx: @This(), a_idx: u32, b_idx: u32) bool {
                 const a_ty = Type.fromInterned(ctx.field_types[a_idx]);
                 const b_ty = Type.fromInterned(ctx.field_types[b_idx]);
-                return a_ty.bitSize(ctx.pt) > b_ty.bitSize(ctx.pt);
+                return a_ty.bitSize(ctx.zcu) > b_ty.bitSize(ctx.zcu);
             }
         };
         std.mem.sortUnstable(u32, field_order, SizeSortCtx{
-            .pt = pt,
+            .zcu = zcu,
             .field_types = zcu.typeToUnion(ty).?.field_types.get(ip),
         }, SizeSortCtx.lessThan);
@@ -639,7 +641,7 @@ const PackValueBits = struct {
         for (field_order) |field_idx| {
             const field_ty = Type.fromInterned(zcu.typeToUnion(ty).?.field_types.get(ip)[field_idx]);
-            const pad_bits = ty.bitSize(pt) - field_ty.bitSize(pt);
+            const pad_bits = ty.bitSize(zcu) - field_ty.bitSize(zcu);
             if (!padding_after) try pack.padding(pad_bits);
             const field_val = pack.get(field_ty) catch |err| switch (err) {
                 error.ReinterpretDeclRef => {
@@ -682,10 +684,11 @@ const PackValueBits = struct {
     fn primitive(pack: *PackValueBits, want_ty: Type) BitCastError!Value {
         const pt = pack.pt;
-        const vals, const bit_offset = pack.prepareBits(want_ty.bitSize(pt));
+        const zcu = pt.zcu;
+        const vals, const bit_offset = pack.prepareBits(want_ty.bitSize(zcu));
         for (vals) |val| {
-            if (!Value.fromInterned(val).isUndef(pt.zcu)) break;
+            if (!Value.fromInterned(val).isUndef(zcu)) break;
         } else {
             // All bits of the value are `undefined`.
             return pt.undefValue(want_ty);
@@ -706,8 +709,8 @@ const PackValueBits = struct {
         ptr_cast: {
             if (vals.len != 1) break :ptr_cast;
             const val = Value.fromInterned(vals[0]);
-            if (!val.typeOf(pt.zcu).isPtrAtRuntime(pt.zcu)) break :ptr_cast;
-            if (!want_ty.isPtrAtRuntime(pt.zcu)) break :ptr_cast;
+            if (!val.typeOf(zcu).isPtrAtRuntime(zcu)) break :ptr_cast;
+            if (!want_ty.isPtrAtRuntime(zcu)) break :ptr_cast;
             return pt.getCoerced(val, want_ty);
         }
@@ -717,7 +720,7 @@ const PackValueBits = struct {
         for (vals) |ip_val| {
             const val = Value.fromInterned(ip_val);
             const ty = val.typeOf(pt.zcu);
-            buf_bits += ty.bitSize(pt);
+            buf_bits += ty.bitSize(zcu);
         }
         const buf = try pack.arena.alloc(u8, @intCast((buf_bits + 7) / 8));
@@ -726,11 +729,11 @@ const PackValueBits = struct {
         var cur_bit_off: usize = 0;
         for (vals) |ip_val| {
             const val = Value.fromInterned(ip_val);
-            const ty = val.typeOf(pt.zcu);
-            if (!val.isUndef(pt.zcu)) {
+            const ty = val.typeOf(zcu);
+            if (!val.isUndef(zcu)) {
                 try val.writeToPackedMemory(ty, pt, buf, cur_bit_off);
             }
-            cur_bit_off += @intCast(ty.bitSize(pt));
+            cur_bit_off += @intCast(ty.bitSize(zcu));
         }
         return Value.readFromPackedMemory(want_ty, pt, buf, @intCast(bit_offset), pack.arena);
@@ -740,11 +743,12 @@ const PackValueBits = struct {
         if (need_bits == 0) return .{ &.{}, 0 };
         const pt = pack.pt;
+        const zcu = pt.zcu;
         var bits: u64 = 0;
         var len: usize = 0;
         while (bits < pack.bit_offset + need_bits) {
-            bits += Value.fromInterned(pack.unpacked[len]).typeOf(pt.zcu).bitSize(pt);
+            bits += Value.fromInterned(pack.unpacked[len]).typeOf(pt.zcu).bitSize(zcu);
             len += 1;
         }
@@ -757,7 +761,7 @@ const PackValueBits = struct {
             pack.bit_offset = 0;
         } else {
             pack.unpacked = pack.unpacked[len - 1 ..];
-            pack.bit_offset = Value.fromInterned(pack.unpacked[0]).typeOf(pt.zcu).bitSize(pt) - extra_bits;
+            pack.bit_offset = Value.fromInterned(pack.unpacked[0]).typeOf(pt.zcu).bitSize(zcu) - extra_bits;
         }
         return .{ result_vals, result_offset };


@@ -13,14 +13,15 @@ pub const ComptimeLoadResult = union(enum) {
 pub fn loadComptimePtr(sema: *Sema, block: *Block, src: LazySrcLoc, ptr: Value) !ComptimeLoadResult {
     const pt = sema.pt;
+    const zcu = pt.zcu;
     const ptr_info = ptr.typeOf(pt.zcu).ptrInfo(pt.zcu);
     // TODO: host size for vectors is terrible
     const host_bits = switch (ptr_info.flags.vector_index) {
         .none => ptr_info.packed_offset.host_size * 8,
-        else => ptr_info.packed_offset.host_size * Type.fromInterned(ptr_info.child).bitSize(pt),
+        else => ptr_info.packed_offset.host_size * Type.fromInterned(ptr_info.child).bitSize(zcu),
     };
     const bit_offset = if (host_bits != 0) bit_offset: {
-        const child_bits = Type.fromInterned(ptr_info.child).bitSize(pt);
+        const child_bits = Type.fromInterned(ptr_info.child).bitSize(zcu);
         const bit_offset = ptr_info.packed_offset.bit_offset + switch (ptr_info.flags.vector_index) {
             .none => 0,
             .runtime => return .runtime_load,
@@ -67,18 +68,18 @@ pub fn storeComptimePtr(
     // TODO: host size for vectors is terrible
     const host_bits = switch (ptr_info.flags.vector_index) {
         .none => ptr_info.packed_offset.host_size * 8,
-        else => ptr_info.packed_offset.host_size * Type.fromInterned(ptr_info.child).bitSize(pt),
+        else => ptr_info.packed_offset.host_size * Type.fromInterned(ptr_info.child).bitSize(zcu),
     };
     const bit_offset = ptr_info.packed_offset.bit_offset + switch (ptr_info.flags.vector_index) {
         .none => 0,
         .runtime => return .runtime_store,
         else => |idx| switch (zcu.getTarget().cpu.arch.endian()) {
-            .little => Type.fromInterned(ptr_info.child).bitSize(pt) * @intFromEnum(idx),
-            .big => host_bits - Type.fromInterned(ptr_info.child).bitSize(pt) * (@intFromEnum(idx) + 1), // element order reversed on big endian
+            .little => Type.fromInterned(ptr_info.child).bitSize(zcu) * @intFromEnum(idx),
+            .big => host_bits - Type.fromInterned(ptr_info.child).bitSize(zcu) * (@intFromEnum(idx) + 1), // element order reversed on big endian
         },
     };
     const pseudo_store_ty = if (host_bits > 0) t: {
-        const need_bits = Type.fromInterned(ptr_info.child).bitSize(pt);
+        const need_bits = Type.fromInterned(ptr_info.child).bitSize(zcu);
         if (need_bits + bit_offset > host_bits) {
             return .exceeds_host_size;
         }
@@ -166,9 +167,9 @@ pub fn storeComptimePtr(
         .direct => |direct| .{ direct.val, 0 },
         .index => |index| .{
             index.val,
-            index.elem_index * index.val.typeOf(zcu).childType(zcu).abiSize(pt),
+            index.elem_index * index.val.typeOf(zcu).childType(zcu).abiSize(zcu),
         },
-        .flat_index => |flat| .{ flat.val, flat.flat_elem_index * flat.val.typeOf(zcu).arrayBase(zcu)[0].abiSize(pt) },
+        .flat_index => |flat| .{ flat.val, flat.flat_elem_index * flat.val.typeOf(zcu).arrayBase(zcu)[0].abiSize(zcu) },
         .reinterpret => |reinterpret| .{ reinterpret.val, reinterpret.byte_offset },
         else => unreachable,
     };
@@ -347,8 +348,8 @@ fn loadComptimePtrInner(
     const load_one_ty, const load_count = load_ty.arrayBase(zcu);
     const extra_base_index: u64 = if (ptr.byte_offset == 0) 0 else idx: {
-        if (try sema.typeRequiresComptime(load_one_ty)) break :restructure_array;
-        const elem_len = try sema.typeAbiSize(load_one_ty);
+        if (try load_one_ty.comptimeOnlySema(pt)) break :restructure_array;
+        const elem_len = try load_one_ty.abiSizeSema(pt);
         if (ptr.byte_offset % elem_len != 0) break :restructure_array;
         break :idx @divExact(ptr.byte_offset, elem_len);
     };
@@ -394,12 +395,12 @@ fn loadComptimePtrInner(
     var cur_offset = ptr.byte_offset;
     if (load_ty.zigTypeTag(zcu) == .Array and array_offset > 0) {
-        cur_offset += try sema.typeAbiSize(load_ty.childType(zcu)) * array_offset;
+        cur_offset += try load_ty.childType(zcu).abiSizeSema(pt) * array_offset;
     }
-    const need_bytes = if (host_bits > 0) (host_bits + 7) / 8 else try sema.typeAbiSize(load_ty);
-    if (cur_offset + need_bytes > try sema.typeAbiSize(cur_val.typeOf(zcu))) {
+    const need_bytes = if (host_bits > 0) (host_bits + 7) / 8 else try load_ty.abiSizeSema(pt);
+    if (cur_offset + need_bytes > try cur_val.typeOf(zcu).abiSizeSema(pt)) {
         return .{ .out_of_bounds = cur_val.typeOf(zcu) };
     }
@@ -434,7 +435,7 @@ fn loadComptimePtrInner(
         .Optional => break, // this can only be a pointer-like optional so is terminal
         .Array => {
            const elem_ty = cur_ty.childType(zcu);
-            const elem_size = try sema.typeAbiSize(elem_ty);
+            const elem_size = try elem_ty.abiSizeSema(pt);
            const elem_idx = cur_offset / elem_size;
            const next_elem_off = elem_size * (elem_idx + 1);
            if (cur_offset + need_bytes <= next_elem_off) {
@@ -449,8 +450,8 @@ fn loadComptimePtrInner(
             .auto => unreachable, // ill-defined layout
             .@"packed" => break, // let the bitcast logic handle this
             .@"extern" => for (0..cur_ty.structFieldCount(zcu)) |field_idx| {
-                const start_off = cur_ty.structFieldOffset(field_idx, pt);
-                const end_off = start_off + try sema.typeAbiSize(cur_ty.structFieldType(field_idx, zcu));
+                const start_off = cur_ty.structFieldOffset(field_idx, zcu);
+                const end_off = start_off + try cur_ty.structFieldType(field_idx, zcu).abiSizeSema(pt);
                 if (cur_offset >= start_off and cur_offset + need_bytes <= end_off) {
                     cur_val = try cur_val.getElem(sema.pt, field_idx);
                     cur_offset -= start_off;
@@ -477,7 +478,7 @@ fn loadComptimePtrInner(
             };
             // The payload always has offset 0. If it's big enough
             // to represent the whole load type, we can use it.
-            if (try sema.typeAbiSize(payload.typeOf(zcu)) >= need_bytes) {
+            if (try payload.typeOf(zcu).abiSizeSema(pt) >= need_bytes) {
                 cur_val = payload;
             } else {
                 break;
@@ -746,8 +747,8 @@ fn prepareComptimePtrStore(
     const store_one_ty, const store_count = store_ty.arrayBase(zcu);
     const extra_base_index: u64 = if (ptr.byte_offset == 0) 0 else idx: {
-        if (try sema.typeRequiresComptime(store_one_ty)) break :restructure_array;
-        const elem_len = try sema.typeAbiSize(store_one_ty);
+        if (try store_one_ty.comptimeOnlySema(pt)) break :restructure_array;
+        const elem_len = try store_one_ty.abiSizeSema(pt);
         if (ptr.byte_offset % elem_len != 0) break :restructure_array;
         break :idx @divExact(ptr.byte_offset, elem_len);
     };
@@ -800,11 +801,11 @@ fn prepareComptimePtrStore(
     var cur_val: *MutableValue, var cur_offset: u64 = switch (base_strat) {
         .direct => |direct| .{ direct.val, 0 },
         // It's okay to do `abiSize` - the comptime-only case will be caught below.
-        .index => |index| .{ index.val, index.elem_index * try sema.typeAbiSize(index.val.typeOf(zcu).childType(zcu)) },
+        .index => |index| .{ index.val, index.elem_index * try index.val.typeOf(zcu).childType(zcu).abiSizeSema(pt) },
         .flat_index => |flat_index| .{
             flat_index.val,
             // It's okay to do `abiSize` - the comptime-only case will be caught below.
-            flat_index.flat_elem_index * try sema.typeAbiSize(flat_index.val.typeOf(zcu).arrayBase(zcu)[0]),
+            flat_index.flat_elem_index * try flat_index.val.typeOf(zcu).arrayBase(zcu)[0].abiSizeSema(pt),
         },
         .reinterpret => |r| .{ r.val, r.byte_offset },
         else => unreachable,
@@ -816,12 +817,12 @@ fn prepareComptimePtrStore(
     }
     if (store_ty.zigTypeTag(zcu) == .Array and array_offset > 0) {
-        cur_offset += try sema.typeAbiSize(store_ty.childType(zcu)) * array_offset;
+        cur_offset += try store_ty.childType(zcu).abiSizeSema(pt) * array_offset;
     }
-    const need_bytes = try sema.typeAbiSize(store_ty);
-    if (cur_offset + need_bytes > try sema.typeAbiSize(cur_val.typeOf(zcu))) {
+    const need_bytes = try store_ty.abiSizeSema(pt);
+    if (cur_offset + need_bytes > try cur_val.typeOf(zcu).abiSizeSema(pt)) {
         return .{ .out_of_bounds = cur_val.typeOf(zcu) };
     }
@@ -856,7 +857,7 @@ fn prepareComptimePtrStore(
         .Optional => break, // this can only be a pointer-like optional so is terminal
         .Array => {
            const elem_ty = cur_ty.childType(zcu);
-            const elem_size = try sema.typeAbiSize(elem_ty);
+            const elem_size = try elem_ty.abiSizeSema(pt);
            const elem_idx = cur_offset / elem_size;
            const next_elem_off = elem_size * (elem_idx + 1);
            if (cur_offset + need_bytes <= next_elem_off) {
@@ -871,8 +872,8 @@ fn prepareComptimePtrStore(
             .auto => unreachable, // ill-defined layout
             .@"packed" => break, // let the bitcast logic handle this
             .@"extern" => for (0..cur_ty.structFieldCount(zcu)) |field_idx| {
-                const start_off = cur_ty.structFieldOffset(field_idx, pt);
-                const end_off = start_off + try sema.typeAbiSize(cur_ty.structFieldType(field_idx, zcu));
+                const start_off = cur_ty.structFieldOffset(field_idx, zcu);
+                const end_off = start_off + try cur_ty.structFieldType(field_idx, zcu).abiSizeSema(pt);
                 if (cur_offset >= start_off and cur_offset + need_bytes <= end_off) {
                     cur_val = try cur_val.elem(pt, sema.arena, field_idx);
                     cur_offset -= start_off;
@@ -895,7 +896,7 @@ fn prepareComptimePtrStore(
             };
             // The payload always has offset 0. If it's big enough
             // to represent the whole load type, we can use it.
-            if (try sema.typeAbiSize(payload.typeOf(zcu)) >= need_bytes) {
+            if (try payload.typeOf(zcu).abiSizeSema(pt) >= need_bytes) {
                 cur_val = payload;
             } else {
                 break;

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -2109,9 +2109,9 @@ pub const CompileError = error{
     ComptimeBreak,
 };
-pub fn init(mod: *Zcu, thread_count: usize) !void {
-    const gpa = mod.gpa;
-    try mod.intern_pool.init(gpa, thread_count);
+pub fn init(zcu: *Zcu, thread_count: usize) !void {
+    const gpa = zcu.gpa;
+    try zcu.intern_pool.init(gpa, thread_count);
 }
 pub fn deinit(zcu: *Zcu) void {
@@ -2204,8 +2204,8 @@ pub fn namespacePtr(zcu: *Zcu, index: Namespace.Index) *Namespace {
     return zcu.intern_pool.namespacePtr(index);
 }
-pub fn namespacePtrUnwrap(mod: *Zcu, index: Namespace.OptionalIndex) ?*Namespace {
-    return mod.namespacePtr(index.unwrap() orelse return null);
+pub fn namespacePtrUnwrap(zcu: *Zcu, index: Namespace.OptionalIndex) ?*Namespace {
+    return zcu.namespacePtr(index.unwrap() orelse return null);
 }
 // TODO https://github.com/ziglang/zig/issues/8643
@@ -2682,7 +2682,7 @@ pub fn mapOldZirToNew(
 ///
 /// The caller is responsible for ensuring the function decl itself is already
 /// analyzed, and for ensuring it can exist at runtime (see
-/// `sema.fnHasRuntimeBits`). This function does *not* guarantee that the body
+/// `Type.fnHasRuntimeBitsSema`). This function does *not* guarantee that the body
 /// will be analyzed when it returns: for that, see `ensureFuncBodyAnalyzed`.
 pub fn ensureFuncBodyAnalysisQueued(zcu: *Zcu, func_index: InternPool.Index) !void {
     const ip = &zcu.intern_pool;
@@ -2846,16 +2846,16 @@ pub fn errorSetBits(mod: *Zcu) u16 {
 }
 pub fn errNote(
-    mod: *Zcu,
+    zcu: *Zcu,
     src_loc: LazySrcLoc,
     parent: *ErrorMsg,
     comptime format: []const u8,
     args: anytype,
 ) error{OutOfMemory}!void {
-    const msg = try std.fmt.allocPrint(mod.gpa, format, args);
-    errdefer mod.gpa.free(msg);
-    parent.notes = try mod.gpa.realloc(parent.notes, parent.notes.len + 1);
+    const msg = try std.fmt.allocPrint(zcu.gpa, format, args);
+    errdefer zcu.gpa.free(msg);
+    parent.notes = try zcu.gpa.realloc(parent.notes, parent.notes.len + 1);
     parent.notes[parent.notes.len - 1] = .{
         .src_loc = src_loc,
         .msg = msg,
@@ -2876,14 +2876,14 @@ pub fn optimizeMode(zcu: *const Zcu) std.builtin.OptimizeMode {
     return zcu.root_mod.optimize_mode;
 }
-fn lockAndClearFileCompileError(mod: *Zcu, file: *File) void {
+fn lockAndClearFileCompileError(zcu: *Zcu, file: *File) void {
     switch (file.status) {
         .success_zir, .retryable_failure => {},
         .never_loaded, .parse_failure, .astgen_failure => {
-            mod.comp.mutex.lock();
-            defer mod.comp.mutex.unlock();
-            if (mod.failed_files.fetchSwapRemove(file)) |kv| {
-                if (kv.value) |msg| msg.destroy(mod.gpa); // Delete previous error message.
+            zcu.comp.mutex.lock();
+            defer zcu.comp.mutex.unlock();
+            if (zcu.failed_files.fetchSwapRemove(file)) |kv| {
+                if (kv.value) |msg| msg.destroy(zcu.gpa); // Delete previous error message.
             }
         },
     }
@@ -2965,11 +2965,11 @@ pub const AtomicPtrAlignmentDiagnostics = struct {
 // TODO this function does not take into account CPU features, which can affect
 // this value. Audit this!
 pub fn atomicPtrAlignment(
-    mod: *Zcu,
+    zcu: *Zcu,
     ty: Type,
     diags: *AtomicPtrAlignmentDiagnostics,
 ) AtomicPtrAlignmentError!Alignment {
-    const target = mod.getTarget();
+    const target = zcu.getTarget();
     const max_atomic_bits: u16 = switch (target.cpu.arch) {
         .avr,
         .msp430,
@@ -3039,8 +3039,8 @@ pub fn atomicPtrAlignment(
         }
         return .none;
     }
-    if (ty.isAbiInt(mod)) {
-        const bit_count = ty.intInfo(mod).bits;
+    if (ty.isAbiInt(zcu)) {
+        const bit_count = ty.intInfo(zcu).bits;
         if (bit_count > max_atomic_bits) {
             diags.* = .{
                 .bits = bit_count,
@@ -3050,7 +3050,7 @@ pub fn atomicPtrAlignment(
         }
         return .none;
     }
-    if (ty.isPtrAtRuntime(mod)) return .none;
+    if (ty.isPtrAtRuntime(zcu)) return .none;
     return error.BadType;
 }
@@ -3058,45 +3058,45 @@ pub fn atomicPtrAlignment(
 /// * `@TypeOf(.{})`
 /// * A struct which has no fields (`struct {}`).
 /// * Not a struct.
-pub fn typeToStruct(mod: *Zcu, ty: Type) ?InternPool.LoadedStructType {
+pub fn typeToStruct(zcu: *Zcu, ty: Type) ?InternPool.LoadedStructType {
     if (ty.ip_index == .none) return null;
-    const ip = &mod.intern_pool;
+    const ip = &zcu.intern_pool;
     return switch (ip.indexToKey(ty.ip_index)) {
         .struct_type => ip.loadStructType(ty.ip_index),
         else => null,
     };
 }
-pub fn typeToPackedStruct(mod: *Zcu, ty: Type) ?InternPool.LoadedStructType {
-    const s = mod.typeToStruct(ty) orelse return null;
+pub fn typeToPackedStruct(zcu: *Zcu, ty: Type) ?InternPool.LoadedStructType {
+    const s = zcu.typeToStruct(ty) orelse return null;
     if (s.layout != .@"packed") return null;
     return s;
 }
-pub fn typeToUnion(mod: *Zcu, ty: Type) ?InternPool.LoadedUnionType {
+pub fn typeToUnion(zcu: *const Zcu, ty: Type) ?InternPool.LoadedUnionType {
     if (ty.ip_index == .none) return null;
-    const ip = &mod.intern_pool;
+    const ip = &zcu.intern_pool;
     return switch (ip.indexToKey(ty.ip_index)) {
         .union_type => ip.loadUnionType(ty.ip_index),
         else => null,
     };
 }
-pub fn typeToFunc(mod: *Zcu, ty: Type) ?InternPool.Key.FuncType {
+pub fn typeToFunc(zcu: *const Zcu, ty: Type) ?InternPool.Key.FuncType {
     if (ty.ip_index == .none) return null;
-    return mod.intern_pool.indexToFuncType(ty.toIntern());
+    return zcu.intern_pool.indexToFuncType(ty.toIntern());
 }
 pub fn iesFuncIndex(zcu: *const Zcu, ies_index: InternPool.Index) InternPool.Index {
     return zcu.intern_pool.iesFuncIndex(ies_index);
 }
-pub fn funcInfo(mod: *Zcu, func_index: InternPool.Index) InternPool.Key.Func {
-    return mod.intern_pool.indexToKey(func_index).func;
+pub fn funcInfo(zcu: *const Zcu, func_index: InternPool.Index) InternPool.Key.Func {
+    return zcu.intern_pool.indexToKey(func_index).func;
 }
-pub fn toEnum(mod: *Zcu, comptime E: type, val: Value) E {
-    return mod.intern_pool.toEnum(E, val.toIntern());
+pub fn toEnum(zcu: *const Zcu, comptime E: type, val: Value) E {
+    return zcu.intern_pool.toEnum(E, val.toIntern());
 }
 pub const UnionLayout = struct {
@@ -3121,8 +3121,8 @@ pub const UnionLayout = struct {
 };
 /// Returns the index of the active field, given the current tag value
-pub fn unionTagFieldIndex(mod: *Zcu, loaded_union: InternPool.LoadedUnionType, enum_tag: Value) ?u32 {
-    const ip = &mod.intern_pool;
+pub fn unionTagFieldIndex(zcu: *const Zcu, loaded_union: InternPool.LoadedUnionType, enum_tag: Value) ?u32 {
+    const ip = &zcu.intern_pool;
     if (enum_tag.toIntern() == .none) return null;
     assert(ip.typeOf(enum_tag.toIntern()) == loaded_union.enum_tag_ty);
     return loaded_union.loadTagType(ip).tagValueIndex(ip, enum_tag.toIntern());
@@ -3348,7 +3348,7 @@ pub fn resolveReferences(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolve
     return result;
 }
-pub fn fileByIndex(zcu: *Zcu, file_index: File.Index) *File {
+pub fn fileByIndex(zcu: *const Zcu, file_index: File.Index) *File {
     return zcu.intern_pool.filePtr(file_index);
 }


@@ -2756,7 +2756,7 @@ pub fn ptrType(pt: Zcu.PerThread, info: InternPool.Key.PtrType) Allocator.Error!
     // pointee type needs to be resolved more, that needs to be done before calling
     // this ptr() function.
     if (info.flags.alignment != .none and
-        info.flags.alignment == Type.fromInterned(info.child).abiAlignment(pt))
+        info.flags.alignment == Type.fromInterned(info.child).abiAlignment(pt.zcu))
     {
         canon_info.flags.alignment = .none;
     }
@@ -2766,7 +2766,7 @@ pub fn ptrType(pt: Zcu.PerThread, info: InternPool.Key.PtrType) Allocator.Error!
         // we change it to 0 here. If this causes an assertion trip, the pointee type
         // needs to be resolved before calling this ptr() function.
         .none => if (info.packed_offset.host_size != 0) {
-            const elem_bit_size = Type.fromInterned(info.child).bitSize(pt);
+            const elem_bit_size = Type.fromInterned(info.child).bitSize(pt.zcu);
             assert(info.packed_offset.bit_offset + elem_bit_size <= info.packed_offset.host_size * 8);
             if (info.packed_offset.host_size * 8 == elem_bit_size) {
                 canon_info.packed_offset.host_size = 0;
@@ -2784,7 +2784,7 @@ pub fn ptrType(pt: Zcu.PerThread, info: InternPool.Key.PtrType) Allocator.Error!
 /// In general, prefer this function during semantic analysis.
 pub fn ptrTypeSema(pt: Zcu.PerThread, info: InternPool.Key.PtrType) Zcu.SemaError!Type {
     if (info.flags.alignment != .none) {
-        _ = try Type.fromInterned(info.child).abiAlignmentAdvanced(pt, .sema);
+        _ = try Type.fromInterned(info.child).abiAlignmentSema(pt);
     }
     return pt.ptrType(info);
 }
@@ -2984,15 +2984,15 @@ pub fn smallestUnsignedInt(pt: Zcu.PerThread, max: u64) Allocator.Error!Type {
 /// `max`. Asserts that neither value is undef.
 /// TODO: if #3806 is implemented, this becomes trivial
 pub fn intFittingRange(pt: Zcu.PerThread, min: Value, max: Value) !Type {
-    const mod = pt.zcu;
-    assert(!min.isUndef(mod));
-    assert(!max.isUndef(mod));
+    const zcu = pt.zcu;
+    assert(!min.isUndef(zcu));
+    assert(!max.isUndef(zcu));
     if (std.debug.runtime_safety) {
-        assert(Value.order(min, max, pt).compare(.lte));
+        assert(Value.order(min, max, zcu).compare(.lte));
     }
-    const sign = min.orderAgainstZero(pt) == .lt;
+    const sign = min.orderAgainstZero(zcu) == .lt;
     const min_val_bits = pt.intBitsForValue(min, sign);
     const max_val_bits = pt.intBitsForValue(max, sign);
@@ -3032,120 +3032,30 @@ pub fn intBitsForValue(pt: Zcu.PerThread, val: Value, sign: bool) u16 {
             return @as(u16, @intCast(big.bitCountTwosComp()));
         },
         .lazy_align => |lazy_ty| {
-            return Type.smallestUnsignedBits(Type.fromInterned(lazy_ty).abiAlignment(pt).toByteUnits() orelse 0) + @intFromBool(sign);
+            return Type.smallestUnsignedBits(Type.fromInterned(lazy_ty).abiAlignment(pt.zcu).toByteUnits() orelse 0) + @intFromBool(sign);
         },
         .lazy_size => |lazy_ty| {
-            return Type.smallestUnsignedBits(Type.fromInterned(lazy_ty).abiSize(pt)) + @intFromBool(sign);
+            return Type.smallestUnsignedBits(Type.fromInterned(lazy_ty).abiSize(pt.zcu)) + @intFromBool(sign);
         },
     }
 }
-pub fn getUnionLayout(pt: Zcu.PerThread, loaded_union: InternPool.LoadedUnionType) Zcu.UnionLayout {
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
-    assert(loaded_union.haveLayout(ip));
-    var most_aligned_field: u32 = undefined;
-    var most_aligned_field_size: u64 = undefined;
-    var biggest_field: u32 = undefined;
-    var payload_size: u64 = 0;
-    var payload_align: InternPool.Alignment = .@"1";
-    for (loaded_union.field_types.get(ip), 0..) |field_ty, field_index| {
-        if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(pt)) continue;
-        const explicit_align = loaded_union.fieldAlign(ip, field_index);
-        const field_align = if (explicit_align != .none)
-            explicit_align
-        else
-            Type.fromInterned(field_ty).abiAlignment(pt);
-        const field_size = Type.fromInterned(field_ty).abiSize(pt);
-        if (field_size > payload_size) {
-            payload_size = field_size;
-            biggest_field = @intCast(field_index);
-        }
-        if (field_align.compare(.gte, payload_align)) {
-            payload_align = field_align;
-            most_aligned_field = @intCast(field_index);
-            most_aligned_field_size = field_size;
-        }
-    }
-    const have_tag = loaded_union.flagsUnordered(ip).runtime_tag.hasTag();
-    if (!have_tag or !Type.fromInterned(loaded_union.enum_tag_ty).hasRuntimeBits(pt)) {
-        return .{
-            .abi_size = payload_align.forward(payload_size),
-            .abi_align = payload_align,
-            .most_aligned_field = most_aligned_field,
-            .most_aligned_field_size = most_aligned_field_size,
-            .biggest_field = biggest_field,
-            .payload_size = payload_size,
-            .payload_align = payload_align,
-            .tag_align = .none,
-            .tag_size = 0,
-            .padding = 0,
-        };
-    }
-    const tag_size = Type.fromInterned(loaded_union.enum_tag_ty).abiSize(pt);
-    const tag_align = Type.fromInterned(loaded_union.enum_tag_ty).abiAlignment(pt).max(.@"1");
-    return .{
-        .abi_size = loaded_union.sizeUnordered(ip),
-        .abi_align = tag_align.max(payload_align),
-        .most_aligned_field = most_aligned_field,
-        .most_aligned_field_size = most_aligned_field_size,
-        .biggest_field = biggest_field,
-        .payload_size = payload_size,
-        .payload_align = payload_align,
-        .tag_align = tag_align,
-        .tag_size = tag_size,
-        .padding = loaded_union.paddingUnordered(ip),
-    };
-}
-pub fn unionAbiSize(mod: *Module, loaded_union: InternPool.LoadedUnionType) u64 {
-    return mod.getUnionLayout(loaded_union).abi_size;
-}
 /// Returns 0 if the union is represented with 0 bits at runtime.
 pub fn unionAbiAlignment(pt: Zcu.PerThread, loaded_union: InternPool.LoadedUnionType) InternPool.Alignment {
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
     const have_tag = loaded_union.flagsPtr(ip).runtime_tag.hasTag();
     var max_align: InternPool.Alignment = .none;
-    if (have_tag) max_align = Type.fromInterned(loaded_union.enum_tag_ty).abiAlignment(pt);
+    if (have_tag) max_align = Type.fromInterned(loaded_union.enum_tag_ty).abiAlignment(zcu);
     for (loaded_union.field_types.get(ip), 0..) |field_ty, field_index| {
-        if (!Type.fromInterned(field_ty).hasRuntimeBits(pt)) continue;
-        const field_align = mod.unionFieldNormalAlignment(loaded_union, @intCast(field_index));
+        if (!Type.fromInterned(field_ty).hasRuntimeBits(zcu)) continue;
+        const field_align = zcu.unionFieldNormalAlignment(loaded_union, @intCast(field_index));
         max_align = max_align.max(field_align);
     }
     return max_align;
 }
-/// Returns the field alignment of a non-packed union. Asserts the layout is not packed.
-pub fn unionFieldNormalAlignment(
-    pt: Zcu.PerThread,
-    loaded_union: InternPool.LoadedUnionType,
-    field_index: u32,
-) InternPool.Alignment {
-    return pt.unionFieldNormalAlignmentAdvanced(loaded_union, field_index, .normal) catch unreachable;
-}
-/// Returns the field alignment of a non-packed union. Asserts the layout is not packed.
-/// If `strat` is `.sema`, may perform type resolution.
-pub fn unionFieldNormalAlignmentAdvanced(
-    pt: Zcu.PerThread,
-    loaded_union: InternPool.LoadedUnionType,
-    field_index: u32,
-    comptime strat: Type.ResolveStrat,
-) Zcu.SemaError!InternPool.Alignment {
-    const ip = &pt.zcu.intern_pool;
-    assert(loaded_union.flagsUnordered(ip).layout != .@"packed");
-    const field_align = loaded_union.fieldAlign(ip, field_index);
-    if (field_align != .none) return field_align;
-    const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]);
-    if (field_ty.isNoReturn(pt.zcu)) return .none;
-    return (try field_ty.abiAlignmentAdvanced(pt, strat.toLazy())).scalar;
-}
 /// Returns the field alignment of a non-packed struct. Asserts the layout is not packed.
 pub fn structFieldAlignment(
     pt: Zcu.PerThread,
@@ -3153,31 +3063,13 @@ pub fn structFieldAlignment(
     field_ty: Type,
     layout: std.builtin.Type.ContainerLayout,
 ) InternPool.Alignment {
-    return pt.structFieldAlignmentAdvanced(explicit_alignment, field_ty, layout, .normal) catch unreachable;
-}
-/// Returns the field alignment of a non-packed struct. Asserts the layout is not packed.
-/// If `strat` is `.sema`, may perform type resolution.
-pub fn structFieldAlignmentAdvanced(
-    pt: Zcu.PerThread,
-    explicit_alignment: InternPool.Alignment,
-    field_ty: Type,
-    layout: std.builtin.Type.ContainerLayout,
-    comptime strat: Type.ResolveStrat,
-) Zcu.SemaError!InternPool.Alignment {
-    assert(layout != .@"packed");
-    if (explicit_alignment != .none) return explicit_alignment;
-    const ty_abi_align = (try field_ty.abiAlignmentAdvanced(pt, strat.toLazy())).scalar;
-    switch (layout) {
-        .@"packed" => unreachable,
-        .auto => if (pt.zcu.getTarget().ofmt != .c) return ty_abi_align,
-        .@"extern" => {},
-    }
-    // extern
-    if (field_ty.isAbiInt(pt.zcu) and field_ty.intInfo(pt.zcu).bits >= 128) {
-        return ty_abi_align.maxStrict(.@"16");
-    }
-    return ty_abi_align;
+    return field_ty.structFieldAlignmentAdvanced(
+        explicit_alignment,
+        layout,
+        .normal,
+        pt.zcu,
+        {},
+    ) catch unreachable;
 }
 /// https://github.com/ziglang/zig/issues/17178 explored storing these bit offsets
@@ -3189,8 +3081,8 @@ pub fn structPackedFieldBitOffset(
     struct_type: InternPool.LoadedStructType,
     field_index: u32,
 ) u16 {
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
     assert(struct_type.layout == .@"packed");
     assert(struct_type.haveLayout(ip));
     var bit_sum: u64 = 0;
@@ -3199,7 +3091,7 @@ pub fn structPackedFieldBitOffset(
             return @intCast(bit_sum);
         }
         const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
-        bit_sum += field_ty.bitSize(pt);
+        bit_sum += field_ty.bitSize(zcu);
     }
     unreachable; // index out of bounds
 }
@@ -3244,7 +3136,7 @@ pub fn navPtrType(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) Allocator.
     return pt.ptrType(.{
         .child = ty.toIntern(),
         .flags = .{
-            .alignment = if (r.alignment == ty.abiAlignment(pt))
+            .alignment = if (r.alignment == ty.abiAlignment(zcu))
                 .none
             else
                 r.alignment,
@@ -3274,7 +3166,7 @@ pub fn navAlignment(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) InternPo
     const zcu = pt.zcu;
     const r = zcu.intern_pool.getNav(nav_index).status.resolved;
     if (r.alignment != .none) return r.alignment;
-    return Value.fromInterned(r.val).typeOf(zcu).abiAlignment(pt);
+    return Value.fromInterned(r.val).typeOf(zcu).abiAlignment(zcu);
 }
 /// Given a container type requiring resolution, ensures that it is up-to-date.

File diff suppressed because it is too large


@ -15,44 +15,44 @@ pub const Class = union(enum) {
}; };
/// For `float_array` the second element will be the amount of floats. /// For `float_array` the second element will be the amount of floats.
pub fn classifyType(ty: Type, pt: Zcu.PerThread) Class { pub fn classifyType(ty: Type, zcu: *Zcu) Class {
std.debug.assert(ty.hasRuntimeBitsIgnoreComptime(pt)); std.debug.assert(ty.hasRuntimeBitsIgnoreComptime(zcu));
var maybe_float_bits: ?u16 = null; var maybe_float_bits: ?u16 = null;
switch (ty.zigTypeTag(pt.zcu)) { switch (ty.zigTypeTag(zcu)) {
.Struct => { .Struct => {
if (ty.containerLayout(pt.zcu) == .@"packed") return .byval; if (ty.containerLayout(zcu) == .@"packed") return .byval;
const float_count = countFloats(ty, pt.zcu, &maybe_float_bits); const float_count = countFloats(ty, zcu, &maybe_float_bits);
if (float_count <= sret_float_count) return .{ .float_array = float_count }; if (float_count <= sret_float_count) return .{ .float_array = float_count };
const bit_size = ty.bitSize(pt); const bit_size = ty.bitSize(zcu);
if (bit_size > 128) return .memory; if (bit_size > 128) return .memory;
if (bit_size > 64) return .double_integer; if (bit_size > 64) return .double_integer;
return .integer; return .integer;
}, },
.Union => { .Union => {
if (ty.containerLayout(pt.zcu) == .@"packed") return .byval; if (ty.containerLayout(zcu) == .@"packed") return .byval;
const float_count = countFloats(ty, pt.zcu, &maybe_float_bits); const float_count = countFloats(ty, zcu, &maybe_float_bits);
if (float_count <= sret_float_count) return .{ .float_array = float_count }; if (float_count <= sret_float_count) return .{ .float_array = float_count };
const bit_size = ty.bitSize(pt); const bit_size = ty.bitSize(zcu);
if (bit_size > 128) return .memory; if (bit_size > 128) return .memory;
if (bit_size > 64) return .double_integer; if (bit_size > 64) return .double_integer;
return .integer; return .integer;
}, },
.Int, .Enum, .ErrorSet, .Float, .Bool => return .byval, .Int, .Enum, .ErrorSet, .Float, .Bool => return .byval,
.Vector => { .Vector => {
const bit_size = ty.bitSize(pt); const bit_size = ty.bitSize(zcu);
// TODO is this controlled by a cpu feature? // TODO is this controlled by a cpu feature?
if (bit_size > 128) return .memory; if (bit_size > 128) return .memory;
return .byval; return .byval;
}, },
.Optional => { .Optional => {
std.debug.assert(ty.isPtrLikeOptional(pt.zcu)); std.debug.assert(ty.isPtrLikeOptional(zcu));
return .byval; return .byval;
}, },
.Pointer => { .Pointer => {
std.debug.assert(!ty.isSlice(pt.zcu)); std.debug.assert(!ty.isSlice(zcu));
return .byval; return .byval;
}, },
.ErrorUnion, .ErrorUnion,
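Once the float-array case is ruled out, the struct and union branches above fall back to a purely size-based classification. A standalone sketch of that fallback (illustrative only; `classifyBySize` is not part of this commit, and the real code also checks float counts per target):

```zig
const std = @import("std");

const Class = enum { integer, double_integer, memory };

// Aggregates up to 64 bits fit one integer register, up to 128 bits take a
// register pair, and anything larger is passed in memory.
fn classifyBySize(bit_size: u64) Class {
    if (bit_size > 128) return .memory;
    if (bit_size > 64) return .double_integer;
    return .integer;
}

test "aggregate size fallback" {
    try std.testing.expectEqual(Class.integer, classifyBySize(32));
    try std.testing.expectEqual(Class.double_integer, classifyBySize(96));
    try std.testing.expectEqual(Class.memory, classifyBySize(256));
}
```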


@ -24,29 +24,29 @@ pub const Class = union(enum) {
pub const Context = enum { ret, arg }; pub const Context = enum { ret, arg };
pub fn classifyType(ty: Type, pt: Zcu.PerThread, ctx: Context) Class { pub fn classifyType(ty: Type, zcu: *Zcu, ctx: Context) Class {
assert(ty.hasRuntimeBitsIgnoreComptime(pt)); assert(ty.hasRuntimeBitsIgnoreComptime(zcu));
var maybe_float_bits: ?u16 = null; var maybe_float_bits: ?u16 = null;
const max_byval_size = 512; const max_byval_size = 512;
const ip = &pt.zcu.intern_pool; const ip = &zcu.intern_pool;
switch (ty.zigTypeTag(pt.zcu)) { switch (ty.zigTypeTag(zcu)) {
.Struct => { .Struct => {
const bit_size = ty.bitSize(pt); const bit_size = ty.bitSize(zcu);
if (ty.containerLayout(pt.zcu) == .@"packed") { if (ty.containerLayout(zcu) == .@"packed") {
if (bit_size > 64) return .memory; if (bit_size > 64) return .memory;
return .byval; return .byval;
} }
if (bit_size > max_byval_size) return .memory; if (bit_size > max_byval_size) return .memory;
const float_count = countFloats(ty, pt.zcu, &maybe_float_bits); const float_count = countFloats(ty, zcu, &maybe_float_bits);
if (float_count <= byval_float_count) return .byval; if (float_count <= byval_float_count) return .byval;
const fields = ty.structFieldCount(pt.zcu); const fields = ty.structFieldCount(zcu);
var i: u32 = 0; var i: u32 = 0;
while (i < fields) : (i += 1) { while (i < fields) : (i += 1) {
const field_ty = ty.structFieldType(i, pt.zcu); const field_ty = ty.structFieldType(i, zcu);
const field_alignment = ty.structFieldAlign(i, pt); const field_alignment = ty.structFieldAlign(i, zcu);
const field_size = field_ty.bitSize(pt); const field_size = field_ty.bitSize(zcu);
if (field_size > 32 or field_alignment.compare(.gt, .@"32")) { if (field_size > 32 or field_alignment.compare(.gt, .@"32")) {
return Class.arrSize(bit_size, 64); return Class.arrSize(bit_size, 64);
} }
@ -54,19 +54,19 @@ pub fn classifyType(ty: Type, pt: Zcu.PerThread, ctx: Context) Class {
return Class.arrSize(bit_size, 32); return Class.arrSize(bit_size, 32);
}, },
.Union => { .Union => {
const bit_size = ty.bitSize(pt); const bit_size = ty.bitSize(zcu);
const union_obj = pt.zcu.typeToUnion(ty).?; const union_obj = zcu.typeToUnion(ty).?;
if (union_obj.flagsUnordered(ip).layout == .@"packed") { if (union_obj.flagsUnordered(ip).layout == .@"packed") {
if (bit_size > 64) return .memory; if (bit_size > 64) return .memory;
return .byval; return .byval;
} }
if (bit_size > max_byval_size) return .memory; if (bit_size > max_byval_size) return .memory;
const float_count = countFloats(ty, pt.zcu, &maybe_float_bits); const float_count = countFloats(ty, zcu, &maybe_float_bits);
if (float_count <= byval_float_count) return .byval; if (float_count <= byval_float_count) return .byval;
for (union_obj.field_types.get(ip), 0..) |field_ty, field_index| { for (union_obj.field_types.get(ip), 0..) |field_ty, field_index| {
if (Type.fromInterned(field_ty).bitSize(pt) > 32 or if (Type.fromInterned(field_ty).bitSize(zcu) > 32 or
pt.unionFieldNormalAlignment(union_obj, @intCast(field_index)).compare(.gt, .@"32")) Type.unionFieldNormalAlignment(union_obj, @intCast(field_index), zcu).compare(.gt, .@"32"))
{ {
return Class.arrSize(bit_size, 64); return Class.arrSize(bit_size, 64);
} }
@ -77,28 +77,28 @@ pub fn classifyType(ty: Type, pt: Zcu.PerThread, ctx: Context) Class {
.Int => { .Int => {
// TODO this is incorrect for _BitInt(128) but implementing // TODO this is incorrect for _BitInt(128) but implementing
// this correctly makes implementing compiler-rt impossible. // this correctly makes implementing compiler-rt impossible.
// const bit_size = ty.bitSize(pt); // const bit_size = ty.bitSize(zcu);
// if (bit_size > 64) return .memory; // if (bit_size > 64) return .memory;
return .byval; return .byval;
}, },
.Enum, .ErrorSet => { .Enum, .ErrorSet => {
const bit_size = ty.bitSize(pt); const bit_size = ty.bitSize(zcu);
if (bit_size > 64) return .memory; if (bit_size > 64) return .memory;
return .byval; return .byval;
}, },
.Vector => { .Vector => {
const bit_size = ty.bitSize(pt); const bit_size = ty.bitSize(zcu);
// TODO is this controlled by a cpu feature? // TODO is this controlled by a cpu feature?
if (ctx == .ret and bit_size > 128) return .memory; if (ctx == .ret and bit_size > 128) return .memory;
if (bit_size > 512) return .memory; if (bit_size > 512) return .memory;
return .byval; return .byval;
}, },
.Optional => { .Optional => {
assert(ty.isPtrLikeOptional(pt.zcu)); assert(ty.isPtrLikeOptional(zcu));
return .byval; return .byval;
}, },
.Pointer => { .Pointer => {
assert(!ty.isSlice(pt.zcu)); assert(!ty.isSlice(zcu));
return .byval; return .byval;
}, },
.ErrorUnion, .ErrorUnion,


@ -49,6 +49,7 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index, options: struct {
relocs: []const Reloc, relocs: []const Reloc,
} { } {
const pt = lower.pt; const pt = lower.pt;
const zcu = pt.zcu;
lower.result_insts = undefined; lower.result_insts = undefined;
lower.result_relocs = undefined; lower.result_relocs = undefined;
@ -308,11 +309,11 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index, options: struct {
const class = rs1.class(); const class = rs1.class();
const ty = compare.ty; const ty = compare.ty;
const size = std.math.ceilPowerOfTwo(u64, ty.bitSize(pt)) catch { const size = std.math.ceilPowerOfTwo(u64, ty.bitSize(zcu)) catch {
return lower.fail("pseudo_compare size {}", .{ty.bitSize(pt)}); return lower.fail("pseudo_compare size {}", .{ty.bitSize(zcu)});
}; };
const is_unsigned = ty.isUnsignedInt(pt.zcu); const is_unsigned = ty.isUnsignedInt(zcu);
const less_than: Mnemonic = if (is_unsigned) .sltu else .slt; const less_than: Mnemonic = if (is_unsigned) .sltu else .slt;
switch (class) { switch (class) {


@ -9,15 +9,15 @@ const assert = std.debug.assert;
pub const Class = enum { memory, byval, integer, double_integer, fields }; pub const Class = enum { memory, byval, integer, double_integer, fields };
pub fn classifyType(ty: Type, pt: Zcu.PerThread) Class { pub fn classifyType(ty: Type, zcu: *Zcu) Class {
const target = pt.zcu.getTarget(); const target = zcu.getTarget();
std.debug.assert(ty.hasRuntimeBitsIgnoreComptime(pt)); std.debug.assert(ty.hasRuntimeBitsIgnoreComptime(zcu));
const max_byval_size = target.ptrBitWidth() * 2; const max_byval_size = target.ptrBitWidth() * 2;
switch (ty.zigTypeTag(pt.zcu)) { switch (ty.zigTypeTag(zcu)) {
.Struct => { .Struct => {
const bit_size = ty.bitSize(pt); const bit_size = ty.bitSize(zcu);
if (ty.containerLayout(pt.zcu) == .@"packed") { if (ty.containerLayout(zcu) == .@"packed") {
if (bit_size > max_byval_size) return .memory; if (bit_size > max_byval_size) return .memory;
return .byval; return .byval;
} }
@ -25,12 +25,12 @@ pub fn classifyType(ty: Type, pt: Zcu.PerThread) Class {
if (std.Target.riscv.featureSetHas(target.cpu.features, .d)) fields: { if (std.Target.riscv.featureSetHas(target.cpu.features, .d)) fields: {
var any_fp = false; var any_fp = false;
var field_count: usize = 0; var field_count: usize = 0;
for (0..ty.structFieldCount(pt.zcu)) |field_index| { for (0..ty.structFieldCount(zcu)) |field_index| {
const field_ty = ty.structFieldType(field_index, pt.zcu); const field_ty = ty.structFieldType(field_index, zcu);
if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
if (field_ty.isRuntimeFloat()) if (field_ty.isRuntimeFloat())
any_fp = true any_fp = true
else if (!field_ty.isAbiInt(pt.zcu)) else if (!field_ty.isAbiInt(zcu))
break :fields; break :fields;
field_count += 1; field_count += 1;
if (field_count > 2) break :fields; if (field_count > 2) break :fields;
@ -45,8 +45,8 @@ pub fn classifyType(ty: Type, pt: Zcu.PerThread) Class {
return .integer; return .integer;
}, },
.Union => { .Union => {
const bit_size = ty.bitSize(pt); const bit_size = ty.bitSize(zcu);
if (ty.containerLayout(pt.zcu) == .@"packed") { if (ty.containerLayout(zcu) == .@"packed") {
if (bit_size > max_byval_size) return .memory; if (bit_size > max_byval_size) return .memory;
return .byval; return .byval;
} }
@ -58,21 +58,21 @@ pub fn classifyType(ty: Type, pt: Zcu.PerThread) Class {
.Bool => return .integer, .Bool => return .integer,
.Float => return .byval, .Float => return .byval,
.Int, .Enum, .ErrorSet => { .Int, .Enum, .ErrorSet => {
const bit_size = ty.bitSize(pt); const bit_size = ty.bitSize(zcu);
if (bit_size > max_byval_size) return .memory; if (bit_size > max_byval_size) return .memory;
return .byval; return .byval;
}, },
.Vector => { .Vector => {
const bit_size = ty.bitSize(pt); const bit_size = ty.bitSize(zcu);
if (bit_size > max_byval_size) return .memory; if (bit_size > max_byval_size) return .memory;
return .integer; return .integer;
}, },
.Optional => { .Optional => {
std.debug.assert(ty.isPtrLikeOptional(pt.zcu)); std.debug.assert(ty.isPtrLikeOptional(zcu));
return .byval; return .byval;
}, },
.Pointer => { .Pointer => {
std.debug.assert(!ty.isSlice(pt.zcu)); std.debug.assert(!ty.isSlice(zcu));
return .byval; return .byval;
}, },
.ErrorUnion, .ErrorUnion,
@ -97,19 +97,18 @@ pub const SystemClass = enum { integer, float, memory, none };
/// There are a maximum of 8 possible return slots. Returned values are in /// There are a maximum of 8 possible return slots. Returned values are in
/// the beginning of the array; unused slots are filled with .none. /// the beginning of the array; unused slots are filled with .none.
pub fn classifySystem(ty: Type, pt: Zcu.PerThread) [8]SystemClass { pub fn classifySystem(ty: Type, zcu: *Zcu) [8]SystemClass {
const zcu = pt.zcu;
var result = [1]SystemClass{.none} ** 8; var result = [1]SystemClass{.none} ** 8;
const memory_class = [_]SystemClass{ const memory_class = [_]SystemClass{
.memory, .none, .none, .none, .memory, .none, .none, .none,
.none, .none, .none, .none, .none, .none, .none, .none,
}; };
switch (ty.zigTypeTag(pt.zcu)) { switch (ty.zigTypeTag(zcu)) {
.Bool, .Void, .NoReturn => { .Bool, .Void, .NoReturn => {
result[0] = .integer; result[0] = .integer;
return result; return result;
}, },
.Pointer => switch (ty.ptrSize(pt.zcu)) { .Pointer => switch (ty.ptrSize(zcu)) {
.Slice => { .Slice => {
result[0] = .integer; result[0] = .integer;
result[1] = .integer; result[1] = .integer;
@ -121,14 +120,14 @@ pub fn classifySystem(ty: Type, pt: Zcu.PerThread) [8]SystemClass {
}, },
}, },
.Optional => { .Optional => {
if (ty.isPtrLikeOptional(pt.zcu)) { if (ty.isPtrLikeOptional(zcu)) {
result[0] = .integer; result[0] = .integer;
return result; return result;
} }
return memory_class; return memory_class;
}, },
.Int, .Enum, .ErrorSet => { .Int, .Enum, .ErrorSet => {
const int_bits = ty.intInfo(pt.zcu).bits; const int_bits = ty.intInfo(zcu).bits;
if (int_bits <= 64) { if (int_bits <= 64) {
result[0] = .integer; result[0] = .integer;
return result; return result;
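The .Optional branch above can place a pointer-like optional in a single integer register because the null state is encoded in the pointer value itself. A quick, illustrative check of that size equivalence (not part of this commit):

```zig
const std = @import("std");

test "pointer-like optionals are pointer-sized" {
    // ?*T reuses address 0 as the null state, so no extra bits are needed.
    try std.testing.expectEqual(@sizeOf(*u32), @sizeOf(?*u32));
}
```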
@ -153,8 +152,8 @@ pub fn classifySystem(ty: Type, pt: Zcu.PerThread) [8]SystemClass {
unreachable; // support split float args unreachable; // support split float args
}, },
.ErrorUnion => { .ErrorUnion => {
const payload_ty = ty.errorUnionPayload(pt.zcu); const payload_ty = ty.errorUnionPayload(zcu);
const payload_bits = payload_ty.bitSize(pt); const payload_bits = payload_ty.bitSize(zcu);
// the error union itself // the error union itself
result[0] = .integer; result[0] = .integer;
@ -165,8 +164,8 @@ pub fn classifySystem(ty: Type, pt: Zcu.PerThread) [8]SystemClass {
return memory_class; return memory_class;
}, },
.Struct, .Union => { .Struct, .Union => {
const layout = ty.containerLayout(pt.zcu); const layout = ty.containerLayout(zcu);
const ty_size = ty.abiSize(pt); const ty_size = ty.abiSize(zcu);
if (layout == .@"packed") { if (layout == .@"packed") {
assert(ty_size <= 16); assert(ty_size <= 16);
@ -178,7 +177,7 @@ pub fn classifySystem(ty: Type, pt: Zcu.PerThread) [8]SystemClass {
return memory_class; return memory_class;
}, },
.Array => { .Array => {
const ty_size = ty.abiSize(pt); const ty_size = ty.abiSize(zcu);
if (ty_size <= 8) { if (ty_size <= 8) {
result[0] = .integer; result[0] = .integer;
return result; return result;
@ -192,7 +191,7 @@ pub fn classifySystem(ty: Type, pt: Zcu.PerThread) [8]SystemClass {
}, },
.Vector => { .Vector => {
// we pass vectors through integer registers if they are small enough to fit. // we pass vectors through integer registers if they are small enough to fit.
const vec_bits = ty.totalVectorBits(pt); const vec_bits = ty.totalVectorBits(zcu);
if (vec_bits <= 64) { if (vec_bits <= 64) {
result[0] = .integer; result[0] = .integer;
return result; return result;


@ -1012,6 +1012,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
fn airArg(self: *Self, inst: Air.Inst.Index) !void { fn airArg(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt; const pt = self.pt;
const zcu = pt.zcu;
const arg_index = self.arg_index; const arg_index = self.arg_index;
self.arg_index += 1; self.arg_index += 1;
@ -1021,7 +1022,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
const mcv = blk: { const mcv = blk: {
switch (arg) { switch (arg) {
.stack_offset => |off| { .stack_offset => |off| {
const abi_size = math.cast(u32, ty.abiSize(pt)) orelse { const abi_size = math.cast(u32, ty.abiSize(zcu)) orelse {
return self.fail("type '{}' too big to fit into stack frame", .{ty.fmt(pt)}); return self.fail("type '{}' too big to fit into stack frame", .{ty.fmt(pt)});
}; };
const offset = off + abi_size; const offset = off + abi_size;
@ -1211,7 +1212,7 @@ fn airBreakpoint(self: *Self) !void {
fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void { fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt; const pt = self.pt;
const mod = pt.zcu; const zcu = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
// We have hardware byteswapper in SPARCv9, don't let mainstream compilers mislead you. // We have hardware byteswapper in SPARCv9, don't let mainstream compilers mislead you.
@ -1227,14 +1228,14 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void {
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const operand = try self.resolveInst(ty_op.operand); const operand = try self.resolveInst(ty_op.operand);
const operand_ty = self.typeOf(ty_op.operand); const operand_ty = self.typeOf(ty_op.operand);
switch (operand_ty.zigTypeTag(mod)) { switch (operand_ty.zigTypeTag(zcu)) {
.Vector => return self.fail("TODO byteswap for vectors", .{}), .Vector => return self.fail("TODO byteswap for vectors", .{}),
.Int => { .Int => {
const int_info = operand_ty.intInfo(mod); const int_info = operand_ty.intInfo(zcu);
if (int_info.bits == 8) break :result operand; if (int_info.bits == 8) break :result operand;
const abi_size = int_info.bits >> 3; const abi_size = int_info.bits >> 3;
const abi_align = operand_ty.abiAlignment(pt); const abi_align = operand_ty.abiAlignment(zcu);
const opposite_endian_asi = switch (self.target.cpu.arch.endian()) { const opposite_endian_asi = switch (self.target.cpu.arch.endian()) {
Endian.big => ASI.asi_primary_little, Endian.big => ASI.asi_primary_little,
Endian.little => ASI.asi_primary, Endian.little => ASI.asi_primary,
@ -1409,24 +1410,24 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void {
fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const pt = self.pt; const pt = self.pt;
const mod = pt.zcu; const zcu = pt.zcu;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const lhs = try self.resolveInst(bin_op.lhs); const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs); const rhs = try self.resolveInst(bin_op.rhs);
const lhs_ty = self.typeOf(bin_op.lhs); const lhs_ty = self.typeOf(bin_op.lhs);
const int_ty = switch (lhs_ty.zigTypeTag(mod)) { const int_ty = switch (lhs_ty.zigTypeTag(zcu)) {
.Vector => unreachable, // Handled by cmp_vector. .Vector => unreachable, // Handled by cmp_vector.
.Enum => lhs_ty.intTagType(mod), .Enum => lhs_ty.intTagType(zcu),
.Int => lhs_ty, .Int => lhs_ty,
.Bool => Type.u1, .Bool => Type.u1,
.Pointer => Type.usize, .Pointer => Type.usize,
.ErrorSet => Type.u16, .ErrorSet => Type.u16,
.Optional => blk: { .Optional => blk: {
const payload_ty = lhs_ty.optionalChild(mod); const payload_ty = lhs_ty.optionalChild(zcu);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
break :blk Type.u1; break :blk Type.u1;
} else if (lhs_ty.isPtrLikeOptional(mod)) { } else if (lhs_ty.isPtrLikeOptional(zcu)) {
break :blk Type.usize; break :blk Type.usize;
} else { } else {
return self.fail("TODO SPARCv9 cmp non-pointer optionals", .{}); return self.fail("TODO SPARCv9 cmp non-pointer optionals", .{});
@ -1436,7 +1437,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
else => unreachable, else => unreachable,
}; };
const int_info = int_ty.intInfo(mod); const int_info = int_ty.intInfo(zcu);
if (int_info.bits <= 64) { if (int_info.bits <= 64) {
_ = try self.binOp(.cmp_eq, lhs, rhs, int_ty, int_ty, BinOpMetadata{ _ = try self.binOp(.cmp_eq, lhs, rhs, int_ty, int_ty, BinOpMetadata{
.lhs = bin_op.lhs, .lhs = bin_op.lhs,
@ -1797,16 +1798,16 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void {
fn airLoad(self: *Self, inst: Air.Inst.Index) !void { fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt; const pt = self.pt;
const mod = pt.zcu; const zcu = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const elem_ty = self.typeOfIndex(inst); const elem_ty = self.typeOfIndex(inst);
const elem_size = elem_ty.abiSize(pt); const elem_size = elem_ty.abiSize(zcu);
const result: MCValue = result: { const result: MCValue = result: {
if (!elem_ty.hasRuntimeBits(pt)) if (!elem_ty.hasRuntimeBits(zcu))
break :result MCValue.none; break :result MCValue.none;
const ptr = try self.resolveInst(ty_op.operand); const ptr = try self.resolveInst(ty_op.operand);
const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(mod); const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(zcu);
if (self.liveness.isUnused(inst) and !is_volatile) if (self.liveness.isUnused(inst) and !is_volatile)
break :result MCValue.dead; break :result MCValue.dead;
@ -2428,7 +2429,7 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt; const pt = self.pt;
const mod = pt.zcu; const zcu = pt.zcu;
const is_volatile = false; // TODO const is_volatile = false; // TODO
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
@ -2438,10 +2439,10 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
const index_mcv = try self.resolveInst(bin_op.rhs); const index_mcv = try self.resolveInst(bin_op.rhs);
const slice_ty = self.typeOf(bin_op.lhs); const slice_ty = self.typeOf(bin_op.lhs);
const elem_ty = slice_ty.childType(mod); const elem_ty = slice_ty.childType(zcu);
const elem_size = elem_ty.abiSize(pt); const elem_size = elem_ty.abiSize(zcu);
const slice_ptr_field_type = slice_ty.slicePtrFieldType(mod); const slice_ptr_field_type = slice_ty.slicePtrFieldType(zcu);
const index_lock: ?RegisterLock = if (index_mcv == .register) const index_lock: ?RegisterLock = if (index_mcv == .register)
self.register_manager.lockRegAssumeUnused(index_mcv.register) self.register_manager.lockRegAssumeUnused(index_mcv.register)
@ -2553,10 +2554,10 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
const operand = extra.struct_operand; const operand = extra.struct_operand;
const index = extra.field_index; const index = extra.field_index;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const pt = self.pt; const zcu = self.pt.zcu;
const mcv = try self.resolveInst(operand); const mcv = try self.resolveInst(operand);
const struct_ty = self.typeOf(operand); const struct_ty = self.typeOf(operand);
const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, pt))); const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, zcu)));
switch (mcv) { switch (mcv) {
.dead, .unreach => unreachable, .dead, .unreach => unreachable,
@ -2687,13 +2688,13 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void {
fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void { fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt; const pt = self.pt;
const mod = pt.zcu; const zcu = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const error_union_ty = self.typeOf(ty_op.operand); const error_union_ty = self.typeOf(ty_op.operand);
const payload_ty = error_union_ty.errorUnionPayload(mod); const payload_ty = error_union_ty.errorUnionPayload(zcu);
const mcv = try self.resolveInst(ty_op.operand); const mcv = try self.resolveInst(ty_op.operand);
if (!payload_ty.hasRuntimeBits(pt)) break :result mcv; if (!payload_ty.hasRuntimeBits(zcu)) break :result mcv;
return self.fail("TODO implement unwrap error union error for non-empty payloads", .{}); return self.fail("TODO implement unwrap error union error for non-empty payloads", .{});
}; };
@ -2702,12 +2703,12 @@ fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void {
fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void { fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt; const pt = self.pt;
const mod = pt.zcu; const zcu = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const error_union_ty = self.typeOf(ty_op.operand); const error_union_ty = self.typeOf(ty_op.operand);
const payload_ty = error_union_ty.errorUnionPayload(mod); const payload_ty = error_union_ty.errorUnionPayload(zcu);
if (!payload_ty.hasRuntimeBits(pt)) break :result MCValue.none; if (!payload_ty.hasRuntimeBits(zcu)) break :result MCValue.none;
return self.fail("TODO implement unwrap error union payload for non-empty payloads", .{}); return self.fail("TODO implement unwrap error union payload for non-empty payloads", .{});
}; };
@ -2717,13 +2718,13 @@ fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void {
/// E to E!T /// E to E!T
fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt; const pt = self.pt;
const mod = pt.zcu; const zcu = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const error_union_ty = ty_op.ty.toType(); const error_union_ty = ty_op.ty.toType();
const payload_ty = error_union_ty.errorUnionPayload(mod); const payload_ty = error_union_ty.errorUnionPayload(zcu);
const mcv = try self.resolveInst(ty_op.operand); const mcv = try self.resolveInst(ty_op.operand);
if (!payload_ty.hasRuntimeBits(pt)) break :result mcv; if (!payload_ty.hasRuntimeBits(zcu)) break :result mcv;
return self.fail("TODO implement wrap errunion error for non-empty payloads", .{}); return self.fail("TODO implement wrap errunion error for non-empty payloads", .{});
}; };
@ -2744,7 +2745,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
const optional_ty = self.typeOfIndex(inst); const optional_ty = self.typeOfIndex(inst);
// Optional with a zero-bit payload type is just a boolean true // Optional with a zero-bit payload type is just a boolean true
if (optional_ty.abiSize(pt) == 1) if (optional_ty.abiSize(pt.zcu) == 1)
break :result MCValue{ .immediate = 1 }; break :result MCValue{ .immediate = 1 };
return self.fail("TODO implement wrap optional for {}", .{self.target.cpu.arch}); return self.fail("TODO implement wrap optional for {}", .{self.target.cpu.arch});
@ -2779,10 +2780,10 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: Alignme
/// Use a pointer instruction as the basis for allocating stack memory. /// Use a pointer instruction as the basis for allocating stack memory.
fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
const pt = self.pt; const pt = self.pt;
const mod = pt.zcu; const zcu = pt.zcu;
const elem_ty = self.typeOfIndex(inst).childType(mod); const elem_ty = self.typeOfIndex(inst).childType(zcu);
if (!elem_ty.hasRuntimeBits(pt)) { if (!elem_ty.hasRuntimeBits(zcu)) {
// As this stack item will never be dereferenced at runtime, // As this stack item will never be dereferenced at runtime,
// return the stack offset 0. Stack offset 0 will be where all // return the stack offset 0. Stack offset 0 will be where all
// zero-sized stack allocations live as non-zero-sized // zero-sized stack allocations live as non-zero-sized
@ -2790,21 +2791,22 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
return @as(u32, 0); return @as(u32, 0);
} }
const abi_size = math.cast(u32, elem_ty.abiSize(pt)) orelse { const abi_size = math.cast(u32, elem_ty.abiSize(zcu)) orelse {
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(pt)}); return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(pt)});
}; };
// TODO swap this for inst.ty.ptrAlign // TODO swap this for inst.ty.ptrAlign
const abi_align = elem_ty.abiAlignment(pt); const abi_align = elem_ty.abiAlignment(zcu);
return self.allocMem(inst, abi_size, abi_align); return self.allocMem(inst, abi_size, abi_align);
} }
fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue { fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
const pt = self.pt; const pt = self.pt;
const zcu = pt.zcu;
const elem_ty = self.typeOfIndex(inst); const elem_ty = self.typeOfIndex(inst);
const abi_size = math.cast(u32, elem_ty.abiSize(pt)) orelse { const abi_size = math.cast(u32, elem_ty.abiSize(zcu)) orelse {
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(pt)}); return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(pt)});
}; };
const abi_align = elem_ty.abiAlignment(pt); const abi_align = elem_ty.abiAlignment(zcu);
self.stack_align = self.stack_align.max(abi_align); self.stack_align = self.stack_align.max(abi_align);
if (reg_ok) { if (reg_ok) {
@ -2847,7 +2849,7 @@ fn binOp(
metadata: ?BinOpMetadata, metadata: ?BinOpMetadata,
) InnerError!MCValue { ) InnerError!MCValue {
const pt = self.pt; const pt = self.pt;
const mod = pt.zcu; const zcu = pt.zcu;
switch (tag) { switch (tag) {
.add, .add,
.sub, .sub,
@ -2857,12 +2859,12 @@ fn binOp(
.xor, .xor,
.cmp_eq, .cmp_eq,
=> { => {
switch (lhs_ty.zigTypeTag(mod)) { switch (lhs_ty.zigTypeTag(zcu)) {
.Float => return self.fail("TODO binary operations on floats", .{}), .Float => return self.fail("TODO binary operations on floats", .{}),
.Vector => return self.fail("TODO binary operations on vectors", .{}), .Vector => return self.fail("TODO binary operations on vectors", .{}),
.Int => { .Int => {
assert(lhs_ty.eql(rhs_ty, mod)); assert(lhs_ty.eql(rhs_ty, zcu));
const int_info = lhs_ty.intInfo(mod); const int_info = lhs_ty.intInfo(zcu);
if (int_info.bits <= 64) { if (int_info.bits <= 64) {
// Only say yes if the operation is // Only say yes if the operation is
// commutative, i.e. we can swap both of the // commutative, i.e. we can swap both of the
@ -2931,10 +2933,10 @@ fn binOp(
const result = try self.binOp(base_tag, lhs, rhs, lhs_ty, rhs_ty, metadata); const result = try self.binOp(base_tag, lhs, rhs, lhs_ty, rhs_ty, metadata);
// Truncate if necessary // Truncate if necessary
switch (lhs_ty.zigTypeTag(mod)) { switch (lhs_ty.zigTypeTag(zcu)) {
.Vector => return self.fail("TODO binary operations on vectors", .{}), .Vector => return self.fail("TODO binary operations on vectors", .{}),
.Int => { .Int => {
const int_info = lhs_ty.intInfo(mod); const int_info = lhs_ty.intInfo(zcu);
if (int_info.bits <= 64) { if (int_info.bits <= 64) {
const result_reg = result.register; const result_reg = result.register;
try self.truncRegister(result_reg, result_reg, int_info.signedness, int_info.bits); try self.truncRegister(result_reg, result_reg, int_info.signedness, int_info.bits);
@ -2948,11 +2950,11 @@ fn binOp(
}, },
.div_trunc => { .div_trunc => {
switch (lhs_ty.zigTypeTag(mod)) { switch (lhs_ty.zigTypeTag(zcu)) {
.Vector => return self.fail("TODO binary operations on vectors", .{}), .Vector => return self.fail("TODO binary operations on vectors", .{}),
.Int => { .Int => {
assert(lhs_ty.eql(rhs_ty, mod)); assert(lhs_ty.eql(rhs_ty, zcu));
const int_info = lhs_ty.intInfo(mod); const int_info = lhs_ty.intInfo(zcu);
if (int_info.bits <= 64) { if (int_info.bits <= 64) {
const rhs_immediate_ok = switch (tag) { const rhs_immediate_ok = switch (tag) {
.div_trunc => rhs == .immediate and rhs.immediate <= std.math.maxInt(u12), .div_trunc => rhs == .immediate and rhs.immediate <= std.math.maxInt(u12),
@ -2981,14 +2983,14 @@ fn binOp(
}, },
.ptr_add => { .ptr_add => {
switch (lhs_ty.zigTypeTag(mod)) { switch (lhs_ty.zigTypeTag(zcu)) {
.Pointer => { .Pointer => {
const ptr_ty = lhs_ty; const ptr_ty = lhs_ty;
const elem_ty = switch (ptr_ty.ptrSize(mod)) { const elem_ty = switch (ptr_ty.ptrSize(zcu)) {
.One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type .One => ptr_ty.childType(zcu).childType(zcu), // ptr to array, so get array element type
else => ptr_ty.childType(mod), else => ptr_ty.childType(zcu),
}; };
const elem_size = elem_ty.abiSize(pt); const elem_size = elem_ty.abiSize(zcu);
if (elem_size == 1) { if (elem_size == 1) {
const base_tag: Mir.Inst.Tag = switch (tag) { const base_tag: Mir.Inst.Tag = switch (tag) {
@ -3013,7 +3015,7 @@ fn binOp(
.bool_and, .bool_and,
.bool_or, .bool_or,
=> { => {
switch (lhs_ty.zigTypeTag(mod)) { switch (lhs_ty.zigTypeTag(zcu)) {
.Bool => { .Bool => {
assert(lhs != .immediate); // should have been handled by Sema assert(lhs != .immediate); // should have been handled by Sema
assert(rhs != .immediate); // should have been handled by Sema assert(rhs != .immediate); // should have been handled by Sema
@ -3043,10 +3045,10 @@ fn binOp(
const result = try self.binOp(base_tag, lhs, rhs, lhs_ty, rhs_ty, metadata); const result = try self.binOp(base_tag, lhs, rhs, lhs_ty, rhs_ty, metadata);
// Truncate if necessary // Truncate if necessary
switch (lhs_ty.zigTypeTag(mod)) { switch (lhs_ty.zigTypeTag(zcu)) {
.Vector => return self.fail("TODO binary operations on vectors", .{}), .Vector => return self.fail("TODO binary operations on vectors", .{}),
.Int => { .Int => {
const int_info = lhs_ty.intInfo(mod); const int_info = lhs_ty.intInfo(zcu);
if (int_info.bits <= 64) { if (int_info.bits <= 64) {
// 32 and 64 bit operands doesn't need truncating // 32 and 64 bit operands doesn't need truncating
if (int_info.bits == 32 or int_info.bits == 64) return result; if (int_info.bits == 32 or int_info.bits == 64) return result;
@ -3065,10 +3067,10 @@ fn binOp(
.shl_exact, .shl_exact,
.shr_exact, .shr_exact,
=> { => {
switch (lhs_ty.zigTypeTag(mod)) { switch (lhs_ty.zigTypeTag(zcu)) {
.Vector => return self.fail("TODO binary operations on vectors", .{}), .Vector => return self.fail("TODO binary operations on vectors", .{}),
.Int => { .Int => {
const int_info = lhs_ty.intInfo(mod); const int_info = lhs_ty.intInfo(zcu);
if (int_info.bits <= 64) { if (int_info.bits <= 64) {
const rhs_immediate_ok = rhs == .immediate; const rhs_immediate_ok = rhs == .immediate;
@ -3388,8 +3390,8 @@ fn binOpRegister(
fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void { fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
const block_data = self.blocks.getPtr(block).?; const block_data = self.blocks.getPtr(block).?;
const pt = self.pt; const zcu = self.pt.zcu;
if (self.typeOf(operand).hasRuntimeBits(pt)) { if (self.typeOf(operand).hasRuntimeBits(zcu)) {
const operand_mcv = try self.resolveInst(operand); const operand_mcv = try self.resolveInst(operand);
const block_mcv = block_data.mcv; const block_mcv = block_data.mcv;
if (block_mcv == .none) { if (block_mcv == .none) {
@ -3509,17 +3511,17 @@ fn ensureProcessDeathCapacity(self: *Self, additional_count: usize) !void {
/// Given an error union, returns the payload /// Given an error union, returns the payload
fn errUnionPayload(self: *Self, error_union_mcv: MCValue, error_union_ty: Type) !MCValue { fn errUnionPayload(self: *Self, error_union_mcv: MCValue, error_union_ty: Type) !MCValue {
const pt = self.pt; const pt = self.pt;
const mod = pt.zcu; const zcu = pt.zcu;
const err_ty = error_union_ty.errorUnionSet(mod); const err_ty = error_union_ty.errorUnionSet(zcu);
const payload_ty = error_union_ty.errorUnionPayload(mod); const payload_ty = error_union_ty.errorUnionPayload(zcu);
if (err_ty.errorSetIsEmpty(mod)) { if (err_ty.errorSetIsEmpty(zcu)) {
return error_union_mcv; return error_union_mcv;
} }
if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
return MCValue.none; return MCValue.none;
} }
const payload_offset = @as(u32, @intCast(errUnionPayloadOffset(payload_ty, pt))); const payload_offset = @as(u32, @intCast(errUnionPayloadOffset(payload_ty, zcu)));
switch (error_union_mcv) { switch (error_union_mcv) {
.register => return self.fail("TODO errUnionPayload for registers", .{}), .register => return self.fail("TODO errUnionPayload for registers", .{}),
.stack_offset => |off| { .stack_offset => |off| {
@ -3731,6 +3733,7 @@ fn genLoadASI(self: *Self, value_reg: Register, addr_reg: Register, off_reg: Reg
fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void { fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void {
const pt = self.pt; const pt = self.pt;
const zcu = pt.zcu;
switch (mcv) { switch (mcv) {
.dead => unreachable, .dead => unreachable,
.unreach, .none => return, // Nothing to do. .unreach, .none => return, // Nothing to do.
@ -3929,21 +3932,21 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
// The value is in memory at a hard-coded address. // The value is in memory at a hard-coded address.
// If the type is a pointer, it means the pointer address is at this memory location. // If the type is a pointer, it means the pointer address is at this memory location.
try self.genSetReg(ty, reg, .{ .immediate = addr }); try self.genSetReg(ty, reg, .{ .immediate = addr });
try self.genLoad(reg, reg, i13, 0, ty.abiSize(pt)); try self.genLoad(reg, reg, i13, 0, ty.abiSize(zcu));
}, },
.stack_offset => |off| { .stack_offset => |off| {
const real_offset = realStackOffset(off); const real_offset = realStackOffset(off);
const simm13 = math.cast(i13, real_offset) orelse const simm13 = math.cast(i13, real_offset) orelse
return self.fail("TODO larger stack offsets: {}", .{real_offset}); return self.fail("TODO larger stack offsets: {}", .{real_offset});
try self.genLoad(reg, .sp, i13, simm13, ty.abiSize(pt)); try self.genLoad(reg, .sp, i13, simm13, ty.abiSize(zcu));
}, },
} }
} }
fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void { fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
const pt = self.pt; const pt = self.pt;
const mod = pt.zcu; const zcu = pt.zcu;
const abi_size = ty.abiSize(pt); const abi_size = ty.abiSize(zcu);
switch (mcv) { switch (mcv) {
.dead => unreachable, .dead => unreachable,
.unreach, .none => return, // Nothing to do. .unreach, .none => return, // Nothing to do.
@ -3951,7 +3954,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
if (!self.wantSafety()) if (!self.wantSafety())
return; // The already existing value will do just fine. return; // The already existing value will do just fine.
// TODO Upgrade this to a memset call when we have that available. // TODO Upgrade this to a memset call when we have that available.
switch (ty.abiSize(pt)) { switch (ty.abiSize(zcu)) {
1 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaa }), 1 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaa }),
2 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaa }), 2 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaa }),
4 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaa }), 4 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaa }),
@ -3977,11 +3980,11 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
const reg_lock = self.register_manager.lockReg(rwo.reg); const reg_lock = self.register_manager.lockReg(rwo.reg);
defer if (reg_lock) |locked_reg| self.register_manager.unlockReg(locked_reg); defer if (reg_lock) |locked_reg| self.register_manager.unlockReg(locked_reg);
const wrapped_ty = ty.structFieldType(0, mod); const wrapped_ty = ty.structFieldType(0, zcu);
try self.genSetStack(wrapped_ty, stack_offset, .{ .register = rwo.reg }); try self.genSetStack(wrapped_ty, stack_offset, .{ .register = rwo.reg });
const overflow_bit_ty = ty.structFieldType(1, mod); const overflow_bit_ty = ty.structFieldType(1, zcu);
const overflow_bit_offset = @as(u32, @intCast(ty.structFieldOffset(1, pt))); const overflow_bit_offset = @as(u32, @intCast(ty.structFieldOffset(1, zcu)));
const cond_reg = try self.register_manager.allocReg(null, gp); const cond_reg = try self.register_manager.allocReg(null, gp);
// TODO handle floating point CCRs // TODO handle floating point CCRs
@ -4154,14 +4157,14 @@ fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue {
fn isErr(self: *Self, ty: Type, operand: MCValue) !MCValue { fn isErr(self: *Self, ty: Type, operand: MCValue) !MCValue {
const pt = self.pt; const pt = self.pt;
const mod = pt.zcu; const zcu = pt.zcu;
const error_type = ty.errorUnionSet(mod); const error_type = ty.errorUnionSet(zcu);
const payload_type = ty.errorUnionPayload(mod); const payload_type = ty.errorUnionPayload(zcu);
if (!error_type.hasRuntimeBits(pt)) { if (!error_type.hasRuntimeBits(zcu)) {
return MCValue{ .immediate = 0 }; // always false return MCValue{ .immediate = 0 }; // always false
} else if (!payload_type.hasRuntimeBits(pt)) { } else if (!payload_type.hasRuntimeBits(zcu)) {
if (error_type.abiSize(pt) <= 8) { if (error_type.abiSize(zcu) <= 8) {
const reg_mcv: MCValue = switch (operand) { const reg_mcv: MCValue = switch (operand) {
.register => operand, .register => operand,
else => .{ .register = try self.copyToTmpRegister(error_type, operand) }, else => .{ .register = try self.copyToTmpRegister(error_type, operand) },
@ -4253,9 +4256,9 @@ fn jump(self: *Self, inst: Mir.Inst.Index) !void {
fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void { fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void {
const pt = self.pt; const pt = self.pt;
const mod = pt.zcu; const zcu = pt.zcu;
const elem_ty = ptr_ty.childType(mod); const elem_ty = ptr_ty.childType(zcu);
const elem_size = elem_ty.abiSize(pt); const elem_size = elem_ty.abiSize(zcu);
switch (ptr) { switch (ptr) {
.none => unreachable, .none => unreachable,
@ -4446,9 +4449,9 @@ fn realStackOffset(off: u32) u32 {
/// Caller must call `CallMCValues.deinit`. /// Caller must call `CallMCValues.deinit`.
fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) !CallMCValues { fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) !CallMCValues {
const pt = self.pt; const pt = self.pt;
const mod = pt.zcu; const zcu = pt.zcu;
const ip = &mod.intern_pool; const ip = &zcu.intern_pool;
const fn_info = mod.typeToFunc(fn_ty).?; const fn_info = zcu.typeToFunc(fn_ty).?;
const cc = fn_info.cc; const cc = fn_info.cc;
var result: CallMCValues = .{ var result: CallMCValues = .{
.args = try self.gpa.alloc(MCValue, fn_info.param_types.len), .args = try self.gpa.alloc(MCValue, fn_info.param_types.len),
@ -4459,7 +4462,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView)
}; };
errdefer self.gpa.free(result.args); errdefer self.gpa.free(result.args);
const ret_ty = fn_ty.fnReturnType(mod); const ret_ty = fn_ty.fnReturnType(zcu);
switch (cc) { switch (cc) {
.Naked => { .Naked => {
@ -4487,7 +4490,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView)
}; };
for (fn_info.param_types.get(ip), result.args) |ty, *result_arg| { for (fn_info.param_types.get(ip), result.args) |ty, *result_arg| {
const param_size = @as(u32, @intCast(Type.fromInterned(ty).abiSize(pt))); const param_size = @as(u32, @intCast(Type.fromInterned(ty).abiSize(zcu)));
if (param_size <= 8) { if (param_size <= 8) {
if (next_register < argument_registers.len) { if (next_register < argument_registers.len) {
result_arg.* = .{ .register = argument_registers[next_register] }; result_arg.* = .{ .register = argument_registers[next_register] };
@ -4514,12 +4517,12 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView)
result.stack_byte_count = next_stack_offset; result.stack_byte_count = next_stack_offset;
result.stack_align = .@"16"; result.stack_align = .@"16";
if (ret_ty.zigTypeTag(mod) == .NoReturn) { if (ret_ty.zigTypeTag(zcu) == .NoReturn) {
result.return_value = .{ .unreach = {} }; result.return_value = .{ .unreach = {} };
} else if (!ret_ty.hasRuntimeBits(pt)) { } else if (!ret_ty.hasRuntimeBits(zcu)) {
result.return_value = .{ .none = {} }; result.return_value = .{ .none = {} };
} else { } else {
const ret_ty_size: u32 = @intCast(ret_ty.abiSize(pt)); const ret_ty_size: u32 = @intCast(ret_ty.abiSize(zcu));
// The callee puts the return values in %i0-%i3, which becomes %o0-%o3 inside the caller. // The callee puts the return values in %i0-%i3, which becomes %o0-%o3 inside the caller.
if (ret_ty_size <= 8) { if (ret_ty_size <= 8) {
result.return_value = switch (role) { result.return_value = switch (role) {
@ -4542,7 +4545,7 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue {
const ty = self.typeOf(ref); const ty = self.typeOf(ref);
// If the type has no codegen bits, no need to store it. // If the type has no codegen bits, no need to store it.
if (!ty.hasRuntimeBitsIgnoreComptime(pt)) return .none; if (!ty.hasRuntimeBitsIgnoreComptime(pt.zcu)) return .none;
if (ref.toIndex()) |inst| { if (ref.toIndex()) |inst| {
return self.getResolvedInstValue(inst); return self.getResolvedInstValue(inst);
@ -4656,7 +4659,7 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void
fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) InnerError!void { fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) InnerError!void {
const pt = self.pt; const pt = self.pt;
const abi_size = value_ty.abiSize(pt); const abi_size = value_ty.abiSize(pt.zcu);
switch (ptr) { switch (ptr) {
.none => unreachable, .none => unreachable,
@ -4698,11 +4701,11 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue { fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue {
return if (self.liveness.isUnused(inst)) .dead else result: { return if (self.liveness.isUnused(inst)) .dead else result: {
const pt = self.pt; const pt = self.pt;
const mod = pt.zcu; const zcu = pt.zcu;
const mcv = try self.resolveInst(operand); const mcv = try self.resolveInst(operand);
const ptr_ty = self.typeOf(operand); const ptr_ty = self.typeOf(operand);
const struct_ty = ptr_ty.childType(mod); const struct_ty = ptr_ty.childType(zcu);
const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, pt))); const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, zcu)));
switch (mcv) { switch (mcv) {
.ptr_stack_offset => |off| { .ptr_stack_offset => |off| {
break :result MCValue{ .ptr_stack_offset = off - struct_field_offset }; break :result MCValue{ .ptr_stack_offset = off - struct_field_offset };


@ -22,16 +22,15 @@ const direct: [2]Class = .{ .direct, .none };
/// Classifies a given Zig type to determine how they must be passed /// Classifies a given Zig type to determine how they must be passed
/// or returned as value within a wasm function. /// or returned as value within a wasm function.
/// When all elements result in `.none`, no value must be passed in or returned. /// When all elements result in `.none`, no value must be passed in or returned.
pub fn classifyType(ty: Type, pt: Zcu.PerThread) [2]Class { pub fn classifyType(ty: Type, zcu: *Zcu) [2]Class {
const mod = pt.zcu; const ip = &zcu.intern_pool;
const ip = &mod.intern_pool; const target = zcu.getTarget();
const target = mod.getTarget(); if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) return none;
if (!ty.hasRuntimeBitsIgnoreComptime(pt)) return none; switch (ty.zigTypeTag(zcu)) {
switch (ty.zigTypeTag(mod)) {
.Struct => { .Struct => {
const struct_type = pt.zcu.typeToStruct(ty).?; const struct_type = zcu.typeToStruct(ty).?;
if (struct_type.layout == .@"packed") { if (struct_type.layout == .@"packed") {
if (ty.bitSize(pt) <= 64) return direct; if (ty.bitSize(zcu) <= 64) return direct;
return .{ .direct, .direct }; return .{ .direct, .direct };
} }
if (struct_type.field_types.len > 1) { if (struct_type.field_types.len > 1) {
@ -41,13 +40,13 @@ pub fn classifyType(ty: Type, pt: Zcu.PerThread) [2]Class {
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[0]); const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[0]);
const explicit_align = struct_type.fieldAlign(ip, 0); const explicit_align = struct_type.fieldAlign(ip, 0);
if (explicit_align != .none) { if (explicit_align != .none) {
if (explicit_align.compareStrict(.gt, field_ty.abiAlignment(pt))) if (explicit_align.compareStrict(.gt, field_ty.abiAlignment(zcu)))
return memory; return memory;
} }
return classifyType(field_ty, pt); return classifyType(field_ty, zcu);
}, },
.Int, .Enum, .ErrorSet => { .Int, .Enum, .ErrorSet => {
const int_bits = ty.intInfo(pt.zcu).bits; const int_bits = ty.intInfo(zcu).bits;
if (int_bits <= 64) return direct; if (int_bits <= 64) return direct;
if (int_bits <= 128) return .{ .direct, .direct }; if (int_bits <= 128) return .{ .direct, .direct };
return memory; return memory;
@ -62,24 +61,24 @@ pub fn classifyType(ty: Type, pt: Zcu.PerThread) [2]Class {
.Vector => return direct, .Vector => return direct,
.Array => return memory, .Array => return memory,
.Optional => { .Optional => {
assert(ty.isPtrLikeOptional(pt.zcu)); assert(ty.isPtrLikeOptional(zcu));
return direct; return direct;
}, },
.Pointer => { .Pointer => {
assert(!ty.isSlice(pt.zcu)); assert(!ty.isSlice(zcu));
return direct; return direct;
}, },
.Union => { .Union => {
const union_obj = pt.zcu.typeToUnion(ty).?; const union_obj = zcu.typeToUnion(ty).?;
if (union_obj.flagsUnordered(ip).layout == .@"packed") { if (union_obj.flagsUnordered(ip).layout == .@"packed") {
if (ty.bitSize(pt) <= 64) return direct; if (ty.bitSize(zcu) <= 64) return direct;
return .{ .direct, .direct }; return .{ .direct, .direct };
} }
const layout = ty.unionGetLayout(pt); const layout = ty.unionGetLayout(zcu);
assert(layout.tag_size == 0); assert(layout.tag_size == 0);
if (union_obj.field_types.len > 1) return memory; if (union_obj.field_types.len > 1) return memory;
const first_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[0]); const first_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[0]);
return classifyType(first_field_ty, pt); return classifyType(first_field_ty, zcu);
}, },
.ErrorUnion, .ErrorUnion,
.Frame, .Frame,
@ -101,29 +100,28 @@ pub fn classifyType(ty: Type, pt: Zcu.PerThread) [2]Class {
/// Returns the scalar type a given type can represent. /// Returns the scalar type a given type can represent.
/// Asserts given type can be represented as scalar, such as /// Asserts given type can be represented as scalar, such as
/// a struct with a single scalar field. /// a struct with a single scalar field.
pub fn scalarType(ty: Type, pt: Zcu.PerThread) Type { pub fn scalarType(ty: Type, zcu: *Zcu) Type {
const mod = pt.zcu; const ip = &zcu.intern_pool;
const ip = &mod.intern_pool; switch (ty.zigTypeTag(zcu)) {
switch (ty.zigTypeTag(mod)) {
.Struct => { .Struct => {
if (mod.typeToPackedStruct(ty)) |packed_struct| { if (zcu.typeToPackedStruct(ty)) |packed_struct| {
return scalarType(Type.fromInterned(packed_struct.backingIntTypeUnordered(ip)), pt); return scalarType(Type.fromInterned(packed_struct.backingIntTypeUnordered(ip)), zcu);
} else { } else {
assert(ty.structFieldCount(mod) == 1); assert(ty.structFieldCount(zcu) == 1);
return scalarType(ty.structFieldType(0, mod), pt); return scalarType(ty.structFieldType(0, zcu), zcu);
} }
}, },
.Union => { .Union => {
const union_obj = mod.typeToUnion(ty).?; const union_obj = zcu.typeToUnion(ty).?;
if (union_obj.flagsUnordered(ip).layout != .@"packed") { if (union_obj.flagsUnordered(ip).layout != .@"packed") {
const layout = pt.getUnionLayout(union_obj); const layout = Type.getUnionLayout(union_obj, zcu);
if (layout.payload_size == 0 and layout.tag_size != 0) { if (layout.payload_size == 0 and layout.tag_size != 0) {
return scalarType(ty.unionTagTypeSafety(mod).?, pt); return scalarType(ty.unionTagTypeSafety(zcu).?, zcu);
} }
assert(union_obj.field_types.len == 1); assert(union_obj.field_types.len == 1);
} }
const first_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[0]); const first_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[0]);
return scalarType(first_field_ty, pt); return scalarType(first_field_ty, zcu);
}, },
else => return ty, else => return ty,
} }
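scalarType above peels single-field aggregates down to their only field, which is sound because such a wrapper has exactly the layout of that field. A small illustration of the equivalence (the `Wrapper` type is hypothetical, not from this commit):

```zig
const std = @import("std");

const Wrapper = extern struct { inner: f32 };

test "single-field wrapper shares its field's layout" {
    try std.testing.expectEqual(@sizeOf(f32), @sizeOf(Wrapper));
    try std.testing.expectEqual(@alignOf(f32), @alignOf(Wrapper));
}
```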


@ -44,7 +44,7 @@ pub const Class = enum {
} }
}; };
pub fn classifyWindows(ty: Type, pt: Zcu.PerThread) Class { pub fn classifyWindows(ty: Type, zcu: *Zcu) Class {
// https://docs.microsoft.com/en-gb/cpp/build/x64-calling-convention?view=vs-2017 // https://docs.microsoft.com/en-gb/cpp/build/x64-calling-convention?view=vs-2017
// "There's a strict one-to-one correspondence between a function call's arguments // "There's a strict one-to-one correspondence between a function call's arguments
// and the registers used for those arguments. Any argument that doesn't fit in 8 // and the registers used for those arguments. Any argument that doesn't fit in 8
@ -53,7 +53,7 @@ pub fn classifyWindows(ty: Type, pt: Zcu.PerThread) Class {
// "All floating point operations are done using the 16 XMM registers." // "All floating point operations are done using the 16 XMM registers."
// "Structs and unions of size 8, 16, 32, or 64 bits, and __m64 types, are passed // "Structs and unions of size 8, 16, 32, or 64 bits, and __m64 types, are passed
// as if they were integers of the same size." // as if they were integers of the same size."
switch (ty.zigTypeTag(pt.zcu)) { switch (ty.zigTypeTag(zcu)) {
.Pointer, .Pointer,
.Int, .Int,
.Bool, .Bool,
@ -68,12 +68,12 @@ pub fn classifyWindows(ty: Type, pt: Zcu.PerThread) Class {
.ErrorUnion, .ErrorUnion,
.AnyFrame, .AnyFrame,
.Frame, .Frame,
=> switch (ty.abiSize(pt)) { => switch (ty.abiSize(zcu)) {
0 => unreachable, 0 => unreachable,
1, 2, 4, 8 => return .integer, 1, 2, 4, 8 => return .integer,
else => switch (ty.zigTypeTag(pt.zcu)) { else => switch (ty.zigTypeTag(zcu)) {
.Int => return .win_i128, .Int => return .win_i128,
.Struct, .Union => if (ty.containerLayout(pt.zcu) == .@"packed") { .Struct, .Union => if (ty.containerLayout(zcu) == .@"packed") {
return .win_i128; return .win_i128;
} else { } else {
return .memory; return .memory;
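classifyWindows above implements the rule quoted in the comments: only aggregates of exactly 1, 2, 4, or 8 bytes are passed as integers; larger or oddly sized ones go by reference (with the 128-bit and packed special cases). A standalone sketch of just the size rule (`windowsSizeClass` is illustrative, not part of this commit):

```zig
const std = @import("std");

// Per the Microsoft x64 calling convention, only sizes that exactly fit a
// register are passed by value; everything else is passed by reference.
fn windowsSizeClass(byte_size: u64) enum { integer, memory } {
    return switch (byte_size) {
        1, 2, 4, 8 => .integer,
        else => .memory,
    };
}

test "windows x64 size rule" {
    try std.testing.expect(windowsSizeClass(8) == .integer);
    try std.testing.expect(windowsSizeClass(3) == .memory);
    try std.testing.expect(windowsSizeClass(16) == .memory);
}
```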
@ -100,14 +100,14 @@ pub const Context = enum { ret, arg, field, other };
/// There are a maximum of 8 possible return slots. Returned values are in /// There are a maximum of 8 possible return slots. Returned values are in
/// the beginning of the array; unused slots are filled with .none. /// the beginning of the array; unused slots are filled with .none.
pub fn classifySystemV(ty: Type, pt: Zcu.PerThread, target: std.Target, ctx: Context) [8]Class { pub fn classifySystemV(ty: Type, zcu: *Zcu, target: std.Target, ctx: Context) [8]Class {
const memory_class = [_]Class{ const memory_class = [_]Class{
.memory, .none, .none, .none, .memory, .none, .none, .none,
.none, .none, .none, .none, .none, .none, .none, .none,
}; };
var result = [1]Class{.none} ** 8; var result = [1]Class{.none} ** 8;
switch (ty.zigTypeTag(pt.zcu)) { switch (ty.zigTypeTag(zcu)) {
.Pointer => switch (ty.ptrSize(pt.zcu)) { .Pointer => switch (ty.ptrSize(zcu)) {
.Slice => { .Slice => {
result[0] = .integer; result[0] = .integer;
result[1] = .integer; result[1] = .integer;
@ -119,7 +119,7 @@ pub fn classifySystemV(ty: Type, pt: Zcu.PerThread, target: std.Target, ctx: Con
}, },
}, },
.Int, .Enum, .ErrorSet => { .Int, .Enum, .ErrorSet => {
const bits = ty.intInfo(pt.zcu).bits; const bits = ty.intInfo(zcu).bits;
if (bits <= 64) { if (bits <= 64) {
result[0] = .integer; result[0] = .integer;
return result; return result;
@ -185,8 +185,8 @@ pub fn classifySystemV(ty: Type, pt: Zcu.PerThread, target: std.Target, ctx: Con
else => unreachable, else => unreachable,
}, },
.Vector => { .Vector => {
const elem_ty = ty.childType(pt.zcu); const elem_ty = ty.childType(zcu);
const bits = elem_ty.bitSize(pt) * ty.arrayLen(pt.zcu); const bits = elem_ty.bitSize(zcu) * ty.arrayLen(zcu);
if (elem_ty.toIntern() == .bool_type) { if (elem_ty.toIntern() == .bool_type) {
if (bits <= 32) return .{ if (bits <= 32) return .{
.integer, .none, .none, .none, .integer, .none, .none, .none,
@ -250,7 +250,7 @@ pub fn classifySystemV(ty: Type, pt: Zcu.PerThread, target: std.Target, ctx: Con
return memory_class; return memory_class;
}, },
.Optional => { .Optional => {
if (ty.isPtrLikeOptional(pt.zcu)) { if (ty.isPtrLikeOptional(zcu)) {
result[0] = .integer; result[0] = .integer;
return result; return result;
} }
@ -261,8 +261,8 @@ pub fn classifySystemV(ty: Type, pt: Zcu.PerThread, target: std.Target, ctx: Con
// it contains unaligned fields, it has class MEMORY" // it contains unaligned fields, it has class MEMORY"
// "If the size of the aggregate exceeds a single eightbyte, each is classified // "If the size of the aggregate exceeds a single eightbyte, each is classified
// separately.". // separately.".
const ty_size = ty.abiSize(pt); const ty_size = ty.abiSize(zcu);
switch (ty.containerLayout(pt.zcu)) { switch (ty.containerLayout(zcu)) {
.auto, .@"extern" => {}, .auto, .@"extern" => {},
.@"packed" => { .@"packed" => {
assert(ty_size <= 16); assert(ty_size <= 16);
@ -274,10 +274,10 @@ pub fn classifySystemV(ty: Type, pt: Zcu.PerThread, target: std.Target, ctx: Con
if (ty_size > 64) if (ty_size > 64)
return memory_class; return memory_class;
_ = if (pt.zcu.typeToStruct(ty)) |loaded_struct| _ = if (zcu.typeToStruct(ty)) |loaded_struct|
classifySystemVStruct(&result, 0, loaded_struct, pt, target) classifySystemVStruct(&result, 0, loaded_struct, zcu, target)
else if (pt.zcu.typeToUnion(ty)) |loaded_union| else if (zcu.typeToUnion(ty)) |loaded_union|
classifySystemVUnion(&result, 0, loaded_union, pt, target) classifySystemVUnion(&result, 0, loaded_union, zcu, target)
else else
unreachable; unreachable;
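A minimal sketch of the per-eightbyte bookkeeping the surrounding code relies on: a field starting at byte offset N is merged into slot N / 8, and an aggregate larger than 64 bytes cannot be classified in registers at all. The helper below is hypothetical and only illustrates the arithmetic.

const std = @import("std");

fn eightbyteSlot(byte_offset: u64, aggregate_size: u64) ?usize {
    if (aggregate_size > 64) return null; // would be classified as memory
    return @as(usize, @intCast(byte_offset / 8));
}

test "eightbyte slot mapping" {
    try std.testing.expectEqual(@as(?usize, 0), eightbyteSlot(4, 16));
    try std.testing.expectEqual(@as(?usize, 1), eightbyteSlot(8, 16));
    try std.testing.expectEqual(@as(?usize, null), eightbyteSlot(0, 128));
}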
@ -306,7 +306,7 @@ pub fn classifySystemV(ty: Type, pt: Zcu.PerThread, target: std.Target, ctx: Con
return result; return result;
}, },
.Array => { .Array => {
const ty_size = ty.abiSize(pt); const ty_size = ty.abiSize(zcu);
if (ty_size <= 8) { if (ty_size <= 8) {
result[0] = .integer; result[0] = .integer;
return result; return result;
@ -326,10 +326,10 @@ fn classifySystemVStruct(
result: *[8]Class, result: *[8]Class,
starting_byte_offset: u64, starting_byte_offset: u64,
loaded_struct: InternPool.LoadedStructType, loaded_struct: InternPool.LoadedStructType,
pt: Zcu.PerThread, zcu: *Zcu,
target: std.Target, target: std.Target,
) u64 { ) u64 {
const ip = &pt.zcu.intern_pool; const ip = &zcu.intern_pool;
var byte_offset = starting_byte_offset; var byte_offset = starting_byte_offset;
var field_it = loaded_struct.iterateRuntimeOrder(ip); var field_it = loaded_struct.iterateRuntimeOrder(ip);
while (field_it.next()) |field_index| { while (field_it.next()) |field_index| {
@ -338,29 +338,29 @@ fn classifySystemVStruct(
byte_offset = std.mem.alignForward( byte_offset = std.mem.alignForward(
u64, u64,
byte_offset, byte_offset,
field_align.toByteUnits() orelse field_ty.abiAlignment(pt).toByteUnits().?, field_align.toByteUnits() orelse field_ty.abiAlignment(zcu).toByteUnits().?,
); );
if (pt.zcu.typeToStruct(field_ty)) |field_loaded_struct| { if (zcu.typeToStruct(field_ty)) |field_loaded_struct| {
switch (field_loaded_struct.layout) { switch (field_loaded_struct.layout) {
.auto, .@"extern" => { .auto, .@"extern" => {
byte_offset = classifySystemVStruct(result, byte_offset, field_loaded_struct, pt, target); byte_offset = classifySystemVStruct(result, byte_offset, field_loaded_struct, zcu, target);
continue; continue;
}, },
.@"packed" => {}, .@"packed" => {},
} }
} else if (pt.zcu.typeToUnion(field_ty)) |field_loaded_union| { } else if (zcu.typeToUnion(field_ty)) |field_loaded_union| {
switch (field_loaded_union.flagsUnordered(ip).layout) { switch (field_loaded_union.flagsUnordered(ip).layout) {
.auto, .@"extern" => { .auto, .@"extern" => {
byte_offset = classifySystemVUnion(result, byte_offset, field_loaded_union, pt, target); byte_offset = classifySystemVUnion(result, byte_offset, field_loaded_union, zcu, target);
continue; continue;
}, },
.@"packed" => {}, .@"packed" => {},
} }
} }
const field_classes = std.mem.sliceTo(&classifySystemV(field_ty, pt, target, .field), .none); const field_classes = std.mem.sliceTo(&classifySystemV(field_ty, zcu, target, .field), .none);
for (result[@intCast(byte_offset / 8)..][0..field_classes.len], field_classes) |*result_class, field_class| for (result[@intCast(byte_offset / 8)..][0..field_classes.len], field_classes) |*result_class, field_class|
result_class.* = result_class.combineSystemV(field_class); result_class.* = result_class.combineSystemV(field_class);
byte_offset += field_ty.abiSize(pt); byte_offset += field_ty.abiSize(zcu);
} }
const final_byte_offset = starting_byte_offset + loaded_struct.sizeUnordered(ip); const final_byte_offset = starting_byte_offset + loaded_struct.sizeUnordered(ip);
std.debug.assert(final_byte_offset == std.mem.alignForward( std.debug.assert(final_byte_offset == std.mem.alignForward(
@ -375,30 +375,30 @@ fn classifySystemVUnion(
result: *[8]Class, result: *[8]Class,
starting_byte_offset: u64, starting_byte_offset: u64,
loaded_union: InternPool.LoadedUnionType, loaded_union: InternPool.LoadedUnionType,
pt: Zcu.PerThread, zcu: *Zcu,
target: std.Target, target: std.Target,
) u64 { ) u64 {
const ip = &pt.zcu.intern_pool; const ip = &zcu.intern_pool;
for (0..loaded_union.field_types.len) |field_index| { for (0..loaded_union.field_types.len) |field_index| {
const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]); const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]);
if (pt.zcu.typeToStruct(field_ty)) |field_loaded_struct| { if (zcu.typeToStruct(field_ty)) |field_loaded_struct| {
switch (field_loaded_struct.layout) { switch (field_loaded_struct.layout) {
.auto, .@"extern" => { .auto, .@"extern" => {
_ = classifySystemVStruct(result, starting_byte_offset, field_loaded_struct, pt, target); _ = classifySystemVStruct(result, starting_byte_offset, field_loaded_struct, zcu, target);
continue; continue;
}, },
.@"packed" => {}, .@"packed" => {},
} }
} else if (pt.zcu.typeToUnion(field_ty)) |field_loaded_union| { } else if (zcu.typeToUnion(field_ty)) |field_loaded_union| {
switch (field_loaded_union.flagsUnordered(ip).layout) { switch (field_loaded_union.flagsUnordered(ip).layout) {
.auto, .@"extern" => { .auto, .@"extern" => {
_ = classifySystemVUnion(result, starting_byte_offset, field_loaded_union, pt, target); _ = classifySystemVUnion(result, starting_byte_offset, field_loaded_union, zcu, target);
continue; continue;
}, },
.@"packed" => {}, .@"packed" => {},
} }
} }
const field_classes = std.mem.sliceTo(&classifySystemV(field_ty, pt, target, .field), .none); const field_classes = std.mem.sliceTo(&classifySystemV(field_ty, zcu, target, .field), .none);
for (result[@intCast(starting_byte_offset / 8)..][0..field_classes.len], field_classes) |*result_class, field_class| for (result[@intCast(starting_byte_offset / 8)..][0..field_classes.len], field_classes) |*result_class, field_class|
result_class.* = result_class.combineSystemV(field_class); result_class.* = result_class.combineSystemV(field_class);
} }
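classifySystemVStruct above advances byte_offset with std.mem.alignForward before each field, so padding bytes never contribute a class of their own. A quick illustration of what that call computes:

const std = @import("std");

test "alignForward rounds offsets up to the field alignment" {
    try std.testing.expectEqual(@as(u64, 16), std.mem.alignForward(u64, 13, 8));
    try std.testing.expectEqual(@as(u64, 8), std.mem.alignForward(u64, 8, 8));
    try std.testing.expectEqual(@as(u64, 0), std.mem.alignForward(u64, 0, 4));
}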
@ -198,17 +198,17 @@ pub fn generateSymbol(
const tracy = trace(@src()); const tracy = trace(@src());
defer tracy.end(); defer tracy.end();
const mod = pt.zcu; const zcu = pt.zcu;
const ip = &mod.intern_pool; const ip = &zcu.intern_pool;
const ty = val.typeOf(mod); const ty = val.typeOf(zcu);
const target = mod.getTarget(); const target = zcu.getTarget();
const endian = target.cpu.arch.endian(); const endian = target.cpu.arch.endian();
log.debug("generateSymbol: val = {}", .{val.fmtValue(pt)}); log.debug("generateSymbol: val = {}", .{val.fmtValue(pt)});
if (val.isUndefDeep(mod)) { if (val.isUndefDeep(zcu)) {
const abi_size = math.cast(usize, ty.abiSize(pt)) orelse return error.Overflow; const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
try code.appendNTimes(0xaa, abi_size); try code.appendNTimes(0xaa, abi_size);
return .ok; return .ok;
} }
@ -254,9 +254,9 @@ pub fn generateSymbol(
.empty_enum_value, .empty_enum_value,
=> unreachable, // non-runtime values => unreachable, // non-runtime values
.int => { .int => {
const abi_size = math.cast(usize, ty.abiSize(pt)) orelse return error.Overflow; const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
var space: Value.BigIntSpace = undefined; var space: Value.BigIntSpace = undefined;
const int_val = val.toBigInt(&space, pt); const int_val = val.toBigInt(&space, zcu);
int_val.writeTwosComplement(try code.addManyAsSlice(abi_size), endian); int_val.writeTwosComplement(try code.addManyAsSlice(abi_size), endian);
}, },
.err => |err| { .err => |err| {
@ -264,20 +264,20 @@ pub fn generateSymbol(
try code.writer().writeInt(u16, @intCast(int), endian); try code.writer().writeInt(u16, @intCast(int), endian);
}, },
.error_union => |error_union| { .error_union => |error_union| {
const payload_ty = ty.errorUnionPayload(mod); const payload_ty = ty.errorUnionPayload(zcu);
const err_val: u16 = switch (error_union.val) { const err_val: u16 = switch (error_union.val) {
.err_name => |err_name| @intCast(try pt.getErrorValue(err_name)), .err_name => |err_name| @intCast(try pt.getErrorValue(err_name)),
.payload => 0, .payload => 0,
}; };
if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
try code.writer().writeInt(u16, err_val, endian); try code.writer().writeInt(u16, err_val, endian);
return .ok; return .ok;
} }
const payload_align = payload_ty.abiAlignment(pt); const payload_align = payload_ty.abiAlignment(zcu);
const error_align = Type.anyerror.abiAlignment(pt); const error_align = Type.anyerror.abiAlignment(zcu);
const abi_align = ty.abiAlignment(pt); const abi_align = ty.abiAlignment(zcu);
// error value first when its type is larger than the error union's payload // error value first when its type is larger than the error union's payload
if (error_align.order(payload_align) == .gt) { if (error_align.order(payload_align) == .gt) {
@ -317,7 +317,7 @@ pub fn generateSymbol(
} }
}, },
.enum_tag => |enum_tag| { .enum_tag => |enum_tag| {
const int_tag_ty = ty.intTagType(mod); const int_tag_ty = ty.intTagType(zcu);
switch (try generateSymbol(bin_file, pt, src_loc, try pt.getCoerced(Value.fromInterned(enum_tag.int), int_tag_ty), code, debug_output, reloc_info)) { switch (try generateSymbol(bin_file, pt, src_loc, try pt.getCoerced(Value.fromInterned(enum_tag.int), int_tag_ty), code, debug_output, reloc_info)) {
.ok => {}, .ok => {},
.fail => |em| return .{ .fail = em }, .fail => |em| return .{ .fail = em },
@ -329,7 +329,7 @@ pub fn generateSymbol(
.f64 => |f64_val| writeFloat(f64, f64_val, target, endian, try code.addManyAsArray(8)), .f64 => |f64_val| writeFloat(f64, f64_val, target, endian, try code.addManyAsArray(8)),
.f80 => |f80_val| { .f80 => |f80_val| {
writeFloat(f80, f80_val, target, endian, try code.addManyAsArray(10)); writeFloat(f80, f80_val, target, endian, try code.addManyAsArray(10));
const abi_size = math.cast(usize, ty.abiSize(pt)) orelse return error.Overflow; const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
try code.appendNTimes(0, abi_size - 10); try code.appendNTimes(0, abi_size - 10);
}, },
.f128 => |f128_val| writeFloat(f128, f128_val, target, endian, try code.addManyAsArray(16)), .f128 => |f128_val| writeFloat(f128, f128_val, target, endian, try code.addManyAsArray(16)),
@ -349,11 +349,11 @@ pub fn generateSymbol(
} }
}, },
.opt => { .opt => {
const payload_type = ty.optionalChild(mod); const payload_type = ty.optionalChild(zcu);
const payload_val = val.optionalValue(mod); const payload_val = val.optionalValue(zcu);
const abi_size = math.cast(usize, ty.abiSize(pt)) orelse return error.Overflow; const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
if (ty.optionalReprIsPayload(mod)) { if (ty.optionalReprIsPayload(zcu)) {
if (payload_val) |value| { if (payload_val) |value| {
switch (try generateSymbol(bin_file, pt, src_loc, value, code, debug_output, reloc_info)) { switch (try generateSymbol(bin_file, pt, src_loc, value, code, debug_output, reloc_info)) {
.ok => {}, .ok => {},
@ -363,8 +363,8 @@ pub fn generateSymbol(
try code.appendNTimes(0, abi_size); try code.appendNTimes(0, abi_size);
} }
} else { } else {
const padding = abi_size - (math.cast(usize, payload_type.abiSize(pt)) orelse return error.Overflow) - 1; const padding = abi_size - (math.cast(usize, payload_type.abiSize(zcu)) orelse return error.Overflow) - 1;
if (payload_type.hasRuntimeBits(pt)) { if (payload_type.hasRuntimeBits(zcu)) {
const value = payload_val orelse Value.fromInterned(try pt.intern(.{ const value = payload_val orelse Value.fromInterned(try pt.intern(.{
.undef = payload_type.toIntern(), .undef = payload_type.toIntern(),
})); }));
@ -398,7 +398,7 @@ pub fn generateSymbol(
}, },
}, },
.vector_type => |vector_type| { .vector_type => |vector_type| {
const abi_size = math.cast(usize, ty.abiSize(pt)) orelse return error.Overflow; const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
if (vector_type.child == .bool_type) { if (vector_type.child == .bool_type) {
const bytes = try code.addManyAsSlice(abi_size); const bytes = try code.addManyAsSlice(abi_size);
@memset(bytes, 0xaa); @memset(bytes, 0xaa);
@ -458,7 +458,7 @@ pub fn generateSymbol(
} }
const padding = abi_size - const padding = abi_size -
(math.cast(usize, Type.fromInterned(vector_type.child).abiSize(pt) * vector_type.len) orelse (math.cast(usize, Type.fromInterned(vector_type.child).abiSize(zcu) * vector_type.len) orelse
return error.Overflow); return error.Overflow);
if (padding > 0) try code.appendNTimes(0, padding); if (padding > 0) try code.appendNTimes(0, padding);
} }
@ -471,7 +471,7 @@ pub fn generateSymbol(
0.., 0..,
) |field_ty, comptime_val, index| { ) |field_ty, comptime_val, index| {
if (comptime_val != .none) continue; if (comptime_val != .none) continue;
if (!Type.fromInterned(field_ty).hasRuntimeBits(pt)) continue; if (!Type.fromInterned(field_ty).hasRuntimeBits(zcu)) continue;
const field_val = switch (aggregate.storage) { const field_val = switch (aggregate.storage) {
.bytes => |bytes| try pt.intern(.{ .int = .{ .bytes => |bytes| try pt.intern(.{ .int = .{
@ -489,7 +489,7 @@ pub fn generateSymbol(
const unpadded_field_end = code.items.len - struct_begin; const unpadded_field_end = code.items.len - struct_begin;
// Pad struct members if required // Pad struct members if required
const padded_field_end = ty.structFieldOffset(index + 1, pt); const padded_field_end = ty.structFieldOffset(index + 1, zcu);
const padding = math.cast(usize, padded_field_end - unpadded_field_end) orelse const padding = math.cast(usize, padded_field_end - unpadded_field_end) orelse
return error.Overflow; return error.Overflow;
@ -502,7 +502,7 @@ pub fn generateSymbol(
const struct_type = ip.loadStructType(ty.toIntern()); const struct_type = ip.loadStructType(ty.toIntern());
switch (struct_type.layout) { switch (struct_type.layout) {
.@"packed" => { .@"packed" => {
const abi_size = math.cast(usize, ty.abiSize(pt)) orelse return error.Overflow; const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
const current_pos = code.items.len; const current_pos = code.items.len;
try code.appendNTimes(0, abi_size); try code.appendNTimes(0, abi_size);
var bits: u16 = 0; var bits: u16 = 0;
@ -519,8 +519,8 @@ pub fn generateSymbol(
// pointer may point to a decl which must be marked used // pointer may point to a decl which must be marked used
// but can also result in a relocation. Therefore we handle those separately. // but can also result in a relocation. Therefore we handle those separately.
if (Type.fromInterned(field_ty).zigTypeTag(mod) == .Pointer) { if (Type.fromInterned(field_ty).zigTypeTag(zcu) == .Pointer) {
const field_size = math.cast(usize, Type.fromInterned(field_ty).abiSize(pt)) orelse const field_size = math.cast(usize, Type.fromInterned(field_ty).abiSize(zcu)) orelse
return error.Overflow; return error.Overflow;
var tmp_list = try std.ArrayList(u8).initCapacity(code.allocator, field_size); var tmp_list = try std.ArrayList(u8).initCapacity(code.allocator, field_size);
defer tmp_list.deinit(); defer tmp_list.deinit();
@ -531,7 +531,7 @@ pub fn generateSymbol(
} else { } else {
Value.fromInterned(field_val).writeToPackedMemory(Type.fromInterned(field_ty), pt, code.items[current_pos..], bits) catch unreachable; Value.fromInterned(field_val).writeToPackedMemory(Type.fromInterned(field_ty), pt, code.items[current_pos..], bits) catch unreachable;
} }
bits += @intCast(Type.fromInterned(field_ty).bitSize(pt)); bits += @intCast(Type.fromInterned(field_ty).bitSize(zcu));
} }
}, },
.auto, .@"extern" => { .auto, .@"extern" => {
@ -542,7 +542,7 @@ pub fn generateSymbol(
var it = struct_type.iterateRuntimeOrder(ip); var it = struct_type.iterateRuntimeOrder(ip);
while (it.next()) |field_index| { while (it.next()) |field_index| {
const field_ty = field_types[field_index]; const field_ty = field_types[field_index];
if (!Type.fromInterned(field_ty).hasRuntimeBits(pt)) continue; if (!Type.fromInterned(field_ty).hasRuntimeBits(zcu)) continue;
const field_val = switch (ip.indexToKey(val.toIntern()).aggregate.storage) { const field_val = switch (ip.indexToKey(val.toIntern()).aggregate.storage) {
.bytes => |bytes| try pt.intern(.{ .int = .{ .bytes => |bytes| try pt.intern(.{ .int = .{
@ -580,7 +580,7 @@ pub fn generateSymbol(
else => unreachable, else => unreachable,
}, },
.un => |un| { .un => |un| {
const layout = ty.unionGetLayout(pt); const layout = ty.unionGetLayout(zcu);
if (layout.payload_size == 0) { if (layout.payload_size == 0) {
return generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.tag), code, debug_output, reloc_info); return generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.tag), code, debug_output, reloc_info);
@ -594,11 +594,11 @@ pub fn generateSymbol(
} }
} }
const union_obj = mod.typeToUnion(ty).?; const union_obj = zcu.typeToUnion(ty).?;
if (un.tag != .none) { if (un.tag != .none) {
const field_index = ty.unionTagFieldIndex(Value.fromInterned(un.tag), mod).?; const field_index = ty.unionTagFieldIndex(Value.fromInterned(un.tag), zcu).?;
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]); const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
if (!field_ty.hasRuntimeBits(pt)) { if (!field_ty.hasRuntimeBits(zcu)) {
try code.appendNTimes(0xaa, math.cast(usize, layout.payload_size) orelse return error.Overflow); try code.appendNTimes(0xaa, math.cast(usize, layout.payload_size) orelse return error.Overflow);
} else { } else {
switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.val), code, debug_output, reloc_info)) { switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.val), code, debug_output, reloc_info)) {
@ -606,7 +606,7 @@ pub fn generateSymbol(
.fail => |em| return Result{ .fail = em }, .fail => |em| return Result{ .fail = em },
} }
const padding = math.cast(usize, layout.payload_size - field_ty.abiSize(pt)) orelse return error.Overflow; const padding = math.cast(usize, layout.payload_size - field_ty.abiSize(zcu)) orelse return error.Overflow;
if (padding > 0) { if (padding > 0) {
try code.appendNTimes(0, padding); try code.appendNTimes(0, padding);
} }
@ -661,7 +661,7 @@ fn lowerPtr(
reloc_info, reloc_info,
offset + errUnionPayloadOffset( offset + errUnionPayloadOffset(
Value.fromInterned(eu_ptr).typeOf(zcu).childType(zcu).errorUnionPayload(zcu), Value.fromInterned(eu_ptr).typeOf(zcu).childType(zcu).errorUnionPayload(zcu),
pt, zcu,
), ),
), ),
.opt_payload => |opt_ptr| try lowerPtr( .opt_payload => |opt_ptr| try lowerPtr(
@ -687,7 +687,7 @@ fn lowerPtr(
}; };
}, },
.Struct, .Union => switch (base_ty.containerLayout(zcu)) { .Struct, .Union => switch (base_ty.containerLayout(zcu)) {
.auto => base_ty.structFieldOffset(@intCast(field.index), pt), .auto => base_ty.structFieldOffset(@intCast(field.index), zcu),
.@"extern", .@"packed" => unreachable, .@"extern", .@"packed" => unreachable,
}, },
else => unreachable, else => unreachable,
@ -713,15 +713,16 @@ fn lowerUavRef(
offset: u64, offset: u64,
) CodeGenError!Result { ) CodeGenError!Result {
_ = debug_output; _ = debug_output;
const ip = &pt.zcu.intern_pool; const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const target = lf.comp.root_mod.resolved_target.result; const target = lf.comp.root_mod.resolved_target.result;
const ptr_width_bytes = @divExact(target.ptrBitWidth(), 8); const ptr_width_bytes = @divExact(target.ptrBitWidth(), 8);
const uav_val = uav.val; const uav_val = uav.val;
const uav_ty = Type.fromInterned(ip.typeOf(uav_val)); const uav_ty = Type.fromInterned(ip.typeOf(uav_val));
log.debug("lowerUavRef: ty = {}", .{uav_ty.fmt(pt)}); log.debug("lowerUavRef: ty = {}", .{uav_ty.fmt(pt)});
const is_fn_body = uav_ty.zigTypeTag(pt.zcu) == .Fn; const is_fn_body = uav_ty.zigTypeTag(zcu) == .Fn;
if (!is_fn_body and !uav_ty.hasRuntimeBits(pt)) { if (!is_fn_body and !uav_ty.hasRuntimeBits(zcu)) {
try code.appendNTimes(0xaa, ptr_width_bytes); try code.appendNTimes(0xaa, ptr_width_bytes);
return Result.ok; return Result.ok;
} }
@ -768,7 +769,7 @@ fn lowerNavRef(
const ptr_width = target.ptrBitWidth(); const ptr_width = target.ptrBitWidth();
const nav_ty = Type.fromInterned(ip.getNav(nav_index).typeOf(ip)); const nav_ty = Type.fromInterned(ip.getNav(nav_index).typeOf(ip));
const is_fn_body = nav_ty.zigTypeTag(zcu) == .Fn; const is_fn_body = nav_ty.zigTypeTag(zcu) == .Fn;
if (!is_fn_body and !nav_ty.hasRuntimeBits(pt)) { if (!is_fn_body and !nav_ty.hasRuntimeBits(zcu)) {
try code.appendNTimes(0xaa, @divExact(ptr_width, 8)); try code.appendNTimes(0xaa, @divExact(ptr_width, 8));
return Result.ok; return Result.ok;
} }
@ -860,7 +861,7 @@ fn genNavRef(
const ty = val.typeOf(zcu); const ty = val.typeOf(zcu);
log.debug("genNavRef: val = {}", .{val.fmtValue(pt)}); log.debug("genNavRef: val = {}", .{val.fmtValue(pt)});
if (!ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) { if (!ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) {
const imm: u64 = switch (@divExact(target.ptrBitWidth(), 8)) { const imm: u64 = switch (@divExact(target.ptrBitWidth(), 8)) {
1 => 0xaa, 1 => 0xaa,
2 => 0xaaaa, 2 => 0xaaaa,
@ -994,8 +995,8 @@ pub fn genTypedValue(
const info = ty.intInfo(zcu); const info = ty.intInfo(zcu);
if (info.bits <= target.ptrBitWidth()) { if (info.bits <= target.ptrBitWidth()) {
const unsigned: u64 = switch (info.signedness) { const unsigned: u64 = switch (info.signedness) {
.signed => @bitCast(val.toSignedInt(pt)), .signed => @bitCast(val.toSignedInt(zcu)),
.unsigned => val.toUnsignedInt(pt), .unsigned => val.toUnsignedInt(zcu),
}; };
return .{ .mcv = .{ .immediate = unsigned } }; return .{ .mcv = .{ .immediate = unsigned } };
} }
@ -1012,7 +1013,7 @@ pub fn genTypedValue(
val.optionalValue(zcu) orelse return .{ .mcv = .{ .immediate = 0 } }, val.optionalValue(zcu) orelse return .{ .mcv = .{ .immediate = 0 } },
target, target,
); );
} else if (ty.abiSize(pt) == 1) { } else if (ty.abiSize(zcu) == 1) {
return .{ .mcv = .{ .immediate = @intFromBool(!val.isNull(zcu)) } }; return .{ .mcv = .{ .immediate = @intFromBool(!val.isNull(zcu)) } };
} }
}, },
@ -1034,7 +1035,7 @@ pub fn genTypedValue(
.ErrorUnion => { .ErrorUnion => {
const err_type = ty.errorUnionSet(zcu); const err_type = ty.errorUnionSet(zcu);
const payload_type = ty.errorUnionPayload(zcu); const payload_type = ty.errorUnionPayload(zcu);
if (!payload_type.hasRuntimeBitsIgnoreComptime(pt)) { if (!payload_type.hasRuntimeBitsIgnoreComptime(zcu)) {
// We use the error type directly as the type. // We use the error type directly as the type.
const err_int_ty = try pt.errorIntType(); const err_int_ty = try pt.errorIntType();
switch (ip.indexToKey(val.toIntern()).error_union.val) { switch (ip.indexToKey(val.toIntern()).error_union.val) {
@ -1074,23 +1075,23 @@ pub fn genTypedValue(
return lf.lowerUav(pt, val.toIntern(), .none, src_loc); return lf.lowerUav(pt, val.toIntern(), .none, src_loc);
} }
pub fn errUnionPayloadOffset(payload_ty: Type, pt: Zcu.PerThread) u64 { pub fn errUnionPayloadOffset(payload_ty: Type, zcu: *Zcu) u64 {
if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) return 0; if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) return 0;
const payload_align = payload_ty.abiAlignment(pt); const payload_align = payload_ty.abiAlignment(zcu);
const error_align = Type.anyerror.abiAlignment(pt); const error_align = Type.anyerror.abiAlignment(zcu);
if (payload_align.compare(.gte, error_align) or !payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { if (payload_align.compare(.gte, error_align) or !payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
return 0; return 0;
} else { } else {
return payload_align.forward(Type.anyerror.abiSize(pt)); return payload_align.forward(Type.anyerror.abiSize(zcu));
} }
} }
pub fn errUnionErrorOffset(payload_ty: Type, pt: Zcu.PerThread) u64 { pub fn errUnionErrorOffset(payload_ty: Type, zcu: *Zcu) u64 {
if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) return 0; if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) return 0;
const payload_align = payload_ty.abiAlignment(pt); const payload_align = payload_ty.abiAlignment(zcu);
const error_align = Type.anyerror.abiAlignment(pt); const error_align = Type.anyerror.abiAlignment(zcu);
if (payload_align.compare(.gte, error_align) and payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { if (payload_align.compare(.gte, error_align) and payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
return error_align.forward(payload_ty.abiSize(pt)); return error_align.forward(payload_ty.abiSize(zcu));
} else { } else {
return 0; return 0;
} }
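Taken together, these two helpers encode a single layout rule: whichever of the payload and the error code has the greater alignment sits at offset 0, and the other follows at an offset rounded up to its own alignment. A self-contained sketch with made-up sizes and alignments, not tied to any real target:

const std = @import("std");

test "error union layout sketch" {
    // Hypothetical numbers: payload like u64 (size 8, align 8), error code u16 (size 2, align 2).
    const payload_size: u64 = 8;
    const payload_align: u64 = 8;
    const error_size: u64 = 2;
    const error_align: u64 = 2;
    const payload_offset: u64 = if (payload_align >= error_align) 0 else std.mem.alignForward(u64, error_size, payload_align);
    const error_offset: u64 = if (payload_align >= error_align) std.mem.alignForward(u64, payload_size, error_align) else 0;
    try std.testing.expectEqual(@as(u64, 0), payload_offset);
    try std.testing.expectEqual(@as(u64, 8), error_offset);
}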
@ -334,7 +334,7 @@ pub const Function = struct {
const writer = f.object.codeHeaderWriter(); const writer = f.object.codeHeaderWriter();
const decl_c_value = try f.allocLocalValue(.{ const decl_c_value = try f.allocLocalValue(.{
.ctype = try f.ctypeFromType(ty, .complete), .ctype = try f.ctypeFromType(ty, .complete),
.alignas = CType.AlignAs.fromAbiAlignment(ty.abiAlignment(pt)), .alignas = CType.AlignAs.fromAbiAlignment(ty.abiAlignment(pt.zcu)),
}); });
const gpa = f.object.dg.gpa; const gpa = f.object.dg.gpa;
try f.allocs.put(gpa, decl_c_value.new_local, false); try f.allocs.put(gpa, decl_c_value.new_local, false);
@ -372,7 +372,7 @@ pub const Function = struct {
fn allocLocal(f: *Function, inst: ?Air.Inst.Index, ty: Type) !CValue { fn allocLocal(f: *Function, inst: ?Air.Inst.Index, ty: Type) !CValue {
return f.allocAlignedLocal(inst, .{ return f.allocAlignedLocal(inst, .{
.ctype = try f.ctypeFromType(ty, .complete), .ctype = try f.ctypeFromType(ty, .complete),
.alignas = CType.AlignAs.fromAbiAlignment(ty.abiAlignment(f.object.dg.pt)), .alignas = CType.AlignAs.fromAbiAlignment(ty.abiAlignment(f.object.dg.pt.zcu)),
}); });
} }
@ -648,7 +648,7 @@ pub const DeclGen = struct {
// Render an undefined pointer if we have a pointer to a zero-bit or comptime type. // Render an undefined pointer if we have a pointer to a zero-bit or comptime type.
const ptr_ty = Type.fromInterned(uav.orig_ty); const ptr_ty = Type.fromInterned(uav.orig_ty);
if (ptr_ty.isPtrAtRuntime(zcu) and !uav_ty.isFnOrHasRuntimeBits(pt)) { if (ptr_ty.isPtrAtRuntime(zcu) and !uav_ty.isFnOrHasRuntimeBits(zcu)) {
return dg.writeCValue(writer, .{ .undef = ptr_ty }); return dg.writeCValue(writer, .{ .undef = ptr_ty });
} }
@ -688,7 +688,7 @@ pub const DeclGen = struct {
// alignment. If there is already an entry, keep the greater alignment. // alignment. If there is already an entry, keep the greater alignment.
const explicit_alignment = ptr_type.flags.alignment; const explicit_alignment = ptr_type.flags.alignment;
if (explicit_alignment != .none) { if (explicit_alignment != .none) {
const abi_alignment = Type.fromInterned(ptr_type.child).abiAlignment(pt); const abi_alignment = Type.fromInterned(ptr_type.child).abiAlignment(zcu);
if (explicit_alignment.order(abi_alignment).compare(.gt)) { if (explicit_alignment.order(abi_alignment).compare(.gt)) {
const aligned_gop = try dg.aligned_uavs.getOrPut(dg.gpa, uav.val); const aligned_gop = try dg.aligned_uavs.getOrPut(dg.gpa, uav.val);
aligned_gop.value_ptr.* = if (aligned_gop.found_existing) aligned_gop.value_ptr.* = if (aligned_gop.found_existing)
@ -722,7 +722,7 @@ pub const DeclGen = struct {
// Render an undefined pointer if we have a pointer to a zero-bit or comptime type. // Render an undefined pointer if we have a pointer to a zero-bit or comptime type.
const nav_ty = Type.fromInterned(ip.getNav(owner_nav).typeOf(ip)); const nav_ty = Type.fromInterned(ip.getNav(owner_nav).typeOf(ip));
const ptr_ty = try pt.navPtrType(owner_nav); const ptr_ty = try pt.navPtrType(owner_nav);
if (!nav_ty.isFnOrHasRuntimeBits(pt)) { if (!nav_ty.isFnOrHasRuntimeBits(zcu)) {
return dg.writeCValue(writer, .{ .undef = ptr_ty }); return dg.writeCValue(writer, .{ .undef = ptr_ty });
} }
@ -805,7 +805,7 @@ pub const DeclGen = struct {
} }
}, },
.elem_ptr => |elem| if (!(try elem.parent.ptrType(pt)).childType(zcu).hasRuntimeBits(pt)) { .elem_ptr => |elem| if (!(try elem.parent.ptrType(pt)).childType(zcu).hasRuntimeBits(zcu)) {
// Element type is zero-bit, so lowers to `void`. The index is irrelevant; just cast the pointer. // Element type is zero-bit, so lowers to `void`. The index is irrelevant; just cast the pointer.
const ptr_ctype = try dg.ctypeFromType(elem.result_ptr_ty, .complete); const ptr_ctype = try dg.ctypeFromType(elem.result_ptr_ty, .complete);
try writer.writeByte('('); try writer.writeByte('(');
@ -923,7 +923,7 @@ pub const DeclGen = struct {
try writer.writeAll("(("); try writer.writeAll("((");
try dg.renderCType(writer, ctype); try dg.renderCType(writer, ctype);
try writer.print("){x})", .{try dg.fmtIntLiteral( try writer.print("){x})", .{try dg.fmtIntLiteral(
try pt.intValue(Type.usize, val.toUnsignedInt(pt)), try pt.intValue(Type.usize, val.toUnsignedInt(zcu)),
.Other, .Other,
)}); )});
}, },
@ -970,7 +970,7 @@ pub const DeclGen = struct {
.enum_tag => |enum_tag| try dg.renderValue(writer, Value.fromInterned(enum_tag.int), location), .enum_tag => |enum_tag| try dg.renderValue(writer, Value.fromInterned(enum_tag.int), location),
.float => { .float => {
const bits = ty.floatBits(target.*); const bits = ty.floatBits(target.*);
const f128_val = val.toFloat(f128, pt); const f128_val = val.toFloat(f128, zcu);
// All unsigned ints matching float types are pre-allocated. // All unsigned ints matching float types are pre-allocated.
const repr_ty = pt.intType(.unsigned, bits) catch unreachable; const repr_ty = pt.intType(.unsigned, bits) catch unreachable;
@ -984,10 +984,10 @@ pub const DeclGen = struct {
}; };
switch (bits) { switch (bits) {
16 => repr_val_big.set(@as(u16, @bitCast(val.toFloat(f16, pt)))), 16 => repr_val_big.set(@as(u16, @bitCast(val.toFloat(f16, zcu)))),
32 => repr_val_big.set(@as(u32, @bitCast(val.toFloat(f32, pt)))), 32 => repr_val_big.set(@as(u32, @bitCast(val.toFloat(f32, zcu)))),
64 => repr_val_big.set(@as(u64, @bitCast(val.toFloat(f64, pt)))), 64 => repr_val_big.set(@as(u64, @bitCast(val.toFloat(f64, zcu)))),
80 => repr_val_big.set(@as(u80, @bitCast(val.toFloat(f80, pt)))), 80 => repr_val_big.set(@as(u80, @bitCast(val.toFloat(f80, zcu)))),
128 => repr_val_big.set(@as(u128, @bitCast(f128_val))), 128 => repr_val_big.set(@as(u128, @bitCast(f128_val))),
else => unreachable, else => unreachable,
} }
@ -998,10 +998,10 @@ pub const DeclGen = struct {
try dg.renderTypeForBuiltinFnName(writer, ty); try dg.renderTypeForBuiltinFnName(writer, ty);
try writer.writeByte('('); try writer.writeByte('(');
switch (bits) { switch (bits) {
16 => try writer.print("{x}", .{val.toFloat(f16, pt)}), 16 => try writer.print("{x}", .{val.toFloat(f16, zcu)}),
32 => try writer.print("{x}", .{val.toFloat(f32, pt)}), 32 => try writer.print("{x}", .{val.toFloat(f32, zcu)}),
64 => try writer.print("{x}", .{val.toFloat(f64, pt)}), 64 => try writer.print("{x}", .{val.toFloat(f64, zcu)}),
80 => try writer.print("{x}", .{val.toFloat(f80, pt)}), 80 => try writer.print("{x}", .{val.toFloat(f80, zcu)}),
128 => try writer.print("{x}", .{f128_val}), 128 => try writer.print("{x}", .{f128_val}),
else => unreachable, else => unreachable,
} }
@ -1041,10 +1041,10 @@ pub const DeclGen = struct {
if (std.math.isNan(f128_val)) switch (bits) { if (std.math.isNan(f128_val)) switch (bits) {
// We only actually need to pass the significand, but it will get // We only actually need to pass the significand, but it will get
// properly masked anyway, so just pass the whole value. // properly masked anyway, so just pass the whole value.
16 => try writer.print("\"0x{x}\"", .{@as(u16, @bitCast(val.toFloat(f16, pt)))}), 16 => try writer.print("\"0x{x}\"", .{@as(u16, @bitCast(val.toFloat(f16, zcu)))}),
32 => try writer.print("\"0x{x}\"", .{@as(u32, @bitCast(val.toFloat(f32, pt)))}), 32 => try writer.print("\"0x{x}\"", .{@as(u32, @bitCast(val.toFloat(f32, zcu)))}),
64 => try writer.print("\"0x{x}\"", .{@as(u64, @bitCast(val.toFloat(f64, pt)))}), 64 => try writer.print("\"0x{x}\"", .{@as(u64, @bitCast(val.toFloat(f64, zcu)))}),
80 => try writer.print("\"0x{x}\"", .{@as(u80, @bitCast(val.toFloat(f80, pt)))}), 80 => try writer.print("\"0x{x}\"", .{@as(u80, @bitCast(val.toFloat(f80, zcu)))}),
128 => try writer.print("\"0x{x}\"", .{@as(u128, @bitCast(f128_val))}), 128 => try writer.print("\"0x{x}\"", .{@as(u128, @bitCast(f128_val))}),
else => unreachable, else => unreachable,
}; };
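The comment above leans on the fact that bit-casting a NaN preserves its whole bit pattern, exponent and significand alike. A quick self-contained check of that property, independent of this commit:

const std = @import("std");

test "a NaN's full bit pattern survives a bitcast" {
    const bits: u32 = @bitCast(std.math.nan(f32));
    // All exponent bits set marks a NaN; the significand bits carry the payload.
    try std.testing.expect((bits & 0x7f80_0000) == 0x7f80_0000);
    try std.testing.expect((bits & 0x007f_ffff) != 0);
}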
@ -1167,11 +1167,11 @@ pub const DeclGen = struct {
const elem_val_u8: u8 = if (elem_val.isUndef(zcu)) const elem_val_u8: u8 = if (elem_val.isUndef(zcu))
undefPattern(u8) undefPattern(u8)
else else
@intCast(elem_val.toUnsignedInt(pt)); @intCast(elem_val.toUnsignedInt(zcu));
try literal.writeChar(elem_val_u8); try literal.writeChar(elem_val_u8);
} }
if (ai.sentinel) |s| { if (ai.sentinel) |s| {
const s_u8: u8 = @intCast(s.toUnsignedInt(pt)); const s_u8: u8 = @intCast(s.toUnsignedInt(zcu));
if (s_u8 != 0) try literal.writeChar(s_u8); if (s_u8 != 0) try literal.writeChar(s_u8);
} }
try literal.end(); try literal.end();
@ -1203,7 +1203,7 @@ pub const DeclGen = struct {
const comptime_val = tuple.values.get(ip)[field_index]; const comptime_val = tuple.values.get(ip)[field_index];
if (comptime_val != .none) continue; if (comptime_val != .none) continue;
const field_ty = Type.fromInterned(tuple.types.get(ip)[field_index]); const field_ty = Type.fromInterned(tuple.types.get(ip)[field_index]);
if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
if (!empty) try writer.writeByte(','); if (!empty) try writer.writeByte(',');
@ -1238,7 +1238,7 @@ pub const DeclGen = struct {
var need_comma = false; var need_comma = false;
while (field_it.next()) |field_index| { while (field_it.next()) |field_index| {
const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]); const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
if (need_comma) try writer.writeByte(','); if (need_comma) try writer.writeByte(',');
need_comma = true; need_comma = true;
@ -1265,7 +1265,7 @@ pub const DeclGen = struct {
for (0..loaded_struct.field_types.len) |field_index| { for (0..loaded_struct.field_types.len) |field_index| {
const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]); const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
eff_num_fields += 1; eff_num_fields += 1;
} }
@ -1273,7 +1273,7 @@ pub const DeclGen = struct {
try writer.writeByte('('); try writer.writeByte('(');
try dg.renderUndefValue(writer, ty, location); try dg.renderUndefValue(writer, ty, location);
try writer.writeByte(')'); try writer.writeByte(')');
} else if (ty.bitSize(pt) > 64) { } else if (ty.bitSize(zcu) > 64) {
// zig_or_u128(zig_or_u128(zig_shl_u128(a, a_off), zig_shl_u128(b, b_off)), zig_shl_u128(c, c_off)) // zig_or_u128(zig_or_u128(zig_shl_u128(a, a_off), zig_shl_u128(b, b_off)), zig_shl_u128(c, c_off))
var num_or = eff_num_fields - 1; var num_or = eff_num_fields - 1;
while (num_or > 0) : (num_or -= 1) { while (num_or > 0) : (num_or -= 1) {
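The zig_or/zig_shl composition sketched in the comment above is ordinary shift-and-or packing. The same computation in plain Zig, with made-up field values and bit offsets:

const std = @import("std");

test "composing a packed constant with shifts and ors" {
    const a: u32 = 0x12; // field at bit offset 0, width 8
    const b: u32 = 0x3; // field at bit offset 8, width 2
    const c: u32 = 0x1; // field at bit offset 10, width 1
    const packed_val = a | (b << 8) | (c << 10);
    try std.testing.expectEqual(@as(u32, 0x712), packed_val);
}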
@ -1286,7 +1286,7 @@ pub const DeclGen = struct {
var needs_closing_paren = false; var needs_closing_paren = false;
for (0..loaded_struct.field_types.len) |field_index| { for (0..loaded_struct.field_types.len) |field_index| {
const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]); const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
const field_val = switch (ip.indexToKey(val.toIntern()).aggregate.storage) { const field_val = switch (ip.indexToKey(val.toIntern()).aggregate.storage) {
.bytes => |bytes| try pt.intern(.{ .int = .{ .bytes => |bytes| try pt.intern(.{ .int = .{
@ -1312,7 +1312,7 @@ pub const DeclGen = struct {
if (needs_closing_paren) try writer.writeByte(')'); if (needs_closing_paren) try writer.writeByte(')');
if (eff_index != eff_num_fields - 1) try writer.writeAll(", "); if (eff_index != eff_num_fields - 1) try writer.writeAll(", ");
bit_offset += field_ty.bitSize(pt); bit_offset += field_ty.bitSize(zcu);
needs_closing_paren = true; needs_closing_paren = true;
eff_index += 1; eff_index += 1;
} }
@ -1322,7 +1322,7 @@ pub const DeclGen = struct {
var empty = true; var empty = true;
for (0..loaded_struct.field_types.len) |field_index| { for (0..loaded_struct.field_types.len) |field_index| {
const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]); const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
if (!empty) try writer.writeAll(" | "); if (!empty) try writer.writeAll(" | ");
try writer.writeByte('('); try writer.writeByte('(');
@ -1346,7 +1346,7 @@ pub const DeclGen = struct {
try dg.renderValue(writer, Value.fromInterned(field_val), .Other); try dg.renderValue(writer, Value.fromInterned(field_val), .Other);
} }
bit_offset += field_ty.bitSize(pt); bit_offset += field_ty.bitSize(zcu);
empty = false; empty = false;
} }
try writer.writeByte(')'); try writer.writeByte(')');
@ -1396,7 +1396,7 @@ pub const DeclGen = struct {
const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]); const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]);
const field_name = loaded_union.loadTagType(ip).names.get(ip)[field_index]; const field_name = loaded_union.loadTagType(ip).names.get(ip)[field_index];
if (loaded_union.flagsUnordered(ip).layout == .@"packed") { if (loaded_union.flagsUnordered(ip).layout == .@"packed") {
if (field_ty.hasRuntimeBits(pt)) { if (field_ty.hasRuntimeBits(zcu)) {
if (field_ty.isPtrAtRuntime(zcu)) { if (field_ty.isPtrAtRuntime(zcu)) {
try writer.writeByte('('); try writer.writeByte('(');
try dg.renderCType(writer, ctype); try dg.renderCType(writer, ctype);
@ -1427,7 +1427,7 @@ pub const DeclGen = struct {
), ),
.payload => { .payload => {
try writer.writeByte('{'); try writer.writeByte('{');
if (field_ty.hasRuntimeBits(pt)) { if (field_ty.hasRuntimeBits(zcu)) {
try writer.print(" .{ } = ", .{fmtIdent(field_name.toSlice(ip))}); try writer.print(" .{ } = ", .{fmtIdent(field_name.toSlice(ip))});
try dg.renderValue( try dg.renderValue(
writer, writer,
@ -1439,7 +1439,7 @@ pub const DeclGen = struct {
const inner_field_ty = Type.fromInterned( const inner_field_ty = Type.fromInterned(
loaded_union.field_types.get(ip)[inner_field_index], loaded_union.field_types.get(ip)[inner_field_index],
); );
if (!inner_field_ty.hasRuntimeBits(pt)) continue; if (!inner_field_ty.hasRuntimeBits(zcu)) continue;
try dg.renderUndefValue(writer, inner_field_ty, initializer_type); try dg.renderUndefValue(writer, inner_field_ty, initializer_type);
break; break;
} }
@ -1588,7 +1588,7 @@ pub const DeclGen = struct {
var need_comma = false; var need_comma = false;
while (field_it.next()) |field_index| { while (field_it.next()) |field_index| {
const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]); const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
if (need_comma) try writer.writeByte(','); if (need_comma) try writer.writeByte(',');
need_comma = true; need_comma = true;
@ -1613,7 +1613,7 @@ pub const DeclGen = struct {
for (0..anon_struct_info.types.len) |field_index| { for (0..anon_struct_info.types.len) |field_index| {
if (anon_struct_info.values.get(ip)[field_index] != .none) continue; if (anon_struct_info.values.get(ip)[field_index] != .none) continue;
const field_ty = Type.fromInterned(anon_struct_info.types.get(ip)[field_index]); const field_ty = Type.fromInterned(anon_struct_info.types.get(ip)[field_index]);
if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
if (need_comma) try writer.writeByte(','); if (need_comma) try writer.writeByte(',');
need_comma = true; need_comma = true;
@ -1651,7 +1651,7 @@ pub const DeclGen = struct {
const inner_field_ty = Type.fromInterned( const inner_field_ty = Type.fromInterned(
loaded_union.field_types.get(ip)[inner_field_index], loaded_union.field_types.get(ip)[inner_field_index],
); );
if (!inner_field_ty.hasRuntimeBits(pt)) continue; if (!inner_field_ty.hasRuntimeBits(pt.zcu)) continue;
try dg.renderUndefValue( try dg.renderUndefValue(
writer, writer,
inner_field_ty, inner_field_ty,
@ -1902,7 +1902,8 @@ pub const DeclGen = struct {
}; };
fn intCastIsNoop(dg: *DeclGen, dest_ty: Type, src_ty: Type) bool { fn intCastIsNoop(dg: *DeclGen, dest_ty: Type, src_ty: Type) bool {
const pt = dg.pt; const pt = dg.pt;
const dest_bits = dest_ty.bitSize(pt); const zcu = pt.zcu;
const dest_bits = dest_ty.bitSize(zcu);
const dest_int_info = dest_ty.intInfo(pt.zcu); const dest_int_info = dest_ty.intInfo(pt.zcu);
const src_is_ptr = src_ty.isPtrAtRuntime(pt.zcu); const src_is_ptr = src_ty.isPtrAtRuntime(pt.zcu);
@ -1911,7 +1912,7 @@ pub const DeclGen = struct {
.signed => Type.isize, .signed => Type.isize,
} else src_ty; } else src_ty;
const src_bits = src_eff_ty.bitSize(pt); const src_bits = src_eff_ty.bitSize(zcu);
const src_int_info = if (src_eff_ty.isAbiInt(pt.zcu)) src_eff_ty.intInfo(pt.zcu) else null; const src_int_info = if (src_eff_ty.isAbiInt(pt.zcu)) src_eff_ty.intInfo(pt.zcu) else null;
if (dest_bits <= 64 and src_bits <= 64) { if (dest_bits <= 64 and src_bits <= 64) {
const needs_cast = src_int_info == null or const needs_cast = src_int_info == null or
@ -1943,7 +1944,7 @@ pub const DeclGen = struct {
) !void { ) !void {
const pt = dg.pt; const pt = dg.pt;
const zcu = pt.zcu; const zcu = pt.zcu;
const dest_bits = dest_ty.bitSize(pt); const dest_bits = dest_ty.bitSize(zcu);
const dest_int_info = dest_ty.intInfo(zcu); const dest_int_info = dest_ty.intInfo(zcu);
const src_is_ptr = src_ty.isPtrAtRuntime(zcu); const src_is_ptr = src_ty.isPtrAtRuntime(zcu);
@ -1952,7 +1953,7 @@ pub const DeclGen = struct {
.signed => Type.isize, .signed => Type.isize,
} else src_ty; } else src_ty;
const src_bits = src_eff_ty.bitSize(pt); const src_bits = src_eff_ty.bitSize(zcu);
const src_int_info = if (src_eff_ty.isAbiInt(zcu)) src_eff_ty.intInfo(zcu) else null; const src_int_info = if (src_eff_ty.isAbiInt(zcu)) src_eff_ty.intInfo(zcu) else null;
if (dest_bits <= 64 and src_bits <= 64) { if (dest_bits <= 64 and src_bits <= 64) {
const needs_cast = src_int_info == null or const needs_cast = src_int_info == null or
@ -2033,7 +2034,7 @@ pub const DeclGen = struct {
qualifiers, qualifiers,
CType.AlignAs.fromAlignment(.{ CType.AlignAs.fromAlignment(.{
.@"align" = alignment, .@"align" = alignment,
.abi = ty.abiAlignment(dg.pt), .abi = ty.abiAlignment(dg.pt.zcu),
}), }),
); );
} }
@ -2239,9 +2240,10 @@ pub const DeclGen = struct {
} }
const pt = dg.pt; const pt = dg.pt;
const int_info = if (ty.isAbiInt(pt.zcu)) ty.intInfo(pt.zcu) else std.builtin.Type.Int{ const zcu = pt.zcu;
const int_info = if (ty.isAbiInt(zcu)) ty.intInfo(zcu) else std.builtin.Type.Int{
.signedness = .unsigned, .signedness = .unsigned,
.bits = @as(u16, @intCast(ty.bitSize(pt))), .bits = @as(u16, @intCast(ty.bitSize(zcu))),
}; };
if (is_big) try writer.print(", {}", .{int_info.signedness == .signed}); if (is_big) try writer.print(", {}", .{int_info.signedness == .signed});
@ -2891,7 +2893,7 @@ pub fn genDecl(o: *Object) !void {
const nav = ip.getNav(o.dg.pass.nav); const nav = ip.getNav(o.dg.pass.nav);
const nav_ty = Type.fromInterned(nav.typeOf(ip)); const nav_ty = Type.fromInterned(nav.typeOf(ip));
if (!nav_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) return; if (!nav_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) return;
switch (ip.indexToKey(nav.status.resolved.val)) { switch (ip.indexToKey(nav.status.resolved.val)) {
.@"extern" => |@"extern"| { .@"extern" => |@"extern"| {
if (!ip.isFunctionType(nav_ty.toIntern())) return o.dg.renderFwdDecl(o.dg.pass.nav, .{ if (!ip.isFunctionType(nav_ty.toIntern())) return o.dg.renderFwdDecl(o.dg.pass.nav, .{
@ -3420,10 +3422,10 @@ fn airSliceField(f: *Function, inst: Air.Inst.Index, is_ptr: bool, field_name: [
} }
fn airPtrElemVal(f: *Function, inst: Air.Inst.Index) !CValue { fn airPtrElemVal(f: *Function, inst: Air.Inst.Index) !CValue {
const pt = f.object.dg.pt; const zcu = f.object.dg.pt.zcu;
const inst_ty = f.typeOfIndex(inst); const inst_ty = f.typeOfIndex(inst);
const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
if (!inst_ty.hasRuntimeBitsIgnoreComptime(pt)) { if (!inst_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
return .none; return .none;
} }
@ -3453,7 +3455,7 @@ fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue {
const inst_ty = f.typeOfIndex(inst); const inst_ty = f.typeOfIndex(inst);
const ptr_ty = f.typeOf(bin_op.lhs); const ptr_ty = f.typeOf(bin_op.lhs);
const elem_has_bits = ptr_ty.elemType2(zcu).hasRuntimeBitsIgnoreComptime(pt); const elem_has_bits = ptr_ty.elemType2(zcu).hasRuntimeBitsIgnoreComptime(zcu);
const ptr = try f.resolveInst(bin_op.lhs); const ptr = try f.resolveInst(bin_op.lhs);
const index = try f.resolveInst(bin_op.rhs); const index = try f.resolveInst(bin_op.rhs);
@ -3482,10 +3484,10 @@ fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue {
} }
fn airSliceElemVal(f: *Function, inst: Air.Inst.Index) !CValue { fn airSliceElemVal(f: *Function, inst: Air.Inst.Index) !CValue {
const pt = f.object.dg.pt; const zcu = f.object.dg.pt.zcu;
const inst_ty = f.typeOfIndex(inst); const inst_ty = f.typeOfIndex(inst);
const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
if (!inst_ty.hasRuntimeBitsIgnoreComptime(pt)) { if (!inst_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
return .none; return .none;
} }
@ -3516,7 +3518,7 @@ fn airSliceElemPtr(f: *Function, inst: Air.Inst.Index) !CValue {
const inst_ty = f.typeOfIndex(inst); const inst_ty = f.typeOfIndex(inst);
const slice_ty = f.typeOf(bin_op.lhs); const slice_ty = f.typeOf(bin_op.lhs);
const elem_ty = slice_ty.elemType2(zcu); const elem_ty = slice_ty.elemType2(zcu);
const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(pt); const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(zcu);
const slice = try f.resolveInst(bin_op.lhs); const slice = try f.resolveInst(bin_op.lhs);
const index = try f.resolveInst(bin_op.rhs); const index = try f.resolveInst(bin_op.rhs);
@ -3539,10 +3541,10 @@ fn airSliceElemPtr(f: *Function, inst: Air.Inst.Index) !CValue {
} }
fn airArrayElemVal(f: *Function, inst: Air.Inst.Index) !CValue { fn airArrayElemVal(f: *Function, inst: Air.Inst.Index) !CValue {
const pt = f.object.dg.pt; const zcu = f.object.dg.pt.zcu;
const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const inst_ty = f.typeOfIndex(inst); const inst_ty = f.typeOfIndex(inst);
if (!inst_ty.hasRuntimeBitsIgnoreComptime(pt)) { if (!inst_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
return .none; return .none;
} }
@ -3569,13 +3571,13 @@ fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue {
const zcu = pt.zcu; const zcu = pt.zcu;
const inst_ty = f.typeOfIndex(inst); const inst_ty = f.typeOfIndex(inst);
const elem_ty = inst_ty.childType(zcu); const elem_ty = inst_ty.childType(zcu);
if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) return .{ .undef = inst_ty }; if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) return .{ .undef = inst_ty };
const local = try f.allocLocalValue(.{ const local = try f.allocLocalValue(.{
.ctype = try f.ctypeFromType(elem_ty, .complete), .ctype = try f.ctypeFromType(elem_ty, .complete),
.alignas = CType.AlignAs.fromAlignment(.{ .alignas = CType.AlignAs.fromAlignment(.{
.@"align" = inst_ty.ptrInfo(zcu).flags.alignment, .@"align" = inst_ty.ptrInfo(zcu).flags.alignment,
.abi = elem_ty.abiAlignment(pt), .abi = elem_ty.abiAlignment(zcu),
}), }),
}); });
log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.new_local }); log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.new_local });
@ -3588,13 +3590,13 @@ fn airRetPtr(f: *Function, inst: Air.Inst.Index) !CValue {
const zcu = pt.zcu; const zcu = pt.zcu;
const inst_ty = f.typeOfIndex(inst); const inst_ty = f.typeOfIndex(inst);
const elem_ty = inst_ty.childType(zcu); const elem_ty = inst_ty.childType(zcu);
if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) return .{ .undef = inst_ty }; if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) return .{ .undef = inst_ty };
const local = try f.allocLocalValue(.{ const local = try f.allocLocalValue(.{
.ctype = try f.ctypeFromType(elem_ty, .complete), .ctype = try f.ctypeFromType(elem_ty, .complete),
.alignas = CType.AlignAs.fromAlignment(.{ .alignas = CType.AlignAs.fromAlignment(.{
.@"align" = inst_ty.ptrInfo(zcu).flags.alignment, .@"align" = inst_ty.ptrInfo(zcu).flags.alignment,
.abi = elem_ty.abiAlignment(pt), .abi = elem_ty.abiAlignment(zcu),
}), }),
}); });
log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.new_local }); log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.new_local });
@ -3636,7 +3638,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
const ptr_info = ptr_scalar_ty.ptrInfo(zcu); const ptr_info = ptr_scalar_ty.ptrInfo(zcu);
const src_ty = Type.fromInterned(ptr_info.child); const src_ty = Type.fromInterned(ptr_info.child);
if (!src_ty.hasRuntimeBitsIgnoreComptime(pt)) { if (!src_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
try reap(f, inst, &.{ty_op.operand}); try reap(f, inst, &.{ty_op.operand});
return .none; return .none;
} }
@ -3646,7 +3648,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
try reap(f, inst, &.{ty_op.operand}); try reap(f, inst, &.{ty_op.operand});
const is_aligned = if (ptr_info.flags.alignment != .none) const is_aligned = if (ptr_info.flags.alignment != .none)
ptr_info.flags.alignment.order(src_ty.abiAlignment(pt)).compare(.gte) ptr_info.flags.alignment.order(src_ty.abiAlignment(zcu)).compare(.gte)
else else
true; true;
const is_array = lowersToArray(src_ty, pt); const is_array = lowersToArray(src_ty, pt);
@ -3674,7 +3676,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
const bit_offset_ty = try pt.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1)); const bit_offset_ty = try pt.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1));
const bit_offset_val = try pt.intValue(bit_offset_ty, ptr_info.packed_offset.bit_offset); const bit_offset_val = try pt.intValue(bit_offset_ty, ptr_info.packed_offset.bit_offset);
const field_ty = try pt.intType(.unsigned, @as(u16, @intCast(src_ty.bitSize(pt)))); const field_ty = try pt.intType(.unsigned, @as(u16, @intCast(src_ty.bitSize(zcu))));
try f.writeCValue(writer, local, .Other); try f.writeCValue(writer, local, .Other);
try v.elem(f, writer); try v.elem(f, writer);
@ -3685,9 +3687,9 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeAll("(("); try writer.writeAll("((");
try f.renderType(writer, field_ty); try f.renderType(writer, field_ty);
try writer.writeByte(')'); try writer.writeByte(')');
const cant_cast = host_ty.isInt(zcu) and host_ty.bitSize(pt) > 64; const cant_cast = host_ty.isInt(zcu) and host_ty.bitSize(zcu) > 64;
if (cant_cast) { if (cant_cast) {
if (field_ty.bitSize(pt) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{}); if (field_ty.bitSize(zcu) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
try writer.writeAll("zig_lo_"); try writer.writeAll("zig_lo_");
try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty); try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty);
try writer.writeByte('('); try writer.writeByte('(');
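Loading a packed field, as rendered here, amounts to shifting the host integer right by the field's bit offset and truncating to the field's width. A plain-Zig model with made-up numbers:

const std = @import("std");

test "extracting a packed field: shift right, then truncate" {
    const host: u16 = 0b1111_0110_1001_0101;
    const bit_offset: u4 = 4;
    const field: u4 = @truncate(host >> bit_offset);
    try std.testing.expectEqual(@as(u4, 0b1001), field);
}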
@ -3735,7 +3737,7 @@ fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue {
const ret_val = if (is_array) ret_val: { const ret_val = if (is_array) ret_val: {
const array_local = try f.allocAlignedLocal(inst, .{ const array_local = try f.allocAlignedLocal(inst, .{
.ctype = ret_ctype, .ctype = ret_ctype,
.alignas = CType.AlignAs.fromAbiAlignment(ret_ty.abiAlignment(pt)), .alignas = CType.AlignAs.fromAbiAlignment(ret_ty.abiAlignment(zcu)),
}); });
try writer.writeAll("memcpy("); try writer.writeAll("memcpy(");
try f.writeCValueMember(writer, array_local, .{ .identifier = "array" }); try f.writeCValueMember(writer, array_local, .{ .identifier = "array" });
@ -3926,7 +3928,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
} }
const is_aligned = if (ptr_info.flags.alignment != .none) const is_aligned = if (ptr_info.flags.alignment != .none)
ptr_info.flags.alignment.order(src_ty.abiAlignment(pt)).compare(.gte) ptr_info.flags.alignment.order(src_ty.abiAlignment(zcu)).compare(.gte)
else else
true; true;
const is_array = lowersToArray(Type.fromInterned(ptr_info.child), pt); const is_array = lowersToArray(Type.fromInterned(ptr_info.child), pt);
@ -3976,7 +3978,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
const bit_offset_ty = try pt.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1)); const bit_offset_ty = try pt.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1));
const bit_offset_val = try pt.intValue(bit_offset_ty, ptr_info.packed_offset.bit_offset); const bit_offset_val = try pt.intValue(bit_offset_ty, ptr_info.packed_offset.bit_offset);
const src_bits = src_ty.bitSize(pt); const src_bits = src_ty.bitSize(zcu);
const ExpectedContents = [BigInt.Managed.default_capacity]BigIntLimb; const ExpectedContents = [BigInt.Managed.default_capacity]BigIntLimb;
var stack align(@alignOf(ExpectedContents)) = var stack align(@alignOf(ExpectedContents)) =
@ -4006,9 +4008,9 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
try writer.print(", {x}), zig_shl_", .{try f.fmtIntLiteral(mask_val)}); try writer.print(", {x}), zig_shl_", .{try f.fmtIntLiteral(mask_val)});
try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty); try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty);
try writer.writeByte('('); try writer.writeByte('(');
const cant_cast = host_ty.isInt(zcu) and host_ty.bitSize(pt) > 64; const cant_cast = host_ty.isInt(zcu) and host_ty.bitSize(zcu) > 64;
if (cant_cast) { if (cant_cast) {
if (src_ty.bitSize(pt) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{}); if (src_ty.bitSize(zcu) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
try writer.writeAll("zig_make_"); try writer.writeAll("zig_make_");
try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty); try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty);
try writer.writeAll("(0, "); try writer.writeAll("(0, ");
@ -4130,7 +4132,7 @@ fn airBinOp(
const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const operand_ty = f.typeOf(bin_op.lhs); const operand_ty = f.typeOf(bin_op.lhs);
const scalar_ty = operand_ty.scalarType(zcu); const scalar_ty = operand_ty.scalarType(zcu);
if ((scalar_ty.isInt(zcu) and scalar_ty.bitSize(pt) > 64) or scalar_ty.isRuntimeFloat()) if ((scalar_ty.isInt(zcu) and scalar_ty.bitSize(zcu) > 64) or scalar_ty.isRuntimeFloat())
return try airBinBuiltinCall(f, inst, operation, info); return try airBinBuiltinCall(f, inst, operation, info);
const lhs = try f.resolveInst(bin_op.lhs); const lhs = try f.resolveInst(bin_op.lhs);
@ -4169,7 +4171,7 @@ fn airCmpOp(
const lhs_ty = f.typeOf(data.lhs); const lhs_ty = f.typeOf(data.lhs);
const scalar_ty = lhs_ty.scalarType(zcu); const scalar_ty = lhs_ty.scalarType(zcu);
const scalar_bits = scalar_ty.bitSize(pt); const scalar_bits = scalar_ty.bitSize(zcu);
if (scalar_ty.isInt(zcu) and scalar_bits > 64) if (scalar_ty.isInt(zcu) and scalar_bits > 64)
return airCmpBuiltinCall( return airCmpBuiltinCall(
f, f,
@ -4219,7 +4221,7 @@ fn airEquality(
const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const operand_ty = f.typeOf(bin_op.lhs); const operand_ty = f.typeOf(bin_op.lhs);
const operand_bits = operand_ty.bitSize(pt); const operand_bits = operand_ty.bitSize(zcu);
if (operand_ty.isAbiInt(zcu) and operand_bits > 64) if (operand_ty.isAbiInt(zcu) and operand_bits > 64)
return airCmpBuiltinCall( return airCmpBuiltinCall(
f, f,
@ -4312,7 +4314,7 @@ fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue {
const inst_ty = f.typeOfIndex(inst); const inst_ty = f.typeOfIndex(inst);
const inst_scalar_ty = inst_ty.scalarType(zcu); const inst_scalar_ty = inst_ty.scalarType(zcu);
const elem_ty = inst_scalar_ty.elemType2(zcu); const elem_ty = inst_scalar_ty.elemType2(zcu);
if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) return f.moveCValue(inst, inst_ty, lhs); if (!elem_ty.hasRuntimeBitsIgnoreComptime(zcu)) return f.moveCValue(inst, inst_ty, lhs);
const inst_scalar_ctype = try f.ctypeFromType(inst_scalar_ty, .complete); const inst_scalar_ctype = try f.ctypeFromType(inst_scalar_ty, .complete);
const local = try f.allocLocal(inst, inst_ty); const local = try f.allocLocal(inst, inst_ty);
@ -4351,7 +4353,7 @@ fn airMinMax(f: *Function, inst: Air.Inst.Index, operator: u8, operation: []cons
const inst_ty = f.typeOfIndex(inst); const inst_ty = f.typeOfIndex(inst);
const inst_scalar_ty = inst_ty.scalarType(zcu); const inst_scalar_ty = inst_ty.scalarType(zcu);
if ((inst_scalar_ty.isInt(zcu) and inst_scalar_ty.bitSize(pt) > 64) or inst_scalar_ty.isRuntimeFloat()) if ((inst_scalar_ty.isInt(zcu) and inst_scalar_ty.bitSize(zcu) > 64) or inst_scalar_ty.isRuntimeFloat())
return try airBinBuiltinCall(f, inst, operation, .none); return try airBinBuiltinCall(f, inst, operation, .none);
const lhs = try f.resolveInst(bin_op.lhs); const lhs = try f.resolveInst(bin_op.lhs);
@ -4446,7 +4448,7 @@ fn airCall(
if (!arg_ctype.eql(try f.ctypeFromType(arg_ty, .complete))) { if (!arg_ctype.eql(try f.ctypeFromType(arg_ty, .complete))) {
const array_local = try f.allocAlignedLocal(inst, .{ const array_local = try f.allocAlignedLocal(inst, .{
.ctype = arg_ctype, .ctype = arg_ctype,
.alignas = CType.AlignAs.fromAbiAlignment(arg_ty.abiAlignment(pt)), .alignas = CType.AlignAs.fromAbiAlignment(arg_ty.abiAlignment(zcu)),
}); });
try writer.writeAll("memcpy("); try writer.writeAll("memcpy(");
try f.writeCValueMember(writer, array_local, .{ .identifier = "array" }); try f.writeCValueMember(writer, array_local, .{ .identifier = "array" });
@ -4493,7 +4495,7 @@ fn airCall(
} else { } else {
const local = try f.allocAlignedLocal(inst, .{ const local = try f.allocAlignedLocal(inst, .{
.ctype = ret_ctype, .ctype = ret_ctype,
.alignas = CType.AlignAs.fromAbiAlignment(ret_ty.abiAlignment(pt)), .alignas = CType.AlignAs.fromAbiAlignment(ret_ty.abiAlignment(zcu)),
}); });
try f.writeCValue(writer, local, .Other); try f.writeCValue(writer, local, .Other);
try writer.writeAll(" = "); try writer.writeAll(" = ");
@ -4618,7 +4620,7 @@ fn lowerBlock(f: *Function, inst: Air.Inst.Index, body: []const Air.Inst.Index)
const writer = f.object.writer(); const writer = f.object.writer();
const inst_ty = f.typeOfIndex(inst); const inst_ty = f.typeOfIndex(inst);
const result = if (inst_ty.hasRuntimeBitsIgnoreComptime(pt) and !f.liveness.isUnused(inst)) const result = if (inst_ty.hasRuntimeBitsIgnoreComptime(zcu) and !f.liveness.isUnused(inst))
try f.allocLocal(inst, inst_ty) try f.allocLocal(inst, inst_ty)
else else
.none; .none;
@ -4681,7 +4683,7 @@ fn lowerTry(
const liveness_condbr = f.liveness.getCondBr(inst); const liveness_condbr = f.liveness.getCondBr(inst);
const writer = f.object.writer(); const writer = f.object.writer();
const payload_ty = err_union_ty.errorUnionPayload(zcu); const payload_ty = err_union_ty.errorUnionPayload(zcu);
const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(pt); const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(zcu);
if (!err_union_ty.errorUnionSet(zcu).errorSetIsEmpty(zcu)) { if (!err_union_ty.errorUnionSet(zcu).errorSetIsEmpty(zcu)) {
try writer.writeAll("if ("); try writer.writeAll("if (");
@ -4820,7 +4822,7 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !CVal
try writer.writeAll(", sizeof("); try writer.writeAll(", sizeof(");
try f.renderType( try f.renderType(
writer, writer,
if (dest_ty.abiSize(pt) <= operand_ty.abiSize(pt)) dest_ty else operand_ty, if (dest_ty.abiSize(zcu) <= operand_ty.abiSize(zcu)) dest_ty else operand_ty,
); );
try writer.writeAll("));\n"); try writer.writeAll("));\n");
@ -5030,7 +5032,7 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue {
try f.object.indent_writer.insertNewline(); try f.object.indent_writer.insertNewline();
try writer.writeAll("case "); try writer.writeAll("case ");
const item_value = try f.air.value(item, pt); const item_value = try f.air.value(item, pt);
if (item_value.?.getUnsignedInt(pt)) |item_int| try writer.print("{}\n", .{ if (item_value.?.getUnsignedInt(zcu)) |item_int| try writer.print("{}\n", .{
try f.fmtIntLiteral(try pt.intValue(lowered_condition_ty, item_int)), try f.fmtIntLiteral(try pt.intValue(lowered_condition_ty, item_int)),
}) else { }) else {
if (condition_ty.isPtrAtRuntime(zcu)) { if (condition_ty.isPtrAtRuntime(zcu)) {
@ -5112,10 +5114,10 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
const result = result: { const result = result: {
const writer = f.object.writer(); const writer = f.object.writer();
const inst_ty = f.typeOfIndex(inst); const inst_ty = f.typeOfIndex(inst);
const inst_local = if (inst_ty.hasRuntimeBitsIgnoreComptime(pt)) local: { const inst_local = if (inst_ty.hasRuntimeBitsIgnoreComptime(zcu)) local: {
const inst_local = try f.allocLocalValue(.{ const inst_local = try f.allocLocalValue(.{
.ctype = try f.ctypeFromType(inst_ty, .complete), .ctype = try f.ctypeFromType(inst_ty, .complete),
.alignas = CType.AlignAs.fromAbiAlignment(inst_ty.abiAlignment(pt)), .alignas = CType.AlignAs.fromAbiAlignment(inst_ty.abiAlignment(zcu)),
}); });
if (f.wantSafety()) { if (f.wantSafety()) {
try f.writeCValue(writer, inst_local, .Other); try f.writeCValue(writer, inst_local, .Other);
@ -5148,7 +5150,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeAll("register "); try writer.writeAll("register ");
const output_local = try f.allocLocalValue(.{ const output_local = try f.allocLocalValue(.{
.ctype = try f.ctypeFromType(output_ty, .complete), .ctype = try f.ctypeFromType(output_ty, .complete),
.alignas = CType.AlignAs.fromAbiAlignment(output_ty.abiAlignment(pt)), .alignas = CType.AlignAs.fromAbiAlignment(output_ty.abiAlignment(zcu)),
}); });
try f.allocs.put(gpa, output_local.new_local, false); try f.allocs.put(gpa, output_local.new_local, false);
try f.object.dg.renderTypeAndName(writer, output_ty, output_local, .{}, .none, .complete); try f.object.dg.renderTypeAndName(writer, output_ty, output_local, .{}, .none, .complete);
@ -5183,7 +5185,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
if (is_reg) try writer.writeAll("register "); if (is_reg) try writer.writeAll("register ");
const input_local = try f.allocLocalValue(.{ const input_local = try f.allocLocalValue(.{
.ctype = try f.ctypeFromType(input_ty, .complete), .ctype = try f.ctypeFromType(input_ty, .complete),
.alignas = CType.AlignAs.fromAbiAlignment(input_ty.abiAlignment(pt)), .alignas = CType.AlignAs.fromAbiAlignment(input_ty.abiAlignment(zcu)),
}); });
try f.allocs.put(gpa, input_local.new_local, false); try f.allocs.put(gpa, input_local.new_local, false);
try f.object.dg.renderTypeAndName(writer, input_ty, input_local, Const, .none, .complete); try f.object.dg.renderTypeAndName(writer, input_ty, input_local, Const, .none, .complete);
@ -5526,9 +5528,9 @@ fn fieldLocation(
.struct_type => { .struct_type => {
const loaded_struct = ip.loadStructType(container_ty.toIntern()); const loaded_struct = ip.loadStructType(container_ty.toIntern());
return switch (loaded_struct.layout) { return switch (loaded_struct.layout) {
.auto, .@"extern" => if (!container_ty.hasRuntimeBitsIgnoreComptime(pt)) .auto, .@"extern" => if (!container_ty.hasRuntimeBitsIgnoreComptime(zcu))
.begin .begin
else if (!field_ptr_ty.childType(zcu).hasRuntimeBitsIgnoreComptime(pt)) else if (!field_ptr_ty.childType(zcu).hasRuntimeBitsIgnoreComptime(zcu))
.{ .byte_offset = loaded_struct.offsets.get(ip)[field_index] } .{ .byte_offset = loaded_struct.offsets.get(ip)[field_index] }
else else
.{ .field = if (loaded_struct.fieldName(ip, field_index).unwrap()) |field_name| .{ .field = if (loaded_struct.fieldName(ip, field_index).unwrap()) |field_name|
@ -5542,10 +5544,10 @@ fn fieldLocation(
.begin, .begin,
}; };
}, },
.anon_struct_type => |anon_struct_info| return if (!container_ty.hasRuntimeBitsIgnoreComptime(pt)) .anon_struct_type => |anon_struct_info| return if (!container_ty.hasRuntimeBitsIgnoreComptime(zcu))
.begin .begin
else if (!field_ptr_ty.childType(zcu).hasRuntimeBitsIgnoreComptime(pt)) else if (!field_ptr_ty.childType(zcu).hasRuntimeBitsIgnoreComptime(zcu))
.{ .byte_offset = container_ty.structFieldOffset(field_index, pt) } .{ .byte_offset = container_ty.structFieldOffset(field_index, zcu) }
else else
.{ .field = if (anon_struct_info.fieldName(ip, field_index).unwrap()) |field_name| .{ .field = if (anon_struct_info.fieldName(ip, field_index).unwrap()) |field_name|
.{ .identifier = field_name.toSlice(ip) } .{ .identifier = field_name.toSlice(ip) }
@ -5556,8 +5558,8 @@ fn fieldLocation(
switch (loaded_union.flagsUnordered(ip).layout) { switch (loaded_union.flagsUnordered(ip).layout) {
.auto, .@"extern" => { .auto, .@"extern" => {
const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]); const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]);
if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu))
return if (loaded_union.hasTag(ip) and !container_ty.unionHasAllZeroBitFieldTypes(pt)) return if (loaded_union.hasTag(ip) and !container_ty.unionHasAllZeroBitFieldTypes(zcu))
.{ .field = .{ .identifier = "payload" } } .{ .field = .{ .identifier = "payload" } }
else else
.begin; .begin;
@ -5706,7 +5708,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
const extra = f.air.extraData(Air.StructField, ty_pl.payload).data; const extra = f.air.extraData(Air.StructField, ty_pl.payload).data;
const inst_ty = f.typeOfIndex(inst); const inst_ty = f.typeOfIndex(inst);
if (!inst_ty.hasRuntimeBitsIgnoreComptime(pt)) { if (!inst_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
try reap(f, inst, &.{extra.struct_operand}); try reap(f, inst, &.{extra.struct_operand});
return .none; return .none;
} }
@ -5738,7 +5740,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
inst_ty.intInfo(zcu).signedness inst_ty.intInfo(zcu).signedness
else else
.unsigned; .unsigned;
const field_int_ty = try pt.intType(field_int_signedness, @as(u16, @intCast(inst_ty.bitSize(pt)))); const field_int_ty = try pt.intType(field_int_signedness, @as(u16, @intCast(inst_ty.bitSize(zcu))));
const temp_local = try f.allocLocal(inst, field_int_ty); const temp_local = try f.allocLocal(inst, field_int_ty);
try f.writeCValue(writer, temp_local, .Other); try f.writeCValue(writer, temp_local, .Other);
@ -5749,7 +5751,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeByte(')'); try writer.writeByte(')');
const cant_cast = int_info.bits > 64; const cant_cast = int_info.bits > 64;
if (cant_cast) { if (cant_cast) {
if (field_int_ty.bitSize(pt) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{}); if (field_int_ty.bitSize(zcu) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
try writer.writeAll("zig_lo_"); try writer.writeAll("zig_lo_");
try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty); try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty);
try writer.writeByte('('); try writer.writeByte('(');
@ -5857,7 +5859,7 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
const payload_ty = error_union_ty.errorUnionPayload(zcu); const payload_ty = error_union_ty.errorUnionPayload(zcu);
const local = try f.allocLocal(inst, inst_ty); const local = try f.allocLocal(inst, inst_ty);
if (!payload_ty.hasRuntimeBits(pt) and operand == .local and operand.local == local.new_local) { if (!payload_ty.hasRuntimeBits(zcu) and operand == .local and operand.local == local.new_local) {
// The store will be 'x = x'; elide it. // The store will be 'x = x'; elide it.
return local; return local;
} }
@ -5866,7 +5868,7 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
try f.writeCValue(writer, local, .Other); try f.writeCValue(writer, local, .Other);
try writer.writeAll(" = "); try writer.writeAll(" = ");
if (!payload_ty.hasRuntimeBits(pt)) if (!payload_ty.hasRuntimeBits(zcu))
try f.writeCValue(writer, operand, .Other) try f.writeCValue(writer, operand, .Other)
else if (error_ty.errorSetIsEmpty(zcu)) else if (error_ty.errorSetIsEmpty(zcu))
try writer.print("{}", .{ try writer.print("{}", .{
@ -5892,7 +5894,7 @@ fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValu
const error_union_ty = if (is_ptr) operand_ty.childType(zcu) else operand_ty; const error_union_ty = if (is_ptr) operand_ty.childType(zcu) else operand_ty;
const writer = f.object.writer(); const writer = f.object.writer();
if (!error_union_ty.errorUnionPayload(zcu).hasRuntimeBits(pt)) { if (!error_union_ty.errorUnionPayload(zcu).hasRuntimeBits(zcu)) {
if (!is_ptr) return .none; if (!is_ptr) return .none;
const local = try f.allocLocal(inst, inst_ty); const local = try f.allocLocal(inst, inst_ty);
@ -5963,7 +5965,7 @@ fn airWrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
const inst_ty = f.typeOfIndex(inst); const inst_ty = f.typeOfIndex(inst);
const payload_ty = inst_ty.errorUnionPayload(zcu); const payload_ty = inst_ty.errorUnionPayload(zcu);
const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(pt); const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(zcu);
const err_ty = inst_ty.errorUnionSet(zcu); const err_ty = inst_ty.errorUnionSet(zcu);
const err = try f.resolveInst(ty_op.operand); const err = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand}); try reap(f, inst, &.{ty_op.operand});
@ -6012,7 +6014,7 @@ fn airErrUnionPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue {
try reap(f, inst, &.{ty_op.operand}); try reap(f, inst, &.{ty_op.operand});
// First, set the non-error value. // First, set the non-error value.
if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
const a = try Assignment.start(f, writer, try f.ctypeFromType(operand_ty, .complete)); const a = try Assignment.start(f, writer, try f.ctypeFromType(operand_ty, .complete));
try f.writeCValueDeref(writer, operand); try f.writeCValueDeref(writer, operand);
try a.assign(f, writer); try a.assign(f, writer);
@ -6064,7 +6066,7 @@ fn airWrapErrUnionPay(f: *Function, inst: Air.Inst.Index) !CValue {
const inst_ty = f.typeOfIndex(inst); const inst_ty = f.typeOfIndex(inst);
const payload_ty = inst_ty.errorUnionPayload(zcu); const payload_ty = inst_ty.errorUnionPayload(zcu);
const payload = try f.resolveInst(ty_op.operand); const payload = try f.resolveInst(ty_op.operand);
const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(pt); const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(zcu);
const err_ty = inst_ty.errorUnionSet(zcu); const err_ty = inst_ty.errorUnionSet(zcu);
try reap(f, inst, &.{ty_op.operand}); try reap(f, inst, &.{ty_op.operand});
@ -6109,7 +6111,7 @@ fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const
try a.assign(f, writer); try a.assign(f, writer);
const err_int_ty = try pt.errorIntType(); const err_int_ty = try pt.errorIntType();
if (!error_ty.errorSetIsEmpty(zcu)) if (!error_ty.errorSetIsEmpty(zcu))
if (payload_ty.hasRuntimeBits(pt)) if (payload_ty.hasRuntimeBits(zcu))
if (is_ptr) if (is_ptr)
try f.writeCValueDerefMember(writer, operand, .{ .identifier = "error" }) try f.writeCValueDerefMember(writer, operand, .{ .identifier = "error" })
else else
@ -6430,7 +6432,7 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue
try reap(f, inst, &.{ extra.ptr, extra.expected_value, extra.new_value }); try reap(f, inst, &.{ extra.ptr, extra.expected_value, extra.new_value });
const repr_ty = if (ty.isRuntimeFloat()) const repr_ty = if (ty.isRuntimeFloat())
pt.intType(.unsigned, @as(u16, @intCast(ty.abiSize(pt) * 8))) catch unreachable pt.intType(.unsigned, @as(u16, @intCast(ty.abiSize(zcu) * 8))) catch unreachable
else else
ty; ty;
@ -6534,7 +6536,7 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue {
const operand_mat = try Materialize.start(f, inst, ty, operand); const operand_mat = try Materialize.start(f, inst, ty, operand);
try reap(f, inst, &.{ pl_op.operand, extra.operand }); try reap(f, inst, &.{ pl_op.operand, extra.operand });
const repr_bits = @as(u16, @intCast(ty.abiSize(pt) * 8)); const repr_bits = @as(u16, @intCast(ty.abiSize(zcu) * 8));
const is_float = ty.isRuntimeFloat(); const is_float = ty.isRuntimeFloat();
const is_128 = repr_bits == 128; const is_128 = repr_bits == 128;
const repr_ty = if (is_float) pt.intType(.unsigned, repr_bits) catch unreachable else ty; const repr_ty = if (is_float) pt.intType(.unsigned, repr_bits) catch unreachable else ty;
@ -6585,7 +6587,7 @@ fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue {
const ty = ptr_ty.childType(zcu); const ty = ptr_ty.childType(zcu);
const repr_ty = if (ty.isRuntimeFloat()) const repr_ty = if (ty.isRuntimeFloat())
pt.intType(.unsigned, @as(u16, @intCast(ty.abiSize(pt) * 8))) catch unreachable pt.intType(.unsigned, @as(u16, @intCast(ty.abiSize(zcu) * 8))) catch unreachable
else else
ty; ty;
@ -6626,7 +6628,7 @@ fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CVa
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
const repr_ty = if (ty.isRuntimeFloat()) const repr_ty = if (ty.isRuntimeFloat())
pt.intType(.unsigned, @as(u16, @intCast(ty.abiSize(pt) * 8))) catch unreachable pt.intType(.unsigned, @as(u16, @intCast(ty.abiSize(zcu) * 8))) catch unreachable
else else
ty; ty;
@ -6666,7 +6668,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
const dest_slice = try f.resolveInst(bin_op.lhs); const dest_slice = try f.resolveInst(bin_op.lhs);
const value = try f.resolveInst(bin_op.rhs); const value = try f.resolveInst(bin_op.rhs);
const elem_ty = f.typeOf(bin_op.rhs); const elem_ty = f.typeOf(bin_op.rhs);
const elem_abi_size = elem_ty.abiSize(pt); const elem_abi_size = elem_ty.abiSize(zcu);
const val_is_undef = if (try f.air.value(bin_op.rhs, pt)) |val| val.isUndefDeep(zcu) else false; const val_is_undef = if (try f.air.value(bin_op.rhs, pt)) |val| val.isUndefDeep(zcu) else false;
const writer = f.object.writer(); const writer = f.object.writer();
@ -6831,7 +6833,7 @@ fn airSetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue {
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
const union_ty = f.typeOf(bin_op.lhs).childType(zcu); const union_ty = f.typeOf(bin_op.lhs).childType(zcu);
const layout = union_ty.unionGetLayout(pt); const layout = union_ty.unionGetLayout(zcu);
if (layout.tag_size == 0) return .none; if (layout.tag_size == 0) return .none;
const tag_ty = union_ty.unionTagTypeSafety(zcu).?; const tag_ty = union_ty.unionTagTypeSafety(zcu).?;
@ -6846,13 +6848,14 @@ fn airSetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue {
fn airGetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue { fn airGetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue {
const pt = f.object.dg.pt; const pt = f.object.dg.pt;
const zcu = pt.zcu;
const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try f.resolveInst(ty_op.operand); const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand}); try reap(f, inst, &.{ty_op.operand});
const union_ty = f.typeOf(ty_op.operand); const union_ty = f.typeOf(ty_op.operand);
const layout = union_ty.unionGetLayout(pt); const layout = union_ty.unionGetLayout(zcu);
if (layout.tag_size == 0) return .none; if (layout.tag_size == 0) return .none;
const inst_ty = f.typeOfIndex(inst); const inst_ty = f.typeOfIndex(inst);
@ -6960,6 +6963,7 @@ fn airSelect(f: *Function, inst: Air.Inst.Index) !CValue {
fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue { fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue {
const pt = f.object.dg.pt; const pt = f.object.dg.pt;
const zcu = pt.zcu;
const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = f.air.extraData(Air.Shuffle, ty_pl.payload).data; const extra = f.air.extraData(Air.Shuffle, ty_pl.payload).data;
@ -6978,7 +6982,7 @@ fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue {
try f.object.dg.renderValue(writer, try pt.intValue(Type.usize, index), .Other); try f.object.dg.renderValue(writer, try pt.intValue(Type.usize, index), .Other);
try writer.writeAll("] = "); try writer.writeAll("] = ");
const mask_elem = (try mask.elemValue(pt, index)).toSignedInt(pt); const mask_elem = (try mask.elemValue(pt, index)).toSignedInt(zcu);
const src_val = try pt.intValue(Type.usize, @as(u64, @intCast(mask_elem ^ mask_elem >> 63))); const src_val = try pt.intValue(Type.usize, @as(u64, @intCast(mask_elem ^ mask_elem >> 63)));
try f.writeCValue(writer, if (mask_elem >= 0) lhs else rhs, .Other); try f.writeCValue(writer, if (mask_elem >= 0) lhs else rhs, .Other);
@ -7001,7 +7005,7 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
const operand_ty = f.typeOf(reduce.operand); const operand_ty = f.typeOf(reduce.operand);
const writer = f.object.writer(); const writer = f.object.writer();
const use_operator = scalar_ty.bitSize(pt) <= 64; const use_operator = scalar_ty.bitSize(zcu) <= 64;
const op: union(enum) { const op: union(enum) {
const Func = struct { operation: []const u8, info: BuiltinInfo = .none }; const Func = struct { operation: []const u8, info: BuiltinInfo = .none };
builtin: Func, builtin: Func,
@ -7178,7 +7182,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
var field_it = loaded_struct.iterateRuntimeOrder(ip); var field_it = loaded_struct.iterateRuntimeOrder(ip);
while (field_it.next()) |field_index| { while (field_it.next()) |field_index| {
const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]); const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
const a = try Assignment.start(f, writer, try f.ctypeFromType(field_ty, .complete)); const a = try Assignment.start(f, writer, try f.ctypeFromType(field_ty, .complete));
try f.writeCValueMember(writer, local, if (loaded_struct.fieldName(ip, field_index).unwrap()) |field_name| try f.writeCValueMember(writer, local, if (loaded_struct.fieldName(ip, field_index).unwrap()) |field_name|
@ -7203,7 +7207,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
for (0..elements.len) |field_index| { for (0..elements.len) |field_index| {
if (inst_ty.structFieldIsComptime(field_index, zcu)) continue; if (inst_ty.structFieldIsComptime(field_index, zcu)) continue;
const field_ty = inst_ty.structFieldType(field_index, zcu); const field_ty = inst_ty.structFieldType(field_index, zcu);
if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
if (!empty) { if (!empty) {
try writer.writeAll("zig_or_"); try writer.writeAll("zig_or_");
@ -7216,7 +7220,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
for (resolved_elements, 0..) |element, field_index| { for (resolved_elements, 0..) |element, field_index| {
if (inst_ty.structFieldIsComptime(field_index, zcu)) continue; if (inst_ty.structFieldIsComptime(field_index, zcu)) continue;
const field_ty = inst_ty.structFieldType(field_index, zcu); const field_ty = inst_ty.structFieldType(field_index, zcu);
if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
if (!empty) try writer.writeAll(", "); if (!empty) try writer.writeAll(", ");
// TODO: Skip this entire shift if val is 0? // TODO: Skip this entire shift if val is 0?
@ -7248,7 +7252,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeByte(')'); try writer.writeByte(')');
if (!empty) try writer.writeByte(')'); if (!empty) try writer.writeByte(')');
bit_offset += field_ty.bitSize(pt); bit_offset += field_ty.bitSize(zcu);
empty = false; empty = false;
} }
try writer.writeAll(";\n"); try writer.writeAll(";\n");
@ -7258,7 +7262,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
.anon_struct_type => |anon_struct_info| for (0..anon_struct_info.types.len) |field_index| { .anon_struct_type => |anon_struct_info| for (0..anon_struct_info.types.len) |field_index| {
if (anon_struct_info.values.get(ip)[field_index] != .none) continue; if (anon_struct_info.values.get(ip)[field_index] != .none) continue;
const field_ty = Type.fromInterned(anon_struct_info.types.get(ip)[field_index]); const field_ty = Type.fromInterned(anon_struct_info.types.get(ip)[field_index]);
if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
const a = try Assignment.start(f, writer, try f.ctypeFromType(field_ty, .complete)); const a = try Assignment.start(f, writer, try f.ctypeFromType(field_ty, .complete));
try f.writeCValueMember(writer, local, if (anon_struct_info.fieldName(ip, field_index).unwrap()) |field_name| try f.writeCValueMember(writer, local, if (anon_struct_info.fieldName(ip, field_index).unwrap()) |field_name|
@ -7294,7 +7298,7 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue {
if (loaded_union.flagsUnordered(ip).layout == .@"packed") return f.moveCValue(inst, union_ty, payload); if (loaded_union.flagsUnordered(ip).layout == .@"packed") return f.moveCValue(inst, union_ty, payload);
const field: CValue = if (union_ty.unionTagTypeSafety(zcu)) |tag_ty| field: { const field: CValue = if (union_ty.unionTagTypeSafety(zcu)) |tag_ty| field: {
const layout = union_ty.unionGetLayout(pt); const layout = union_ty.unionGetLayout(zcu);
if (layout.tag_size != 0) { if (layout.tag_size != 0) {
const field_index = tag_ty.enumFieldIndex(field_name, zcu).?; const field_index = tag_ty.enumFieldIndex(field_name, zcu).?;
const tag_val = try pt.enumValueFieldIndex(tag_ty, field_index); const tag_val = try pt.enumValueFieldIndex(tag_ty, field_index);
@ -7818,7 +7822,7 @@ fn formatIntLiteral(
}; };
undef_int.truncate(undef_int.toConst(), data.int_info.signedness, data.int_info.bits); undef_int.truncate(undef_int.toConst(), data.int_info.signedness, data.int_info.bits);
break :blk undef_int.toConst(); break :blk undef_int.toConst();
} else data.val.toBigInt(&int_buf, pt); } else data.val.toBigInt(&int_buf, zcu);
assert(int.fitsInTwosComp(data.int_info.signedness, data.int_info.bits)); assert(int.fitsInTwosComp(data.int_info.signedness, data.int_info.bits));
const c_bits: usize = @intCast(data.ctype.byteSize(ctype_pool, data.dg.mod) * 8); const c_bits: usize = @intCast(data.ctype.byteSize(ctype_pool, data.dg.mod) * 8);
@ -8062,9 +8066,10 @@ const Vectorize = struct {
}; };
fn lowersToArray(ty: Type, pt: Zcu.PerThread) bool { fn lowersToArray(ty: Type, pt: Zcu.PerThread) bool {
return switch (ty.zigTypeTag(pt.zcu)) { const zcu = pt.zcu;
return switch (ty.zigTypeTag(zcu)) {
.Array, .Vector => return true, .Array, .Vector => return true,
else => return ty.isAbiInt(pt.zcu) and toCIntBits(@as(u32, @intCast(ty.bitSize(pt)))) == null, else => return ty.isAbiInt(zcu) and toCIntBits(@as(u32, @intCast(ty.bitSize(zcu)))) == null,
}; };
} }
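The C backend hunks above all follow one call-site pattern: the per-thread handle stays in scope for anything that creates new types or values (pt.intType, pt.intValue, pt.enumValueFieldIndex), while pure layout queries (bitSize, abiSize, abiAlignment, hasRuntimeBitsIgnoreComptime, unionGetLayout) now take the *Zcu directly. A minimal sketch of that split follows; it is not part of this commit, and the helper name and body are assumptions for illustration only.

    // Hypothetical helper, not part of this commit; it only illustrates the call-site split.
    fn packedBitOffsetValue(f: *Function, host_ty: Type, bit_offset: u16) !Value {
        const pt = f.object.dg.pt; // per-thread handle: still needed to intern new types/values
        const zcu = pt.zcu;        // bare Zcu: sufficient for layout queries
        const host_bits: u16 = @intCast(host_ty.bitSize(zcu)); // was host_ty.bitSize(pt)
        const bit_offset_ty = try pt.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1));
        return try pt.intValue(bit_offset_ty, bit_offset); // creation keeps going through pt
    }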

View File

@ -1344,6 +1344,7 @@ pub const Pool = struct {
kind: Kind, kind: Kind,
) !CType { ) !CType {
const ip = &pt.zcu.intern_pool; const ip = &pt.zcu.intern_pool;
const zcu = pt.zcu;
switch (ty.toIntern()) { switch (ty.toIntern()) {
.u0_type, .u0_type,
.i0_type, .i0_type,
@ -1476,7 +1477,7 @@ pub const Pool = struct {
), ),
.alignas = AlignAs.fromAlignment(.{ .alignas = AlignAs.fromAlignment(.{
.@"align" = ptr_info.flags.alignment, .@"align" = ptr_info.flags.alignment,
.abi = Type.fromInterned(ptr_info.child).abiAlignment(pt), .abi = Type.fromInterned(ptr_info.child).abiAlignment(zcu),
}), }),
}; };
break :elem_ctype if (elem.alignas.abiOrder().compare(.gte)) break :elem_ctype if (elem.alignas.abiOrder().compare(.gte))
@ -1552,7 +1553,7 @@ pub const Pool = struct {
.{ .{
.name = .{ .index = .array }, .name = .{ .index = .array },
.ctype = array_ctype, .ctype = array_ctype,
.alignas = AlignAs.fromAbiAlignment(elem_type.abiAlignment(pt)), .alignas = AlignAs.fromAbiAlignment(elem_type.abiAlignment(zcu)),
}, },
}; };
return pool.fromFields(allocator, .@"struct", &fields, kind); return pool.fromFields(allocator, .@"struct", &fields, kind);
@ -1578,7 +1579,7 @@ pub const Pool = struct {
.{ .{
.name = .{ .index = .array }, .name = .{ .index = .array },
.ctype = vector_ctype, .ctype = vector_ctype,
.alignas = AlignAs.fromAbiAlignment(elem_type.abiAlignment(pt)), .alignas = AlignAs.fromAbiAlignment(elem_type.abiAlignment(zcu)),
}, },
}; };
return pool.fromFields(allocator, .@"struct", &fields, kind); return pool.fromFields(allocator, .@"struct", &fields, kind);
@ -1613,7 +1614,7 @@ pub const Pool = struct {
.name = .{ .index = .payload }, .name = .{ .index = .payload },
.ctype = payload_ctype, .ctype = payload_ctype,
.alignas = AlignAs.fromAbiAlignment( .alignas = AlignAs.fromAbiAlignment(
Type.fromInterned(payload_type).abiAlignment(pt), Type.fromInterned(payload_type).abiAlignment(zcu),
), ),
}, },
}; };
@ -1649,7 +1650,7 @@ pub const Pool = struct {
.{ .{
.name = .{ .index = .payload }, .name = .{ .index = .payload },
.ctype = payload_ctype, .ctype = payload_ctype,
.alignas = AlignAs.fromAbiAlignment(payload_type.abiAlignment(pt)), .alignas = AlignAs.fromAbiAlignment(payload_type.abiAlignment(zcu)),
}, },
}; };
return pool.fromFields(allocator, .@"struct", &fields, kind); return pool.fromFields(allocator, .@"struct", &fields, kind);
@ -1663,7 +1664,7 @@ pub const Pool = struct {
.tag = .@"struct", .tag = .@"struct",
.name = .{ .index = ip_index }, .name = .{ .index = ip_index },
}); });
if (kind.isForward()) return if (ty.hasRuntimeBitsIgnoreComptime(pt)) if (kind.isForward()) return if (ty.hasRuntimeBitsIgnoreComptime(zcu))
fwd_decl fwd_decl
else else
CType.void; CType.void;
@ -1696,7 +1697,7 @@ pub const Pool = struct {
String.fromUnnamed(@intCast(field_index)); String.fromUnnamed(@intCast(field_index));
const field_alignas = AlignAs.fromAlignment(.{ const field_alignas = AlignAs.fromAlignment(.{
.@"align" = loaded_struct.fieldAlign(ip, field_index), .@"align" = loaded_struct.fieldAlign(ip, field_index),
.abi = field_type.abiAlignment(pt), .abi = field_type.abiAlignment(zcu),
}); });
pool.addHashedExtraAssumeCapacityTo(scratch, &hasher, Field, .{ pool.addHashedExtraAssumeCapacityTo(scratch, &hasher, Field, .{
.name = field_name.index, .name = field_name.index,
@ -1758,7 +1759,7 @@ pub const Pool = struct {
.name = field_name.index, .name = field_name.index,
.ctype = field_ctype.index, .ctype = field_ctype.index,
.flags = .{ .alignas = AlignAs.fromAbiAlignment( .flags = .{ .alignas = AlignAs.fromAbiAlignment(
field_type.abiAlignment(pt), field_type.abiAlignment(zcu),
) }, ) },
}); });
} }
@ -1802,7 +1803,7 @@ pub const Pool = struct {
.tag = if (has_tag) .@"struct" else .@"union", .tag = if (has_tag) .@"struct" else .@"union",
.name = .{ .index = ip_index }, .name = .{ .index = ip_index },
}); });
if (kind.isForward()) return if (ty.hasRuntimeBitsIgnoreComptime(pt)) if (kind.isForward()) return if (ty.hasRuntimeBitsIgnoreComptime(zcu))
fwd_decl fwd_decl
else else
CType.void; CType.void;
@ -1836,7 +1837,7 @@ pub const Pool = struct {
); );
const field_alignas = AlignAs.fromAlignment(.{ const field_alignas = AlignAs.fromAlignment(.{
.@"align" = loaded_union.fieldAlign(ip, field_index), .@"align" = loaded_union.fieldAlign(ip, field_index),
.abi = field_type.abiAlignment(pt), .abi = field_type.abiAlignment(zcu),
}); });
pool.addHashedExtraAssumeCapacityTo(scratch, &hasher, Field, .{ pool.addHashedExtraAssumeCapacityTo(scratch, &hasher, Field, .{
.name = field_name.index, .name = field_name.index,
@ -1881,7 +1882,7 @@ pub const Pool = struct {
struct_fields[struct_fields_len] = .{ struct_fields[struct_fields_len] = .{
.name = .{ .index = .tag }, .name = .{ .index = .tag },
.ctype = tag_ctype, .ctype = tag_ctype,
.alignas = AlignAs.fromAbiAlignment(tag_type.abiAlignment(pt)), .alignas = AlignAs.fromAbiAlignment(tag_type.abiAlignment(zcu)),
}; };
struct_fields_len += 1; struct_fields_len += 1;
} }
@ -1929,7 +1930,7 @@ pub const Pool = struct {
}, },
.@"packed" => return pool.fromIntInfo(allocator, .{ .@"packed" => return pool.fromIntInfo(allocator, .{
.signedness = .unsigned, .signedness = .unsigned,
.bits = @intCast(ty.bitSize(pt)), .bits = @intCast(ty.bitSize(zcu)),
}, mod, kind), }, mod, kind),
} }
}, },
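In the CType Pool hunks above, the mechanical change is enabled by a single added preamble line, `const zcu = pt.zcu;`, next to the existing InternPool access; every alignment and bit-size query later in the routine can then take the Zcu. A minimal sketch of that preamble is below; the signature is abbreviated and the body elided, so it is illustrative rather than the real routine.

    // Abbreviated sketch of the Pool routine's preamble; real signature and body elided.
    fn lowerTypeSketch(pt: Zcu.PerThread, ty: Type) void {
        const ip = &pt.zcu.intern_pool; // InternPool access still goes through the per-thread handle
        const zcu = pt.zcu;             // added once so later layout queries can take the Zcu
        _ = ip;
        _ = ty.abiAlignment(zcu);       // e.g. abiAlignment/bitSize/hasRuntimeBitsIgnoreComptime
    }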

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1259,8 +1259,8 @@ fn updateLazySymbolAtom(
atom_index: Atom.Index, atom_index: Atom.Index,
section_index: u16, section_index: u16,
) !void { ) !void {
const mod = pt.zcu; const zcu = pt.zcu;
const gpa = mod.gpa; const gpa = zcu.gpa;
var required_alignment: InternPool.Alignment = .none; var required_alignment: InternPool.Alignment = .none;
var code_buffer = std.ArrayList(u8).init(gpa); var code_buffer = std.ArrayList(u8).init(gpa);
@ -1275,7 +1275,7 @@ fn updateLazySymbolAtom(
const atom = self.getAtomPtr(atom_index); const atom = self.getAtomPtr(atom_index);
const local_sym_index = atom.getSymbolIndex().?; const local_sym_index = atom.getSymbolIndex().?;
const src = Type.fromInterned(sym.ty).srcLocOrNull(mod) orelse Module.LazySrcLoc.unneeded; const src = Type.fromInterned(sym.ty).srcLocOrNull(zcu) orelse Zcu.LazySrcLoc.unneeded;
const res = try codegen.generateLazySymbol( const res = try codegen.generateLazySymbol(
&self.base, &self.base,
pt, pt,
@ -1849,7 +1849,7 @@ pub fn lowerUav(
const gpa = zcu.gpa; const gpa = zcu.gpa;
const val = Value.fromInterned(uav); const val = Value.fromInterned(uav);
const uav_alignment = switch (explicit_alignment) { const uav_alignment = switch (explicit_alignment) {
.none => val.typeOf(zcu).abiAlignment(pt), .none => val.typeOf(zcu).abiAlignment(zcu),
else => explicit_alignment, else => explicit_alignment,
}; };
if (self.uavs.get(uav)) |metadata| { if (self.uavs.get(uav)) |metadata| {

View File

@ -849,7 +849,7 @@ pub fn lowerUav(
const gpa = zcu.gpa; const gpa = zcu.gpa;
const val = Value.fromInterned(uav); const val = Value.fromInterned(uav);
const uav_alignment = switch (explicit_alignment) { const uav_alignment = switch (explicit_alignment) {
.none => val.typeOf(zcu).abiAlignment(pt), .none => val.typeOf(zcu).abiAlignment(zcu),
else => explicit_alignment, else => explicit_alignment,
}; };
if (self.uavs.get(uav)) |metadata| { if (self.uavs.get(uav)) |metadata| {

View File

@ -688,7 +688,7 @@ pub fn lowerUav(
const gpa = zcu.gpa; const gpa = zcu.gpa;
const val = Value.fromInterned(uav); const val = Value.fromInterned(uav);
const uav_alignment = switch (explicit_alignment) { const uav_alignment = switch (explicit_alignment) {
.none => val.typeOf(zcu).abiAlignment(pt), .none => val.typeOf(zcu).abiAlignment(zcu),
else => explicit_alignment, else => explicit_alignment,
}; };
if (self.uavs.get(uav)) |metadata| { if (self.uavs.get(uav)) |metadata| {
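The same one-line change recurs in each linker backend's lowerUav above: when no explicit alignment is given, the ABI alignment of the value's type is used, and that query now needs only the Zcu. A minimal sketch of the selection, reconstructed from the hunks with the surrounding code elided:

    const uav_alignment = switch (explicit_alignment) {
        .none => val.typeOf(zcu).abiAlignment(zcu), // default: the type's ABI alignment
        else => explicit_alignment,                 // an explicit alignment always wins
    };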

View File

@ -487,9 +487,9 @@ fn lowerConst(
src_loc: Zcu.LazySrcLoc, src_loc: Zcu.LazySrcLoc,
) !LowerConstResult { ) !LowerConstResult {
const gpa = wasm_file.base.comp.gpa; const gpa = wasm_file.base.comp.gpa;
const mod = wasm_file.base.comp.module.?; const zcu = wasm_file.base.comp.module.?;
const ty = val.typeOf(mod); const ty = val.typeOf(zcu);
// Create and initialize a new local symbol and atom // Create and initialize a new local symbol and atom
const sym_index = try zig_object.allocateSymbol(gpa); const sym_index = try zig_object.allocateSymbol(gpa);
@ -499,7 +499,7 @@ fn lowerConst(
const code = code: { const code = code: {
const atom = wasm_file.getAtomPtr(atom_index); const atom = wasm_file.getAtomPtr(atom_index);
atom.alignment = ty.abiAlignment(pt); atom.alignment = ty.abiAlignment(zcu);
const segment_name = try std.mem.concat(gpa, u8, &.{ ".rodata.", name }); const segment_name = try std.mem.concat(gpa, u8, &.{ ".rodata.", name });
errdefer gpa.free(segment_name); errdefer gpa.free(segment_name);
zig_object.symbol(sym_index).* = .{ zig_object.symbol(sym_index).* = .{
@ -509,7 +509,7 @@ fn lowerConst(
.index = try zig_object.createDataSegment( .index = try zig_object.createDataSegment(
gpa, gpa,
segment_name, segment_name,
ty.abiAlignment(pt), ty.abiAlignment(zcu),
), ),
.virtual_address = undefined, .virtual_address = undefined,
}; };
@ -555,7 +555,7 @@ pub fn getErrorTableSymbol(zig_object: *ZigObject, wasm_file: *Wasm, pt: Zcu.Per
const atom_index = try wasm_file.createAtom(sym_index, zig_object.index); const atom_index = try wasm_file.createAtom(sym_index, zig_object.index);
const atom = wasm_file.getAtomPtr(atom_index); const atom = wasm_file.getAtomPtr(atom_index);
const slice_ty = Type.slice_const_u8_sentinel_0; const slice_ty = Type.slice_const_u8_sentinel_0;
atom.alignment = slice_ty.abiAlignment(pt); atom.alignment = slice_ty.abiAlignment(pt.zcu);
const sym_name = try zig_object.string_table.insert(gpa, "__zig_err_name_table"); const sym_name = try zig_object.string_table.insert(gpa, "__zig_err_name_table");
const segment_name = try gpa.dupe(u8, ".rodata.__zig_err_name_table"); const segment_name = try gpa.dupe(u8, ".rodata.__zig_err_name_table");
@ -611,7 +611,7 @@ fn populateErrorNameTable(zig_object: *ZigObject, wasm_file: *Wasm, tid: Zcu.Per
// TODO: remove this unreachable entry // TODO: remove this unreachable entry
try atom.code.appendNTimes(gpa, 0, 4); try atom.code.appendNTimes(gpa, 0, 4);
try atom.code.writer(gpa).writeInt(u32, 0, .little); try atom.code.writer(gpa).writeInt(u32, 0, .little);
atom.size += @intCast(slice_ty.abiSize(pt)); atom.size += @intCast(slice_ty.abiSize(pt.zcu));
addend += 1; addend += 1;
try names_atom.code.append(gpa, 0); try names_atom.code.append(gpa, 0);
@ -632,7 +632,7 @@ fn populateErrorNameTable(zig_object: *ZigObject, wasm_file: *Wasm, tid: Zcu.Per
.offset = offset, .offset = offset,
.addend = @intCast(addend), .addend = @intCast(addend),
}); });
atom.size += @intCast(slice_ty.abiSize(pt)); atom.size += @intCast(slice_ty.abiSize(pt.zcu));
addend += len; addend += len;
// as we updated the error name table, we now store the actual name within the names atom // as we updated the error name table, we now store the actual name within the names atom
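Where a routine only has the Zcu.PerThread handle in scope and no local zcu, the wasm ZigObject hunks above reach the new query API through pt.zcu inline (abiAlignment(pt.zcu), abiSize(pt.zcu)). A minimal sketch of that form; the helper is hypothetical and only illustrates the calling convention:

    // Hypothetical helper: picks an atom alignment for a type using only the per-thread handle.
    fn atomAlignmentFor(pt: Zcu.PerThread, ty: Type) InternPool.Alignment {
        return ty.abiAlignment(pt.zcu); // Zcu-only layout query, no Sema needed here
    }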

View File

@ -369,7 +369,7 @@ pub const MutableValue = union(enum) {
.bytes => |b| { .bytes => |b| {
assert(is_trivial_int); assert(is_trivial_int);
assert(field_val.typeOf(zcu).toIntern() == .u8_type); assert(field_val.typeOf(zcu).toIntern() == .u8_type);
b.data[field_idx] = @intCast(Value.fromInterned(field_val.interned).toUnsignedInt(pt)); b.data[field_idx] = @intCast(Value.fromInterned(field_val.interned).toUnsignedInt(zcu));
}, },
.repeated => |r| { .repeated => |r| {
if (field_val.eqlTrivial(r.child.*)) return; if (field_val.eqlTrivial(r.child.*)) return;
@ -382,9 +382,9 @@ pub const MutableValue = union(enum) {
{ {
// We can use the `bytes` representation. // We can use the `bytes` representation.
const bytes = try arena.alloc(u8, @intCast(len_inc_sent)); const bytes = try arena.alloc(u8, @intCast(len_inc_sent));
const repeated_byte = Value.fromInterned(r.child.interned).toUnsignedInt(pt); const repeated_byte = Value.fromInterned(r.child.interned).toUnsignedInt(zcu);
@memset(bytes, @intCast(repeated_byte)); @memset(bytes, @intCast(repeated_byte));
bytes[field_idx] = @intCast(Value.fromInterned(field_val.interned).toUnsignedInt(pt)); bytes[field_idx] = @intCast(Value.fromInterned(field_val.interned).toUnsignedInt(zcu));
mv.* = .{ .bytes = .{ mv.* = .{ .bytes = .{
.ty = r.ty, .ty = r.ty,
.data = bytes, .data = bytes,
@ -431,7 +431,7 @@ pub const MutableValue = union(enum) {
} else { } else {
const bytes = try arena.alloc(u8, a.elems.len); const bytes = try arena.alloc(u8, a.elems.len);
for (a.elems, bytes) |elem_val, *b| { for (a.elems, bytes) |elem_val, *b| {
b.* = @intCast(Value.fromInterned(elem_val.interned).toUnsignedInt(pt)); b.* = @intCast(Value.fromInterned(elem_val.interned).toUnsignedInt(zcu));
} }
mv.* = .{ .bytes = .{ mv.* = .{ .bytes = .{
.ty = a.ty, .ty = a.ty,

View File

@ -95,11 +95,11 @@ pub fn print(
.int => |int| switch (int.storage) { .int => |int| switch (int.storage) {
inline .u64, .i64, .big_int => |x| try writer.print("{}", .{x}), inline .u64, .i64, .big_int => |x| try writer.print("{}", .{x}),
.lazy_align => |ty| if (have_sema) { .lazy_align => |ty| if (have_sema) {
const a = (try Type.fromInterned(ty).abiAlignmentAdvanced(pt, .sema)).scalar; const a = try Type.fromInterned(ty).abiAlignmentSema(pt);
try writer.print("{}", .{a.toByteUnits() orelse 0}); try writer.print("{}", .{a.toByteUnits() orelse 0});
} else try writer.print("@alignOf({})", .{Type.fromInterned(ty).fmt(pt)}), } else try writer.print("@alignOf({})", .{Type.fromInterned(ty).fmt(pt)}),
.lazy_size => |ty| if (have_sema) { .lazy_size => |ty| if (have_sema) {
const s = (try Type.fromInterned(ty).abiSizeAdvanced(pt, .sema)).scalar; const s = try Type.fromInterned(ty).abiSizeSema(pt);
try writer.print("{}", .{s}); try writer.print("{}", .{s});
} else try writer.print("@sizeOf({})", .{Type.fromInterned(ty).fmt(pt)}), } else try writer.print("@sizeOf({})", .{Type.fromInterned(ty).fmt(pt)}),
}, },
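The print.zig hunk above also shows the companion renaming: the old abiAlignmentAdvanced(pt, .sema) and abiSizeAdvanced(pt, .sema) call sites become the dedicated abiAlignmentSema(pt) and abiSizeSema(pt) wrappers, which keep the per-thread handle because they may still trigger type resolution, while already-resolved types use the plain *Zcu queries. A minimal sketch contrasting the two (illustrative only, not from this commit):

    // Illustrative only: contrasts the Sema wrapper with the plain Zcu query.
    fn sizeSketch(pt: Zcu.PerThread, zcu: *Zcu, ty: InternPool.Index) !void {
        const lazy_size = try Type.fromInterned(ty).abiSizeSema(pt); // may resolve the type via Sema
        const known_size = Type.fromInterned(ty).abiSize(zcu);       // assumes the type is already resolved
        _ = lazy_size;
        _ = known_size;
    }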
@ -245,7 +245,7 @@ fn printAggregate(
if (ty.childType(zcu).toIntern() != .u8_type) break :one_byte_str; if (ty.childType(zcu).toIntern() != .u8_type) break :one_byte_str;
const elem_val = Value.fromInterned(aggregate.storage.values()[0]); const elem_val = Value.fromInterned(aggregate.storage.values()[0]);
if (elem_val.isUndef(zcu)) break :one_byte_str; if (elem_val.isUndef(zcu)) break :one_byte_str;
const byte = elem_val.toUnsignedInt(pt); const byte = elem_val.toUnsignedInt(zcu);
try writer.print("\"{}\"", .{std.zig.fmtEscapes(&.{@intCast(byte)})}); try writer.print("\"{}\"", .{std.zig.fmtEscapes(&.{@intCast(byte)})});
if (!is_ref) try writer.writeAll(".*"); if (!is_ref) try writer.writeAll(".*");
return; return;