stage2: lazy @alignOf

Add a `target` parameter to every function that deals with Type and
Value.
Andrew Kelley 2022-03-22 00:23:54 -07:00
parent b74f292410
commit 593130ce0a
24 changed files with 1575 additions and 1058 deletions
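
The mechanical shape of the change, as a minimal sketch with a toy value type (these are not the compiler's real declarations): equality, hashing, and integer extraction now take a `std.Target`, because a lazily stored `@alignOf` result only becomes a concrete integer once a target is known.

const std = @import("std");

const Val = union(enum) {
    int: u64,
    lazy_align_of_usize, // stand-in for the new Value.Tag.lazy_align

    // Stand-in for resolving a lazy value, e.g. via Type.abiAlignment(target).
    fn resolved(v: Val, target: std.Target) u64 {
        return switch (v) {
            .int => |x| x,
            .lazy_align_of_usize => @divExact(target.cpu.arch.ptrBitWidth(), 8),
        };
    }

    // Before this commit: eql(a, b). After: the target rides along.
    pub fn eql(a: Val, b: Val, target: std.Target) bool {
        return a.resolved(target) == b.resolved(target);
    }

    // Matches the new TypedValue.hash(tv, &hasher, target) shape.
    pub fn hash(v: Val, hasher: *std.hash.Wyhash, target: std.Target) void {
        const x = v.resolved(target);
        hasher.update(std.mem.asBytes(&x));
    }
};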

@ -2781,7 +2781,9 @@ fn processOneJob(comp: *Compilation, job: Job, main_progress_node: *std.Progress
.error_msg = null,
.decl = decl,
.fwd_decl = fwd_decl.toManaged(gpa),
.typedefs = c_codegen.TypedefMap.init(gpa),
.typedefs = c_codegen.TypedefMap.initContext(gpa, .{
.target = comp.getTarget(),
}),
.typedefs_arena = typedefs_arena.allocator(),
};
defer dg.fwd_decl.deinit();
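
The hunk above is an instance of a pattern repeated through this commit: once a map's hash/eql context carries state (the target), the zero-size-context `init` no longer applies. A minimal sketch with toy key/value types, using `std.HashMap` for brevity:

const std = @import("std");

const Ctx = struct {
    target: std.Target,

    pub fn hash(ctx: Ctx, key: u32) u64 {
        _ = ctx; // a real context would fold target-dependent data into the hash
        return std.hash.Wyhash.hash(0, std.mem.asBytes(&key));
    }

    pub fn eql(ctx: Ctx, a: u32, b: u32) bool {
        _ = ctx;
        return a == b;
    }
};

const Map = std.HashMap(u32, []const u8, Ctx, std.hash_map.default_max_load_percentage);

fn makeMap(gpa: std.mem.Allocator, target: std.Target) Map {
    // Plain Map.init(gpa) is a compile error for a context with fields;
    // initContext stores the context alongside the map.
    return Map.initContext(gpa, .{ .target = target });
}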

@ -146,6 +146,8 @@ const MonomorphedFuncsSet = std.HashMapUnmanaged(
);
const MonomorphedFuncsContext = struct {
target: Target,
pub fn eql(ctx: @This(), a: *Fn, b: *Fn) bool {
_ = ctx;
return a == b;
@ -153,7 +155,6 @@ const MonomorphedFuncsContext = struct {
/// Must match `Sema.GenericCallAdapter.hash`.
pub fn hash(ctx: @This(), key: *Fn) u64 {
_ = ctx;
var hasher = std.hash.Wyhash.init(0);
// The generic function Decl is guaranteed to be the first dependency
@ -168,7 +169,7 @@ const MonomorphedFuncsContext = struct {
const generic_ty_info = generic_owner_decl.ty.fnInfo();
for (generic_ty_info.param_types) |param_ty, i| {
if (generic_ty_info.paramIsComptime(i) and param_ty.tag() != .generic_poison) {
comptime_args[i].val.hash(param_ty, &hasher);
comptime_args[i].val.hash(param_ty, &hasher, ctx.target);
}
}
@ -184,6 +185,8 @@ pub const MemoizedCallSet = std.HashMapUnmanaged(
);
pub const MemoizedCall = struct {
target: std.Target,
pub const Key = struct {
func: *Fn,
args: []TypedValue,
@ -195,14 +198,12 @@ pub const MemoizedCall = struct {
};
pub fn eql(ctx: @This(), a: Key, b: Key) bool {
_ = ctx;
if (a.func != b.func) return false;
assert(a.args.len == b.args.len);
for (a.args) |a_arg, arg_i| {
const b_arg = b.args[arg_i];
if (!a_arg.eql(b_arg)) {
if (!a_arg.eql(b_arg, ctx.target)) {
return false;
}
}
@ -212,8 +213,6 @@ pub const MemoizedCall = struct {
/// Must match `Sema.GenericCallAdapter.hash`.
pub fn hash(ctx: @This(), key: Key) u64 {
_ = ctx;
var hasher = std.hash.Wyhash.init(0);
// The generic function Decl is guaranteed to be the first dependency
@ -223,7 +222,7 @@ pub const MemoizedCall = struct {
// This logic must be kept in sync with the logic in `analyzeCall` that
// computes the hash.
for (key.args) |arg| {
arg.hash(&hasher);
arg.hash(&hasher, ctx.target);
}
return hasher.final();
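
A sketch of the `MemoizedCall` shape, with `u64` standing in for `TypedValue`: the context stops discarding itself (`_ = ctx`) and instead feeds `ctx.target` into every per-argument hash and eql call, so target-dependent values memoize consistently.

const std = @import("std");

const CallCtx = struct {
    target: std.Target,

    pub fn hash(ctx: CallCtx, args: []const u64) u64 {
        var hasher = std.hash.Wyhash.init(0);
        for (args) |arg| hashArg(arg, &hasher, ctx.target);
        return hasher.final();
    }

    pub fn eql(ctx: CallCtx, a: []const u64, b: []const u64) bool {
        _ = ctx; // the real TypedValue.eql consults the target
        return std.mem.eql(u64, a, b);
    }
};

// Stand-in for TypedValue.hash(tv, &hasher, target).
fn hashArg(arg: u64, hasher: *std.hash.Wyhash, target: std.Target) void {
    _ = target; // a lazy value would be resolved against the target first
    std.hash.autoHash(hasher, arg);
}
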
@ -1230,7 +1229,7 @@ pub const Union = struct {
if (field.abi_align == 0) {
break :a field.ty.abiAlignment(target);
} else {
break :a @intCast(u32, field.abi_align.toUnsignedInt());
break :a field.abi_align;
}
};
if (field_align > most_alignment) {
@ -3877,6 +3876,7 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
const bytes = try sema.resolveConstString(&block_scope, src, linksection_ref);
break :blk (try decl_arena_allocator.dupeZ(u8, bytes)).ptr;
};
const target = sema.mod.getTarget();
const address_space = blk: {
const addrspace_ctx: Sema.AddressSpaceContext = switch (decl_tv.val.tag()) {
.function, .extern_fn => .function,
@ -3886,9 +3886,9 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
break :blk switch (decl.zirAddrspaceRef()) {
.none => switch (addrspace_ctx) {
.function => target_util.defaultAddressSpace(sema.mod.getTarget(), .function),
.variable => target_util.defaultAddressSpace(sema.mod.getTarget(), .global_mutable),
.constant => target_util.defaultAddressSpace(sema.mod.getTarget(), .global_constant),
.function => target_util.defaultAddressSpace(target, .function),
.variable => target_util.defaultAddressSpace(target, .global_mutable),
.constant => target_util.defaultAddressSpace(target, .global_constant),
else => unreachable,
},
else => |addrspace_ref| try sema.analyzeAddrspace(&block_scope, src, addrspace_ref, addrspace_ctx),
@ -3904,13 +3904,15 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
if (decl.is_usingnamespace) {
const ty_ty = Type.initTag(.type);
if (!decl_tv.ty.eql(ty_ty)) {
return sema.fail(&block_scope, src, "expected type, found {}", .{decl_tv.ty});
if (!decl_tv.ty.eql(ty_ty, target)) {
return sema.fail(&block_scope, src, "expected type, found {}", .{
decl_tv.ty.fmt(target),
});
}
var buffer: Value.ToTypeBuffer = undefined;
const ty = decl_tv.val.toType(&buffer);
if (ty.getNamespace() == null) {
return sema.fail(&block_scope, src, "type {} has no namespace", .{ty});
return sema.fail(&block_scope, src, "type {} has no namespace", .{ty.fmt(target)});
}
decl.ty = ty_ty;
@ -3937,7 +3939,7 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
if (decl.has_tv) {
prev_type_has_bits = decl.ty.isFnOrHasRuntimeBits();
type_changed = !decl.ty.eql(decl_tv.ty);
type_changed = !decl.ty.eql(decl_tv.ty, target);
if (decl.getFunction()) |prev_func| {
prev_is_inline = prev_func.state == .inline_only;
}
@ -3986,7 +3988,7 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
}
var type_changed = true;
if (decl.has_tv) {
type_changed = !decl.ty.eql(decl_tv.ty);
type_changed = !decl.ty.eql(decl_tv.ty, target);
decl.clearValues(gpa);
}
@ -5054,22 +5056,6 @@ pub fn errNoteNonLazy(
};
}
pub fn errorUnionType(
arena: Allocator,
error_set: Type,
payload: Type,
) Allocator.Error!Type {
assert(error_set.zigTypeTag() == .ErrorSet);
if (error_set.eql(Type.initTag(.anyerror)) and payload.eql(Type.initTag(.void))) {
return Type.initTag(.anyerror_void_error_union);
}
return Type.Tag.error_union.create(arena, .{
.error_set = error_set,
.payload = payload,
});
}
pub fn getTarget(mod: Module) Target {
return mod.comp.bin_file.options.target;
}

@ -6,6 +6,7 @@ const RangeSet = @This();
const SwitchProngSrc = @import("Module.zig").SwitchProngSrc;
ranges: std.ArrayList(Range),
target: std.Target,
pub const Range = struct {
first: Value,
@ -13,9 +14,10 @@ pub const Range = struct {
src: SwitchProngSrc,
};
pub fn init(allocator: std.mem.Allocator) RangeSet {
pub fn init(allocator: std.mem.Allocator, target: std.Target) RangeSet {
return .{
.ranges = std.ArrayList(Range).init(allocator),
.target = target,
};
}
@ -30,8 +32,12 @@ pub fn add(
ty: Type,
src: SwitchProngSrc,
) !?SwitchProngSrc {
const target = self.target;
for (self.ranges.items) |range| {
if (last.compare(.gte, range.first, ty) and first.compare(.lte, range.last, ty)) {
if (last.compare(.gte, range.first, ty, target) and
first.compare(.lte, range.last, ty, target))
{
return range.src; // They overlap.
}
}
@ -43,19 +49,26 @@ pub fn add(
return null;
}
const LessThanContext = struct { ty: Type, target: std.Target };
/// Assumes a and b do not overlap
fn lessThan(ty: Type, a: Range, b: Range) bool {
return a.first.compare(.lt, b.first, ty);
fn lessThan(ctx: LessThanContext, a: Range, b: Range) bool {
return a.first.compare(.lt, b.first, ctx.ty, ctx.target);
}
pub fn spans(self: *RangeSet, first: Value, last: Value, ty: Type) !bool {
if (self.ranges.items.len == 0)
return false;
std.sort.sort(Range, self.ranges.items, ty, lessThan);
const target = self.target;
if (!self.ranges.items[0].first.eql(first, ty) or
!self.ranges.items[self.ranges.items.len - 1].last.eql(last, ty))
std.sort.sort(Range, self.ranges.items, LessThanContext{
.ty = ty,
.target = target,
}, lessThan);
if (!self.ranges.items[0].first.eql(first, ty, target) or
!self.ranges.items[self.ranges.items.len - 1].last.eql(last, ty, target))
{
return false;
}
@ -71,10 +84,10 @@ pub fn spans(self: *RangeSet, first: Value, last: Value, ty: Type) !bool {
const prev = self.ranges.items[i];
// prev.last + 1 == cur.first
try counter.copy(prev.last.toBigInt(&space));
try counter.copy(prev.last.toBigInt(&space, target));
try counter.addScalar(counter.toConst(), 1);
const cur_start_int = cur.first.toBigInt(&space);
const cur_start_int = cur.first.toBigInt(&space, target);
if (!cur_start_int.eq(counter.toConst())) {
return false;
}
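
The comparator change above follows `std.sort.sort`'s context parameter: extra state (the target) travels in a small struct rather than a bare `Type`. A self-contained sketch over plain integers:

const std = @import("std");

const SortCtx = struct { target: std.Target };

fn lessThan(ctx: SortCtx, a: u32, b: u32) bool {
    _ = ctx; // a real comparator resolves target-dependent values before comparing
    return a < b;
}

fn sortWithTarget(items: []u32, target: std.Target) void {
    std.sort.sort(u32, items, SortCtx{ .target = target }, lessThan);
}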

File diff suppressed because it is too large.

@ -3,6 +3,7 @@ const Type = @import("type.zig").Type;
const Value = @import("value.zig").Value;
const Allocator = std.mem.Allocator;
const TypedValue = @This();
const Target = std.Target;
ty: Type,
val: Value,
@ -30,13 +31,13 @@ pub fn copy(self: TypedValue, arena: Allocator) error{OutOfMemory}!TypedValue {
};
}
pub fn eql(a: TypedValue, b: TypedValue) bool {
if (!a.ty.eql(b.ty)) return false;
return a.val.eql(b.val, a.ty);
pub fn eql(a: TypedValue, b: TypedValue, target: std.Target) bool {
if (!a.ty.eql(b.ty, target)) return false;
return a.val.eql(b.val, a.ty, target);
}
pub fn hash(tv: TypedValue, hasher: *std.hash.Wyhash) void {
return tv.val.hash(tv.ty, hasher);
pub fn hash(tv: TypedValue, hasher: *std.hash.Wyhash, target: std.Target) void {
return tv.val.hash(tv.ty, hasher, target);
}
pub fn enumToInt(tv: TypedValue, buffer: *Value.Payload.U64) Value {
@ -45,21 +46,28 @@ pub fn enumToInt(tv: TypedValue, buffer: *Value.Payload.U64) Value {
const max_aggregate_items = 100;
pub fn format(
const FormatContext = struct {
tv: TypedValue,
target: Target,
};
pub fn format(
ctx: FormatContext,
comptime fmt: []const u8,
options: std.fmt.FormatOptions,
writer: anytype,
) !void {
_ = options;
comptime std.debug.assert(fmt.len == 0);
return tv.print(options, writer, 3);
return ctx.tv.print(writer, 3, ctx.target);
}
/// Prints the Value according to the Type, not according to the Value Tag.
pub fn print(
tv: TypedValue,
options: std.fmt.FormatOptions,
writer: anytype,
level: u8,
target: std.Target,
) @TypeOf(writer).Error!void {
var val = tv.val;
var ty = tv.ty;
@ -148,7 +156,7 @@ pub fn print(
try print(.{
.ty = fields[i].ty,
.val = vals[i],
}, options, writer, level - 1);
}, writer, level - 1, target);
}
return writer.writeAll(" }");
} else {
@ -162,7 +170,7 @@ pub fn print(
try print(.{
.ty = elem_ty,
.val = vals[i],
}, options, writer, level - 1);
}, writer, level - 1, target);
}
return writer.writeAll(" }");
}
@ -177,12 +185,12 @@ pub fn print(
try print(.{
.ty = ty.unionTagType().?,
.val = union_val.tag,
}, options, writer, level - 1);
}, writer, level - 1, target);
try writer.writeAll(" = ");
try print(.{
.ty = ty.unionFieldType(union_val.tag),
.ty = ty.unionFieldType(union_val.tag, target),
.val = union_val.val,
}, options, writer, level - 1);
}, writer, level - 1, target);
return writer.writeAll(" }");
},
@ -197,7 +205,7 @@ pub fn print(
},
.bool_true => return writer.writeAll("true"),
.bool_false => return writer.writeAll("false"),
.ty => return val.castTag(.ty).?.data.format("", options, writer),
.ty => return val.castTag(.ty).?.data.print(writer, target),
.int_type => {
const int_type = val.castTag(.int_type).?.data;
return writer.print("{s}{d}", .{
@ -205,10 +213,15 @@ pub fn print(
int_type.bits,
});
},
.int_u64 => return std.fmt.formatIntValue(val.castTag(.int_u64).?.data, "", options, writer),
.int_i64 => return std.fmt.formatIntValue(val.castTag(.int_i64).?.data, "", options, writer),
.int_u64 => return std.fmt.formatIntValue(val.castTag(.int_u64).?.data, "", .{}, writer),
.int_i64 => return std.fmt.formatIntValue(val.castTag(.int_i64).?.data, "", .{}, writer),
.int_big_positive => return writer.print("{}", .{val.castTag(.int_big_positive).?.asBigInt()}),
.int_big_negative => return writer.print("{}", .{val.castTag(.int_big_negative).?.asBigInt()}),
.lazy_align => {
const sub_ty = val.castTag(.lazy_align).?.data;
const x = sub_ty.abiAlignment(target);
return writer.print("{d}", .{x});
},
.function => return writer.print("(function '{s}')", .{val.castTag(.function).?.data.owner_decl.name}),
.extern_fn => return writer.writeAll("(extern function)"),
.variable => return writer.writeAll("(variable)"),
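
The `.lazy_align` case above is the commit's namesake: `@alignOf` is stored as a lazy value wrapping its operand type, and only folded to an integer where a target is available. A toy mirror of the print path (illustrative types only):

const std = @import("std");

const ToyType = enum {
    usize_ty,
    u8_ty,

    // Stand-in for Type.abiAlignment(target).
    fn abiAlignment(ty: ToyType, target: std.Target) u32 {
        return switch (ty) {
            .usize_ty => @divExact(target.cpu.arch.ptrBitWidth(), 8),
            .u8_ty => 1,
        };
    }
};

const ToyVal = union(enum) {
    int_u64: u64,
    lazy_align: ToyType,

    fn print(val: ToyVal, writer: anytype, target: std.Target) !void {
        switch (val) {
            .int_u64 => |x| try writer.print("{d}", .{x}),
            // Resolution happens here, at print time, not when the value was made.
            .lazy_align => |ty| try writer.print("{d}", .{ty.abiAlignment(target)}),
        }
    }
};
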
@ -220,7 +233,7 @@ pub fn print(
return print(.{
.ty = decl.ty,
.val = decl.val,
}, options, writer, level - 1);
}, writer, level - 1, target);
},
.decl_ref => {
const decl = val.castTag(.decl_ref).?.data;
@ -230,7 +243,7 @@ pub fn print(
return print(.{
.ty = decl.ty,
.val = decl.val,
}, options, writer, level - 1);
}, writer, level - 1, target);
},
.elem_ptr => {
const elem_ptr = val.castTag(.elem_ptr).?.data;
@ -238,7 +251,7 @@ pub fn print(
try print(.{
.ty = elem_ptr.elem_ty,
.val = elem_ptr.array_ptr,
}, options, writer, level - 1);
}, writer, level - 1, target);
return writer.print("[{}]", .{elem_ptr.index});
},
.field_ptr => {
@ -247,7 +260,7 @@ pub fn print(
try print(.{
.ty = field_ptr.container_ty,
.val = field_ptr.container_ptr,
}, options, writer, level - 1);
}, writer, level - 1, target);
if (field_ptr.container_ty.zigTypeTag() == .Struct) {
const field_name = field_ptr.container_ty.structFields().keys()[field_ptr.field_index];
@ -275,7 +288,7 @@ pub fn print(
};
while (i < max_aggregate_items) : (i += 1) {
if (i != 0) try writer.writeAll(", ");
try print(elem_tv, options, writer, level - 1);
try print(elem_tv, writer, level - 1, target);
}
return writer.writeAll(" }");
},
@ -287,7 +300,7 @@ pub fn print(
try print(.{
.ty = ty.elemType2(),
.val = ty.sentinel().?,
}, options, writer, level - 1);
}, writer, level - 1, target);
return writer.writeAll(" }");
},
.slice => return writer.writeAll("(slice)"),

@ -796,7 +796,9 @@ fn addDbgInfoTypeReloc(self: *Self, ty: Type) !void {
const index = dbg_out.dbg_info.items.len;
try dbg_out.dbg_info.resize(index + 4); // DW.AT.type, DW.FORM.ref4
const gop = try dbg_out.dbg_info_type_relocs.getOrPut(self.gpa, ty);
const gop = try dbg_out.dbg_info_type_relocs.getOrPutContext(self.gpa, ty, .{
.target = self.target.*,
});
if (!gop.found_existing) {
gop.value_ptr.* = .{
.off = undefined,
@ -835,8 +837,9 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
return self.next_stack_offset;
}
const target = self.target.*;
const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch {
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty});
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(target)});
};
// TODO swap this for inst.ty.ptrAlign
const abi_align = elem_ty.abiAlignment(self.target.*);
@ -845,8 +848,9 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
const elem_ty = self.air.typeOfIndex(inst);
const target = self.target.*;
const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch {
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty});
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(target)});
};
const abi_align = elem_ty.abiAlignment(self.target.*);
if (abi_align > self.stack_align)
@ -1372,6 +1376,7 @@ fn binOp(
lhs_ty: Type,
rhs_ty: Type,
) InnerError!MCValue {
const target = self.target.*;
switch (tag) {
// Arithmetic operations on integers and floats
.add,
@ -1381,7 +1386,7 @@ fn binOp(
.Float => return self.fail("TODO binary operations on floats", .{}),
.Vector => return self.fail("TODO binary operations on vectors", .{}),
.Int => {
assert(lhs_ty.eql(rhs_ty));
assert(lhs_ty.eql(rhs_ty, target));
const int_info = lhs_ty.intInfo(self.target.*);
if (int_info.bits <= 64) {
// Only say yes if the operation is
@ -1418,7 +1423,7 @@ fn binOp(
switch (lhs_ty.zigTypeTag()) {
.Vector => return self.fail("TODO binary operations on vectors", .{}),
.Int => {
assert(lhs_ty.eql(rhs_ty));
assert(lhs_ty.eql(rhs_ty, target));
const int_info = lhs_ty.intInfo(self.target.*);
if (int_info.bits <= 64) {
// TODO add optimisations for multiplication
@ -1440,7 +1445,7 @@ fn binOp(
switch (lhs_ty.zigTypeTag()) {
.Vector => return self.fail("TODO binary operations on vectors", .{}),
.Int => {
assert(lhs_ty.eql(rhs_ty));
assert(lhs_ty.eql(rhs_ty, target));
const int_info = lhs_ty.intInfo(self.target.*);
if (int_info.bits <= 64) {
// TODO implement bitwise operations with immediates
@ -2348,11 +2353,12 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
const ty = self.air.typeOfIndex(inst);
const result = self.args[arg_index];
const target = self.target.*;
const mcv = switch (result) {
// Copy registers to the stack
.register => |reg| blk: {
const abi_size = math.cast(u32, ty.abiSize(self.target.*)) catch {
return self.fail("type '{}' too big to fit into stack frame", .{ty});
return self.fail("type '{}' too big to fit into stack frame", .{ty.fmt(target)});
};
const abi_align = ty.abiAlignment(self.target.*);
const stack_offset = try self.allocMem(inst, abi_size, abi_align);
@ -3879,7 +3885,7 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!MCVa
}
fn lowerUnnamedConst(self: *Self, tv: TypedValue) InnerError!MCValue {
log.debug("lowerUnnamedConst: ty = {}, val = {}", .{ tv.ty, tv.val.fmtDebug() });
log.debug("lowerUnnamedConst: ty = {}, val = {}", .{ tv.ty.fmtDebug(), tv.val.fmtDebug() });
const local_sym_index = self.bin_file.lowerUnnamedConst(tv, self.mod_fn.owner_decl) catch |err| {
return self.fail("lowering unnamed constant failed: {s}", .{@errorName(err)});
};
@ -3907,6 +3913,7 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
if (typed_value.val.castTag(.decl_ref_mut)) |payload| {
return self.lowerDeclRef(typed_value, payload.data.decl);
}
const target = self.target.*;
switch (typed_value.ty.zigTypeTag()) {
.Pointer => switch (typed_value.ty.ptrSize()) {
@ -3916,7 +3923,7 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
else => {
switch (typed_value.val.tag()) {
.int_u64 => {
return MCValue{ .immediate = typed_value.val.toUnsignedInt() };
return MCValue{ .immediate = typed_value.val.toUnsignedInt(target) };
},
.slice => {
return self.lowerUnnamedConst(typed_value);
@ -3935,7 +3942,7 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
const signed = typed_value.val.toSignedInt();
break :blk @bitCast(u64, signed);
},
.unsigned => typed_value.val.toUnsignedInt(),
.unsigned => typed_value.val.toUnsignedInt(target),
};
return MCValue{ .immediate = unsigned };
@ -4004,20 +4011,20 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
}
_ = pl;
return self.fail("TODO implement error union const of type '{}' (non-error)", .{typed_value.ty});
return self.fail("TODO implement error union const of type '{}' (non-error)", .{typed_value.ty.fmtDebug()});
} else {
if (!payload_type.hasRuntimeBits()) {
// We use the error type directly as the type.
return self.genTypedValue(.{ .ty = error_type, .val = typed_value.val });
}
return self.fail("TODO implement error union const of type '{}' (error)", .{typed_value.ty});
return self.fail("TODO implement error union const of type '{}' (error)", .{typed_value.ty.fmtDebug()});
}
},
.Struct => {
return self.lowerUnnamedConst(typed_value);
},
else => return self.fail("TODO implement const of type '{}'", .{typed_value.ty}),
else => return self.fail("TODO implement const of type '{}'", .{typed_value.ty.fmtDebug()}),
}
}
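
The error-message churn in this file reflects that `Type` no longer formats with a bare `{}`: debugging paths use `fmtDebug()` (no target needed), while user-facing messages use `fmt(target)`, which packages the target into a `std.fmt.Formatter` context. A minimal sketch of that formatter shape, with an illustrative toy type:

const std = @import("std");

const ToyType = struct {
    name: []const u8,

    // Mirrors the fmt(target) calls above: the returned Formatter carries
    // both the type and the target to the eventual format call.
    pub fn fmt(ty: ToyType, target: std.Target) std.fmt.Formatter(formatWithTarget) {
        return .{ .data = .{ .ty = ty, .target = target } };
    }
};

const FormatData = struct { ty: ToyType, target: std.Target };

fn formatWithTarget(
    data: FormatData,
    comptime f: []const u8,
    options: std.fmt.FormatOptions,
    writer: anytype,
) !void {
    _ = f;
    _ = options;
    // A real implementation resolves lazy sizes/alignments via data.target.
    try writer.print("{s}@{d}", .{ data.ty.name, data.target.cpu.arch.ptrBitWidth() });
}

In the hunks above this is then used as, e.g., self.fail("type '{}' ...", .{elem_ty.fmt(target)}).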

@ -801,8 +801,9 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
return self.next_stack_offset;
}
const target = self.target.*;
const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch {
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty});
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(target)});
};
// TODO swap this for inst.ty.ptrAlign
const abi_align = elem_ty.abiAlignment(self.target.*);
@ -811,8 +812,9 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
const elem_ty = self.air.typeOfIndex(inst);
const target = self.target.*;
const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch {
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty});
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(target)});
};
const abi_align = elem_ty.abiAlignment(self.target.*);
if (abi_align > self.stack_align)
@ -2195,6 +2197,7 @@ fn binOp(
lhs_ty: Type,
rhs_ty: Type,
) InnerError!MCValue {
const target = self.target.*;
switch (tag) {
.add,
.sub,
@ -2204,7 +2207,7 @@ fn binOp(
.Float => return self.fail("TODO ARM binary operations on floats", .{}),
.Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
.Int => {
assert(lhs_ty.eql(rhs_ty));
assert(lhs_ty.eql(rhs_ty, target));
const int_info = lhs_ty.intInfo(self.target.*);
if (int_info.bits <= 32) {
// Only say yes if the operation is
@ -2245,7 +2248,7 @@ fn binOp(
.Float => return self.fail("TODO ARM binary operations on floats", .{}),
.Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
.Int => {
assert(lhs_ty.eql(rhs_ty));
assert(lhs_ty.eql(rhs_ty, target));
const int_info = lhs_ty.intInfo(self.target.*);
if (int_info.bits <= 32) {
// TODO add optimisations for multiplication
@ -2299,7 +2302,7 @@ fn binOp(
switch (lhs_ty.zigTypeTag()) {
.Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
.Int => {
assert(lhs_ty.eql(rhs_ty));
assert(lhs_ty.eql(rhs_ty, target));
const int_info = lhs_ty.intInfo(self.target.*);
if (int_info.bits <= 32) {
const lhs_immediate_ok = lhs == .immediate and Instruction.Operand.fromU32(lhs.immediate) != null;
@ -4376,6 +4379,7 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
if (typed_value.val.castTag(.decl_ref_mut)) |payload| {
return self.lowerDeclRef(typed_value, payload.data.decl);
}
const target = self.target.*;
switch (typed_value.ty.zigTypeTag()) {
.Array => {
@ -4388,7 +4392,7 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
else => {
switch (typed_value.val.tag()) {
.int_u64 => {
return MCValue{ .immediate = @intCast(u32, typed_value.val.toUnsignedInt()) };
return MCValue{ .immediate = @intCast(u32, typed_value.val.toUnsignedInt(target)) };
},
.slice => {
return self.lowerUnnamedConst(typed_value);
@ -4407,7 +4411,7 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
const signed = @intCast(i32, typed_value.val.toSignedInt());
break :blk @bitCast(u32, signed);
},
.unsigned => @intCast(u32, typed_value.val.toUnsignedInt()),
.unsigned => @intCast(u32, typed_value.val.toUnsignedInt(target)),
};
return MCValue{ .immediate = unsigned };
@ -4476,20 +4480,20 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
}
_ = pl;
return self.fail("TODO implement error union const of type '{}' (non-error)", .{typed_value.ty});
return self.fail("TODO implement error union const of type '{}' (non-error)", .{typed_value.ty.fmtDebug()});
} else {
if (!payload_type.hasRuntimeBits()) {
// We use the error type directly as the type.
return self.genTypedValue(.{ .ty = error_type, .val = typed_value.val });
}
return self.fail("TODO implement error union const of type '{}' (error)", .{typed_value.ty});
return self.fail("TODO implement error union const of type '{}' (error)", .{typed_value.ty.fmtDebug()});
}
},
.Struct => {
return self.lowerUnnamedConst(typed_value);
},
else => return self.fail("TODO implement const of type '{}'", .{typed_value.ty}),
else => return self.fail("TODO implement const of type '{}'", .{typed_value.ty.fmtDebug()}),
}
}

@ -384,7 +384,7 @@ fn addDbgInfoTypeReloc(self: *Emit, ty: Type) !void {
const index = dbg_out.dbg_info.items.len;
try dbg_out.dbg_info.resize(index + 4); // DW.AT.type, DW.FORM.ref4
const gop = try dbg_out.dbg_info_type_relocs.getOrPut(self.bin_file.allocator, ty);
const gop = try dbg_out.dbg_info_type_relocs.getOrPutContext(self.bin_file.allocator, ty, .{ .target = self.target.* });
if (!gop.found_existing) {
gop.value_ptr.* = .{
.off = undefined,
@ -404,6 +404,7 @@ fn genArgDbgInfo(self: *Emit, inst: Air.Inst.Index, arg_index: u32) !void {
const ty = self.function.air.instructions.items(.data)[inst].ty;
const name = self.function.mod_fn.getParamName(arg_index);
const name_with_null = name.ptr[0 .. name.len + 1];
const target = self.target.*;
switch (mcv) {
.register => |reg| {
@ -429,7 +430,7 @@ fn genArgDbgInfo(self: *Emit, inst: Air.Inst.Index, arg_index: u32) !void {
switch (self.debug_output) {
.dwarf => |dbg_out| {
const abi_size = math.cast(u32, ty.abiSize(self.target.*)) catch {
return self.fail("type '{}' too big to fit into stack frame", .{ty});
return self.fail("type '{}' too big to fit into stack frame", .{ty.fmt(target)});
};
const adjusted_stack_offset = switch (mcv) {
.stack_offset => |offset| math.negateCast(offset + abi_size) catch {

@ -749,7 +749,9 @@ fn addDbgInfoTypeReloc(self: *Self, ty: Type) !void {
const index = dbg_out.dbg_info.items.len;
try dbg_out.dbg_info.resize(index + 4); // DW.AT.type, DW.FORM.ref4
const gop = try dbg_out.dbg_info_type_relocs.getOrPut(self.gpa, ty);
const gop = try dbg_out.dbg_info_type_relocs.getOrPutContext(self.gpa, ty, .{
.target = self.target.*,
});
if (!gop.found_existing) {
gop.value_ptr.* = .{
.off = undefined,
@ -781,8 +783,9 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u
/// Use a pointer instruction as the basis for allocating stack memory.
fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
const elem_ty = self.air.typeOfIndex(inst).elemType();
const target = self.target.*;
const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch {
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty});
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(target)});
};
// TODO swap this for inst.ty.ptrAlign
const abi_align = elem_ty.abiAlignment(self.target.*);
@ -791,8 +794,9 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
const elem_ty = self.air.typeOfIndex(inst);
const target = self.target.*;
const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch {
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty});
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(target)});
};
const abi_align = elem_ty.abiAlignment(self.target.*);
if (abi_align > self.stack_align)
@ -1048,7 +1052,7 @@ fn binOp(
.Float => return self.fail("TODO binary operations on floats", .{}),
.Vector => return self.fail("TODO binary operations on vectors", .{}),
.Int => {
assert(lhs_ty.eql(rhs_ty));
assert(lhs_ty.eql(rhs_ty, self.target.*));
const int_info = lhs_ty.intInfo(self.target.*);
if (int_info.bits <= 64) {
// TODO immediate operands
@ -1778,7 +1782,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
if (self.liveness.isUnused(inst))
return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
const ty = self.air.typeOf(bin_op.lhs);
assert(ty.eql(self.air.typeOf(bin_op.rhs)));
assert(ty.eql(self.air.typeOf(bin_op.rhs), self.target.*));
if (ty.zigTypeTag() == .ErrorSet)
return self.fail("TODO implement cmp for errors", .{});
@ -2531,6 +2535,7 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
if (typed_value.val.castTag(.decl_ref_mut)) |payload| {
return self.lowerDeclRef(typed_value, payload.data.decl);
}
const target = self.target.*;
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
switch (typed_value.ty.zigTypeTag()) {
.Pointer => switch (typed_value.ty.ptrSize()) {
@ -2538,7 +2543,7 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
const ptr_type = typed_value.ty.slicePtrFieldType(&buf);
const ptr_mcv = try self.genTypedValue(.{ .ty = ptr_type, .val = typed_value.val });
const slice_len = typed_value.val.sliceLen();
const slice_len = typed_value.val.sliceLen(target);
// Codegen can't handle some kinds of indirection. If the wrong union field is accessed here it may mean
// the Sema code needs to use anonymous Decls or alloca instructions to store data.
const ptr_imm = ptr_mcv.memory;
@ -2549,7 +2554,7 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
},
else => {
if (typed_value.val.tag() == .int_u64) {
return MCValue{ .immediate = typed_value.val.toUnsignedInt() };
return MCValue{ .immediate = typed_value.val.toUnsignedInt(target) };
}
return self.fail("TODO codegen more kinds of const pointers", .{});
},
@ -2559,7 +2564,7 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
if (info.bits > ptr_bits or info.signedness == .signed) {
return self.fail("TODO const int bigger than ptr and signed int", .{});
}
return MCValue{ .immediate = typed_value.val.toUnsignedInt() };
return MCValue{ .immediate = typed_value.val.toUnsignedInt(target) };
},
.Bool => {
return MCValue{ .immediate = @boolToInt(typed_value.val.toBool()) };
@ -2629,9 +2634,9 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
return self.genTypedValue(.{ .ty = error_type, .val = sub_val });
}
return self.fail("TODO implement error union const of type '{}'", .{typed_value.ty});
return self.fail("TODO implement error union const of type '{}'", .{typed_value.ty.fmtDebug()});
},
else => return self.fail("TODO implement const of type '{}'", .{typed_value.ty}),
else => return self.fail("TODO implement const of type '{}'", .{typed_value.ty.fmtDebug()}),
}
}

@ -1021,7 +1021,9 @@ fn allocStack(self: *Self, ty: Type) !WValue {
}
const abi_size = std.math.cast(u32, ty.abiSize(self.target)) catch {
return self.fail("Type {} with ABI size of {d} exceeds stack frame size", .{ ty, ty.abiSize(self.target) });
return self.fail("Type {} with ABI size of {d} exceeds stack frame size", .{
ty.fmt(self.target), ty.abiSize(self.target),
});
};
const abi_align = ty.abiAlignment(self.target);
@ -1053,7 +1055,9 @@ fn allocStackPtr(self: *Self, inst: Air.Inst.Index) !WValue {
const abi_alignment = ptr_ty.ptrAlignment(self.target);
const abi_size = std.math.cast(u32, pointee_ty.abiSize(self.target)) catch {
return self.fail("Type {} with ABI size of {d} exceeds stack frame size", .{ pointee_ty, pointee_ty.abiSize(self.target) });
return self.fail("Type {} with ABI size of {d} exceeds stack frame size", .{
pointee_ty.fmt(self.target), pointee_ty.abiSize(self.target),
});
};
if (abi_alignment > self.stack_alignment) {
self.stack_alignment = abi_alignment;
@ -1750,7 +1754,7 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index, op: Op) InnerError!WValue {
const operand_ty = self.air.typeOfIndex(inst);
if (isByRef(operand_ty, self.target)) {
return self.fail("TODO: Implement binary operation for type: {}", .{operand_ty});
return self.fail("TODO: Implement binary operation for type: {}", .{operand_ty.fmtDebug()});
}
try self.emitWValue(lhs);
@ -1918,6 +1922,8 @@ fn lowerConstant(self: *Self, val: Value, ty: Type) InnerError!WValue {
return self.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl);
}
const target = self.target;
switch (ty.zigTypeTag()) {
.Int => {
const int_info = ty.intInfo(self.target);
@ -1929,13 +1935,13 @@ fn lowerConstant(self: *Self, val: Value, ty: Type) InnerError!WValue {
else => unreachable,
},
.unsigned => switch (int_info.bits) {
0...32 => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt()) },
33...64 => return WValue{ .imm64 = val.toUnsignedInt() },
0...32 => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(target)) },
33...64 => return WValue{ .imm64 = val.toUnsignedInt(target) },
else => unreachable,
},
}
},
.Bool => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt()) },
.Bool => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(target)) },
.Float => switch (ty.floatBits(self.target)) {
0...32 => return WValue{ .float32 = val.toFloat(f32) },
33...64 => return WValue{ .float64 = val.toFloat(f64) },
@ -1945,7 +1951,7 @@ fn lowerConstant(self: *Self, val: Value, ty: Type) InnerError!WValue {
.field_ptr, .elem_ptr => {
return self.lowerParentPtr(val, ty.childType());
},
.int_u64, .one => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt()) },
.int_u64, .one => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(target)) },
.zero, .null_value => return WValue{ .imm32 = 0 },
else => return self.fail("Wasm TODO: lowerConstant for other const pointer tag {s}", .{val.tag()}),
},
@ -2044,6 +2050,7 @@ fn emitUndefined(self: *Self, ty: Type) InnerError!WValue {
/// It's illegal to provide a value with a type that cannot be represented
/// as an integer value.
fn valueAsI32(self: Self, val: Value, ty: Type) i32 {
const target = self.target;
switch (ty.zigTypeTag()) {
.Enum => {
if (val.castTag(.enum_field_index)) |field_index| {
@ -2071,7 +2078,7 @@ fn valueAsI32(self: Self, val: Value, ty: Type) i32 {
},
.Int => switch (ty.intInfo(self.target).signedness) {
.signed => return @truncate(i32, val.toSignedInt()),
.unsigned => return @bitCast(i32, @truncate(u32, val.toUnsignedInt())),
.unsigned => return @bitCast(i32, @truncate(u32, val.toUnsignedInt(target))),
},
.ErrorSet => {
const kv = self.bin_file.base.options.module.?.getErrorValue(val.getError().?) catch unreachable; // passed invalid `Value` to function
@ -2296,7 +2303,7 @@ fn airStructFieldPtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const struct_ty = self.air.typeOf(extra.data.struct_operand).childType();
const offset = std.math.cast(u32, struct_ty.structFieldOffset(extra.data.field_index, self.target)) catch {
return self.fail("Field type '{}' too big to fit into stack frame", .{
struct_ty.structFieldType(extra.data.field_index),
struct_ty.structFieldType(extra.data.field_index).fmt(self.target),
});
};
return self.structFieldPtr(struct_ptr, offset);
@ -2309,7 +2316,7 @@ fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u32) InnerEr
const field_ty = struct_ty.structFieldType(index);
const offset = std.math.cast(u32, struct_ty.structFieldOffset(index, self.target)) catch {
return self.fail("Field type '{}' too big to fit into stack frame", .{
field_ty,
field_ty.fmt(self.target),
});
};
return self.structFieldPtr(struct_ptr, offset);
@ -2335,7 +2342,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const field_ty = struct_ty.structFieldType(field_index);
if (!field_ty.hasRuntimeBits()) return WValue{ .none = {} };
const offset = std.math.cast(u32, struct_ty.structFieldOffset(field_index, self.target)) catch {
return self.fail("Field type '{}' too big to fit into stack frame", .{field_ty});
return self.fail("Field type '{}' too big to fit into stack frame", .{field_ty.fmt(self.target)});
};
if (isByRef(field_ty, self.target)) {
@ -2716,7 +2723,7 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) InnerError!WValue
var buf: Type.Payload.ElemType = undefined;
const payload_ty = opt_ty.optionalChild(&buf);
if (!payload_ty.hasRuntimeBits()) {
return self.fail("TODO: Implement OptionalPayloadPtrSet for optional with zero-sized type {}", .{payload_ty});
return self.fail("TODO: Implement OptionalPayloadPtrSet for optional with zero-sized type {}", .{payload_ty.fmtDebug()});
}
if (opt_ty.isPtrLikeOptional()) {
@ -2724,7 +2731,7 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) InnerError!WValue
}
const offset = std.math.cast(u32, opt_ty.abiSize(self.target) - payload_ty.abiSize(self.target)) catch {
return self.fail("Optional type {} too big to fit into stack frame", .{opt_ty});
return self.fail("Optional type {} too big to fit into stack frame", .{opt_ty.fmt(self.target)});
};
try self.emitWValue(operand);
@ -2753,7 +2760,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
return operand;
}
const offset = std.math.cast(u32, op_ty.abiSize(self.target) - payload_ty.abiSize(self.target)) catch {
return self.fail("Optional type {} too big to fit into stack frame", .{op_ty});
return self.fail("Optional type {} too big to fit into stack frame", .{op_ty.fmt(self.target)});
};
// Create optional type, set the non-null bit, and store the operand inside the optional type

@ -892,8 +892,9 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
return self.allocMem(inst, @sizeOf(usize), @alignOf(usize));
}
const target = self.target.*;
const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch {
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty});
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(target)});
};
// TODO swap this for inst.ty.ptrAlign
const abi_align = ptr_ty.ptrAlignment(self.target.*);
@ -902,8 +903,9 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
const elem_ty = self.air.typeOfIndex(inst);
const target = self.target.*;
const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch {
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty});
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(target)});
};
const abi_align = elem_ty.abiAlignment(self.target.*);
if (abi_align > self.stack_align)
@ -1142,7 +1144,7 @@ fn airMin(self: *Self, inst: Air.Inst.Index) !void {
const ty = self.air.typeOfIndex(inst);
if (ty.zigTypeTag() != .Int) {
return self.fail("TODO implement min for type {}", .{ty});
return self.fail("TODO implement min for type {}", .{ty.fmtDebug()});
}
const signedness = ty.intInfo(self.target.*).signedness;
const result: MCValue = result: {
@ -1676,13 +1678,13 @@ fn airShl(self: *Self, inst: Air.Inst.Index) !void {
const ty = self.air.typeOfIndex(inst);
const tag = self.air.instructions.items(.tag)[inst];
switch (tag) {
.shl_exact => return self.fail("TODO implement {} for type {}", .{ tag, ty }),
.shl_exact => return self.fail("TODO implement {} for type {}", .{ tag, ty.fmtDebug() }),
.shl => {},
else => unreachable,
}
if (ty.zigTypeTag() != .Int) {
return self.fail("TODO implement .shl for type {}", .{ty});
return self.fail("TODO implement .shl for type {}", .{ty.fmtDebug()});
}
if (ty.abiSize(self.target.*) > 8) {
return self.fail("TODO implement .shl for integers larger than 8 bytes", .{});
@ -5820,7 +5822,7 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!MCVa
}
fn lowerUnnamedConst(self: *Self, tv: TypedValue) InnerError!MCValue {
log.debug("lowerUnnamedConst: ty = {}, val = {}", .{ tv.ty, tv.val.fmtDebug() });
log.debug("lowerUnnamedConst: ty = {}, val = {}", .{ tv.ty.fmtDebug(), tv.val.fmtDebug() });
const local_sym_index = self.bin_file.lowerUnnamedConst(tv, self.mod_fn.owner_decl) catch |err| {
return self.fail("lowering unnamed constant failed: {s}", .{@errorName(err)});
};
@ -5850,13 +5852,15 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
return self.lowerDeclRef(typed_value, payload.data.decl);
}
const target = self.target.*;
switch (typed_value.ty.zigTypeTag()) {
.Pointer => switch (typed_value.ty.ptrSize()) {
.Slice => {},
else => {
switch (typed_value.val.tag()) {
.int_u64 => {
return MCValue{ .immediate = typed_value.val.toUnsignedInt() };
return MCValue{ .immediate = typed_value.val.toUnsignedInt(target) };
},
else => {},
}
@ -5868,7 +5872,7 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
return MCValue{ .immediate = @bitCast(u64, typed_value.val.toSignedInt()) };
}
if (!(info.bits > ptr_bits or info.signedness == .signed)) {
return MCValue{ .immediate = typed_value.val.toUnsignedInt() };
return MCValue{ .immediate = typed_value.val.toUnsignedInt(target) };
}
},
.Bool => {

@ -1118,7 +1118,9 @@ fn addDbgInfoTypeReloc(emit: *Emit, ty: Type) !void {
const index = dbg_out.dbg_info.items.len;
try dbg_out.dbg_info.resize(index + 4); // DW.AT.type, DW.FORM.ref4
const gop = try dbg_out.dbg_info_type_relocs.getOrPut(emit.bin_file.allocator, ty);
const gop = try dbg_out.dbg_info_type_relocs.getOrPutContext(emit.bin_file.allocator, ty, .{
.target = emit.target.*,
});
if (!gop.found_existing) {
gop.value_ptr.* = .{
.off = undefined,

@ -165,7 +165,10 @@ pub fn generateSymbol(
const target = bin_file.options.target;
const endian = target.cpu.arch.endian();
log.debug("generateSymbol: ty = {}, val = {}", .{ typed_value.ty, typed_value.val.fmtDebug() });
log.debug("generateSymbol: ty = {}, val = {}", .{
typed_value.ty.fmtDebug(),
typed_value.val.fmtDebug(),
});
if (typed_value.val.isUndefDeep()) {
const abi_size = try math.cast(usize, typed_value.ty.abiSize(target));
@ -295,11 +298,11 @@ pub fn generateSymbol(
.zero, .one, .int_u64, .int_big_positive => {
switch (target.cpu.arch.ptrBitWidth()) {
32 => {
const x = typed_value.val.toUnsignedInt();
const x = typed_value.val.toUnsignedInt(target);
mem.writeInt(u32, try code.addManyAsArray(4), @intCast(u32, x), endian);
},
64 => {
const x = typed_value.val.toUnsignedInt();
const x = typed_value.val.toUnsignedInt(target);
mem.writeInt(u64, try code.addManyAsArray(8), x, endian);
},
else => unreachable,
@ -433,7 +436,7 @@ pub fn generateSymbol(
// TODO populate .debug_info for the integer
const info = typed_value.ty.intInfo(bin_file.options.target);
if (info.bits <= 8) {
const x = @intCast(u8, typed_value.val.toUnsignedInt());
const x = @intCast(u8, typed_value.val.toUnsignedInt(target));
try code.append(x);
return Result{ .appended = {} };
}
@ -443,20 +446,20 @@ pub fn generateSymbol(
bin_file.allocator,
src_loc,
"TODO implement generateSymbol for big ints ('{}')",
.{typed_value.ty},
.{typed_value.ty.fmtDebug()},
),
};
}
switch (info.signedness) {
.unsigned => {
if (info.bits <= 16) {
const x = @intCast(u16, typed_value.val.toUnsignedInt());
const x = @intCast(u16, typed_value.val.toUnsignedInt(target));
mem.writeInt(u16, try code.addManyAsArray(2), x, endian);
} else if (info.bits <= 32) {
const x = @intCast(u32, typed_value.val.toUnsignedInt());
const x = @intCast(u32, typed_value.val.toUnsignedInt(target));
mem.writeInt(u32, try code.addManyAsArray(4), x, endian);
} else {
const x = typed_value.val.toUnsignedInt();
const x = typed_value.val.toUnsignedInt(target);
mem.writeInt(u64, try code.addManyAsArray(8), x, endian);
}
},
@ -482,7 +485,7 @@ pub fn generateSymbol(
const info = typed_value.ty.intInfo(target);
if (info.bits <= 8) {
const x = @intCast(u8, int_val.toUnsignedInt());
const x = @intCast(u8, int_val.toUnsignedInt(target));
try code.append(x);
return Result{ .appended = {} };
}
@ -492,20 +495,20 @@ pub fn generateSymbol(
bin_file.allocator,
src_loc,
"TODO implement generateSymbol for big int enums ('{}')",
.{typed_value.ty},
.{typed_value.ty.fmtDebug()},
),
};
}
switch (info.signedness) {
.unsigned => {
if (info.bits <= 16) {
const x = @intCast(u16, int_val.toUnsignedInt());
const x = @intCast(u16, int_val.toUnsignedInt(target));
mem.writeInt(u16, try code.addManyAsArray(2), x, endian);
} else if (info.bits <= 32) {
const x = @intCast(u32, int_val.toUnsignedInt());
const x = @intCast(u32, int_val.toUnsignedInt(target));
mem.writeInt(u32, try code.addManyAsArray(4), x, endian);
} else {
const x = int_val.toUnsignedInt();
const x = int_val.toUnsignedInt(target);
mem.writeInt(u64, try code.addManyAsArray(8), x, endian);
}
},
@ -597,7 +600,7 @@ pub fn generateSymbol(
}
const union_ty = typed_value.ty.cast(Type.Payload.Union).?.data;
const field_index = union_ty.tag_ty.enumTagFieldIndex(union_obj.tag).?;
const field_index = union_ty.tag_ty.enumTagFieldIndex(union_obj.tag, target).?;
assert(union_ty.haveFieldTypes());
const field_ty = union_ty.fields.values()[field_index].ty;
if (!field_ty.hasRuntimeBits()) {
@ -787,6 +790,7 @@ fn lowerDeclRef(
debug_output: DebugInfoOutput,
reloc_info: RelocInfo,
) GenerateSymbolError!Result {
const target = bin_file.options.target;
if (typed_value.ty.isSlice()) {
// generate ptr
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
@ -805,7 +809,7 @@ fn lowerDeclRef(
// generate length
var slice_len: Value.Payload.U64 = .{
.base = .{ .tag = .int_u64 },
.data = typed_value.val.sliceLen(),
.data = typed_value.val.sliceLen(target),
};
switch (try generateSymbol(bin_file, src_loc, .{
.ty = Type.usize,
@ -821,7 +825,6 @@ fn lowerDeclRef(
return Result{ .appended = {} };
}
const target = bin_file.options.target;
const ptr_width = target.cpu.arch.ptrBitWidth();
const is_fn_body = decl.ty.zigTypeTag() == .Fn;
if (!is_fn_body and !decl.ty.hasRuntimeBits()) {

@ -56,8 +56,14 @@ pub const TypedefMap = std.ArrayHashMap(
true,
);
const FormatTypeAsCIdentContext = struct {
ty: Type,
target: std.Target,
};
/// TODO make this not cut off at 128 bytes
fn formatTypeAsCIdentifier(
data: Type,
data: FormatTypeAsCIdentContext,
comptime fmt: []const u8,
options: std.fmt.FormatOptions,
writer: anytype,
@ -65,13 +71,15 @@ fn formatTypeAsCIdentifier(
_ = fmt;
_ = options;
var buffer = [1]u8{0} ** 128;
// We don't care if it gets cut off, it's still more unique than a number
var buf = std.fmt.bufPrint(&buffer, "{}", .{data}) catch &buffer;
var buf = std.fmt.bufPrint(&buffer, "{}", .{data.ty.fmt(data.target)}) catch &buffer;
return formatIdent(buf, "", .{}, writer);
}
pub fn typeToCIdentifier(t: Type) std.fmt.Formatter(formatTypeAsCIdentifier) {
return .{ .data = t };
pub fn typeToCIdentifier(ty: Type, target: std.Target) std.fmt.Formatter(formatTypeAsCIdentifier) {
return .{ .data = .{
.ty = ty,
.target = target,
} };
}
const reserved_idents = std.ComptimeStringMap(void, .{
@ -369,6 +377,8 @@ pub const DeclGen = struct {
) error{ OutOfMemory, AnalysisFail }!void {
decl.markAlive();
const target = dg.module.getTarget();
if (ty.isSlice()) {
try writer.writeByte('(');
try dg.renderTypecast(writer, ty);
@ -376,7 +386,7 @@ pub const DeclGen = struct {
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
try dg.renderValue(writer, ty.slicePtrFieldType(&buf), val.slicePtr());
try writer.writeAll(", ");
try writer.print("{d}", .{val.sliceLen()});
try writer.print("{d}", .{val.sliceLen(target)});
try writer.writeAll("}");
return;
}
@ -388,7 +398,7 @@ pub const DeclGen = struct {
// somewhere and we should let the C compiler tell us about it.
if (ty.castPtrToFn() == null) {
// Determine if we must pointer cast.
if (ty.eql(decl.ty)) {
if (ty.eql(decl.ty, target)) {
try writer.writeByte('&');
try dg.renderDeclName(writer, decl);
return;
@ -508,6 +518,7 @@ pub const DeclGen = struct {
ty: Type,
val: Value,
) error{ OutOfMemory, AnalysisFail }!void {
const target = dg.module.getTarget();
if (val.isUndefDeep()) {
switch (ty.zigTypeTag()) {
// Using '{}' for integer and floats seemed to error C compilers (both GCC and Clang)
@ -551,7 +562,7 @@ pub const DeclGen = struct {
else => {
if (ty.isSignedInt())
return writer.print("{d}", .{val.toSignedInt()});
return writer.print("{d}u", .{val.toUnsignedInt()});
return writer.print("{d}u", .{val.toUnsignedInt(target)});
},
},
.Float => {
@ -609,7 +620,7 @@ pub const DeclGen = struct {
.int_u64, .one => {
try writer.writeAll("((");
try dg.renderTypecast(writer, ty);
try writer.print(")0x{x}u)", .{val.toUnsignedInt()});
try writer.print(")0x{x}u)", .{val.toUnsignedInt(target)});
},
else => unreachable,
},
@ -653,7 +664,6 @@ pub const DeclGen = struct {
if (ty.isPtrLikeOptional()) {
return dg.renderValue(writer, payload_type, val);
}
const target = dg.module.getTarget();
if (payload_type.abiSize(target) == 0) {
const is_null = val.castTag(.opt_payload) == null;
return writer.print("{}", .{is_null});
@ -773,7 +783,6 @@ pub const DeclGen = struct {
.Union => {
const union_obj = val.castTag(.@"union").?.data;
const union_ty = ty.cast(Type.Payload.Union).?.data;
const target = dg.module.getTarget();
const layout = ty.unionGetLayout(target);
try writer.writeAll("(");
@ -789,7 +798,7 @@ pub const DeclGen = struct {
try writer.writeAll(".payload = {");
}
const index = union_ty.tag_ty.enumTagFieldIndex(union_obj.tag).?;
const index = union_ty.tag_ty.enumTagFieldIndex(union_obj.tag, target).?;
const field_ty = ty.unionFields().values()[index].ty;
const field_name = ty.unionFields().keys()[index];
if (field_ty.hasRuntimeBits()) {
@ -879,8 +888,8 @@ pub const DeclGen = struct {
try bw.writeAll(" (*");
const name_start = buffer.items.len;
// TODO: typeToCIdentifier truncates to 128 bytes, we probably don't want to do this
try bw.print("zig_F_{s})(", .{typeToCIdentifier(t)});
const target = dg.module.getTarget();
try bw.print("zig_F_{s})(", .{typeToCIdentifier(t, target)});
const name_end = buffer.items.len - 2;
const param_len = fn_info.param_types.len;
@ -934,10 +943,11 @@ pub const DeclGen = struct {
try bw.writeAll("; size_t len; } ");
const name_index = buffer.items.len;
const target = dg.module.getTarget();
if (t.isConstPtr()) {
try bw.print("zig_L_{s}", .{typeToCIdentifier(child_type)});
try bw.print("zig_L_{s}", .{typeToCIdentifier(child_type, target)});
} else {
try bw.print("zig_M_{s}", .{typeToCIdentifier(child_type)});
try bw.print("zig_M_{s}", .{typeToCIdentifier(child_type, target)});
}
if (ptr_sentinel) |s| {
try bw.writeAll("_s_");
@ -1023,7 +1033,8 @@ pub const DeclGen = struct {
try buffer.appendSlice("} ");
const name_start = buffer.items.len;
try writer.print("zig_T_{};\n", .{typeToCIdentifier(t)});
const target = dg.module.getTarget();
try writer.print("zig_T_{};\n", .{typeToCIdentifier(t, target)});
const rendered = buffer.toOwnedSlice();
errdefer dg.typedefs.allocator.free(rendered);
@ -1107,6 +1118,7 @@ pub const DeclGen = struct {
try dg.renderTypeAndName(bw, child_type, payload_name, .Mut, 0);
try bw.writeAll("; uint16_t error; } ");
const name_index = buffer.items.len;
const target = dg.module.getTarget();
if (err_set_type.castTag(.error_set_inferred)) |inf_err_set_payload| {
const func = inf_err_set_payload.data.func;
try bw.writeAll("zig_E_");
@ -1114,7 +1126,7 @@ pub const DeclGen = struct {
try bw.writeAll(";\n");
} else {
try bw.print("zig_E_{s}_{s};\n", .{
typeToCIdentifier(err_set_type), typeToCIdentifier(child_type),
typeToCIdentifier(err_set_type, target), typeToCIdentifier(child_type, target),
});
}
@ -1144,7 +1156,8 @@ pub const DeclGen = struct {
try dg.renderType(bw, elem_type);
const name_start = buffer.items.len + 1;
try bw.print(" zig_A_{s}_{d}", .{ typeToCIdentifier(elem_type), c_len });
const target = dg.module.getTarget();
try bw.print(" zig_A_{s}_{d}", .{ typeToCIdentifier(elem_type, target), c_len });
const name_end = buffer.items.len;
try bw.print("[{d}];\n", .{c_len});
@ -1172,7 +1185,8 @@ pub const DeclGen = struct {
try dg.renderTypeAndName(bw, child_type, payload_name, .Mut, 0);
try bw.writeAll("; bool is_null; } ");
const name_index = buffer.items.len;
try bw.print("zig_Q_{s};\n", .{typeToCIdentifier(child_type)});
const target = dg.module.getTarget();
try bw.print("zig_Q_{s};\n", .{typeToCIdentifier(child_type, target)});
const rendered = buffer.toOwnedSlice();
errdefer dg.typedefs.allocator.free(rendered);
@ -2177,12 +2191,13 @@ fn airStore(f: *Function, inst: Air.Inst.Index) !CValue {
if (src_val_is_undefined)
return try airStoreUndefined(f, dest_ptr);
const target = f.object.dg.module.getTarget();
const writer = f.object.writer();
if (lhs_child_type.zigTypeTag() == .Array) {
// For this memcpy to safely work we need the rhs to have the same
// underlying type as the lhs (i.e. they must both be arrays of the same underlying type).
const rhs_type = f.air.typeOf(bin_op.rhs);
assert(rhs_type.eql(lhs_child_type));
assert(rhs_type.eql(lhs_child_type, target));
// If the source is a constant, writeCValue will emit a brace initialization
// so work around this by initializing into new local.

@ -812,7 +812,7 @@ pub const Object = struct {
const gpa = o.gpa;
// Be careful not to reference this `gop` variable after any recursive calls
// to `lowerDebugType`.
const gop = try o.di_type_map.getOrPut(gpa, ty);
const gop = try o.di_type_map.getOrPutContext(gpa, ty, .{ .target = o.target });
if (gop.found_existing) {
const annotated = gop.value_ptr.*;
const di_type = annotated.toDIType();
@ -825,7 +825,7 @@ pub const Object = struct {
};
return o.lowerDebugTypeImpl(entry, resolve, di_type);
}
errdefer assert(o.di_type_map.orderedRemove(ty));
errdefer assert(o.di_type_map.orderedRemoveContext(ty, .{ .target = o.target }));
// The Type memory is ephemeral; since we want to store a longer-lived
// reference, we need to copy it here.
gop.key_ptr.* = try ty.copy(o.type_map_arena.allocator());
@ -856,7 +856,7 @@ pub const Object = struct {
.Int => {
const info = ty.intInfo(target);
assert(info.bits != 0);
const name = try ty.nameAlloc(gpa);
const name = try ty.nameAlloc(gpa, target);
defer gpa.free(name);
const dwarf_encoding: c_uint = switch (info.signedness) {
.signed => DW.ATE.signed,
@ -873,7 +873,7 @@ pub const Object = struct {
const enum_di_ty = try o.makeEmptyNamespaceDIType(owner_decl);
// The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType`
// means we can't use `gop` anymore.
try o.di_type_map.put(gpa, ty, AnnotatedDITypePtr.initFull(enum_di_ty));
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(enum_di_ty), .{ .target = o.target });
return enum_di_ty;
}
@ -903,7 +903,7 @@ pub const Object = struct {
const di_file = try o.getDIFile(gpa, owner_decl.src_namespace.file_scope);
const di_scope = try o.namespaceToDebugScope(owner_decl.src_namespace);
const name = try ty.nameAlloc(gpa);
const name = try ty.nameAlloc(gpa, target);
defer gpa.free(name);
var buffer: Type.Payload.Bits = undefined;
const int_ty = ty.intTagType(&buffer);
@ -921,12 +921,12 @@ pub const Object = struct {
"",
);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.put(gpa, ty, AnnotatedDITypePtr.initFull(enum_di_ty));
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(enum_di_ty), .{ .target = o.target });
return enum_di_ty;
},
.Float => {
const bits = ty.floatBits(target);
const name = try ty.nameAlloc(gpa);
const name = try ty.nameAlloc(gpa, target);
defer gpa.free(name);
const di_type = dib.createBasicType(name, bits, DW.ATE.float);
gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_type);
@ -974,7 +974,7 @@ pub const Object = struct {
const bland_ptr_ty = Type.initPayload(&payload.base);
const ptr_di_ty = try o.lowerDebugType(bland_ptr_ty, resolve);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.put(gpa, ty, AnnotatedDITypePtr.init(ptr_di_ty, resolve));
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.init(ptr_di_ty, resolve), .{ .target = o.target });
return ptr_di_ty;
}
@ -983,7 +983,7 @@ pub const Object = struct {
const ptr_ty = ty.slicePtrFieldType(&buf);
const len_ty = Type.usize;
const name = try ty.nameAlloc(gpa);
const name = try ty.nameAlloc(gpa, target);
defer gpa.free(name);
const di_file: ?*llvm.DIFile = null;
const line = 0;
@ -1054,12 +1054,12 @@ pub const Object = struct {
);
dib.replaceTemporary(fwd_decl, full_di_ty);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.put(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty));
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .target = o.target });
return full_di_ty;
}
const elem_di_ty = try o.lowerDebugType(ptr_info.pointee_type, .fwd);
const name = try ty.nameAlloc(gpa);
const name = try ty.nameAlloc(gpa, target);
defer gpa.free(name);
const ptr_di_ty = dib.createPointerType(
elem_di_ty,
@ -1068,7 +1068,7 @@ pub const Object = struct {
name,
);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.put(gpa, ty, AnnotatedDITypePtr.initFull(ptr_di_ty));
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(ptr_di_ty), .{ .target = o.target });
return ptr_di_ty;
},
.Opaque => {
@ -1077,7 +1077,7 @@ pub const Object = struct {
gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_ty);
return di_ty;
}
const name = try ty.nameAlloc(gpa);
const name = try ty.nameAlloc(gpa, target);
defer gpa.free(name);
const owner_decl = ty.getOwnerDecl();
const opaque_di_ty = dib.createForwardDeclType(
@ -1089,7 +1089,7 @@ pub const Object = struct {
);
// The recursive call to `lowerDebugType` via `namespaceToDebugScope`
// means we can't use `gop` anymore.
try o.di_type_map.put(gpa, ty, AnnotatedDITypePtr.initFull(opaque_di_ty));
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(opaque_di_ty), .{ .target = o.target });
return opaque_di_ty;
},
.Array => {
@ -1100,7 +1100,7 @@ pub const Object = struct {
@intCast(c_int, ty.arrayLen()),
);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.put(gpa, ty, AnnotatedDITypePtr.initFull(array_di_ty));
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(array_di_ty), .{ .target = o.target });
return array_di_ty;
},
.Vector => {
@ -1111,11 +1111,11 @@ pub const Object = struct {
ty.vectorLen(),
);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.put(gpa, ty, AnnotatedDITypePtr.initFull(vector_di_ty));
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(vector_di_ty), .{ .target = o.target });
return vector_di_ty;
},
.Optional => {
const name = try ty.nameAlloc(gpa);
const name = try ty.nameAlloc(gpa, target);
defer gpa.free(name);
var buf: Type.Payload.ElemType = undefined;
const child_ty = ty.optionalChild(&buf);
@ -1127,7 +1127,7 @@ pub const Object = struct {
if (ty.isPtrLikeOptional()) {
const ptr_di_ty = try o.lowerDebugType(child_ty, resolve);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.put(gpa, ty, AnnotatedDITypePtr.initFull(ptr_di_ty));
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(ptr_di_ty), .{ .target = o.target });
return ptr_di_ty;
}
@ -1200,7 +1200,7 @@ pub const Object = struct {
);
dib.replaceTemporary(fwd_decl, full_di_ty);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.put(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty));
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .target = o.target });
return full_di_ty;
},
.ErrorUnion => {
@ -1209,10 +1209,10 @@ pub const Object = struct {
if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
const err_set_di_ty = try o.lowerDebugType(err_set_ty, .full);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.put(gpa, ty, AnnotatedDITypePtr.initFull(err_set_di_ty));
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(err_set_di_ty), .{ .target = o.target });
return err_set_di_ty;
}
const name = try ty.nameAlloc(gpa);
const name = try ty.nameAlloc(gpa, target);
defer gpa.free(name);
const di_file: ?*llvm.DIFile = null;
const line = 0;
@ -1282,7 +1282,7 @@ pub const Object = struct {
);
dib.replaceTemporary(fwd_decl, full_di_ty);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.put(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty));
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .target = o.target });
return full_di_ty;
},
.ErrorSet => {
@ -1294,7 +1294,7 @@ pub const Object = struct {
},
.Struct => {
const compile_unit_scope = o.di_compile_unit.?.toScope();
const name = try ty.nameAlloc(gpa);
const name = try ty.nameAlloc(gpa, target);
defer gpa.free(name);
if (ty.castTag(.@"struct")) |payload| {
@ -1381,7 +1381,7 @@ pub const Object = struct {
);
dib.replaceTemporary(fwd_decl, full_di_ty);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.put(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty));
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .target = o.target });
return full_di_ty;
}
@ -1395,7 +1395,7 @@ pub const Object = struct {
dib.replaceTemporary(fwd_decl, struct_di_ty);
// The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType`
// means we can't use `gop` anymore.
try o.di_type_map.put(gpa, ty, AnnotatedDITypePtr.initFull(struct_di_ty));
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(struct_di_ty), .{ .target = o.target });
return struct_di_ty;
}
}
@ -1406,7 +1406,7 @@ pub const Object = struct {
dib.replaceTemporary(fwd_decl, struct_di_ty);
// The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType`
// means we can't use `gop` anymore.
try o.di_type_map.put(gpa, ty, AnnotatedDITypePtr.initFull(struct_di_ty));
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(struct_di_ty), .{ .target = o.target });
return struct_di_ty;
}
@ -1461,13 +1461,13 @@ pub const Object = struct {
);
dib.replaceTemporary(fwd_decl, full_di_ty);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.put(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty));
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .target = o.target });
return full_di_ty;
},
.Union => {
const owner_decl = ty.getOwnerDecl();
const name = try ty.nameAlloc(gpa);
const name = try ty.nameAlloc(gpa, target);
defer gpa.free(name);
const fwd_decl = opt_fwd_decl orelse blk: {
@ -1489,7 +1489,7 @@ pub const Object = struct {
dib.replaceTemporary(fwd_decl, union_di_ty);
// The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType`
// means we can't use `gop` anymore.
try o.di_type_map.put(gpa, ty, AnnotatedDITypePtr.initFull(union_di_ty));
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(union_di_ty), .{ .target = o.target });
return union_di_ty;
}
@ -1603,7 +1603,7 @@ pub const Object = struct {
0,
);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.put(gpa, ty, AnnotatedDITypePtr.initFull(fn_di_ty));
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(fn_di_ty), .{ .target = o.target });
return fn_di_ty;
},
.ComptimeInt => unreachable,
@ -1676,7 +1676,9 @@ pub const DeclGen = struct {
const decl = dg.decl;
assert(decl.has_tv);
log.debug("gen: {s} type: {}, value: {}", .{ decl.name, decl.ty, decl.val.fmtDebug() });
log.debug("gen: {s} type: {}, value: {}", .{
decl.name, decl.ty.fmtDebug(), decl.val.fmtDebug(),
});
if (decl.val.castTag(.function)) |func_payload| {
_ = func_payload;
@ -1990,7 +1992,7 @@ pub const DeclGen = struct {
},
.Opaque => switch (t.tag()) {
.@"opaque" => {
const gop = try dg.object.type_map.getOrPut(gpa, t);
const gop = try dg.object.type_map.getOrPutContext(gpa, t, .{ .target = target });
if (gop.found_existing) return gop.value_ptr.*;
// The Type memory is ephemeral; since we want to store a longer-lived
@ -2051,7 +2053,7 @@ pub const DeclGen = struct {
return dg.context.intType(16);
},
.Struct => {
const gop = try dg.object.type_map.getOrPut(gpa, t);
const gop = try dg.object.type_map.getOrPutContext(gpa, t, .{ .target = target });
if (gop.found_existing) return gop.value_ptr.*;
// The Type memory is ephemeral; since we want to store a longer-lived
@ -2174,7 +2176,7 @@ pub const DeclGen = struct {
return llvm_struct_ty;
},
.Union => {
const gop = try dg.object.type_map.getOrPut(gpa, t);
const gop = try dg.object.type_map.getOrPutContext(gpa, t, .{ .target = target });
if (gop.found_existing) return gop.value_ptr.*;
// The Type memory is ephemeral; since we want to store a longer-lived
@ -2289,6 +2291,7 @@ pub const DeclGen = struct {
const llvm_type = try dg.llvmType(tv.ty);
return llvm_type.getUndef();
}
const target = dg.module.getTarget();
switch (tv.ty.zigTypeTag()) {
.Bool => {
@ -2302,8 +2305,7 @@ pub const DeclGen = struct {
.decl_ref => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref).?.data),
else => {
var bigint_space: Value.BigIntSpace = undefined;
const bigint = tv.val.toBigInt(&bigint_space);
const target = dg.module.getTarget();
const bigint = tv.val.toBigInt(&bigint_space, target);
const int_info = tv.ty.intInfo(target);
assert(int_info.bits != 0);
const llvm_type = dg.context.intType(int_info.bits);
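The extra `target` argument on `toBigInt` (and on `toUnsignedInt` below) is the point of the commit: `@alignOf(T)` is now stored as a lazy value and only materialized into an integer when something actually asks for it, which requires knowing the target's ABI. A hedged sketch of the idea (tag names assumed):

// Sketch: resolving an integer out of a possibly-lazy Value.
fn toUnsignedIntSketch(val: Value, target: std.Target) u64 {
    return switch (val.tag()) {
        .int_u64 => val.castTag(.int_u64).?.data,
        // The lazy case: alignment is a property of the target machine,
        // so it cannot be computed any earlier than this.
        .lazy_align => val.castTag(.lazy_align).?.data.abiAlignment(target),
        else => unreachable, // remaining integer tags elided in this sketch
    };
}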
@ -2331,9 +2333,8 @@ pub const DeclGen = struct {
const int_val = tv.enumToInt(&int_buffer);
var bigint_space: Value.BigIntSpace = undefined;
const bigint = int_val.toBigInt(&bigint_space);
const bigint = int_val.toBigInt(&bigint_space, target);
const target = dg.module.getTarget();
const int_info = tv.ty.intInfo(target);
const llvm_type = dg.context.intType(int_info.bits);
@ -2356,7 +2357,6 @@ pub const DeclGen = struct {
},
.Float => {
const llvm_ty = try dg.llvmType(tv.ty);
const target = dg.module.getTarget();
switch (tv.ty.floatBits(target)) {
16, 32, 64 => return llvm_ty.constReal(tv.val.toFloat(f64)),
80 => {
@ -2414,7 +2414,7 @@ pub const DeclGen = struct {
},
.int_u64, .one, .int_big_positive => {
const llvm_usize = try dg.llvmType(Type.usize);
const llvm_int = llvm_usize.constInt(tv.val.toUnsignedInt(), .False);
const llvm_int = llvm_usize.constInt(tv.val.toUnsignedInt(target), .False);
return llvm_int.constIntToPtr(try dg.llvmType(tv.ty));
},
.field_ptr, .opt_payload_ptr, .eu_payload_ptr, .elem_ptr => {
@ -2424,7 +2424,9 @@ pub const DeclGen = struct {
const llvm_type = try dg.llvmType(tv.ty);
return llvm_type.constNull();
},
else => |tag| return dg.todo("implement const of pointer type '{}' ({})", .{ tv.ty, tag }),
else => |tag| return dg.todo("implement const of pointer type '{}' ({})", .{
tv.ty.fmtDebug(), tag,
}),
},
.Array => switch (tv.val.tag()) {
.bytes => {
@ -2592,7 +2594,6 @@ pub const DeclGen = struct {
const llvm_struct_ty = try dg.llvmType(tv.ty);
const field_vals = tv.val.castTag(.aggregate).?.data;
const gpa = dg.gpa;
const target = dg.module.getTarget();
if (tv.ty.isTupleOrAnonStruct()) {
const tuple = tv.ty.tupleFields();
@ -2753,7 +2754,6 @@ pub const DeclGen = struct {
const llvm_union_ty = try dg.llvmType(tv.ty);
const tag_and_val = tv.val.castTag(.@"union").?.data;
const target = dg.module.getTarget();
const layout = tv.ty.unionGetLayout(target);
if (layout.payload_size == 0) {
@ -2763,7 +2763,7 @@ pub const DeclGen = struct {
});
}
const union_obj = tv.ty.cast(Type.Payload.Union).?.data;
const field_index = union_obj.tag_ty.enumTagFieldIndex(tag_and_val.tag).?;
const field_index = union_obj.tag_ty.enumTagFieldIndex(tag_and_val.tag, target).?;
assert(union_obj.haveFieldTypes());
const field_ty = union_obj.fields.values()[field_index].ty;
const payload = p: {
@ -2892,7 +2892,7 @@ pub const DeclGen = struct {
.Frame,
.AnyFrame,
=> return dg.todo("implement const of type '{}'", .{tv.ty}),
=> return dg.todo("implement const of type '{}'", .{tv.ty.fmtDebug()}),
}
}
@ -2910,7 +2910,8 @@ pub const DeclGen = struct {
const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
const llvm_ptr = try dg.lowerDeclRefValue(.{ .ty = ptr_ty, .val = ptr_val }, decl);
if (ptr_child_ty.eql(decl.ty)) {
const target = dg.module.getTarget();
if (ptr_child_ty.eql(decl.ty, target)) {
return llvm_ptr;
} else {
return llvm_ptr.constBitCast((try dg.llvmType(ptr_child_ty)).pointerType(0));
@ -2918,6 +2919,7 @@ pub const DeclGen = struct {
}
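`Type.eql` grows the same parameter because type equality can hinge on a still-lazy alignment: deciding whether an explicit `align(@alignOf(T))` matches the pointee's ABI alignment is only decidable against a concrete target. An illustrative sketch, not the real `eql` implementation:

// Illustrative: the target-dependent part of pointer-type equality.
// ptrAlignment resolves an explicit `align(N)` or falls back to the
// pointee's ABI alignment, which varies by target.
fn ptrAlignEqlSketch(a: Type, b: Type, target: std.Target) bool {
    return a.ptrAlignment(target) == b.ptrAlignment(target);
}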
fn lowerParentPtr(dg: *DeclGen, ptr_val: Value, ptr_child_ty: Type) Error!*const llvm.Value {
const target = dg.module.getTarget();
var bitcast_needed: bool = undefined;
const llvm_ptr = switch (ptr_val.tag()) {
.decl_ref_mut => {
@ -2951,7 +2953,6 @@ pub const DeclGen = struct {
const field_index = @intCast(u32, field_ptr.field_index);
const llvm_u32 = dg.context.intType(32);
const target = dg.module.getTarget();
switch (parent_ty.zigTypeTag()) {
.Union => {
bitcast_needed = true;
@ -2974,7 +2975,7 @@ pub const DeclGen = struct {
},
.Struct => {
const field_ty = parent_ty.structFieldType(field_index);
bitcast_needed = !field_ty.eql(ptr_child_ty);
bitcast_needed = !field_ty.eql(ptr_child_ty, target);
var ty_buf: Type.Payload.Pointer = undefined;
const llvm_field_index = llvmFieldIndex(parent_ty, field_index, target, &ty_buf).?;
@ -2990,7 +2991,7 @@ pub const DeclGen = struct {
.elem_ptr => blk: {
const elem_ptr = ptr_val.castTag(.elem_ptr).?.data;
const parent_llvm_ptr = try dg.lowerParentPtr(elem_ptr.array_ptr, elem_ptr.elem_ty);
bitcast_needed = !elem_ptr.elem_ty.eql(ptr_child_ty);
bitcast_needed = !elem_ptr.elem_ty.eql(ptr_child_ty, target);
const llvm_usize = try dg.llvmType(Type.usize);
const indices: [1]*const llvm.Value = .{
@ -3004,7 +3005,7 @@ pub const DeclGen = struct {
var buf: Type.Payload.ElemType = undefined;
const payload_ty = opt_payload_ptr.container_ty.optionalChild(&buf);
bitcast_needed = !payload_ty.eql(ptr_child_ty);
bitcast_needed = !payload_ty.eql(ptr_child_ty, target);
if (!payload_ty.hasRuntimeBitsIgnoreComptime() or payload_ty.isPtrLikeOptional()) {
// In this case, we represent pointer to optional the same as pointer
@ -3024,7 +3025,7 @@ pub const DeclGen = struct {
const parent_llvm_ptr = try dg.lowerParentPtr(eu_payload_ptr.container_ptr, eu_payload_ptr.container_ty);
const payload_ty = eu_payload_ptr.container_ty.errorUnionPayload();
bitcast_needed = !payload_ty.eql(ptr_child_ty);
bitcast_needed = !payload_ty.eql(ptr_child_ty, target);
if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
// In this case, we represent pointer to error union the same as pointer
@ -3053,12 +3054,13 @@ pub const DeclGen = struct {
tv: TypedValue,
decl: *Module.Decl,
) Error!*const llvm.Value {
const target = self.module.getTarget();
if (tv.ty.isSlice()) {
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
const ptr_ty = tv.ty.slicePtrFieldType(&buf);
var slice_len: Value.Payload.U64 = .{
.base = .{ .tag = .int_u64 },
.data = tv.val.sliceLen(),
.data = tv.val.sliceLen(target),
};
const fields: [2]*const llvm.Value = .{
try self.genTypedValue(.{

View File

@ -313,7 +313,7 @@ pub const DeclGen = struct {
// As of yet, there is no vector support in the self-hosted compiler.
.Vector => self.todo("implement arithmeticTypeInfo for Vector", .{}),
// TODO: For which types is this the case?
else => self.todo("implement arithmeticTypeInfo for {}", .{ty}),
else => self.todo("implement arithmeticTypeInfo for {}", .{ty.fmtDebug()}),
};
}
@ -335,7 +335,7 @@ pub const DeclGen = struct {
const int_info = ty.intInfo(target);
const backing_bits = self.backingIntBits(int_info.bits) orelse {
// Integers too big for any native type are represented as "composite integers": An array of largestSupportedIntBits.
return self.todo("implement composite int constants for {}", .{ty});
return self.todo("implement composite int constants for {}", .{ty.fmtDebug()});
};
// We can just use toSignedInt/toUnsignedInt here as it returns u64 - a type large enough to hold any
@ -345,7 +345,7 @@ pub const DeclGen = struct {
// Note, value is required to be sign-extended, so we don't need to mask off the upper bits.
// See https://www.khronos.org/registry/SPIR-V/specs/unified1/SPIRV.html#Literal
var int_bits = if (ty.isSignedInt()) @bitCast(u64, val.toSignedInt()) else val.toUnsignedInt();
var int_bits = if (ty.isSignedInt()) @bitCast(u64, val.toSignedInt()) else val.toUnsignedInt(target);
const value: spec.LiteralContextDependentNumber = switch (backing_bits) {
1...32 => .{ .uint32 = @truncate(u32, int_bits) },
@ -388,7 +388,7 @@ pub const DeclGen = struct {
});
},
.Void => unreachable,
else => return self.todo("constant generation of type {}", .{ty}),
else => return self.todo("constant generation of type {}", .{ty.fmtDebug()}),
}
return result_id.toRef();
@ -414,7 +414,7 @@ pub const DeclGen = struct {
const backing_bits = self.backingIntBits(int_info.bits) orelse {
// TODO: Integers too big for any native type are represented as "composite integers":
// An array of largestSupportedIntBits.
return self.todo("Implement composite int type {}", .{ty});
return self.todo("Implement composite int type {}", .{ty.fmtDebug()});
};
const payload = try self.spv.arena.create(SpvType.Payload.Int);
@ -644,8 +644,10 @@ pub const DeclGen = struct {
const result_id = self.spv.allocId();
const result_type_id = try self.resolveTypeId(ty);
assert(self.air.typeOf(bin_op.lhs).eql(ty));
assert(self.air.typeOf(bin_op.rhs).eql(ty));
const target = self.getTarget();
assert(self.air.typeOf(bin_op.lhs).eql(ty, target));
assert(self.air.typeOf(bin_op.rhs).eql(ty, target));
// Binary operations are generally applicable to both scalar and vector operations
// in SPIR-V, but int and float versions of operations require different opcodes.
@ -692,7 +694,7 @@ pub const DeclGen = struct {
const result_id = self.spv.allocId();
const result_type_id = try self.resolveTypeId(Type.initTag(.bool));
const op_ty = self.air.typeOf(bin_op.lhs);
assert(op_ty.eql(self.air.typeOf(bin_op.rhs)));
assert(op_ty.eql(self.air.typeOf(bin_op.rhs), self.getTarget()));
// Comparisons are generally applicable to both scalar and vector operations in SPIR-V,
// but int and float versions of operations require different opcodes.

View File

@ -457,7 +457,7 @@ pub const File = struct {
/// May be called before or after updateDeclExports but must be called
/// after allocateDeclIndexes for any given Decl.
pub fn updateDecl(base: *File, module: *Module, decl: *Module.Decl) UpdateDeclError!void {
log.debug("updateDecl {*} ({s}), type={}", .{ decl, decl.name, decl.ty });
log.debug("updateDecl {*} ({s}), type={}", .{ decl, decl.name, decl.ty.fmtDebug() });
assert(decl.has_tv);
switch (base.tag) {
// zig fmt: off
@ -477,7 +477,7 @@ pub const File = struct {
/// after allocateDeclIndexes for any given Decl.
pub fn updateFunc(base: *File, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) UpdateDeclError!void {
log.debug("updateFunc {*} ({s}), type={}", .{
func.owner_decl, func.owner_decl.name, func.owner_decl.ty,
func.owner_decl, func.owner_decl.name, func.owner_decl.ty.fmtDebug(),
});
switch (base.tag) {
// zig fmt: off
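`fmtDebug` is the target-free formatter used for logging: it prints lazy values symbolically instead of resolving them. Where a fully resolved name is required, as in the DWARF code below, `ty.fmt(target)` is used instead. A sketch of the split:

// Sketch: two formatters for Type.
// For user-facing names, resolve lazy values against the target:
// const name = try std.fmt.allocPrint(gpa, "{}", .{ty.fmt(target)});
// For logs, where no target may be at hand, print without resolving:
// log.debug("type={}", .{ty.fmtDebug()});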

View File

@ -127,7 +127,7 @@ pub fn updateFunc(self: *C, module: *Module, func: *Module.Fn, air: Air, livenes
.error_msg = null,
.decl = decl,
.fwd_decl = fwd_decl.toManaged(module.gpa),
.typedefs = typedefs.promote(module.gpa),
.typedefs = typedefs.promoteContext(module.gpa, .{ .target = module.getTarget() }),
.typedefs_arena = self.arena.allocator(),
},
.code = code.toManaged(module.gpa),
@ -192,7 +192,7 @@ pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void {
.error_msg = null,
.decl = decl,
.fwd_decl = fwd_decl.toManaged(module.gpa),
.typedefs = typedefs.promote(module.gpa),
.typedefs = typedefs.promoteContext(module.gpa, .{ .target = module.getTarget() }),
.typedefs_arena = self.arena.allocator(),
},
.code = code.toManaged(module.gpa),
@ -366,7 +366,9 @@ fn flushDecl(self: *C, f: *Flush, decl: *const Module.Decl) FlushDeclError!void
try f.typedefs.ensureUnusedCapacity(gpa, @intCast(u32, decl_block.typedefs.count()));
var it = decl_block.typedefs.iterator();
while (it.next()) |new| {
const gop = f.typedefs.getOrPutAssumeCapacity(new.key_ptr.*);
const gop = f.typedefs.getOrPutAssumeCapacityContext(new.key_ptr.*, .{
.target = self.base.options.target,
});
if (!gop.found_existing) {
try f.err_typedef_buf.appendSlice(gpa, new.value_ptr.rendered);
}
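The typedefs map is stored in its unmanaged form and promoted when code generation runs; with a stateful context, `promote` becomes `promoteContext` so the promoted map knows how to hash its keys. A sketch of the std pattern (map and value types illustrative):

// Sketch only: promoting an unmanaged hash map whose context is stateful.
var typedefs: std.HashMapUnmanaged(Type, []const u8, TypeMapContext, 80) = .{};
var managed = typedefs.promoteContext(gpa, .{ .target = target });
defer typedefs = managed.unmanaged; // hand the (possibly re-allocated) state back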

View File

@ -200,7 +200,9 @@ pub fn initDeclDebugInfo(self: *Dwarf, decl: *Module.Decl) !DeclDebugBuffers {
assert(self.getRelocDbgInfoSubprogramHighPC() == dbg_info_buffer.items.len);
dbg_info_buffer.items.len += 4; // DW.AT.high_pc, DW.FORM.data4
if (fn_ret_has_bits) {
const gop = try dbg_info_type_relocs.getOrPut(gpa, fn_ret_type);
const gop = try dbg_info_type_relocs.getOrPutContext(gpa, fn_ret_type, .{
.target = self.target,
});
if (!gop.found_existing) {
gop.value_ptr.* = .{
.off = undefined,
@ -455,7 +457,9 @@ pub fn commitDeclDebugInfo(
var it: usize = 0;
while (it < dbg_info_type_relocs.count()) : (it += 1) {
const ty = dbg_info_type_relocs.keys()[it];
const value_ptr = dbg_info_type_relocs.getPtr(ty).?;
const value_ptr = dbg_info_type_relocs.getPtrContext(ty, .{
.target = self.target,
}).?;
value_ptr.off = @intCast(u32, dbg_info_buffer.items.len);
try self.addDbgInfoType(dbg_type_arena.allocator(), ty, dbg_info_buffer, dbg_info_type_relocs);
}
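This loop iterates by index and re-fetches the value pointer on every pass, presumably because `addDbgInfoType` can insert new relocations into this same map (see the `getOrPutContext` call further down), which may reallocate its storage and invalidate both `keys()` slices and live value pointers. A condensed sketch of the pattern, with hypothetical helpers:

// Sketch: safely mutating a hash map while iterating it by index.
var i: usize = 0;
while (i < map.count()) : (i += 1) {
    const key = map.keys()[i]; // re-read each pass; the backing array may grow
    const value_ptr = map.getPtrContext(key, .{ .target = target }).?; // re-fetch; old pointers may dangle
    value_ptr.off = nextOffset(); // hypothetical update
    try appendMoreEntries(&map); // may insert into `map`, growing it
}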
@ -774,7 +778,7 @@ fn addDbgInfoType(
// DW.AT.byte_size, DW.FORM.data1
dbg_info_buffer.appendAssumeCapacity(@intCast(u8, ty.abiSize(target)));
// DW.AT.name, DW.FORM.string
try dbg_info_buffer.writer().print("{}\x00", .{ty});
try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(target)});
},
.Optional => {
if (ty.isPtrLikeOptional()) {
@ -785,7 +789,7 @@ fn addDbgInfoType(
// DW.AT.byte_size, DW.FORM.data1
dbg_info_buffer.appendAssumeCapacity(@intCast(u8, ty.abiSize(target)));
// DW.AT.name, DW.FORM.string
try dbg_info_buffer.writer().print("{}\x00", .{ty});
try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(target)});
} else {
// Non-pointer optionals are structs: struct { .maybe = *, .val = * }
var buf = try arena.create(Type.Payload.ElemType);
@ -796,7 +800,7 @@ fn addDbgInfoType(
const abi_size = ty.abiSize(target);
try leb128.writeULEB128(dbg_info_buffer.writer(), abi_size);
// DW.AT.name, DW.FORM.string
try dbg_info_buffer.writer().print("{}\x00", .{ty});
try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(target)});
// DW.AT.member
try dbg_info_buffer.ensureUnusedCapacity(7);
dbg_info_buffer.appendAssumeCapacity(abbrev_struct_member);
@ -835,7 +839,7 @@ fn addDbgInfoType(
// DW.AT.byte_size, DW.FORM.sdata
dbg_info_buffer.appendAssumeCapacity(@sizeOf(usize) * 2);
// DW.AT.name, DW.FORM.string
try dbg_info_buffer.writer().print("{}\x00", .{ty});
try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(target)});
// DW.AT.member
try dbg_info_buffer.ensureUnusedCapacity(5);
dbg_info_buffer.appendAssumeCapacity(abbrev_struct_member);
@ -882,7 +886,7 @@ fn addDbgInfoType(
const abi_size = ty.abiSize(target);
try leb128.writeULEB128(dbg_info_buffer.writer(), abi_size);
// DW.AT.name, DW.FORM.string
const struct_name = try ty.nameAllocArena(arena);
const struct_name = try ty.nameAllocArena(arena, target);
try dbg_info_buffer.ensureUnusedCapacity(struct_name.len + 1);
dbg_info_buffer.appendSliceAssumeCapacity(struct_name);
dbg_info_buffer.appendAssumeCapacity(0);
@ -915,13 +919,15 @@ fn addDbgInfoType(
try dbg_info_buffer.append(0);
},
else => {
log.debug("TODO implement .debug_info for type '{}'", .{ty});
log.debug("TODO implement .debug_info for type '{}'", .{ty.fmtDebug()});
try dbg_info_buffer.append(abbrev_pad1);
},
}
for (relocs.items) |rel| {
const gop = try dbg_info_type_relocs.getOrPut(self.allocator, rel.ty);
const gop = try dbg_info_type_relocs.getOrPutContext(self.allocator, rel.ty, .{
.target = self.target,
});
if (!gop.found_existing) {
gop.value_ptr.* = .{
.off = undefined,

View File

@ -3874,7 +3874,7 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void {
/// Checks if the value, or any of its embedded values stores a pointer, and thus requires
/// a rebase opcode for the dynamic linker.
fn needsPointerRebase(ty: Type, val: Value) bool {
fn needsPointerRebase(ty: Type, val: Value, target: std.Target) bool {
if (ty.zigTypeTag() == .Fn) {
return false;
}
@ -3890,7 +3890,7 @@ fn needsPointerRebase(ty: Type, val: Value) bool {
const elem_ty = ty.childType();
var elem_value_buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(0, &elem_value_buf);
return needsPointerRebase(elem_ty, elem_val);
return needsPointerRebase(elem_ty, elem_val, target);
},
.Struct => {
const fields = ty.structFields().values();
@ -3898,7 +3898,7 @@ fn needsPointerRebase(ty: Type, val: Value) bool {
if (val.castTag(.aggregate)) |payload| {
const field_values = payload.data;
for (field_values) |field_val, i| {
if (needsPointerRebase(fields[i].ty, field_val)) return true;
if (needsPointerRebase(fields[i].ty, field_val, target)) return true;
} else return false;
} else return false;
},
@ -3907,18 +3907,18 @@ fn needsPointerRebase(ty: Type, val: Value) bool {
const sub_val = payload.data;
var buffer: Type.Payload.ElemType = undefined;
const sub_ty = ty.optionalChild(&buffer);
return needsPointerRebase(sub_ty, sub_val);
return needsPointerRebase(sub_ty, sub_val, target);
} else return false;
},
.Union => {
const union_obj = val.cast(Value.Payload.Union).?.data;
const active_field_ty = ty.unionFieldType(union_obj.tag);
return needsPointerRebase(active_field_ty, union_obj.val);
const active_field_ty = ty.unionFieldType(union_obj.tag, target);
return needsPointerRebase(active_field_ty, union_obj.val, target);
},
.ErrorUnion => {
if (val.castTag(.eu_payload)) |payload| {
const payload_ty = ty.errorUnionPayload();
return needsPointerRebase(payload_ty, payload.data);
return needsPointerRebase(payload_ty, payload.data, target);
} else return false;
},
else => return false,
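`needsPointerRebase` recurses through aggregate values, so the new `target` parameter has to be threaded through every self-call; the question it answers is whether a constant embeds an address the dynamic linker must rebase at load time. A hypothetical illustration:

// Hypothetical: a constant that stores an address needs a rebase
// opcode; a plain integer constant does not.
//   const Str = extern struct { ptr: [*:0]const u8, len: usize };
//   needsPointerRebase(str_ty, str_val, target)      => true
//   needsPointerRebase(Type.usize, int_val, target)  => false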
@ -3927,7 +3927,8 @@ fn needsPointerRebase(ty: Type, val: Value) bool {
fn getMatchingSectionAtom(self: *MachO, atom: *Atom, name: []const u8, ty: Type, val: Value) !MatchingSection {
const code = atom.code.items;
const alignment = ty.abiAlignment(self.base.options.target);
const target = self.base.options.target;
const alignment = ty.abiAlignment(target);
const align_log_2 = math.log2(alignment);
const zig_ty = ty.zigTypeTag();
const mode = self.base.options.optimize_mode;
@ -3954,7 +3955,7 @@ fn getMatchingSectionAtom(self: *MachO, atom: *Atom, name: []const u8, ty: Type,
};
}
if (needsPointerRebase(ty, val)) {
if (needsPointerRebase(ty, val, target)) {
break :blk (try self.getMatchingSection(.{
.segname = makeStaticString("__DATA_CONST"),
.sectname = makeStaticString("__const"),

View File

@ -299,12 +299,12 @@ const Writer = struct {
fn writeTy(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
const ty = w.air.instructions.items(.data)[inst].ty;
try s.print("{}", .{ty});
try s.print("{}", .{ty.fmtDebug()});
}
fn writeTyOp(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
const ty_op = w.air.instructions.items(.data)[inst].ty_op;
try s.print("{}, ", .{w.air.getRefType(ty_op.ty)});
try s.print("{}, ", .{w.air.getRefType(ty_op.ty).fmtDebug()});
try w.writeOperand(s, inst, 0, ty_op.operand);
}
@ -313,7 +313,7 @@ const Writer = struct {
const extra = w.air.extraData(Air.Block, ty_pl.payload);
const body = w.air.extra[extra.end..][0..extra.data.body_len];
try s.print("{}, {{\n", .{w.air.getRefType(ty_pl.ty)});
try s.print("{}, {{\n", .{w.air.getRefType(ty_pl.ty).fmtDebug()});
const old_indent = w.indent;
w.indent += 2;
try w.writeBody(s, body);
@ -328,7 +328,7 @@ const Writer = struct {
const len = @intCast(usize, vector_ty.arrayLen());
const elements = @bitCast([]const Air.Inst.Ref, w.air.extra[ty_pl.payload..][0..len]);
try s.print("{}, [", .{vector_ty});
try s.print("{}, [", .{vector_ty.fmtDebug()});
for (elements) |elem, i| {
if (i != 0) try s.writeAll(", ");
try w.writeOperand(s, inst, i, elem);
@ -502,7 +502,7 @@ const Writer = struct {
fn writeConstant(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
const ty_pl = w.air.instructions.items(.data)[inst].ty_pl;
const val = w.air.values[ty_pl.payload];
try s.print("{}, {}", .{ w.air.getRefType(ty_pl.ty), val.fmtDebug() });
try s.print("{}, {}", .{ w.air.getRefType(ty_pl.ty).fmtDebug(), val.fmtDebug() });
}
fn writeAssembly(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
@ -514,7 +514,7 @@ const Writer = struct {
var op_index: usize = 0;
const ret_ty = w.air.typeOfIndex(inst);
try s.print("{}", .{ret_ty});
try s.print("{}", .{ret_ty.fmtDebug()});
if (is_volatile) {
try s.writeAll(", volatile");

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -125,6 +125,7 @@ test {
_ = @import("behavior/src.zig");
_ = @import("behavior/struct.zig");
_ = @import("behavior/struct_contains_null_ptr_itself.zig");
_ = @import("behavior/struct_contains_slice_of_itself.zig");
_ = @import("behavior/switch.zig");
_ = @import("behavior/switch_prong_err_enum.zig");
_ = @import("behavior/switch_prong_implicit_cast.zig");
@ -179,6 +180,5 @@ test {
_ = @import("behavior/bugs/6781.zig");
_ = @import("behavior/bugs/7027.zig");
_ = @import("behavior/select.zig");
_ = @import("behavior/struct_contains_slice_of_itself.zig");
}
}