stage2: compile error fixes for AIR memory layout branch

Now the branch compiles again, provided that one uses
`-Dskip-non-native`, but many code paths are disabled. They can now be
re-enabled one at a time and updated to conform to the new AIR memory
layout.
Andrew Kelley 2021-07-13 15:45:08 -07:00
parent 0f38f68696
commit c09b973ec2
9 changed files with 842 additions and 631 deletions
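The change driving most of these fixes: `Air.Inst.Ref` values below `Air.Inst.Ref.typed_value_map.len` name well-known interned constants, and everything at or above that threshold encodes an instruction index. `Sema.indexToRef` (used in the hunks below) converts one way; a minimal sketch of both directions, based only on the arithmetic visible in this diff (`refToIndex` is named by analogy and is not part of this commit):

```zig
const Air = @import("Air.zig");

// Instruction index -> Ref: shift past the interned-constant range.
// Mirrors the arithmetic behind `Sema.indexToRef` as used below.
fn indexToRef(inst: Air.Inst.Index) Air.Inst.Ref {
    const ref_start_index = @intCast(u32, Air.Inst.Ref.typed_value_map.len);
    return @intToEnum(Air.Inst.Ref, ref_start_index + inst);
}

// Ref -> instruction index; null means the Ref is an interned constant.
// Matches the checks in `getRefType` and `trackOperands` below.
fn refToIndex(ref: Air.Inst.Ref) ?Air.Inst.Index {
    const ref_int = @enumToInt(ref);
    const ref_start_index = @intCast(u32, Air.Inst.Ref.typed_value_map.len);
    if (ref_int < ref_start_index) return null;
    return ref_int - ref_start_index;
}
```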

View File

@@ -332,12 +332,12 @@ pub const Block = struct {
body_len: u32,
};
- /// Trailing is a list of `Ref` for every `args_len`.
+ /// Trailing is a list of `Inst.Ref` for every `args_len`.
pub const Call = struct {
args_len: u32,
};
- /// This data is stored inside extra, with two sets of trailing `Ref`:
+ /// This data is stored inside extra, with two sets of trailing `Inst.Ref`:
/// * 0. the then body, according to `then_body_len`.
/// * 1. the else body, according to `else_body_len`.
pub const CondBr = struct {
@@ -355,19 +355,19 @@ pub const SwitchBr = struct {
/// Trailing:
/// * instruction index for each `body_len`.
pub const Case = struct {
- item: Ref,
+ item: Inst.Ref,
body_len: u32,
};
};
pub const StructField = struct {
- struct_ptr: Ref,
+ struct_ptr: Inst.Ref,
field_index: u32,
};
/// Trailing:
- /// 0. `Ref` for every outputs_len
- /// 1. `Ref` for every inputs_len
+ /// 0. `Inst.Ref` for every outputs_len
+ /// 1. `Inst.Ref` for every inputs_len
pub const Asm = struct {
/// Index to the corresponding ZIR instruction.
/// `asm_source`, `outputs_len`, `inputs_len`, `clobbers_len`, `is_volatile`, and
@@ -381,6 +381,24 @@ pub fn getMainBody(air: Air) []const Air.Inst.Index {
return air.extra[body_index..][0..body_len];
}
+ pub fn getType(air: Air, inst: Air.Inst.Index) Type {
+ _ = air;
+ _ = inst;
+ @panic("TODO Air getType");
+ }
+ pub fn getRefType(air: Air, ref: Air.Inst.Ref) Type {
+ var i: usize = @enumToInt(ref);
+ if (i < Air.Inst.Ref.typed_value_map.len) {
+ return Air.Inst.Ref.typed_value_map[i].val.toType(undefined) catch unreachable;
+ }
+ i -= Air.Inst.Ref.typed_value_map.len;
+ const air_tags = air.instructions.items(.tag);
+ const air_datas = air.instructions.items(.data);
+ assert(air_tags[i] == .const_ty);
+ return air_datas[i].ty;
+ }
/// Returns the requested data, as well as the new index which is at the start of the
/// trailers for the object.
pub fn extraData(air: Air, comptime T: type, index: usize) struct { data: T, end: usize } {
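The hunk cuts off before the body of `extraData`. For this layout the decoder is conventionally a comptime loop over `T`'s fields, consuming one `u32` from `air.extra` per field; a sketch under that assumption (illustrative, not the verbatim body; assumes Air.zig's existing `std` import):

```zig
pub fn extraData(air: Air, comptime T: type, index: usize) struct { data: T, end: usize } {
    const fields = std.meta.fields(T);
    var i: usize = index;
    var result: T = undefined;
    inline for (fields) |field| {
        // Each field of T occupies exactly one u32 slot in the extra array.
        @field(result, field.name) = switch (field.field_type) {
            u32 => air.extra[i],
            Air.Inst.Ref => @intToEnum(Air.Inst.Ref, air.extra[i]),
            else => @compileError("bad field type"),
        };
        i += 1;
    }
    return .{ .data = result, .end = i };
}
```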

View File

@@ -2023,7 +2023,7 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor
defer air.deinit(gpa);
log.debug("analyze liveness of {s}", .{decl.name});
- var liveness = try Liveness.analyze(gpa, air);
+ var liveness = try Liveness.analyze(gpa, air, decl.namespace.file_scope.zir);
defer liveness.deinit(gpa);
if (std.builtin.mode == .Debug and self.verbose_air) {

View File

@@ -7,11 +7,13 @@
//! * Switch Branches
const Liveness = @This();
const std = @import("std");
- const Air = @import("Air.zig");
const trace = @import("tracy.zig").trace;
const log = std.log.scoped(.liveness);
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
+ const Air = @import("Air.zig");
+ const Zir = @import("Zir.zig");
+ const Log2Int = std.math.Log2Int;
/// This array is split into sets of 4 bits per AIR instruction.
/// The MSB (0bX000) is whether the instruction is unreferenced.
@@ -44,7 +46,7 @@ pub const SwitchBr = struct {
else_death_count: u32,
};
- pub fn analyze(gpa: *Allocator, air: Air) Allocator.Error!Liveness {
+ pub fn analyze(gpa: *Allocator, air: Air, zir: Zir) Allocator.Error!Liveness {
const tracy = trace(@src());
defer tracy.end();
@@ -58,6 +60,7 @@ pub fn analyze(gpa: *Allocator, air: Air) Allocator.Error!Liveness {
),
.extra = .{},
.special = .{},
+ .zir = &zir,
};
errdefer gpa.free(a.tomb_bits);
errdefer a.special.deinit(gpa);
@@ -74,23 +77,32 @@ pub fn analyze(gpa: *Allocator, air: Air) Allocator.Error!Liveness {
};
}
+ pub fn getTombBits(l: Liveness, inst: Air.Inst.Index) Bpi {
+ const usize_index = (inst * bpi) / @bitSizeOf(usize);
+ return @truncate(Bpi, l.tomb_bits[usize_index] >>
+ @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi));
+ }
pub fn isUnused(l: Liveness, inst: Air.Inst.Index) bool {
const usize_index = (inst * bpi) / @bitSizeOf(usize);
- const mask = @as(usize, 1) << ((inst % (@bitSizeOf(usize) / bpi)) * bpi + (bpi - 1));
+ const mask = @as(usize, 1) <<
+ @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi + (bpi - 1));
return (l.tomb_bits[usize_index] & mask) != 0;
}
pub fn operandDies(l: Liveness, inst: Air.Inst.Index, operand: OperandInt) bool {
assert(operand < bpi - 1);
const usize_index = (inst * bpi) / @bitSizeOf(usize);
- const mask = @as(usize, 1) << ((inst % (@bitSizeOf(usize) / bpi)) * bpi + operand);
+ const mask = @as(usize, 1) <<
+ @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi + operand);
return (l.tomb_bits[usize_index] & mask) != 0;
}
pub fn clearOperandDeath(l: *Liveness, inst: Air.Inst.Index, operand: OperandInt) void {
assert(operand < bpi - 1);
const usize_index = (inst * bpi) / @bitSizeOf(usize);
- const mask = @as(usize, 1) << ((inst % (@bitSizeOf(usize) / bpi)) * bpi + operand);
+ const mask = @as(usize, 1) <<
+ @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi + operand);
l.tomb_bits[usize_index] |= mask;
}
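All three fixed functions share one addressing scheme: 4 tomb bits per instruction, `@bitSizeOf(usize) / 4` instructions packed per word, operand-death bits in the low three positions of each nibble and the unreferenced flag in its MSB (per the doc comment above). An illustrative helper, not part of the commit, that names the two coordinates:

```zig
const std = @import("std");
const Log2Int = std.math.Log2Int;
const bpi = 4; // bits per instruction, as documented in this file

// Locates instruction `inst`'s 4-bit slot: which usize word holds it,
// and how far to shift within that word.
fn tombSlot(inst: u32) struct { word: usize, shift: Log2Int(usize) } {
    const per_word = @bitSizeOf(usize) / bpi; // 16 slots per word on 64-bit targets
    return .{
        .word = (inst * bpi) / @bitSizeOf(usize),
        .shift = @intCast(Log2Int(usize), (inst % per_word) * bpi),
    };
}
```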
@@ -113,10 +125,12 @@ const Analysis = struct {
tomb_bits: []usize,
special: std.AutoHashMapUnmanaged(Air.Inst.Index, u32),
extra: std.ArrayListUnmanaged(u32),
+ zir: *const Zir,
fn storeTombBits(a: *Analysis, inst: Air.Inst.Index, tomb_bits: Bpi) void {
const usize_index = (inst * bpi) / @bitSizeOf(usize);
- a.tomb_bits[usize_index] |= tomb_bits << (inst % (@bitSizeOf(usize) / bpi)) * bpi;
+ a.tomb_bits[usize_index] |= @as(usize, tomb_bits) <<
+ @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi);
}
fn addExtra(a: *Analysis, extra: anytype) Allocator.Error!u32 {
@@ -203,9 +217,11 @@ fn analyzeInst(
return trackOperands(a, new_set, inst, main_tomb, .{ o.lhs, o.rhs, .none });
},
.arg,
+ .alloc,
.br,
.constant,
+ .const_ty,
.breakpoint,
.dbg_stmt,
.varptr,
@@ -255,15 +271,30 @@ fn analyzeInst(
if (args.len <= bpi - 2) {
var buf: [bpi - 1]Air.Inst.Ref = undefined;
buf[0] = callee;
- std.mem.copy(&buf, buf[1..], args);
+ std.mem.copy(Air.Inst.Ref, buf[1..], @bitCast([]const Air.Inst.Ref, args));
return trackOperands(a, new_set, inst, main_tomb, buf);
}
@panic("TODO: liveness analysis for function with many args");
@panic("TODO: liveness analysis for function with greater than 2 args");
},
.struct_field_ptr => {
const extra = a.air.extraData(Air.StructField, inst_datas[inst].ty_pl.payload).data;
return trackOperands(a, new_set, inst, main_tomb, .{ extra.struct_ptr, .none, .none });
},
+ .assembly => {
+ const extra = a.air.extraData(Air.Asm, inst_datas[inst].ty_pl.payload);
+ const extended = a.zir.instructions.items(.data)[extra.data.zir_index].extended;
+ const outputs_len = @truncate(u5, extended.small);
+ const inputs_len = @truncate(u5, extended.small >> 5);
+ const outputs = a.air.extra[extra.end..][0..outputs_len];
+ const inputs = a.air.extra[extra.end + outputs.len ..][0..inputs_len];
+ if (outputs.len + inputs.len <= bpi - 1) {
+ var buf: [bpi - 1]Air.Inst.Ref = undefined;
+ std.mem.copy(Air.Inst.Ref, &buf, @bitCast([]const Air.Inst.Ref, outputs));
+ std.mem.copy(Air.Inst.Ref, buf[outputs.len..], @bitCast([]const Air.Inst.Ref, inputs));
+ return trackOperands(a, new_set, inst, main_tomb, buf);
+ }
+ @panic("TODO: liveness analysis for asm with greater than 3 args");
+ },
.block => {
const extra = a.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload);
const body = a.air.extra[extra.end..][0..extra.data.body_len];
@@ -287,8 +318,8 @@ fn analyzeInst(
const then_body = a.air.extra[extra.end..][0..extra.data.then_body_len];
const else_body = a.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len];
- var then_table = std.AutoHashMap(Air.Inst.Index, void).init(gpa);
- defer then_table.deinit();
+ var then_table: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{};
+ defer then_table.deinit(gpa);
try analyzeWithContext(a, &then_table, then_body);
// Reset the table back to its state from before the branch.
@@ -299,8 +330,8 @@ fn analyzeInst(
}
}
- var else_table = std.AutoHashMap(Air.Inst.Index, void).init(gpa);
- defer else_table.deinit();
+ var else_table: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{};
+ defer else_table.deinit(gpa);
try analyzeWithContext(a, &else_table, else_body);
var then_entry_deaths = std.ArrayList(Air.Inst.Index).init(gpa);
@@ -331,7 +362,7 @@ fn analyzeInst(
}
// Now we have to correctly populate new_set.
if (new_set) |ns| {
- try ns.ensureCapacity(@intCast(u32, ns.count() + then_table.count() + else_table.count()));
+ try ns.ensureCapacity(gpa, @intCast(u32, ns.count() + then_table.count() + else_table.count()));
var it = then_table.keyIterator();
while (it.next()) |key| {
_ = ns.putAssumeCapacity(key.*, {});
@@ -344,7 +375,7 @@ fn analyzeInst(
const then_death_count = @intCast(u32, then_entry_deaths.items.len);
const else_death_count = @intCast(u32, else_entry_deaths.items.len);
- try a.extra.ensureUnusedCapacity(std.meta.fields(@TypeOf(CondBr)).len +
+ try a.extra.ensureUnusedCapacity(gpa, std.meta.fields(Air.CondBr).len +
then_death_count + else_death_count);
const extra_index = a.addExtraAssumeCapacity(CondBr{
.then_death_count = then_death_count,
@@ -352,7 +383,7 @@ fn analyzeInst(
});
a.extra.appendSliceAssumeCapacity(then_entry_deaths.items);
a.extra.appendSliceAssumeCapacity(else_entry_deaths.items);
- try a.special.put(inst, extra_index);
+ try a.special.put(gpa, inst, extra_index);
// Continue on with the instruction analysis. The following code will find the condition
// instruction, and the deaths flag for the CondBr instruction will indicate whether the
@@ -438,12 +469,12 @@ fn analyzeInst(
});
for (case_deaths[0 .. case_deaths.len - 1]) |*cd| {
const case_death_count = @intCast(u32, cd.items.len);
- try a.extra.ensureUnusedCapacity(1 + case_death_count + else_death_count);
+ try a.extra.ensureUnusedCapacity(gpa, 1 + case_death_count + else_death_count);
a.extra.appendAssumeCapacity(case_death_count);
a.extra.appendSliceAssumeCapacity(cd.items);
}
a.extra.appendSliceAssumeCapacity(case_deaths[case_deaths.len - 1].items);
- try a.special.put(inst, extra_index);
+ try a.special.put(gpa, inst, extra_index);
return trackOperands(a, new_set, inst, main_tomb, .{ condition, .none, .none });
},
@@ -452,7 +483,7 @@ fn analyzeInst(
fn trackOperands(
a: *Analysis,
- new_set: ?*std.AutoHashMap(Air.Inst.Index, void),
+ new_set: ?*std.AutoHashMapUnmanaged(Air.Inst.Index, void),
inst: Air.Inst.Index,
main_tomb: bool,
operands: [bpi - 1]Air.Inst.Ref,
@@ -468,12 +499,12 @@ fn trackOperands(
tomb_bits <<= 1;
const op_int = @enumToInt(operands[i]);
if (op_int < Air.Inst.Ref.typed_value_map.len) continue;
- const operand: Air.Inst.Index = op_int - Air.Inst.Ref.typed_value_map.len;
+ const operand: Air.Inst.Index = op_int - @intCast(u32, Air.Inst.Ref.typed_value_map.len);
const prev = try table.fetchPut(gpa, operand, {});
if (prev == null) {
// Death.
tomb_bits |= 1;
- if (new_set) |ns| try ns.putNoClobber(operand, {});
+ if (new_set) |ns| try ns.putNoClobber(gpa, operand, {});
}
}
a.storeTombBits(inst, tomb_bits);
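The new `.assembly` case is why Liveness now needs a `zir` field: the output and input counts are not stored in AIR but in the corresponding ZIR instruction's `extended.small` field, packed as two 5-bit integers. A standalone sketch of the unpacking (field layout taken from the `@truncate` calls above):

```zig
// `small` is the u16 from Zir's extended-instruction encoding; the low
// 5 bits carry outputs_len and the next 5 bits carry inputs_len.
fn unpackAsmSmall(small: u16) struct { outputs_len: u5, inputs_len: u5 } {
    return .{
        .outputs_len = @truncate(u5, small),
        .inputs_len = @truncate(u5, small >> 5),
    };
}
```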

View File

@@ -1225,6 +1225,30 @@ pub const Scope = struct {
pub fn getFileScope(block: *Block) *Scope.File {
return block.src_decl.namespace.file_scope;
}
+ pub fn addTyOp(
+ block: *Block,
+ tag: Air.Inst.Tag,
+ ty: Type,
+ operand: Air.Inst.Ref,
+ ) error{OutOfMemory}!Air.Inst.Ref {
+ const sema = block.sema;
+ const gpa = sema.gpa;
+ try sema.air_instructions.ensureUnusedCapacity(gpa, 1);
+ try block.instructions.ensureUnusedCapacity(gpa, 1);
+ const inst = @intCast(Air.Inst.Index, sema.air_instructions.len);
+ sema.air_instructions.appendAssumeCapacity(.{
+ .tag = tag,
+ .data = .{ .ty_op = .{
+ .ty = try sema.addType(ty),
+ .operand = operand,
+ } },
+ });
+ block.instructions.appendAssumeCapacity(inst);
+ return Sema.indexToRef(inst);
+ }
};
};
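`addTyOp` reserves capacity in both arrays up front so that neither `appendAssumeCapacity` can fail partway through, and it hands back a `Ref` rather than a raw index. A hypothetical call site (`dest_ty` and `operand` are assumed to be in scope, and `.bitcast` is assumed to use the `ty_op` encoding):

```zig
// Emit a bitcast of `operand` to `dest_ty` inside `block`.
const result = try block.addTyOp(.bitcast, dest_ty, operand);
```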
@@ -3408,7 +3432,7 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !Air {
defer decl.value_arena.?.* = arena.state;
const fn_ty = decl.ty;
- const param_inst_list = try gpa.alloc(Air.Inst.Index, fn_ty.fnParamLen());
+ const param_inst_list = try gpa.alloc(Air.Inst.Ref, fn_ty.fnParamLen());
defer gpa.free(param_inst_list);
var sema: Sema = .{
@@ -3440,10 +3464,13 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !Air {
defer inner_block.instructions.deinit(gpa);
// AIR requires the arg parameters to be the first N instructions.
try inner_block.instructions.ensureTotalCapacity(gpa, param_inst_list.len);
for (param_inst_list) |*param_inst, param_index| {
const param_type = fn_ty.fnParamType(param_index);
const ty_ref = try sema.addType(param_type);
- param_inst.* = @intCast(u32, sema.air_instructions.len);
+ const arg_index = @intCast(u32, sema.air_instructions.len);
+ inner_block.instructions.appendAssumeCapacity(arg_index);
+ param_inst.* = Sema.indexToRef(arg_index);
try sema.air_instructions.append(gpa, .{
.tag = .arg,
.data = .{
@@ -3454,7 +3481,6 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !Air {
},
});
}
- try inner_block.instructions.appendSlice(gpa, param_inst_list);
func.state = .in_progress;
log.debug("set {s} to in_progress", .{decl.name});
@@ -4043,13 +4069,11 @@ pub fn floatMul(
}
pub fn simplePtrType(
mod: *Module,
- arena: *Allocator,
elem_ty: Type,
mutable: bool,
size: std.builtin.TypeInfo.Pointer.Size,
) Allocator.Error!Type {
_ = mod;
if (!mutable and size == .Slice and elem_ty.eql(Type.initTag(.u8))) {
return Type.initTag(.const_slice_u8);
}

File diff suppressed because it is too large

View File

@@ -494,7 +494,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
defer function.blocks.deinit(bin_file.allocator);
defer function.exitlude_jump_relocs.deinit(bin_file.allocator);
- var call_info = function.resolveCallingConventionValues(src_loc.lazy, fn_type) catch |err| switch (err) {
+ var call_info = function.resolveCallingConventionValues(fn_type) catch |err| switch (err) {
error.CodegenFail => return Result{ .fail = function.err_msg.? },
else => |e| return e,
};
@@ -537,7 +537,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
self.code.items.len += 4;
try self.dbgSetPrologueEnd();
- try self.genBody(self.mod_fn.body);
+ try self.genBody(self.air.getMainBody());
const stack_end = self.max_end_stack;
if (stack_end > math.maxInt(i32))
@@ -578,7 +578,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
});
} else {
try self.dbgSetPrologueEnd();
- try self.genBody(self.mod_fn.body);
+ try self.genBody(self.air.getMainBody());
try self.dbgSetEpilogueBegin();
}
},
@@ -758,11 +758,12 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
// TODO inline this logic into every instruction
- var i: ir.Inst.DeathsBitIndex = 0;
- while (inst.getOperand(i)) |operand| : (i += 1) {
- if (inst.operandDies(i))
- self.processDeath(operand);
- }
+ @panic("TODO rework AIR memory layout codegen for processing deaths");
+ //var i: ir.Inst.DeathsBitIndex = 0;
+ //while (inst.getOperand(i)) |operand| : (i += 1) {
+ // if (inst.operandDies(i))
+ // self.processDeath(operand);
+ //}
}
}
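The old loop cannot survive because operands no longer live on a heap-allocated `Inst` object. The rework will presumably pair the new `Liveness` accessors with per-tag decoding of `Air` data; a plausible shape, assuming `self.liveness` exists here and that `Liveness.bpi` and `Liveness.OperandInt` are public:

```zig
// Up to bpi - 1 operand-death bits are tracked per instruction.
var op_index: Liveness.OperandInt = 0;
while (op_index < Liveness.bpi - 1) : (op_index += 1) {
    if (self.liveness.operandDies(inst, op_index)) {
        // Resolving which operand occupies slot `op_index` requires
        // switching on the instruction's tag -- that is the TODO above.
    }
}
```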
@@ -858,74 +859,76 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
const air_tags = self.air.instructions.items(.tag);
switch (air_tags[inst]) {
// zig fmt: off
- .add => return self.genAdd(inst.castTag(.add).?),
- .addwrap => return self.genAddWrap(inst.castTag(.addwrap).?),
- .sub => return self.genSub(inst.castTag(.sub).?),
- .subwrap => return self.genSubWrap(inst.castTag(.subwrap).?),
- .mul => return self.genMul(inst.castTag(.mul).?),
- .mulwrap => return self.genMulWrap(inst.castTag(.mulwrap).?),
- .div => return self.genDiv(inst.castTag(.div).?),
+ //.add => return self.genAdd(inst.castTag(.add).?),
+ //.addwrap => return self.genAddWrap(inst.castTag(.addwrap).?),
+ //.sub => return self.genSub(inst.castTag(.sub).?),
+ //.subwrap => return self.genSubWrap(inst.castTag(.subwrap).?),
+ //.mul => return self.genMul(inst.castTag(.mul).?),
+ //.mulwrap => return self.genMulWrap(inst.castTag(.mulwrap).?),
+ //.div => return self.genDiv(inst.castTag(.div).?),
- .cmp_lt => return self.genCmp(inst.castTag(.cmp_lt).?, .lt),
- .cmp_lte => return self.genCmp(inst.castTag(.cmp_lte).?, .lte),
- .cmp_eq => return self.genCmp(inst.castTag(.cmp_eq).?, .eq),
- .cmp_gte => return self.genCmp(inst.castTag(.cmp_gte).?, .gte),
- .cmp_gt => return self.genCmp(inst.castTag(.cmp_gt).?, .gt),
- .cmp_neq => return self.genCmp(inst.castTag(.cmp_neq).?, .neq),
+ //.cmp_lt => return self.genCmp(inst.castTag(.cmp_lt).?, .lt),
+ //.cmp_lte => return self.genCmp(inst.castTag(.cmp_lte).?, .lte),
+ //.cmp_eq => return self.genCmp(inst.castTag(.cmp_eq).?, .eq),
+ //.cmp_gte => return self.genCmp(inst.castTag(.cmp_gte).?, .gte),
+ //.cmp_gt => return self.genCmp(inst.castTag(.cmp_gt).?, .gt),
+ //.cmp_neq => return self.genCmp(inst.castTag(.cmp_neq).?, .neq),
- .bool_and => return self.genBoolOp(inst.castTag(.bool_and).?),
- .bool_or => return self.genBoolOp(inst.castTag(.bool_or).?),
- .bit_and => return self.genBitAnd(inst.castTag(.bit_and).?),
- .bit_or => return self.genBitOr(inst.castTag(.bit_or).?),
- .xor => return self.genXor(inst.castTag(.xor).?),
+ //.bool_and => return self.genBoolOp(inst.castTag(.bool_and).?),
+ //.bool_or => return self.genBoolOp(inst.castTag(.bool_or).?),
+ //.bit_and => return self.genBitAnd(inst.castTag(.bit_and).?),
+ //.bit_or => return self.genBitOr(inst.castTag(.bit_or).?),
+ //.xor => return self.genXor(inst.castTag(.xor).?),
- .alloc => return self.genAlloc(inst.castTag(.alloc).?),
- .arg => return self.genArg(inst.castTag(.arg).?),
- .assembly => return self.genAsm(inst.castTag(.assembly).?),
- .bitcast => return self.genBitCast(inst.castTag(.bitcast).?),
- .block => return self.genBlock(inst.castTag(.block).?),
- .br => return self.genBr(inst.castTag(.br).?),
- .br_block_flat => return self.genBrBlockFlat(inst.castTag(.br_block_flat).?),
- .breakpoint => return self.genBreakpoint(inst.src),
- .call => return self.genCall(inst.castTag(.call).?),
- .cond_br => return self.genCondBr(inst.castTag(.condbr).?),
- .dbg_stmt => return self.genDbgStmt(inst.castTag(.dbg_stmt).?),
- .floatcast => return self.genFloatCast(inst.castTag(.floatcast).?),
- .intcast => return self.genIntCast(inst.castTag(.intcast).?),
- .is_non_null => return self.genIsNonNull(inst.castTag(.is_non_null).?),
- .is_non_null_ptr => return self.genIsNonNullPtr(inst.castTag(.is_non_null_ptr).?),
- .is_null => return self.genIsNull(inst.castTag(.is_null).?),
- .is_null_ptr => return self.genIsNullPtr(inst.castTag(.is_null_ptr).?),
- .is_non_err => return self.genIsNonErr(inst.castTag(.is_non_err).?),
- .is_non_err_ptr => return self.genIsNonErrPtr(inst.castTag(.is_non_err_ptr).?),
- .is_err => return self.genIsErr(inst.castTag(.is_err).?),
- .is_err_ptr => return self.genIsErrPtr(inst.castTag(.is_err_ptr).?),
- .load => return self.genLoad(inst.castTag(.load).?),
- .loop => return self.genLoop(inst.castTag(.loop).?),
- .not => return self.genNot(inst.castTag(.not).?),
- .ptrtoint => return self.genPtrToInt(inst.castTag(.ptrtoint).?),
- .ref => return self.genRef(inst.castTag(.ref).?),
- .ret => return self.genRet(inst.castTag(.ret).?),
- .store => return self.genStore(inst.castTag(.store).?),
- .struct_field_ptr=> return self.genStructFieldPtr(inst.castTag(.struct_field_ptr).?),
- .switchbr => return self.genSwitch(inst.castTag(.switchbr).?),
- .varptr => return self.genVarPtr(inst.castTag(.varptr).?),
+ //.alloc => return self.genAlloc(inst.castTag(.alloc).?),
+ //.arg => return self.genArg(inst.castTag(.arg).?),
+ //.assembly => return self.genAsm(inst.castTag(.assembly).?),
+ //.bitcast => return self.genBitCast(inst.castTag(.bitcast).?),
+ //.block => return self.genBlock(inst.castTag(.block).?),
+ //.br => return self.genBr(inst.castTag(.br).?),
+ //.br_block_flat => return self.genBrBlockFlat(inst.castTag(.br_block_flat).?),
+ //.breakpoint => return self.genBreakpoint(inst.src),
+ //.call => return self.genCall(inst.castTag(.call).?),
+ //.cond_br => return self.genCondBr(inst.castTag(.condbr).?),
+ //.dbg_stmt => return self.genDbgStmt(inst.castTag(.dbg_stmt).?),
+ //.floatcast => return self.genFloatCast(inst.castTag(.floatcast).?),
+ //.intcast => return self.genIntCast(inst.castTag(.intcast).?),
+ //.is_non_null => return self.genIsNonNull(inst.castTag(.is_non_null).?),
+ //.is_non_null_ptr => return self.genIsNonNullPtr(inst.castTag(.is_non_null_ptr).?),
+ //.is_null => return self.genIsNull(inst.castTag(.is_null).?),
+ //.is_null_ptr => return self.genIsNullPtr(inst.castTag(.is_null_ptr).?),
+ //.is_non_err => return self.genIsNonErr(inst.castTag(.is_non_err).?),
+ //.is_non_err_ptr => return self.genIsNonErrPtr(inst.castTag(.is_non_err_ptr).?),
+ //.is_err => return self.genIsErr(inst.castTag(.is_err).?),
+ //.is_err_ptr => return self.genIsErrPtr(inst.castTag(.is_err_ptr).?),
+ //.load => return self.genLoad(inst.castTag(.load).?),
+ //.loop => return self.genLoop(inst.castTag(.loop).?),
+ //.not => return self.genNot(inst.castTag(.not).?),
+ //.ptrtoint => return self.genPtrToInt(inst.castTag(.ptrtoint).?),
+ //.ref => return self.genRef(inst.castTag(.ref).?),
+ //.ret => return self.genRet(inst.castTag(.ret).?),
+ //.store => return self.genStore(inst.castTag(.store).?),
+ //.struct_field_ptr=> return self.genStructFieldPtr(inst.castTag(.struct_field_ptr).?),
+ //.switch_br => return self.genSwitch(inst.castTag(.switchbr).?),
+ //.varptr => return self.genVarPtr(inst.castTag(.varptr).?),
- .constant => unreachable, // excluded from function bodies
- .unreach => return MCValue{ .unreach = {} },
+ //.constant => unreachable, // excluded from function bodies
+ //.unreach => return MCValue{ .unreach = {} },
- .optional_payload => return self.genOptionalPayload(inst.castTag(.optional_payload).?),
- .optional_payload_ptr => return self.genOptionalPayloadPtr(inst.castTag(.optional_payload_ptr).?),
- .unwrap_errunion_err => return self.genUnwrapErrErr(inst.castTag(.unwrap_errunion_err).?),
- .unwrap_errunion_payload => return self.genUnwrapErrPayload(inst.castTag(.unwrap_errunion_payload).?),
- .unwrap_errunion_err_ptr => return self.genUnwrapErrErrPtr(inst.castTag(.unwrap_errunion_err_ptr).?),
- .unwrap_errunion_payload_ptr=> return self.genUnwrapErrPayloadPtr(inst.castTag(.unwrap_errunion_payload_ptr).?),
+ //.optional_payload => return self.genOptionalPayload(inst.castTag(.optional_payload).?),
+ //.optional_payload_ptr => return self.genOptionalPayloadPtr(inst.castTag(.optional_payload_ptr).?),
+ //.unwrap_errunion_err => return self.genUnwrapErrErr(inst.castTag(.unwrap_errunion_err).?),
+ //.unwrap_errunion_payload => return self.genUnwrapErrPayload(inst.castTag(.unwrap_errunion_payload).?),
+ //.unwrap_errunion_err_ptr => return self.genUnwrapErrErrPtr(inst.castTag(.unwrap_errunion_err_ptr).?),
+ //.unwrap_errunion_payload_ptr=> return self.genUnwrapErrPayloadPtr(inst.castTag(.unwrap_errunion_payload_ptr).?),
- .wrap_optional => return self.genWrapOptional(inst.castTag(.wrap_optional).?),
- .wrap_errunion_payload => return self.genWrapErrUnionPayload(inst.castTag(.wrap_errunion_payload).?),
- .wrap_errunion_err => return self.genWrapErrUnionErr(inst.castTag(.wrap_errunion_err).?),
+ //.wrap_optional => return self.genWrapOptional(inst.castTag(.wrap_optional).?),
+ //.wrap_errunion_payload => return self.genWrapErrUnionPayload(inst.castTag(.wrap_errunion_payload).?),
+ //.wrap_errunion_err => return self.genWrapErrUnionErr(inst.castTag(.wrap_errunion_err).?),
// zig fmt: on
+ else => @panic("TODO finish air memory layout branch, more codegen.zig instructions"),
}
}
@@ -4785,14 +4788,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
};
}
- fn fail(self: *Self, src: LazySrcLoc, comptime format: []const u8, args: anytype) InnerError {
+ fn fail(self: *Self, comptime format: []const u8, args: anytype) InnerError {
@setCold(true);
assert(self.err_msg == null);
- const src_loc = if (src != .unneeded)
- src.toSrcLocWithDecl(self.mod_fn.owner_decl)
- else
- self.src_loc;
- self.err_msg = try ErrorMsg.create(self.bin_file.allocator, src_loc, format, args);
+ self.err_msg = try ErrorMsg.create(self.bin_file.allocator, self.src_loc, format, args);
return error.CodegenFail;
}
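With the `src` parameter gone, every codegen error is attributed to `self.src_loc`, and call sites lose an argument. A hypothetical before/after (the function name and format string are illustrative):

```zig
// before: return self.fail(inst.base.src, "TODO implement genFoo", .{});
// after:
return self.fail("TODO implement genFoo for {}", .{self.target.cpu.arch});
```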

View File

@@ -25,7 +25,7 @@ pub const CValue = union(enum) {
/// Index into local_names, but take the address.
local_ref: usize,
/// A constant instruction, to be rendered inline.
- constant: *Inst,
+ constant: Air.Inst.Index,
/// Index into the parameters
arg: usize,
/// By-value
@@ -99,7 +99,7 @@ pub const Object = struct {
gpa: *mem.Allocator,
code: std.ArrayList(u8),
value_map: CValueMap,
- blocks: std.AutoHashMapUnmanaged(*ir.Inst.Block, BlockData) = .{},
+ blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .{},
next_arg_index: usize = 0,
next_local_index: usize = 0,
next_block_index: usize = 0,
@@ -133,7 +133,12 @@ pub const Object = struct {
.none => unreachable,
.local => |i| return w.print("t{d}", .{i}),
.local_ref => |i| return w.print("&t{d}", .{i}),
- .constant => |inst| return o.dg.renderValue(w, inst.ty, inst.value().?),
+ .constant => |inst| {
+ const ty_pl = o.air.instructions.items(.data)[inst].ty_pl;
+ const ty = o.air.getRefType(ty_pl.ty);
+ const val = o.air.values[ty_pl.payload];
+ return o.dg.renderValue(w, ty, val);
+ },
.arg => |i| return w.print("a{d}", .{i}),
.decl => |decl| return w.writeAll(mem.span(decl.name)),
.decl_ref => |decl| return w.print("&{s}", .{decl.name}),
@@ -213,8 +218,9 @@ pub const DeclGen = struct {
error_msg: ?*Module.ErrorMsg,
typedefs: TypedefMap,
- fn fail(dg: *DeclGen, src: LazySrcLoc, comptime format: []const u8, args: anytype) error{ AnalysisFail, OutOfMemory } {
+ fn fail(dg: *DeclGen, comptime format: []const u8, args: anytype) error{ AnalysisFail, OutOfMemory } {
@setCold(true);
+ const src: LazySrcLoc = .{ .node_offset = 0 };
const src_loc = src.toSrcLocWithDecl(dg.decl);
dg.error_msg = try Module.ErrorMsg.create(dg.module.gpa, src_loc, format, args);
return error.AnalysisFail;
@@ -230,7 +236,7 @@ pub const DeclGen = struct {
// This should lower to 0xaa bytes in safe modes, and for unsafe modes should
// lower to leaving variables uninitialized (that might need to be implemented
// outside of this function).
return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement renderValue undef", .{});
return dg.fail("TODO: C backend: implement renderValue undef", .{});
}
switch (t.zigTypeTag()) {
.Int => {
@@ -440,7 +446,7 @@ pub const DeclGen = struct {
},
else => unreachable,
},
- else => |e| return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement value {s}", .{
+ else => |e| return dg.fail("TODO: C backend: implement value {s}", .{
@tagName(e),
}),
}
@@ -519,14 +525,14 @@ pub const DeclGen = struct {
break;
}
} else {
return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement integer types larger than 128 bits", .{});
return dg.fail("TODO: C backend: implement integer types larger than 128 bits", .{});
}
},
else => unreachable,
}
},
- .Float => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type Float", .{}),
+ .Float => return dg.fail("TODO: C backend: implement type Float", .{}),
.Pointer => {
if (t.isSlice()) {
@@ -681,7 +687,7 @@ pub const DeclGen = struct {
try dg.renderType(w, int_tag_ty);
},
- .Union => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type Union", .{}),
+ .Union => return dg.fail("TODO: C backend: implement type Union", .{}),
.Fn => {
try dg.renderType(w, t.fnReturnType());
try w.writeAll(" (*)(");
@@ -704,10 +710,10 @@ pub const DeclGen = struct {
}
try w.writeByte(')');
},
- .Opaque => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type Opaque", .{}),
- .Frame => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type Frame", .{}),
- .AnyFrame => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type AnyFrame", .{}),
- .Vector => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type Vector", .{}),
+ .Opaque => return dg.fail("TODO: C backend: implement type Opaque", .{}),
+ .Frame => return dg.fail("TODO: C backend: implement type Frame", .{}),
+ .AnyFrame => return dg.fail("TODO: C backend: implement type AnyFrame", .{}),
+ .Vector => return dg.fail("TODO: C backend: implement type Vector", .{}),
.Null,
.Undefined,
@@ -760,7 +766,8 @@ pub fn genDecl(o: *Object) !void {
try o.dg.renderFunctionSignature(o.writer(), is_global);
try o.writer().writeByte(' ');
- try genBody(o, func.body);
+ const main_body = o.air.getMainBody();
+ try genBody(o, main_body);
try o.indent_writer.insertNewline();
return;
@@ -833,9 +840,9 @@ pub fn genHeader(dg: *DeclGen) error{ AnalysisFail, OutOfMemory }!void {
}
}
- pub fn genBody(o: *Object, body: ir.Body) error{ AnalysisFail, OutOfMemory }!void {
+ fn genBody(o: *Object, body: []const Air.Inst.Index) error{ AnalysisFail, OutOfMemory }!void {
const writer = o.writer();
- if (body.instructions.len == 0) {
+ if (body.len == 0) {
try writer.writeAll("{}");
return;
}
@@ -843,82 +850,85 @@ pub fn genBody(o: *Object, body: ir.Body) error{ AnalysisFail, OutOfMemory }!voi
try writer.writeAll("{\n");
o.indent_writer.pushIndent();
- for (body.instructions) |inst| {
- const result_value = switch (inst.tag) {
- // TODO use a different strategy for add that communicates to the optimizer
- // that wrapping is UB.
- .add => try genBinOp(o, inst.castTag(.add).?, " + "),
- .addwrap => try genWrapOp(o, inst.castTag(.addwrap).?, " + ", "addw_"),
- // TODO use a different strategy for sub that communicates to the optimizer
- // that wrapping is UB.
- .sub => try genBinOp(o, inst.castTag(.sub).?, " - "),
- .subwrap => try genWrapOp(o, inst.castTag(.subwrap).?, " - ", "subw_"),
- // TODO use a different strategy for mul that communicates to the optimizer
- // that wrapping is UB.
- .mul => try genBinOp(o, inst.castTag(.sub).?, " * "),
- .mulwrap => try genWrapOp(o, inst.castTag(.mulwrap).?, " * ", "mulw_"),
- // TODO use a different strategy for div that communicates to the optimizer
- // that wrapping is UB.
- .div => try genBinOp(o, inst.castTag(.div).?, " / "),
+ const air_tags = o.air.instructions.items(.tag);
- .constant => unreachable, // excluded from function bodies
- .alloc => try genAlloc(o, inst.castTag(.alloc).?),
- .arg => genArg(o),
- .assembly => try genAsm(o, inst.castTag(.assembly).?),
- .block => try genBlock(o, inst.castTag(.block).?),
- .bitcast => try genBitcast(o, inst.castTag(.bitcast).?),
- .breakpoint => try genBreakpoint(o, inst.castTag(.breakpoint).?),
- .call => try genCall(o, inst.castTag(.call).?),
- .cmp_eq => try genBinOp(o, inst.castTag(.cmp_eq).?, " == "),
- .cmp_gt => try genBinOp(o, inst.castTag(.cmp_gt).?, " > "),
- .cmp_gte => try genBinOp(o, inst.castTag(.cmp_gte).?, " >= "),
- .cmp_lt => try genBinOp(o, inst.castTag(.cmp_lt).?, " < "),
- .cmp_lte => try genBinOp(o, inst.castTag(.cmp_lte).?, " <= "),
- .cmp_neq => try genBinOp(o, inst.castTag(.cmp_neq).?, " != "),
- .dbg_stmt => try genDbgStmt(o, inst.castTag(.dbg_stmt).?),
- .intcast => try genIntCast(o, inst.castTag(.intcast).?),
- .load => try genLoad(o, inst.castTag(.load).?),
- .ret => try genRet(o, inst.castTag(.ret).?),
- .retvoid => try genRetVoid(o),
- .store => try genStore(o, inst.castTag(.store).?),
- .unreach => try genUnreach(o, inst.castTag(.unreach).?),
- .loop => try genLoop(o, inst.castTag(.loop).?),
- .condbr => try genCondBr(o, inst.castTag(.condbr).?),
- .br => try genBr(o, inst.castTag(.br).?),
- .br_void => try genBrVoid(o, inst.castTag(.br_void).?.block),
- .switchbr => try genSwitchBr(o, inst.castTag(.switchbr).?),
- // bool_and and bool_or are non-short-circuit operations
- .bool_and => try genBinOp(o, inst.castTag(.bool_and).?, " & "),
- .bool_or => try genBinOp(o, inst.castTag(.bool_or).?, " | "),
- .bit_and => try genBinOp(o, inst.castTag(.bit_and).?, " & "),
- .bit_or => try genBinOp(o, inst.castTag(.bit_or).?, " | "),
- .xor => try genBinOp(o, inst.castTag(.xor).?, " ^ "),
- .not => try genUnOp(o, inst.castTag(.not).?, "!"),
- .is_null => try genIsNull(o, inst.castTag(.is_null).?),
- .is_non_null => try genIsNull(o, inst.castTag(.is_non_null).?),
- .is_null_ptr => try genIsNull(o, inst.castTag(.is_null_ptr).?),
- .is_non_null_ptr => try genIsNull(o, inst.castTag(.is_non_null_ptr).?),
- .wrap_optional => try genWrapOptional(o, inst.castTag(.wrap_optional).?),
- .optional_payload => try genOptionalPayload(o, inst.castTag(.optional_payload).?),
- .optional_payload_ptr => try genOptionalPayload(o, inst.castTag(.optional_payload_ptr).?),
- .ref => try genRef(o, inst.castTag(.ref).?),
- .struct_field_ptr => try genStructFieldPtr(o, inst.castTag(.struct_field_ptr).?),
+ for (body) |inst| {
+ const result_value = switch (air_tags[inst]) {
+ //// TODO use a different strategy for add that communicates to the optimizer
+ //// that wrapping is UB.
+ //.add => try genBinOp(o, inst.castTag(.add).?, " + "),
+ //.addwrap => try genWrapOp(o, inst.castTag(.addwrap).?, " + ", "addw_"),
+ //// TODO use a different strategy for sub that communicates to the optimizer
+ //// that wrapping is UB.
+ //.sub => try genBinOp(o, inst.castTag(.sub).?, " - "),
+ //.subwrap => try genWrapOp(o, inst.castTag(.subwrap).?, " - ", "subw_"),
+ //// TODO use a different strategy for mul that communicates to the optimizer
+ //// that wrapping is UB.
+ //.mul => try genBinOp(o, inst.castTag(.sub).?, " * "),
+ //.mulwrap => try genWrapOp(o, inst.castTag(.mulwrap).?, " * ", "mulw_"),
+ //// TODO use a different strategy for div that communicates to the optimizer
+ //// that wrapping is UB.
+ //.div => try genBinOp(o, inst.castTag(.div).?, " / "),
- .is_err => try genIsErr(o, inst.castTag(.is_err).?, "", ".", "!="),
- .is_non_err => try genIsErr(o, inst.castTag(.is_non_err).?, "", ".", "=="),
- .is_err_ptr => try genIsErr(o, inst.castTag(.is_err_ptr).?, "*", "->", "!="),
- .is_non_err_ptr => try genIsErr(o, inst.castTag(.is_non_err_ptr).?, "*", "->", "=="),
+ //.constant => unreachable, // excluded from function bodies
+ //.alloc => try genAlloc(o, inst.castTag(.alloc).?),
+ //.arg => genArg(o),
+ //.assembly => try genAsm(o, inst.castTag(.assembly).?),
+ //.block => try genBlock(o, inst.castTag(.block).?),
+ //.bitcast => try genBitcast(o, inst.castTag(.bitcast).?),
+ //.breakpoint => try genBreakpoint(o, inst.castTag(.breakpoint).?),
+ //.call => try genCall(o, inst.castTag(.call).?),
+ //.cmp_eq => try genBinOp(o, inst.castTag(.cmp_eq).?, " == "),
+ //.cmp_gt => try genBinOp(o, inst.castTag(.cmp_gt).?, " > "),
+ //.cmp_gte => try genBinOp(o, inst.castTag(.cmp_gte).?, " >= "),
+ //.cmp_lt => try genBinOp(o, inst.castTag(.cmp_lt).?, " < "),
+ //.cmp_lte => try genBinOp(o, inst.castTag(.cmp_lte).?, " <= "),
+ //.cmp_neq => try genBinOp(o, inst.castTag(.cmp_neq).?, " != "),
+ //.dbg_stmt => try genDbgStmt(o, inst.castTag(.dbg_stmt).?),
+ //.intcast => try genIntCast(o, inst.castTag(.intcast).?),
+ //.load => try genLoad(o, inst.castTag(.load).?),
+ //.ret => try genRet(o, inst.castTag(.ret).?),
+ //.retvoid => try genRetVoid(o),
+ //.store => try genStore(o, inst.castTag(.store).?),
+ //.unreach => try genUnreach(o, inst.castTag(.unreach).?),
+ //.loop => try genLoop(o, inst.castTag(.loop).?),
+ //.condbr => try genCondBr(o, inst.castTag(.condbr).?),
+ //.br => try genBr(o, inst.castTag(.br).?),
+ //.br_void => try genBrVoid(o, inst.castTag(.br_void).?.block),
+ //.switchbr => try genSwitchBr(o, inst.castTag(.switchbr).?),
+ //// bool_and and bool_or are non-short-circuit operations
+ //.bool_and => try genBinOp(o, inst.castTag(.bool_and).?, " & "),
+ //.bool_or => try genBinOp(o, inst.castTag(.bool_or).?, " | "),
+ //.bit_and => try genBinOp(o, inst.castTag(.bit_and).?, " & "),
+ //.bit_or => try genBinOp(o, inst.castTag(.bit_or).?, " | "),
+ //.xor => try genBinOp(o, inst.castTag(.xor).?, " ^ "),
+ //.not => try genUnOp(o, inst.castTag(.not).?, "!"),
+ //.is_null => try genIsNull(o, inst.castTag(.is_null).?),
+ //.is_non_null => try genIsNull(o, inst.castTag(.is_non_null).?),
+ //.is_null_ptr => try genIsNull(o, inst.castTag(.is_null_ptr).?),
+ //.is_non_null_ptr => try genIsNull(o, inst.castTag(.is_non_null_ptr).?),
+ //.wrap_optional => try genWrapOptional(o, inst.castTag(.wrap_optional).?),
+ //.optional_payload => try genOptionalPayload(o, inst.castTag(.optional_payload).?),
+ //.optional_payload_ptr => try genOptionalPayload(o, inst.castTag(.optional_payload_ptr).?),
+ //.ref => try genRef(o, inst.castTag(.ref).?),
+ //.struct_field_ptr => try genStructFieldPtr(o, inst.castTag(.struct_field_ptr).?),
- .unwrap_errunion_payload => try genUnwrapErrUnionPay(o, inst.castTag(.unwrap_errunion_payload).?),
- .unwrap_errunion_err => try genUnwrapErrUnionErr(o, inst.castTag(.unwrap_errunion_err).?),
- .unwrap_errunion_payload_ptr => try genUnwrapErrUnionPay(o, inst.castTag(.unwrap_errunion_payload_ptr).?),
- .unwrap_errunion_err_ptr => try genUnwrapErrUnionErr(o, inst.castTag(.unwrap_errunion_err_ptr).?),
- .wrap_errunion_payload => try genWrapErrUnionPay(o, inst.castTag(.wrap_errunion_payload).?),
- .wrap_errunion_err => try genWrapErrUnionErr(o, inst.castTag(.wrap_errunion_err).?),
- .br_block_flat => return o.dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement codegen for br_block_flat", .{}),
- .ptrtoint => return o.dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement codegen for ptrtoint", .{}),
- .varptr => try genVarPtr(o, inst.castTag(.varptr).?),
- .floatcast => return o.dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement codegen for floatcast", .{}),
+ //.is_err => try genIsErr(o, inst.castTag(.is_err).?, "", ".", "!="),
+ //.is_non_err => try genIsErr(o, inst.castTag(.is_non_err).?, "", ".", "=="),
+ //.is_err_ptr => try genIsErr(o, inst.castTag(.is_err_ptr).?, "*", "->", "!="),
+ //.is_non_err_ptr => try genIsErr(o, inst.castTag(.is_non_err_ptr).?, "*", "->", "=="),
+ //.unwrap_errunion_payload => try genUnwrapErrUnionPay(o, inst.castTag(.unwrap_errunion_payload).?),
+ //.unwrap_errunion_err => try genUnwrapErrUnionErr(o, inst.castTag(.unwrap_errunion_err).?),
+ //.unwrap_errunion_payload_ptr => try genUnwrapErrUnionPay(o, inst.castTag(.unwrap_errunion_payload_ptr).?),
+ //.unwrap_errunion_err_ptr => try genUnwrapErrUnionErr(o, inst.castTag(.unwrap_errunion_err_ptr).?),
+ //.wrap_errunion_payload => try genWrapErrUnionPay(o, inst.castTag(.wrap_errunion_payload).?),
+ //.wrap_errunion_err => try genWrapErrUnionErr(o, inst.castTag(.wrap_errunion_err).?),
+ //.br_block_flat => return o.dg.fail("TODO: C backend: implement codegen for br_block_flat", .{}),
+ //.ptrtoint => return o.dg.fail("TODO: C backend: implement codegen for ptrtoint", .{}),
+ //.varptr => try genVarPtr(o, inst.castTag(.varptr).?),
+ //.floatcast => return o.dg.fail("TODO: C backend: implement codegen for floatcast", .{}),
+ else => return o.dg.fail("TODO: C backend: rework AIR memory layout", .{}),
};
switch (result_value) {
.none => {},
@@ -1060,7 +1070,7 @@ fn genWrapOp(o: *Object, inst: *Inst.BinOp, str_op: [*:0]const u8, fn_op: [*:0]c
}
if (bits > 64) {
return o.dg.fail(.{ .node_offset = 0 }, "TODO: C backend: genWrapOp for large integers", .{});
return o.dg.fail("TODO: C backend: genWrapOp for large integers", .{});
}
var min_buf: [80]u8 = undefined;
@@ -1227,7 +1237,7 @@ fn genCall(o: *Object, inst: *Inst.Call) !CValue {
try writer.writeAll(");\n");
return result_local;
} else {
return o.dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement function pointers", .{});
return o.dg.fail("TODO: C backend: implement function pointers", .{});
}
}
@@ -1390,13 +1400,13 @@ fn genAsm(o: *Object, as: *Inst.Assembly) !CValue {
try o.writeCValue(writer, arg_c_value);
try writer.writeAll(";\n");
} else {
return o.dg.fail(.{ .node_offset = 0 }, "TODO non-explicit inline asm regs", .{});
return o.dg.fail("TODO non-explicit inline asm regs", .{});
}
}
const volatile_string: []const u8 = if (as.is_volatile) "volatile " else "";
try writer.print("__asm {s}(\"{s}\"", .{ volatile_string, as.asm_source });
if (as.output_constraint) |_| {
return o.dg.fail(.{ .node_offset = 0 }, "TODO: CBE inline asm output", .{});
return o.dg.fail("TODO: CBE inline asm output", .{});
}
if (as.inputs.len > 0) {
if (as.output_constraint == null) {
@@ -1421,7 +1431,7 @@ fn genAsm(o: *Object, as: *Inst.Assembly) !CValue {
if (as.base.isUnused())
return CValue.none;
return o.dg.fail(.{ .node_offset = 0 }, "TODO: C backend: inline asm expression result used", .{});
return o.dg.fail("TODO: C backend: inline asm expression result used", .{});
}
fn genIsNull(o: *Object, inst: *Inst.UnOp) !CValue {
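Restating the new `.constant` arm above as a standalone helper, to make the data flow explicit: a constant's `ty_pl` data carries a type `Ref` plus an index into `air.values`. Illustrative only; the import paths assume the helper sits next to Air.zig:

```zig
const Air = @import("Air.zig");
const Type = @import("type.zig").Type;
const Value = @import("value.zig").Value;

// Splits a `constant` instruction into its type and value, exactly as
// the .constant arm of writeCValue does.
fn constantParts(air: Air, inst: Air.Inst.Index) struct { ty: Type, val: Value } {
    const ty_pl = air.instructions.items(.data)[inst].ty_pl;
    return .{
        .ty = air.getRefType(ty_pl.ty),
        .val = air.values[ty_pl.payload],
    };
}
```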

View File

@@ -2519,6 +2519,9 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void {
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
var dbg_line_buffer = std.ArrayList(u8).init(self.base.allocator);
defer dbg_line_buffer.deinit();
var dbg_info_buffer = std.ArrayList(u8).init(self.base.allocator);
defer dbg_info_buffer.deinit();

View File

@@ -1700,7 +1700,7 @@ pub const Value = extern union {
/// peer type resolution. This is stored in a separate list so that
/// the items are contiguous in memory and thus can be passed to
/// `Module.resolvePeerTypes`.
- stored_inst_list: std.ArrayListUnmanaged(*ir.Inst) = .{},
+ stored_inst_list: std.ArrayListUnmanaged(Air.Inst.Index) = .{},
},
};