Merge branch 'register-allocation'

Andrew Kelley 2020-07-20 13:12:25 -07:00
commit 4abf119d95
10 changed files with 1897 additions and 1583 deletions

View File

@ -1349,8 +1349,8 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool {
fn analyzeBodyValueAsType(self: *Module, block_scope: *Scope.Block, body: zir.Module.Body) !Type {
try self.analyzeBody(&block_scope.base, body);
for (block_scope.instructions.items) |inst| {
if (inst.cast(Inst.Ret)) |ret| {
const val = try self.resolveConstValue(&block_scope.base, ret.args.operand);
if (inst.castTag(.ret)) |ret| {
const val = try self.resolveConstValue(&block_scope.base, ret.operand);
return val.toType();
} else {
return self.fail(&block_scope.base, inst.src, "unable to resolve comptime value", .{});
@ -1938,16 +1938,132 @@ fn analyzeExport(self: *Module, scope: *Scope, src: usize, symbol_name: []const
};
}
fn addNewInstArgs(
fn addNoOp(
self: *Module,
block: *Scope.Block,
src: usize,
ty: Type,
comptime T: type,
args: Inst.Args(T),
comptime tag: Inst.Tag,
) !*Inst {
const inst = try self.addNewInst(block, src, ty, T);
inst.args = args;
const inst = try block.arena.create(tag.Type());
inst.* = .{
.base = .{
.tag = tag,
.ty = ty,
.src = src,
},
};
try block.instructions.append(self.gpa, &inst.base);
return &inst.base;
}
fn addUnOp(
self: *Module,
block: *Scope.Block,
src: usize,
ty: Type,
tag: Inst.Tag,
operand: *Inst,
) !*Inst {
const inst = try block.arena.create(Inst.UnOp);
inst.* = .{
.base = .{
.tag = tag,
.ty = ty,
.src = src,
},
.operand = operand,
};
try block.instructions.append(self.gpa, &inst.base);
return &inst.base;
}
fn addBinOp(
self: *Module,
block: *Scope.Block,
src: usize,
ty: Type,
tag: Inst.Tag,
lhs: *Inst,
rhs: *Inst,
) !*Inst {
const inst = try block.arena.create(Inst.BinOp);
inst.* = .{
.base = .{
.tag = tag,
.ty = ty,
.src = src,
},
.lhs = lhs,
.rhs = rhs,
};
try block.instructions.append(self.gpa, &inst.base);
return &inst.base;
}
fn addBr(
self: *Module,
scope_block: *Scope.Block,
src: usize,
target_block: *Inst.Block,
operand: *Inst,
) !*Inst {
const inst = try scope_block.arena.create(Inst.Br);
inst.* = .{
.base = .{
.tag = .br,
.ty = Type.initTag(.noreturn),
.src = src,
},
.operand = operand,
.block = target_block,
};
try scope_block.instructions.append(self.gpa, &inst.base);
return &inst.base;
}
fn addCondBr(
self: *Module,
block: *Scope.Block,
src: usize,
condition: *Inst,
then_body: ir.Body,
else_body: ir.Body,
) !*Inst {
const inst = try block.arena.create(Inst.CondBr);
inst.* = .{
.base = .{
.tag = .condbr,
.ty = Type.initTag(.noreturn),
.src = src,
},
.condition = condition,
.then_body = then_body,
.else_body = else_body,
};
try block.instructions.append(self.gpa, &inst.base);
return &inst.base;
}
fn addCall(
self: *Module,
block: *Scope.Block,
src: usize,
ty: Type,
func: *Inst,
args: []const *Inst,
) !*Inst {
const inst = try block.arena.create(Inst.Call);
inst.* = .{
.base = .{
.tag = .call,
.ty = ty,
.src = src,
},
.func = func,
.args = args,
};
try block.instructions.append(self.gpa, &inst.base);
return &inst.base;
}
@ -2017,7 +2133,6 @@ fn addNewInst(self: *Module, block: *Scope.Block, src: usize, ty: Type, comptime
.ty = ty,
.src = src,
},
.args = undefined,
};
try block.instructions.append(self.gpa, &inst.base);
return inst;
@ -2269,7 +2384,7 @@ fn analyzeInstArg(self: *Module, scope: *Scope, inst: *zir.Inst.Arg) InnerError!
});
}
const param_type = fn_ty.fnParamType(param_index);
return self.addNewInstArgs(b, inst.base.src, param_type, Inst.Arg, {});
return self.addNoOp(b, inst.base.src, param_type, .arg);
}
fn analyzeInstBlock(self: *Module, scope: *Scope, inst: *zir.Inst.Block) InnerError!*Inst {
@ -2285,7 +2400,7 @@ fn analyzeInstBlock(self: *Module, scope: *Scope, inst: *zir.Inst.Block) InnerEr
.ty = undefined, // Set after analysis.
.src = inst.base.src,
},
.args = undefined,
.body = undefined,
};
var child_block: Scope.Block = .{
@ -2316,13 +2431,13 @@ fn analyzeInstBlock(self: *Module, scope: *Scope, inst: *zir.Inst.Block) InnerEr
// to emit a jump instruction to after the block when it encounters the break.
try parent_block.instructions.append(self.gpa, &block_inst.base);
block_inst.base.ty = try self.resolvePeerTypes(scope, label.results.items);
block_inst.args.body = .{ .instructions = try parent_block.arena.dupe(*Inst, child_block.instructions.items) };
block_inst.body = .{ .instructions = try parent_block.arena.dupe(*Inst, child_block.instructions.items) };
return &block_inst.base;
}
fn analyzeInstBreakpoint(self: *Module, scope: *Scope, inst: *zir.Inst.Breakpoint) InnerError!*Inst {
const b = try self.requireRuntimeBlock(scope, inst.base.src);
return self.addNewInstArgs(b, inst.base.src, Type.initTag(.void), Inst.Breakpoint, {});
return self.addNoOp(b, inst.base.src, Type.initTag(.void), .breakpoint);
}
fn analyzeInstBreak(self: *Module, scope: *Scope, inst: *zir.Inst.Break) InnerError!*Inst {
@ -2350,10 +2465,7 @@ fn analyzeBreak(
if (label.zir_block == zir_block) {
try label.results.append(self.gpa, operand);
const b = try self.requireRuntimeBlock(scope, src);
return self.addNewInstArgs(b, src, Type.initTag(.noreturn), Inst.Br, .{
.block = label.block_inst,
.operand = operand,
});
return self.addBr(b, src, label.block_inst, operand);
}
}
opt_block = block.parent;
@ -2484,10 +2596,7 @@ fn analyzeInstCall(self: *Module, scope: *Scope, inst: *zir.Inst.Call) InnerErro
}
const b = try self.requireRuntimeBlock(scope, inst.base.src);
return self.addNewInstArgs(b, inst.base.src, Type.initTag(.void), Inst.Call, .{
.func = func,
.args = casted_args,
});
return self.addCall(b, inst.base.src, Type.initTag(.void), func, casted_args);
}
fn analyzeInstFn(self: *Module, scope: *Scope, fn_inst: *zir.Inst.Fn) InnerError!*Inst {
@ -2570,14 +2679,14 @@ fn analyzeInstAs(self: *Module, scope: *Scope, as: *zir.Inst.As) InnerError!*Ins
}
fn analyzeInstPtrToInt(self: *Module, scope: *Scope, ptrtoint: *zir.Inst.PtrToInt) InnerError!*Inst {
const ptr = try self.resolveInst(scope, ptrtoint.positionals.ptr);
const ptr = try self.resolveInst(scope, ptrtoint.positionals.operand);
if (ptr.ty.zigTypeTag() != .Pointer) {
return self.fail(scope, ptrtoint.positionals.ptr.src, "expected pointer, found '{}'", .{ptr.ty});
return self.fail(scope, ptrtoint.positionals.operand.src, "expected pointer, found '{}'", .{ptr.ty});
}
// TODO handle known-pointer-address
const b = try self.requireRuntimeBlock(scope, ptrtoint.base.src);
const ty = Type.initTag(.usize);
return self.addNewInstArgs(b, ptrtoint.base.src, ty, Inst.PtrToInt, .{ .ptr = ptr });
return self.addUnOp(b, ptrtoint.base.src, ty, .ptrtoint, ptr);
}
fn analyzeInstFieldPtr(self: *Module, scope: *Scope, fieldptr: *zir.Inst.FieldPtr) InnerError!*Inst {
@ -2734,10 +2843,7 @@ fn analyzeInstAdd(self: *Module, scope: *Scope, inst: *zir.Inst.Add) InnerError!
}
const b = try self.requireRuntimeBlock(scope, inst.base.src);
return self.addNewInstArgs(b, inst.base.src, lhs.ty, Inst.Add, .{
.lhs = lhs,
.rhs = rhs,
});
return self.addBinOp(b, inst.base.src, lhs.ty, .add, lhs, rhs);
}
return self.fail(scope, inst.base.src, "TODO analyze add for {} + {}", .{ lhs.ty.zigTypeTag(), rhs.ty.zigTypeTag() });
}
@ -2783,14 +2889,22 @@ fn analyzeInstAsm(self: *Module, scope: *Scope, assembly: *zir.Inst.Asm) InnerEr
}
const b = try self.requireRuntimeBlock(scope, assembly.base.src);
return self.addNewInstArgs(b, assembly.base.src, return_type, Inst.Assembly, .{
const inst = try b.arena.create(Inst.Assembly);
inst.* = .{
.base = .{
.tag = .assembly,
.ty = return_type,
.src = assembly.base.src,
},
.asm_source = asm_source,
.is_volatile = assembly.kw_args.@"volatile",
.output = output,
.inputs = inputs,
.clobbers = clobbers,
.args = args,
});
};
try b.instructions.append(self.gpa, &inst.base);
return &inst.base;
}
fn analyzeInstCmp(self: *Module, scope: *Scope, inst: *zir.Inst.Cmp) InnerError!*Inst {
@ -2818,15 +2932,12 @@ fn analyzeInstCmp(self: *Module, scope: *Scope, inst: *zir.Inst.Cmp) InnerError!
return self.constBool(scope, inst.base.src, if (op == .eq) is_null else !is_null);
}
const b = try self.requireRuntimeBlock(scope, inst.base.src);
switch (op) {
.eq => return self.addNewInstArgs(b, inst.base.src, Type.initTag(.bool), Inst.IsNull, .{
.operand = opt_operand,
}),
.neq => return self.addNewInstArgs(b, inst.base.src, Type.initTag(.bool), Inst.IsNonNull, .{
.operand = opt_operand,
}),
const inst_tag: Inst.Tag = switch (op) {
.eq => .isnull,
.neq => .isnonnull,
else => unreachable,
}
};
return self.addUnOp(b, inst.base.src, Type.initTag(.bool), inst_tag, opt_operand);
} else if (is_equality_cmp and
((lhs_ty_tag == .Null and rhs.ty.isCPtr()) or (rhs_ty_tag == .Null and lhs.ty.isCPtr())))
{
@ -2861,7 +2972,7 @@ fn analyzeInstBoolNot(self: *Module, scope: *Scope, inst: *zir.Inst.BoolNot) Inn
return self.constBool(scope, inst.base.src, !val.toBool());
}
const b = try self.requireRuntimeBlock(scope, inst.base.src);
return self.addNewInstArgs(b, inst.base.src, bool_type, Inst.Not, .{ .operand = operand });
return self.addUnOp(b, inst.base.src, bool_type, .not, operand);
}
fn analyzeInstIsNull(self: *Module, scope: *Scope, inst: *zir.Inst.IsNull) InnerError!*Inst {
@ -2879,7 +2990,7 @@ fn analyzeInstCondBr(self: *Module, scope: *Scope, inst: *zir.Inst.CondBr) Inner
const cond = try self.coerce(scope, Type.initTag(.bool), uncasted_cond);
if (try self.resolveDefinedValue(scope, cond)) |cond_val| {
const body = if (cond_val.toBool()) &inst.positionals.true_body else &inst.positionals.false_body;
const body = if (cond_val.toBool()) &inst.positionals.then_body else &inst.positionals.else_body;
try self.analyzeBody(scope, body.*);
return self.constVoid(scope, inst.base.src);
}
@ -2894,7 +3005,7 @@ fn analyzeInstCondBr(self: *Module, scope: *Scope, inst: *zir.Inst.CondBr) Inner
.arena = parent_block.arena,
};
defer true_block.instructions.deinit(self.gpa);
try self.analyzeBody(&true_block.base, inst.positionals.true_body);
try self.analyzeBody(&true_block.base, inst.positionals.then_body);
var false_block: Scope.Block = .{
.parent = parent_block,
@ -2904,13 +3015,11 @@ fn analyzeInstCondBr(self: *Module, scope: *Scope, inst: *zir.Inst.CondBr) Inner
.arena = parent_block.arena,
};
defer false_block.instructions.deinit(self.gpa);
try self.analyzeBody(&false_block.base, inst.positionals.false_body);
try self.analyzeBody(&false_block.base, inst.positionals.else_body);
return self.addNewInstArgs(parent_block, inst.base.src, Type.initTag(.noreturn), Inst.CondBr, Inst.Args(Inst.CondBr){
.condition = cond,
.true_body = .{ .instructions = try scope.arena().dupe(*Inst, true_block.instructions.items) },
.false_body = .{ .instructions = try scope.arena().dupe(*Inst, false_block.instructions.items) },
});
const then_body: ir.Body = .{ .instructions = try scope.arena().dupe(*Inst, true_block.instructions.items) };
const else_body: ir.Body = .{ .instructions = try scope.arena().dupe(*Inst, false_block.instructions.items) };
return self.addCondBr(parent_block, inst.base.src, cond, then_body, else_body);
}
fn wantSafety(self: *Module, scope: *Scope) bool {
@ -2926,20 +3035,20 @@ fn analyzeInstUnreachable(self: *Module, scope: *Scope, unreach: *zir.Inst.Unrea
const b = try self.requireRuntimeBlock(scope, unreach.base.src);
if (self.wantSafety(scope)) {
// TODO Once we have a panic function to call, call it here instead of this.
_ = try self.addNewInstArgs(b, unreach.base.src, Type.initTag(.void), Inst.Breakpoint, {});
_ = try self.addNoOp(b, unreach.base.src, Type.initTag(.void), .breakpoint);
}
return self.addNewInstArgs(b, unreach.base.src, Type.initTag(.noreturn), Inst.Unreach, {});
return self.addNoOp(b, unreach.base.src, Type.initTag(.noreturn), .unreach);
}
fn analyzeInstRet(self: *Module, scope: *Scope, inst: *zir.Inst.Return) InnerError!*Inst {
const operand = try self.resolveInst(scope, inst.positionals.operand);
const b = try self.requireRuntimeBlock(scope, inst.base.src);
return self.addNewInstArgs(b, inst.base.src, Type.initTag(.noreturn), Inst.Ret, .{ .operand = operand });
return self.addUnOp(b, inst.base.src, Type.initTag(.noreturn), .ret, operand);
}
fn analyzeInstRetVoid(self: *Module, scope: *Scope, inst: *zir.Inst.ReturnVoid) InnerError!*Inst {
const b = try self.requireRuntimeBlock(scope, inst.base.src);
return self.addNewInstArgs(b, inst.base.src, Type.initTag(.noreturn), Inst.RetVoid, {});
return self.addNoOp(b, inst.base.src, Type.initTag(.noreturn), .retvoid);
}
fn analyzeBody(self: *Module, scope: *Scope, body: zir.Module.Body) !void {
@ -3027,11 +3136,7 @@ fn cmpNumeric(
};
const casted_lhs = try self.coerce(scope, dest_type, lhs);
const casted_rhs = try self.coerce(scope, dest_type, rhs);
return self.addNewInstArgs(b, src, dest_type, Inst.Cmp, .{
.lhs = casted_lhs,
.rhs = casted_rhs,
.op = op,
});
return self.addBinOp(b, src, dest_type, Inst.Tag.fromCmpOp(op), casted_lhs, casted_rhs);
}
// For mixed unsigned integer sizes, implicit cast both operands to the larger integer.
// For mixed signed and unsigned integers, implicit cast both operands to a signed
@ -3131,11 +3236,7 @@ fn cmpNumeric(
const casted_lhs = try self.coerce(scope, dest_type, lhs);
const casted_rhs = try self.coerce(scope, dest_type, rhs);
return self.addNewInstArgs(b, src, Type.initTag(.bool), Inst.Cmp, .{
.lhs = casted_lhs,
.rhs = casted_rhs,
.op = op,
});
return self.addBinOp(b, src, Type.initTag(.bool), Inst.Tag.fromCmpOp(op), casted_lhs, casted_rhs);
}
fn makeIntType(self: *Module, scope: *Scope, signed: bool, bits: u16) !Type {
@ -3236,7 +3337,7 @@ fn bitcast(self: *Module, scope: *Scope, dest_type: Type, inst: *Inst) !*Inst {
}
// TODO validate the type size and other compile errors
const b = try self.requireRuntimeBlock(scope, inst.src);
return self.addNewInstArgs(b, inst.src, dest_type, Inst.BitCast, .{ .operand = inst });
return self.addUnOp(b, inst.src, dest_type, .bitcast, inst);
}
fn coerceArrayPtrToSlice(self: *Module, scope: *Scope, dest_type: Type, inst: *Inst) !*Inst {

View File

@ -173,8 +173,8 @@ fn ifExpr(mod: *Module, scope: *Scope, if_node: *ast.Node.If) InnerError!*zir.In
const if_src = tree.token_locs[if_node.if_token].start;
const condbr = try mod.addZIRInstSpecial(&block_scope.base, if_src, zir.Inst.CondBr, .{
.condition = cond,
.true_body = undefined, // populated below
.false_body = undefined, // populated below
.then_body = undefined, // populated below
.else_body = undefined, // populated below
}, .{});
const block = try mod.addZIRInstBlock(scope, if_src, .{
@ -196,7 +196,7 @@ fn ifExpr(mod: *Module, scope: *Scope, if_node: *ast.Node.If) InnerError!*zir.In
.operand = then_result,
}, .{});
}
condbr.positionals.true_body = .{
condbr.positionals.then_body = .{
.instructions = try then_scope.arena.dupe(*zir.Inst, then_scope.instructions.items),
};
@ -225,7 +225,7 @@ fn ifExpr(mod: *Module, scope: *Scope, if_node: *ast.Node.If) InnerError!*zir.In
.block = block,
}, .{});
}
condbr.positionals.false_body = .{
condbr.positionals.else_body = .{
.instructions = try else_scope.arena.dupe(*zir.Inst, else_scope.instructions.items),
};

File diff suppressed because it is too large

View File

@ -92,9 +92,9 @@ fn genFn(file: *C, decl: *Decl) !void {
for (instructions) |inst| {
try writer.writeAll("\n\t");
switch (inst.tag) {
.assembly => try genAsm(file, inst.cast(Inst.Assembly).?, decl),
.call => try genCall(file, inst.cast(Inst.Call).?, decl),
.ret => try genRet(file, inst.cast(Inst.Ret).?, decl, tv.ty.fnReturnType()),
.assembly => try genAsm(file, inst.castTag(.assembly).?, decl),
.call => try genCall(file, inst.castTag(.call).?, decl),
.ret => try genRet(file, inst.castTag(.ret).?, decl, tv.ty.fnReturnType()),
.retvoid => try file.main.writer().print("return;", .{}),
else => |e| return file.fail(decl.src(), "TODO implement C codegen for {}", .{e}),
}
@ -105,9 +105,9 @@ fn genFn(file: *C, decl: *Decl) !void {
try writer.writeAll("}\n\n");
}
fn genRet(file: *C, inst: *Inst.Ret, decl: *Decl, expected_return_type: Type) !void {
fn genRet(file: *C, inst: *Inst.UnOp, decl: *Decl, expected_return_type: Type) !void {
const writer = file.main.writer();
const ret_value = inst.args.operand;
const ret_value = inst.operand;
const value = ret_value.value().?;
if (expected_return_type.eql(ret_value.ty))
return file.fail(decl.src(), "TODO return {}", .{expected_return_type})
@ -126,7 +126,7 @@ fn genRet(file: *C, inst: *Inst.Ret, decl: *Decl, expected_return_type: Type) !v
fn genCall(file: *C, inst: *Inst.Call, decl: *Decl) !void {
const writer = file.main.writer();
const header = file.header.writer();
if (inst.args.func.cast(Inst.Constant)) |func_inst| {
if (inst.func.castTag(.constant)) |func_inst| {
if (func_inst.val.cast(Value.Payload.Function)) |func_val| {
const target = func_val.func.owner_decl;
const target_ty = target.typed_value.most_recent.typed_value.ty;
@ -144,7 +144,7 @@ fn genCall(file: *C, inst: *Inst.Call, decl: *Decl) !void {
} else {
return file.fail(decl.src(), "TODO non-function call target?", .{});
}
if (inst.args.args.len != 0) {
if (inst.args.len != 0) {
return file.fail(decl.src(), "TODO function arguments", .{});
}
} else {
@ -152,14 +152,13 @@ fn genCall(file: *C, inst: *Inst.Call, decl: *Decl) !void {
}
}
fn genAsm(file: *C, inst: *Inst.Assembly, decl: *Decl) !void {
const as = inst.args;
fn genAsm(file: *C, as: *Inst.Assembly, decl: *Decl) !void {
const writer = file.main.writer();
for (as.inputs) |i, index| {
if (i[0] == '{' and i[i.len - 1] == '}') {
const reg = i[1 .. i.len - 1];
const arg = as.args[index];
if (arg.cast(Inst.Constant)) |c| {
if (arg.castTag(.constant)) |c| {
if (c.val.tag() == .int_u64) {
try writer.writeAll("register ");
try renderType(file, writer, arg.ty, decl.src());
@ -190,7 +189,7 @@ fn genAsm(file: *C, inst: *Inst.Assembly, decl: *Decl) !void {
if (index > 0) {
try writer.writeAll(", ");
}
if (arg.cast(Inst.Constant)) |c| {
if (arg.castTag(.constant)) |c| {
try writer.print("\"\"({}_constant)", .{reg});
} else {
// This is blocked by the earlier test

View File

@ -25,6 +25,20 @@ pub const Register = enum(u8) {
pub fn id(self: @This()) u3 {
return @truncate(u3, @enumToInt(self));
}
/// Returns the index into `callee_preserved_regs`.
pub fn allocIndex(self: Register) ?u4 {
return switch (self) {
.eax, .ax, .al => 0,
.ecx, .cx, .cl => 1,
.edx, .dx, .dl => 2,
.esi, .si => 3,
.edi, .di => 4,
else => null,
};
}
};
// zig fmt: on
pub const callee_preserved_regs = [_]Register{ .eax, .ecx, .edx, .esi, .edi };

View File

@ -38,7 +38,7 @@ pub const Register = enum(u8) {
r8b, r9b, r10b, r11b, r12b, r13b, r14b, r15b,
/// Returns the bit-width of the register.
pub fn size(self: @This()) u7 {
pub fn size(self: Register) u7 {
return switch (@enumToInt(self)) {
0...15 => 64,
16...31 => 32,
@ -53,7 +53,7 @@ pub const Register = enum(u8) {
/// other variant of access to those registers, such as r8b, r15d, and so
/// on. This is needed because access to these registers requires special
/// handling via the REX prefix, via the B or R bits, depending on context.
pub fn isExtended(self: @This()) bool {
pub fn isExtended(self: Register) bool {
return @enumToInt(self) & 0x08 != 0;
}
@ -62,9 +62,29 @@ pub const Register = enum(u8) {
/// an instruction (@see isExtended), and requires special handling. The
/// lower three bits are often embedded directly in instructions (such as
/// the B8 variant of moves), or used in R/M bytes.
pub fn id(self: @This()) u4 {
pub fn id(self: Register) u4 {
return @truncate(u4, @enumToInt(self));
}
/// Returns the index into `callee_preserved_regs`.
pub fn allocIndex(self: Register) ?u4 {
return switch (self) {
.rax, .eax, .ax, .al => 0,
.rcx, .ecx, .cx, .cl => 1,
.rdx, .edx, .dx, .dl => 2,
.rsi, .esi, .si => 3,
.rdi, .edi, .di => 4,
.r8, .r8d, .r8w, .r8b => 5,
.r9, .r9d, .r9w, .r9b => 6,
.r10, .r10d, .r10w, .r10b => 7,
.r11, .r11d, .r11w, .r11b => 8,
else => null,
};
}
};
// zig fmt: on
/// These registers belong to the called function.
pub const callee_preserved_regs = [_]Register{ .rax, .rcx, .rdx, .rsi, .rdi, .r8, .r9, .r10, .r11 };
pub const c_abi_int_param_regs = [_]Register{ .rdi, .rsi, .rdx, .rcx, .r8, .r9 };
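// Illustrative sketch, not part of this commit: allocIndex pairs with
// callee_preserved_regs so the register allocator can map any variant of a
// register back to its allocation slot, e.g.:
//     const idx = Register.ecx.allocIndex().?; // 1
//     std.debug.assert(callee_preserved_regs[idx] == .rcx);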

View File

@ -38,7 +38,7 @@ pub const Inst = struct {
pub fn operandDies(self: Inst, index: DeathsBitIndex) bool {
assert(index < deaths_bits);
return @truncate(u1, self.deaths << index) != 0;
return @truncate(u1, self.deaths >> index) != 0;
}
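// Illustrative, not part of the diff: with deaths == 0b101, operands 0 and 2
// die at this instruction. Shifting right by `index` moves the bit of interest
// into the low position before the @truncate to u1, which is why the shift
// direction above changed from << to >>.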
pub fn specialOperandDeaths(self: Inst) bool {
@ -55,7 +55,12 @@ pub const Inst = struct {
breakpoint,
brvoid,
call,
cmp,
cmp_lt,
cmp_lte,
cmp_eq,
cmp_gte,
cmp_gt,
cmp_neq,
condbr,
constant,
isnonnull,
@ -66,13 +71,80 @@ pub const Inst = struct {
sub,
unreach,
not,
/// There is a one-to-one correspondence between tag and type for now,
/// but this will not always be the case. For example, binary operations
/// such as + and - will have different tags but the same type.
pub fn Type(tag: Tag) type {
return switch (tag) {
.retvoid,
.unreach,
.arg,
.breakpoint,
=> NoOp,
.ret,
.bitcast,
.not,
.isnonnull,
.isnull,
.ptrtoint,
=> UnOp,
.add,
.sub,
.cmp_lt,
.cmp_lte,
.cmp_eq,
.cmp_gte,
.cmp_gt,
.cmp_neq,
=> BinOp,
.assembly => Assembly,
.block => Block,
.br => Br,
.brvoid => BrVoid,
.call => Call,
.condbr => CondBr,
.constant => Constant,
};
}
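// Illustrative, not part of the diff: several tags share one payload type,
// so the following comptime checks hold for this enum:
//     comptime std.debug.assert(Tag.add.Type() == BinOp);
//     comptime std.debug.assert(Tag.cmp_eq.Type() == BinOp);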
pub fn fromCmpOp(op: std.math.CompareOperator) Tag {
return switch (op) {
.lt => .cmp_lt,
.lte => .cmp_lte,
.eq => .cmp_eq,
.gte => .cmp_gte,
.gt => .cmp_gt,
.neq => .cmp_neq,
};
}
};
/// Prefer `castTag` to this.
pub fn cast(base: *Inst, comptime T: type) ?*T {
if (base.tag != T.base_tag)
return null;
if (@hasField(T, "base_tag")) {
return base.castTag(T.base_tag);
}
inline for (@typeInfo(Tag).Enum.fields) |field| {
const tag = @intToEnum(Tag, field.value);
if (base.tag == tag) {
if (T == tag.Type()) {
return @fieldParentPtr(T, "base", base);
}
return null;
}
}
unreachable;
}
return @fieldParentPtr(T, "base", base);
pub fn castTag(base: *Inst, comptime tag: Tag) ?*tag.Type() {
if (base.tag == tag) {
return @fieldParentPtr(tag.Type(), "base", base);
}
return null;
}
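// Illustrative usage, mirroring call sites elsewhere in this commit: when the
// expected tag is known at the call site, castTag avoids the inline tag search
// that `cast` performs above:
//     if (inst.castTag(.ret)) |ret| _ = ret.operand;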
pub fn Args(comptime T: type) type {
@ -88,186 +160,219 @@ pub const Inst = struct {
return inst.val;
}
pub const Add = struct {
pub const base_tag = Tag.add;
pub fn cmpOperator(base: *Inst) ?std.math.CompareOperator {
return switch (base.tag) {
.cmp_lt => .lt,
.cmp_lte => .lte,
.cmp_eq => .eq,
.cmp_gte => .gte,
.cmp_gt => .gt,
.cmp_neq => .neq,
else => null,
};
}
pub fn operandCount(base: *Inst) usize {
inline for (@typeInfo(Tag).Enum.fields) |field| {
const tag = @intToEnum(Tag, field.value);
if (tag == base.tag) {
return @fieldParentPtr(tag.Type(), "base", base).operandCount();
}
}
unreachable;
}
pub fn getOperand(base: *Inst, index: usize) ?*Inst {
inline for (@typeInfo(Tag).Enum.fields) |field| {
const tag = @intToEnum(Tag, field.value);
if (tag == base.tag) {
return @fieldParentPtr(tag.Type(), "base", base).getOperand(index);
}
}
unreachable;
}
pub const NoOp = struct {
base: Inst,
args: struct {
lhs: *Inst,
rhs: *Inst,
},
pub fn operandCount(self: *const NoOp) usize {
return 0;
}
pub fn getOperand(self: *const NoOp, index: usize) ?*Inst {
return null;
}
};
pub const Arg = struct {
pub const base_tag = Tag.arg;
pub const UnOp = struct {
base: Inst,
args: void,
operand: *Inst,
pub fn operandCount(self: *const UnOp) usize {
return 1;
}
pub fn getOperand(self: *const UnOp, index: usize) ?*Inst {
if (index == 0)
return self.operand;
return null;
}
};
pub const BinOp = struct {
base: Inst,
lhs: *Inst,
rhs: *Inst,
pub fn operandCount(self: *const BinOp) usize {
return 2;
}
pub fn getOperand(self: *const BinOp, index: usize) ?*Inst {
var i = index;
if (i < 1)
return self.lhs;
i -= 1;
if (i < 1)
return self.rhs;
i -= 1;
return null;
}
};
pub const Assembly = struct {
pub const base_tag = Tag.assembly;
base: Inst,
args: struct {
asm_source: []const u8,
is_volatile: bool,
output: ?[]const u8,
inputs: []const []const u8,
clobbers: []const []const u8,
args: []const *Inst,
},
};
pub const BitCast = struct {
pub const base_tag = Tag.bitcast;
base: Inst,
args: struct {
operand: *Inst,
},
asm_source: []const u8,
is_volatile: bool,
output: ?[]const u8,
inputs: []const []const u8,
clobbers: []const []const u8,
args: []const *Inst,
pub fn operandCount(self: *const Assembly) usize {
return self.args.len;
}
pub fn getOperand(self: *const Assembly, index: usize) ?*Inst {
if (index < self.args.len)
return self.args[index];
return null;
}
};
pub const Block = struct {
pub const base_tag = Tag.block;
base: Inst,
args: struct {
body: Body,
},
body: Body,
/// This memory is reserved for codegen code to do whatever it needs to here.
codegen: codegen.BlockData = .{},
pub fn operandCount(self: *const Block) usize {
return 0;
}
pub fn getOperand(self: *const Block, index: usize) ?*Inst {
return null;
}
};
pub const Br = struct {
pub const base_tag = Tag.br;
base: Inst,
args: struct {
block: *Block,
operand: *Inst,
},
};
pub const Breakpoint = struct {
pub const base_tag = Tag.breakpoint;
base: Inst,
args: void,
block: *Block,
operand: *Inst,
pub fn operandCount(self: *const Br) usize {
return 1;
}
pub fn getOperand(self: *const Br, index: usize) ?*Inst {
if (index == 0)
return self.operand;
return null;
}
};
pub const BrVoid = struct {
pub const base_tag = Tag.brvoid;
base: Inst,
args: struct {
block: *Block,
},
block: *Block,
pub fn operandCount(self: *const BrVoid) usize {
return 0;
}
pub fn getOperand(self: *const BrVoid, index: usize) ?*Inst {
return null;
}
};
pub const Call = struct {
pub const base_tag = Tag.call;
base: Inst,
args: struct {
func: *Inst,
args: []const *Inst,
},
};
pub const Cmp = struct {
pub const base_tag = Tag.cmp;
base: Inst,
args: struct {
lhs: *Inst,
op: std.math.CompareOperator,
rhs: *Inst,
},
func: *Inst,
args: []const *Inst,
pub fn operandCount(self: *const Call) usize {
return self.args.len + 1;
}
pub fn getOperand(self: *const Call, index: usize) ?*Inst {
var i = index;
if (i < 1)
return self.func;
i -= 1;
if (i < self.args.len)
return self.args[i];
i -= self.args.len;
return null;
}
};
pub const CondBr = struct {
pub const base_tag = Tag.condbr;
base: Inst,
args: struct {
condition: *Inst,
true_body: Body,
false_body: Body,
},
condition: *Inst,
then_body: Body,
else_body: Body,
/// Set of instructions whose lifetimes end at the start of one of the branches.
/// The `true` branch is first: `deaths[0..true_death_count]`.
/// The `false` branch is next: `(deaths + true_death_count)[..false_death_count]`.
deaths: [*]*Inst = undefined,
true_death_count: u32 = 0,
false_death_count: u32 = 0,
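// Illustrative sketch, not part of the diff: recovering the two slices
// described in the doc comment above:
//     const then_deaths = self.deaths[0..self.true_death_count];
//     const else_deaths = (self.deaths + self.true_death_count)[0..self.false_death_count];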
};
pub const Not = struct {
pub const base_tag = Tag.not;
base: Inst,
args: struct {
operand: *Inst,
},
pub fn operandCount(self: *const CondBr) usize {
return 1;
}
pub fn getOperand(self: *const CondBr, index: usize) ?*Inst {
var i = index;
if (i < 1)
return self.condition;
i -= 1;
return null;
}
};
pub const Constant = struct {
pub const base_tag = Tag.constant;
base: Inst,
base: Inst,
val: Value,
};
pub const IsNonNull = struct {
pub const base_tag = Tag.isnonnull;
base: Inst,
args: struct {
operand: *Inst,
},
};
pub const IsNull = struct {
pub const base_tag = Tag.isnull;
base: Inst,
args: struct {
operand: *Inst,
},
};
pub const PtrToInt = struct {
pub const base_tag = Tag.ptrtoint;
base: Inst,
args: struct {
ptr: *Inst,
},
};
pub const Ret = struct {
pub const base_tag = Tag.ret;
base: Inst,
args: struct {
operand: *Inst,
},
};
pub const RetVoid = struct {
pub const base_tag = Tag.retvoid;
base: Inst,
args: void,
};
pub const Sub = struct {
pub const base_tag = Tag.sub;
base: Inst,
args: struct {
lhs: *Inst,
rhs: *Inst,
},
};
pub const Unreach = struct {
pub const base_tag = Tag.unreach;
base: Inst,
args: void,
pub fn operandCount(self: *const Constant) usize {
return 0;
}
pub fn getOperand(self: *const Constant, index: usize) ?*Inst {
return null;
}
};
};

View File

@ -25,53 +25,38 @@ fn analyzeWithTable(arena: *std.mem.Allocator, table: *std.AutoHashMap(*ir.Inst,
while (i != 0) {
i -= 1;
const base = body.instructions[i];
try analyzeInstGeneric(arena, table, base);
try analyzeInst(arena, table, base);
}
}
fn analyzeInstGeneric(arena: *std.mem.Allocator, table: *std.AutoHashMap(*ir.Inst, void), base: *ir.Inst) error{OutOfMemory}!void {
// Obtain the corresponding instruction type based on the tag type.
inline for (std.meta.declarations(ir.Inst)) |decl| {
switch (decl.data) {
.Type => |T| {
if (@typeInfo(T) == .Struct and @hasDecl(T, "base_tag")) {
if (T.base_tag == base.tag) {
return analyzeInst(arena, table, T, @fieldParentPtr(T, "base", base));
}
}
},
else => {},
}
}
unreachable;
}
fn analyzeInst(arena: *std.mem.Allocator, table: *std.AutoHashMap(*ir.Inst, void), comptime T: type, inst: *T) error{OutOfMemory}!void {
if (table.contains(&inst.base)) {
inst.base.deaths = 0;
fn analyzeInst(arena: *std.mem.Allocator, table: *std.AutoHashMap(*ir.Inst, void), base: *ir.Inst) error{OutOfMemory}!void {
if (table.contains(base)) {
base.deaths = 0;
} else {
// No tombstone for this instruction means it is never referenced,
// and its birth marks its own death. Very metal 🤘
inst.base.deaths = 1 << ir.Inst.unreferenced_bit_index;
base.deaths = 1 << ir.Inst.unreferenced_bit_index;
}
switch (T) {
ir.Inst.Constant => return,
ir.Inst.Block => {
try analyzeWithTable(arena, table, inst.args.body);
switch (base.tag) {
.constant => return,
.block => {
const inst = base.castTag(.block).?;
try analyzeWithTable(arena, table, inst.body);
// We let this continue so that it can possibly mark the block as
// unreferenced below.
},
ir.Inst.CondBr => {
.condbr => {
const inst = base.castTag(.condbr).?;
var true_table = std.AutoHashMap(*ir.Inst, void).init(table.allocator);
defer true_table.deinit();
try true_table.ensureCapacity(inst.args.true_body.instructions.len);
try analyzeWithTable(arena, &true_table, inst.args.true_body);
try true_table.ensureCapacity(inst.then_body.instructions.len);
try analyzeWithTable(arena, &true_table, inst.then_body);
var false_table = std.AutoHashMap(*ir.Inst, void).init(table.allocator);
defer false_table.deinit();
try false_table.ensureCapacity(inst.args.false_body.instructions.len);
try analyzeWithTable(arena, &false_table, inst.args.false_body);
try false_table.ensureCapacity(inst.else_body.instructions.len);
try analyzeWithTable(arena, &false_table, inst.else_body);
// Each death that occurs inside one branch, but not the other, needs
// to be added as a death immediately upon entering the other branch.
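// Illustrative, not part of the diff: if some operand dies only in the
// then-branch, it is added as a death on entry to the else-branch (and vice
// versa), so liveness agrees regardless of which branch executes at runtime.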
@ -112,47 +97,22 @@ fn analyzeInst(arena: *std.mem.Allocator, table: *std.AutoHashMap(*ir.Inst, void
// instruction, and the deaths flag for the CondBr instruction will indicate whether the
// condition's lifetime ends immediately before entering any branch.
},
ir.Inst.Call => {
// Call instructions have a runtime-known number of operands so we have to handle them ourselves here.
const needed_bits = 1 + inst.args.args.len;
if (needed_bits <= ir.Inst.deaths_bits) {
var bit_i: ir.Inst.DeathsBitIndex = 0;
{
const prev = try table.fetchPut(inst.args.func, {});
if (prev == null) inst.base.deaths |= @as(ir.Inst.DeathsInt, 1) << bit_i;
bit_i += 1;
}
for (inst.args.args) |arg| {
const prev = try table.fetchPut(arg, {});
if (prev == null) inst.base.deaths |= @as(ir.Inst.DeathsInt, 1) << bit_i;
bit_i += 1;
}
} else {
@panic("Handle liveness analysis for function calls with many parameters");
}
},
else => {},
}
const Args = ir.Inst.Args(T);
if (Args == void) {
return;
}
comptime var arg_index: usize = 0;
inline for (std.meta.fields(Args)) |field| {
if (field.field_type == *ir.Inst) {
if (arg_index >= 6) {
@compileError("out of bits to mark deaths of operands");
}
const prev = try table.fetchPut(@field(inst.args, field.name), {});
const needed_bits = base.operandCount();
if (needed_bits <= ir.Inst.deaths_bits) {
var bit_i: ir.Inst.DeathsBitIndex = 0;
while (base.getOperand(bit_i)) |operand| : (bit_i += 1) {
const prev = try table.fetchPut(operand, {});
if (prev == null) {
// Death.
inst.base.deaths |= 1 << arg_index;
base.deaths |= @as(ir.Inst.DeathsInt, 1) << bit_i;
}
arg_index += 1;
}
} else {
@panic("Handle liveness analysis for instructions with many parameters");
}
std.log.debug(.liveness, "analyze {}: 0b{b}\n", .{ inst.base.tag, inst.base.deaths });
std.log.debug(.liveness, "analyze {}: 0b{b}\n", .{ base.tag, base.deaths });
}

View File

@ -337,7 +337,7 @@ pub const Inst = struct {
base: Inst,
positionals: struct {
ptr: *Inst,
operand: *Inst,
},
kw_args: struct {},
};
@ -629,8 +629,8 @@ pub const Inst = struct {
positionals: struct {
condition: *Inst,
true_body: Module.Body,
false_body: Module.Body,
then_body: Module.Body,
else_body: Module.Body,
},
kw_args: struct {},
};
@ -1615,7 +1615,7 @@ const EmitZIR = struct {
}
}
fn emitTrivial(self: *EmitZIR, src: usize, comptime T: type) Allocator.Error!*Inst {
fn emitNoOp(self: *EmitZIR, src: usize, comptime T: type) Allocator.Error!*Inst {
const new_inst = try self.arena.allocator.create(T);
new_inst.* = .{
.base = .{
@ -1628,6 +1628,72 @@ const EmitZIR = struct {
return &new_inst.base;
}
fn emitCmp(
self: *EmitZIR,
src: usize,
new_body: ZirBody,
old_inst: *ir.Inst.BinOp,
op: std.math.CompareOperator,
) Allocator.Error!*Inst {
const new_inst = try self.arena.allocator.create(Inst.Cmp);
new_inst.* = .{
.base = .{
.src = src,
.tag = Inst.Cmp.base_tag,
},
.positionals = .{
.lhs = try self.resolveInst(new_body, old_inst.lhs),
.rhs = try self.resolveInst(new_body, old_inst.rhs),
.op = op,
},
.kw_args = .{},
};
return &new_inst.base;
}
fn emitUnOp(
self: *EmitZIR,
src: usize,
new_body: ZirBody,
old_inst: *ir.Inst.UnOp,
comptime I: type,
) Allocator.Error!*Inst {
const new_inst = try self.arena.allocator.create(I);
new_inst.* = .{
.base = .{
.src = src,
.tag = I.base_tag,
},
.positionals = .{
.operand = try self.resolveInst(new_body, old_inst.operand),
},
.kw_args = .{},
};
return &new_inst.base;
}
fn emitBinOp(
self: *EmitZIR,
src: usize,
new_body: ZirBody,
old_inst: *ir.Inst.BinOp,
comptime I: type,
) Allocator.Error!*Inst {
const new_inst = try self.arena.allocator.create(I);
new_inst.* = .{
.base = .{
.src = src,
.tag = I.base_tag,
},
.positionals = .{
.lhs = try self.resolveInst(new_body, old_inst.lhs),
.rhs = try self.resolveInst(new_body, old_inst.rhs),
},
.kw_args = .{},
};
return &new_inst.base;
}
fn emitBody(
self: *EmitZIR,
body: ir.Body,
@ -1640,69 +1706,48 @@ const EmitZIR = struct {
};
for (body.instructions) |inst| {
const new_inst = switch (inst.tag) {
.not => blk: {
const old_inst = inst.cast(ir.Inst.Not).?;
assert(inst.ty.zigTypeTag() == .Bool);
const new_inst = try self.arena.allocator.create(Inst.BoolNot);
.constant => unreachable, // excluded from function bodies
.arg => try self.emitNoOp(inst.src, Inst.Arg),
.breakpoint => try self.emitNoOp(inst.src, Inst.Breakpoint),
.unreach => try self.emitNoOp(inst.src, Inst.Unreachable),
.retvoid => try self.emitNoOp(inst.src, Inst.ReturnVoid),
.not => try self.emitUnOp(inst.src, new_body, inst.castTag(.not).?, Inst.BoolNot),
.ret => try self.emitUnOp(inst.src, new_body, inst.castTag(.ret).?, Inst.Return),
.ptrtoint => try self.emitUnOp(inst.src, new_body, inst.castTag(.ptrtoint).?, Inst.PtrToInt),
.isnull => try self.emitUnOp(inst.src, new_body, inst.castTag(.isnull).?, Inst.IsNull),
.isnonnull => try self.emitUnOp(inst.src, new_body, inst.castTag(.isnonnull).?, Inst.IsNonNull),
.add => try self.emitBinOp(inst.src, new_body, inst.castTag(.add).?, Inst.Add),
.sub => try self.emitBinOp(inst.src, new_body, inst.castTag(.sub).?, Inst.Sub),
.cmp_lt => try self.emitCmp(inst.src, new_body, inst.castTag(.cmp_lt).?, .lt),
.cmp_lte => try self.emitCmp(inst.src, new_body, inst.castTag(.cmp_lte).?, .lte),
.cmp_eq => try self.emitCmp(inst.src, new_body, inst.castTag(.cmp_eq).?, .eq),
.cmp_gte => try self.emitCmp(inst.src, new_body, inst.castTag(.cmp_gte).?, .gte),
.cmp_gt => try self.emitCmp(inst.src, new_body, inst.castTag(.cmp_gt).?, .gt),
.cmp_neq => try self.emitCmp(inst.src, new_body, inst.castTag(.cmp_neq).?, .neq),
.bitcast => blk: {
const old_inst = inst.castTag(.bitcast).?;
const new_inst = try self.arena.allocator.create(Inst.BitCast);
new_inst.* = .{
.base = .{
.src = inst.src,
.tag = Inst.BoolNot.base_tag,
.tag = Inst.BitCast.base_tag,
},
.positionals = .{
.operand = try self.resolveInst(new_body, old_inst.args.operand),
.dest_type = (try self.emitType(inst.src, inst.ty)).inst,
.operand = try self.resolveInst(new_body, old_inst.operand),
},
.kw_args = .{},
};
break :blk &new_inst.base;
},
.add => blk: {
const old_inst = inst.cast(ir.Inst.Add).?;
const new_inst = try self.arena.allocator.create(Inst.Add);
new_inst.* = .{
.base = .{
.src = inst.src,
.tag = Inst.Add.base_tag,
},
.positionals = .{
.lhs = try self.resolveInst(new_body, old_inst.args.lhs),
.rhs = try self.resolveInst(new_body, old_inst.args.rhs),
},
.kw_args = .{},
};
break :blk &new_inst.base;
},
.sub => blk: {
const old_inst = inst.cast(ir.Inst.Sub).?;
const new_inst = try self.arena.allocator.create(Inst.Sub);
new_inst.* = .{
.base = .{
.src = inst.src,
.tag = Inst.Sub.base_tag,
},
.positionals = .{
.lhs = try self.resolveInst(new_body, old_inst.args.lhs),
.rhs = try self.resolveInst(new_body, old_inst.args.rhs),
},
.kw_args = .{},
};
break :blk &new_inst.base;
},
.arg => blk: {
const old_inst = inst.cast(ir.Inst.Arg).?;
const new_inst = try self.arena.allocator.create(Inst.Arg);
new_inst.* = .{
.base = .{
.src = inst.src,
.tag = Inst.Arg.base_tag,
},
.positionals = .{},
.kw_args = .{},
};
break :blk &new_inst.base;
},
.block => blk: {
const old_inst = inst.cast(ir.Inst.Block).?;
const old_inst = inst.castTag(.block).?;
const new_inst = try self.arena.allocator.create(Inst.Block);
try self.block_table.put(old_inst, new_inst);
@ -1710,7 +1755,7 @@ const EmitZIR = struct {
var block_body = std.ArrayList(*Inst).init(self.allocator);
defer block_body.deinit();
try self.emitBody(old_inst.args.body, inst_table, &block_body);
try self.emitBody(old_inst.body, inst_table, &block_body);
new_inst.* = .{
.base = .{
@ -1725,27 +1770,10 @@ const EmitZIR = struct {
break :blk &new_inst.base;
},
.br => blk: {
const old_inst = inst.cast(ir.Inst.Br).?;
const new_block = self.block_table.get(old_inst.args.block).?;
const new_inst = try self.arena.allocator.create(Inst.Break);
new_inst.* = .{
.base = .{
.src = inst.src,
.tag = Inst.Break.base_tag,
},
.positionals = .{
.block = new_block,
.operand = try self.resolveInst(new_body, old_inst.args.operand),
},
.kw_args = .{},
};
break :blk &new_inst.base;
},
.breakpoint => try self.emitTrivial(inst.src, Inst.Breakpoint),
.brvoid => blk: {
const old_inst = inst.cast(ir.Inst.BrVoid).?;
const new_block = self.block_table.get(old_inst.args.block).?;
const new_block = self.block_table.get(old_inst.block).?;
const new_inst = try self.arena.allocator.create(Inst.BreakVoid);
new_inst.* = .{
.base = .{
@ -1759,13 +1787,32 @@ const EmitZIR = struct {
};
break :blk &new_inst.base;
},
.br => blk: {
const old_inst = inst.castTag(.br).?;
const new_block = self.block_table.get(old_inst.block).?;
const new_inst = try self.arena.allocator.create(Inst.Break);
new_inst.* = .{
.base = .{
.src = inst.src,
.tag = Inst.Break.base_tag,
},
.positionals = .{
.block = new_block,
.operand = try self.resolveInst(new_body, old_inst.operand),
},
.kw_args = .{},
};
break :blk &new_inst.base;
},
.call => blk: {
const old_inst = inst.cast(ir.Inst.Call).?;
const old_inst = inst.castTag(.call).?;
const new_inst = try self.arena.allocator.create(Inst.Call);
const args = try self.arena.allocator.alloc(*Inst, old_inst.args.args.len);
const args = try self.arena.allocator.alloc(*Inst, old_inst.args.len);
for (args) |*elem, i| {
elem.* = try self.resolveInst(new_body, old_inst.args.args[i]);
elem.* = try self.resolveInst(new_body, old_inst.args[i]);
}
new_inst.* = .{
.base = .{
@ -1773,48 +1820,31 @@ const EmitZIR = struct {
.tag = Inst.Call.base_tag,
},
.positionals = .{
.func = try self.resolveInst(new_body, old_inst.args.func),
.func = try self.resolveInst(new_body, old_inst.func),
.args = args,
},
.kw_args = .{},
};
break :blk &new_inst.base;
},
.unreach => try self.emitTrivial(inst.src, Inst.Unreachable),
.ret => blk: {
const old_inst = inst.cast(ir.Inst.Ret).?;
const new_inst = try self.arena.allocator.create(Inst.Return);
new_inst.* = .{
.base = .{
.src = inst.src,
.tag = Inst.Return.base_tag,
},
.positionals = .{
.operand = try self.resolveInst(new_body, old_inst.args.operand),
},
.kw_args = .{},
};
break :blk &new_inst.base;
},
.retvoid => try self.emitTrivial(inst.src, Inst.ReturnVoid),
.constant => unreachable, // excluded from function bodies
.assembly => blk: {
const old_inst = inst.cast(ir.Inst.Assembly).?;
const old_inst = inst.castTag(.assembly).?;
const new_inst = try self.arena.allocator.create(Inst.Asm);
const inputs = try self.arena.allocator.alloc(*Inst, old_inst.args.inputs.len);
const inputs = try self.arena.allocator.alloc(*Inst, old_inst.inputs.len);
for (inputs) |*elem, i| {
elem.* = (try self.emitStringLiteral(inst.src, old_inst.args.inputs[i])).inst;
elem.* = (try self.emitStringLiteral(inst.src, old_inst.inputs[i])).inst;
}
const clobbers = try self.arena.allocator.alloc(*Inst, old_inst.args.clobbers.len);
const clobbers = try self.arena.allocator.alloc(*Inst, old_inst.clobbers.len);
for (clobbers) |*elem, i| {
elem.* = (try self.emitStringLiteral(inst.src, old_inst.args.clobbers[i])).inst;
elem.* = (try self.emitStringLiteral(inst.src, old_inst.clobbers[i])).inst;
}
const args = try self.arena.allocator.alloc(*Inst, old_inst.args.args.len);
const args = try self.arena.allocator.alloc(*Inst, old_inst.args.len);
for (args) |*elem, i| {
elem.* = try self.resolveInst(new_body, old_inst.args.args[i]);
elem.* = try self.resolveInst(new_body, old_inst.args[i]);
}
new_inst.* = .{
@ -1823,12 +1853,12 @@ const EmitZIR = struct {
.tag = Inst.Asm.base_tag,
},
.positionals = .{
.asm_source = (try self.emitStringLiteral(inst.src, old_inst.args.asm_source)).inst,
.asm_source = (try self.emitStringLiteral(inst.src, old_inst.asm_source)).inst,
.return_type = (try self.emitType(inst.src, inst.ty)).inst,
},
.kw_args = .{
.@"volatile" = old_inst.args.is_volatile,
.output = if (old_inst.args.output) |o|
.@"volatile" = old_inst.is_volatile,
.output = if (old_inst.output) |o|
(try self.emitStringLiteral(inst.src, o)).inst
else
null,
@ -1839,65 +1869,18 @@ const EmitZIR = struct {
};
break :blk &new_inst.base;
},
.ptrtoint => blk: {
const old_inst = inst.cast(ir.Inst.PtrToInt).?;
const new_inst = try self.arena.allocator.create(Inst.PtrToInt);
new_inst.* = .{
.base = .{
.src = inst.src,
.tag = Inst.PtrToInt.base_tag,
},
.positionals = .{
.ptr = try self.resolveInst(new_body, old_inst.args.ptr),
},
.kw_args = .{},
};
break :blk &new_inst.base;
},
.bitcast => blk: {
const old_inst = inst.cast(ir.Inst.BitCast).?;
const new_inst = try self.arena.allocator.create(Inst.BitCast);
new_inst.* = .{
.base = .{
.src = inst.src,
.tag = Inst.BitCast.base_tag,
},
.positionals = .{
.dest_type = (try self.emitType(inst.src, inst.ty)).inst,
.operand = try self.resolveInst(new_body, old_inst.args.operand),
},
.kw_args = .{},
};
break :blk &new_inst.base;
},
.cmp => blk: {
const old_inst = inst.cast(ir.Inst.Cmp).?;
const new_inst = try self.arena.allocator.create(Inst.Cmp);
new_inst.* = .{
.base = .{
.src = inst.src,
.tag = Inst.Cmp.base_tag,
},
.positionals = .{
.lhs = try self.resolveInst(new_body, old_inst.args.lhs),
.rhs = try self.resolveInst(new_body, old_inst.args.rhs),
.op = old_inst.args.op,
},
.kw_args = .{},
};
break :blk &new_inst.base;
},
.condbr => blk: {
const old_inst = inst.cast(ir.Inst.CondBr).?;
const old_inst = inst.castTag(.condbr).?;
var true_body = std.ArrayList(*Inst).init(self.allocator);
var false_body = std.ArrayList(*Inst).init(self.allocator);
var then_body = std.ArrayList(*Inst).init(self.allocator);
var else_body = std.ArrayList(*Inst).init(self.allocator);
defer true_body.deinit();
defer false_body.deinit();
defer then_body.deinit();
defer else_body.deinit();
try self.emitBody(old_inst.args.true_body, inst_table, &true_body);
try self.emitBody(old_inst.args.false_body, inst_table, &false_body);
try self.emitBody(old_inst.then_body, inst_table, &then_body);
try self.emitBody(old_inst.else_body, inst_table, &else_body);
const new_inst = try self.arena.allocator.create(Inst.CondBr);
new_inst.* = .{
@ -1906,39 +1889,9 @@ const EmitZIR = struct {
.tag = Inst.CondBr.base_tag,
},
.positionals = .{
.condition = try self.resolveInst(new_body, old_inst.args.condition),
.true_body = .{ .instructions = true_body.toOwnedSlice() },
.false_body = .{ .instructions = false_body.toOwnedSlice() },
},
.kw_args = .{},
};
break :blk &new_inst.base;
},
.isnull => blk: {
const old_inst = inst.cast(ir.Inst.IsNull).?;
const new_inst = try self.arena.allocator.create(Inst.IsNull);
new_inst.* = .{
.base = .{
.src = inst.src,
.tag = Inst.IsNull.base_tag,
},
.positionals = .{
.operand = try self.resolveInst(new_body, old_inst.args.operand),
},
.kw_args = .{},
};
break :blk &new_inst.base;
},
.isnonnull => blk: {
const old_inst = inst.cast(ir.Inst.IsNonNull).?;
const new_inst = try self.arena.allocator.create(Inst.IsNonNull);
new_inst.* = .{
.base = .{
.src = inst.src,
.tag = Inst.IsNonNull.base_tag,
},
.positionals = .{
.operand = try self.resolveInst(new_body, old_inst.args.operand),
.condition = try self.resolveInst(new_body, old_inst.condition),
.then_body = .{ .instructions = then_body.toOwnedSlice() },
.else_body = .{ .instructions = else_body.toOwnedSlice() },
},
.kw_args = .{},
};

View File

@ -169,9 +169,8 @@ pub fn addCases(ctx: *TestContext) !void {
,
"",
);
}
{
var case = ctx.exe("assert function", linux_x64);
// Tests the assert() function.
case.addCompareOutput(
\\export fn _start() noreturn {
\\ add(3, 4);
@ -199,15 +198,94 @@ pub fn addCases(ctx: *TestContext) !void {
,
"",
);
// Tests copying a register. For the `c = a + b`, it has to
// preserve both a and b, because they are both used later.
case.addCompareOutput(
\\export fn _start() noreturn {
\\ add(100, 200);
\\ add(3, 4);
\\
\\ exit();
\\}
\\
\\fn add(a: u32, b: u32) void {
\\ assert(a + b == 300);
\\ const c = a + b; // 7
\\ const d = a + c; // 10
\\ const e = d + b; // 14
\\ assert(e == 14);
\\}
\\
\\pub fn assert(ok: bool) void {
\\ if (!ok) unreachable; // assertion failure
\\}
\\
\\fn exit() noreturn {
\\ asm volatile ("syscall"
\\ :
\\ : [number] "{rax}" (231),
\\ [arg1] "{rdi}" (0)
\\ : "rcx", "r11", "memory"
\\ );
\\ unreachable;
\\}
,
"",
);
// More stress on the liveness detection.
case.addCompareOutput(
\\export fn _start() noreturn {
\\ add(3, 4);
\\
\\ exit();
\\}
\\
\\fn add(a: u32, b: u32) void {
\\ const c = a + b; // 7
\\ const d = a + c; // 10
\\ const e = d + b; // 14
\\ const f = d + e; // 24
\\ const g = e + f; // 38
\\ const h = f + g; // 62
\\ const i = g + h; // 100
\\ assert(i == 100);
\\}
\\
\\pub fn assert(ok: bool) void {
\\ if (!ok) unreachable; // assertion failure
\\}
\\
\\fn exit() noreturn {
\\ asm volatile ("syscall"
\\ :
\\ : [number] "{rax}" (231),
\\ [arg1] "{rdi}" (0)
\\ : "rcx", "r11", "memory"
\\ );
\\ unreachable;
\\}
,
"",
);
// Requires a second move. The register allocator should figure out to re-use rax.
case.addCompareOutput(
\\export fn _start() noreturn {
\\ add(3, 4);
\\
\\ exit();
\\}
\\
\\fn add(a: u32, b: u32) void {
\\ const c = a + b; // 7
\\ const d = a + c; // 10
\\ const e = d + b; // 14
\\ const f = d + e; // 24
\\ const g = e + f; // 38
\\ const h = f + g; // 62
\\ const i = g + h; // 100
\\ const j = i + d; // 110
\\ assert(j == 110);
\\}
\\
\\pub fn assert(ok: bool) void {