From 004d0c8978d4b5e4212c06abb33d7a594930f8c5 Mon Sep 17 00:00:00 2001 From: David Rubin Date: Fri, 19 Apr 2024 12:40:24 -0700 Subject: [PATCH 01/24] riscv: switch progress + by-ref return progress --- src/arch/riscv64/CodeGen.zig | 159 ++++++++++++++++-- src/arch/riscv64/Encoding.zig | 3 + src/arch/riscv64/Mir.zig | 3 - test/behavior/align.zig | 1 - test/behavior/cast.zig | 1 - test/behavior/enum.zig | 2 - test/behavior/eval.zig | 5 - test/behavior/inline_switch.zig | 6 - ...ef_var_in_if_after_if_2nd_switch_prong.zig | 1 - test/behavior/switch.zig | 32 +--- 10 files changed, 158 insertions(+), 55 deletions(-) diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 762251bc44..56d2f062cc 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -1223,7 +1223,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .field_parent_ptr => try self.airFieldParentPtr(inst), - .switch_br => try self.airSwitch(inst), + .switch_br => try self.airSwitchBr(inst), .slice_ptr => try self.airSlicePtr(inst), .slice_len => try self.airSliceLen(inst), @@ -1960,7 +1960,7 @@ fn binOp( switch (lhs_ty.zigTypeTag(zcu)) { .Float => return self.fail("TODO binary operations on floats", .{}), .Vector => return self.fail("TODO binary operations on vectors", .{}), - .Int => { + .Int, .Enum => { assert(lhs_ty.eql(rhs_ty, zcu)); const int_info = lhs_ty.intInfo(zcu); if (int_info.bits <= 64) { @@ -3682,7 +3682,6 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { switch (self.ret_mcv.short) { .none => {}, .register, .register_pair => try self.load(self.ret_mcv.short, ptr, ptr_ty), - .indirect => |reg_off| try self.genSetReg(ptr_ty, reg_off.reg, ptr), else => unreachable, } self.ret_mcv.liveOut(self, inst); @@ -4160,12 +4159,97 @@ fn lowerBlock(self: *Self, inst: Air.Inst.Index, body: []const Air.Inst.Index) ! 
self.finishAirBookkeeping(); } -fn airSwitch(self: *Self, inst: Air.Inst.Index) !void { +fn airSwitchBr(self: *Self, inst: Air.Inst.Index) !void { const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; - const condition = pl_op.operand; - _ = condition; - return self.fail("TODO airSwitch for {}", .{self.target.cpu.arch}); - // return self.finishAir(inst, .dead, .{ condition, .none, .none }); + const condition = try self.resolveInst(pl_op.operand); + const condition_ty = self.typeOf(pl_op.operand); + const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload); + var extra_index: usize = switch_br.end; + var case_i: u32 = 0; + const liveness = try self.liveness.getSwitchBr(self.gpa, inst, switch_br.data.cases_len + 1); + defer self.gpa.free(liveness.deaths); + + // If the condition dies here in this switch instruction, process + // that death now instead of later as this has an effect on + // whether it needs to be spilled in the branches + if (self.liveness.operandDies(inst, 0)) { + if (pl_op.operand.toIndex()) |op_inst| try self.processDeath(op_inst); + } + + self.scope_generation += 1; + const state = try self.saveState(); + + while (case_i < switch_br.data.cases_len) : (case_i += 1) { + const case = self.air.extraData(Air.SwitchBr.Case, extra_index); + const items: []const Air.Inst.Ref = + @ptrCast(self.air.extra[case.end..][0..case.data.items_len]); + const case_body: []const Air.Inst.Index = + @ptrCast(self.air.extra[case.end + items.len ..][0..case.data.body_len]); + extra_index = case.end + items.len + case_body.len; + + var relocs = try self.gpa.alloc(Mir.Inst.Index, items.len); + defer self.gpa.free(relocs); + + for (items, relocs, 0..) |item, *reloc, i| { + // switch branches must be comptime-known, so this is stored in an immediate + const item_mcv = try self.resolveInst(item); + + const cmp_mcv: MCValue = try self.binOp( + .cmp_neq, + condition, + condition_ty, + item_mcv, + condition_ty, + ); + + const cmp_reg = try self.copyToTmpRegister(Type.bool, cmp_mcv); + + if (!(i < relocs.len - 1)) { + _ = try self.addInst(.{ + .tag = .pseudo, + .ops = .pseudo_not, + .data = .{ .rr = .{ + .rd = cmp_reg, + .rs = cmp_reg, + } }, + }); + } + + reloc.* = try self.condBr(condition_ty, .{ .register = cmp_reg }); + } + + for (liveness.deaths[case_i]) |operand| try self.processDeath(operand); + + for (relocs[0 .. 
relocs.len - 1]) |reloc| self.performReloc(reloc); + try self.genBody(case_body); + try self.restoreState(state, &.{}, .{ + .emit_instructions = false, + .update_tracking = true, + .resurrect = true, + .close_scope = true, + }); + + self.performReloc(relocs[relocs.len - 1]); + } + + if (switch_br.data.else_body_len > 0) { + const else_body: []const Air.Inst.Index = + @ptrCast(self.air.extra[extra_index..][0..switch_br.data.else_body_len]); + + const else_deaths = liveness.deaths.len - 1; + for (liveness.deaths[else_deaths]) |operand| try self.processDeath(operand); + + try self.genBody(else_body); + try self.restoreState(state, &.{}, .{ + .emit_instructions = false, + .update_tracking = true, + .resurrect = true, + .close_scope = true, + }); + } + + // We already took care of pl_op.operand earlier, so there's nothing left to do + self.finishAirBookkeeping(); } fn performReloc(self: *Self, inst: Mir.Inst.Index) void { @@ -4249,9 +4333,60 @@ fn airBr(self: *Self, inst: Air.Inst.Index) !void { fn airBoolOp(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const air_tags = self.air.instructions.items(.tag); - _ = air_tags; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement boolean operations for {}", .{self.target.cpu.arch}); + const tag: Air.Inst.Tag = self.air.instructions.items(.tag)[@intFromEnum(inst)]; + + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { + const lhs = try self.resolveInst(bin_op.lhs); + const rhs = try self.resolveInst(bin_op.rhs); + const lhs_ty = Type.bool; + const rhs_ty = Type.bool; + + const lhs_reg, const lhs_lock = blk: { + if (lhs == .register) break :blk .{ lhs.register, null }; + + const lhs_reg, const lhs_lock = try self.allocReg(); + try self.genSetReg(lhs_ty, lhs_reg, lhs); + break :blk .{ lhs_reg, lhs_lock }; + }; + defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock); + + const rhs_reg, const rhs_lock = blk: { + if (rhs == .register) break :blk .{ rhs.register, null }; + + const rhs_reg, const rhs_lock = try self.allocReg(); + try self.genSetReg(rhs_ty, rhs_reg, rhs); + break :blk .{ rhs_reg, rhs_lock }; + }; + defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock); + + const result_reg, const result_lock = try self.allocReg(); + defer self.register_manager.unlockReg(result_lock); + + _ = try self.addInst(.{ + .tag = if (tag == .bool_or) .@"or" else .@"and", + .ops = .rrr, + .data = .{ .r_type = .{ + .rd = result_reg, + .rs1 = lhs_reg, + .rs2 = rhs_reg, + } }, + }); + + // safety truncate + if (self.wantSafety()) { + _ = try self.addInst(.{ + .tag = .andi, + .ops = .rri, + .data = .{ .i_type = .{ + .rd = result_reg, + .rs1 = result_reg, + .imm12 = Immediate.s(1), + } }, + }); + } + + break :result .{ .register = result_reg }; + }; return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } @@ -5265,7 +5400,9 @@ fn resolveCallingConventionValues( }, .memory => { const param_int_regs = abi.function_arg_regs; + const param_int_reg = param_int_regs[param_int_reg_i]; + param_int_reg_i += 1; arg_mcv[arg_mcv_i] = .{ .indirect = .{ .reg = param_int_reg } }; arg_mcv_i += 1; diff --git a/src/arch/riscv64/Encoding.zig b/src/arch/riscv64/Encoding.zig index 91f100993b..cccf0c8aac 100644 --- a/src/arch/riscv64/Encoding.zig +++ b/src/arch/riscv64/Encoding.zig @@ -38,6 +38,7 @@ pub const Mnemonic = enum { // R Type add, @"and", + @"or", sub, slt, mul, @@ -55,6 +56,7 @@ pub 
const Mnemonic = enum { .add => .{ .opcode = 0b0110011, .funct3 = 0b000, .funct7 = 0b0000000 }, .sltu => .{ .opcode = 0b0110011, .funct3 = 0b011, .funct7 = 0b0000000 }, .@"and" => .{ .opcode = 0b0110011, .funct3 = 0b111, .funct7 = 0b0000000 }, + .@"or" => .{ .opcode = 0b0110011, .funct3 = 0b110, .funct7 = 0b0000000 }, .sub => .{ .opcode = 0b0110011, .funct3 = 0b000, .funct7 = 0b0100000 }, .ld => .{ .opcode = 0b0000011, .funct3 = 0b011, .funct7 = null }, @@ -152,6 +154,7 @@ pub const InstEnc = enum { .add, .sub, .@"and", + .@"or", => .R, .ecall, diff --git a/src/arch/riscv64/Mir.zig b/src/arch/riscv64/Mir.zig index 0ce2185197..064aff0415 100644 --- a/src/arch/riscv64/Mir.zig +++ b/src/arch/riscv64/Mir.zig @@ -80,9 +80,6 @@ pub const Inst = struct { /// Branch if not equal, Uses b_type bne, - /// Boolean NOT, Uses rr payload - not, - /// Generates a NO-OP, uses nop payload nop, diff --git a/test/behavior/align.zig b/test/behavior/align.zig index 659733962b..6497734993 100644 --- a/test/behavior/align.zig +++ b/test/behavior/align.zig @@ -624,7 +624,6 @@ test "alignment of slice element" { } test "sub-aligned pointer field access" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig index a3ffb7cb3a..858a14cea1 100644 --- a/test/behavior/cast.zig +++ b/test/behavior/cast.zig @@ -881,7 +881,6 @@ test "peer resolution of string literals" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const E = enum { a, b, c, d }; diff --git a/test/behavior/enum.zig b/test/behavior/enum.zig index 0742c2d91c..28d6dccf29 100644 --- a/test/behavior/enum.zig +++ b/test/behavior/enum.zig @@ -610,7 +610,6 @@ fn testEnumWithSpecifiedTagValues(x: MultipleChoice) !void { test "enum with specified tag values" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testEnumWithSpecifiedTagValues(MultipleChoice.C); try comptime testEnumWithSpecifiedTagValues(MultipleChoice.C); @@ -749,7 +748,6 @@ test "cast integer literal to enum" { test "enum with specified and unspecified tag values" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testEnumWithSpecifiedAndUnspecifiedTagValues(MultipleChoice2.D); try comptime testEnumWithSpecifiedAndUnspecifiedTagValues(MultipleChoice2.D); diff --git a/test/behavior/eval.zig b/test/behavior/eval.zig index c62e116a5f..d08743ff3d 100644 --- a/test/behavior/eval.zig +++ b/test/behavior/eval.zig @@ -1088,7 +1088,6 @@ test "comptime break operand passing through runtime condition converted to runt test "comptime break operand passing through runtime switch converted to runtime break" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == 
.stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest(runtime: u8) !void { @@ -1631,8 +1630,6 @@ test "struct in comptime false branch is not evaluated" { } test "result of nested switch assigned to variable" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - var zds: u32 = 0; zds = switch (zds) { 0 => switch (zds) { @@ -1667,8 +1664,6 @@ test "inline for loop of functions returning error unions" { } test "if inside a switch" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - var condition = true; var wave_type: u32 = 0; _ = .{ &condition, &wave_type }; diff --git a/test/behavior/inline_switch.zig b/test/behavior/inline_switch.zig index 444697b091..0ae05c3857 100644 --- a/test/behavior/inline_switch.zig +++ b/test/behavior/inline_switch.zig @@ -5,7 +5,6 @@ const builtin = @import("builtin"); test "inline scalar prongs" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var x: usize = 0; switch (x) { @@ -21,7 +20,6 @@ test "inline scalar prongs" { test "inline prong ranges" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var x: usize = 0; _ = &x; @@ -37,7 +35,6 @@ const E = enum { a, b, c, d }; test "inline switch enums" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var x: E = .a; _ = &x; @@ -106,7 +103,6 @@ test "inline else error" { test "inline else enum" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const E2 = enum(u8) { a = 2, b = 3, c = 4, d = 5 }; var a: E2 = .a; @@ -120,7 +116,6 @@ test "inline else enum" { test "inline else int with gaps" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var a: u8 = 0; _ = &a; @@ -139,7 +134,6 @@ test "inline else int with gaps" { test "inline else int all values" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var a: u2 = 0; _ = &a; diff --git a/test/behavior/ref_var_in_if_after_if_2nd_switch_prong.zig b/test/behavior/ref_var_in_if_after_if_2nd_switch_prong.zig index 366730424a..bb6d5b1359 100644 --- a/test/behavior/ref_var_in_if_after_if_2nd_switch_prong.zig +++ b/test/behavior/ref_var_in_if_after_if_2nd_switch_prong.zig @@ -8,7 +8,6 @@ test "reference a variable in an if after an if in the 2nd switch prong" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if 
(builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try foo(true, Num.Two, false, "aoeu"); try expect(!ok); diff --git a/test/behavior/switch.zig b/test/behavior/switch.zig index 78365e8763..7fa4709d3b 100644 --- a/test/behavior/switch.zig +++ b/test/behavior/switch.zig @@ -7,7 +7,6 @@ const expectEqual = std.testing.expectEqual; test "switch with numbers" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testSwitchWithNumbers(13); } @@ -23,7 +22,6 @@ fn testSwitchWithNumbers(x: u32) !void { test "switch with all ranges" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect(testSwitchWithAllRanges(50, 3) == 1); try expect(testSwitchWithAllRanges(101, 0) == 2); @@ -57,27 +55,25 @@ test "implicit comptime switch" { test "switch on enum" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const fruit = Fruit.Orange; - nonConstSwitchOnEnum(fruit); + try expect(nonConstSwitchOnEnum(fruit)); } const Fruit = enum { Apple, Orange, Banana, }; -fn nonConstSwitchOnEnum(fruit: Fruit) void { - switch (fruit) { - Fruit.Apple => unreachable, - Fruit.Orange => {}, - Fruit.Banana => unreachable, - } +fn nonConstSwitchOnEnum(fruit: Fruit) bool { + return switch (fruit) { + Fruit.Apple => false, + Fruit.Orange => true, + Fruit.Banana => false, + }; } test "switch statement" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try nonConstSwitch(SwitchStatementFoo.C); } @@ -94,7 +90,6 @@ const SwitchStatementFoo = enum { A, B, C, D }; test "switch with multiple expressions" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const x = switch (returnsFive()) { 1, 2, 3 => 1, @@ -179,7 +174,6 @@ test "undefined.u0" { test "switch with disjoint range" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var q: u8 = 0; _ = &q; @@ -191,8 +185,6 @@ test "switch with disjoint range" { } test "switch variable for range and multiple prongs" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - const S = struct { fn doTheTest() !void { try doTheSwitch(16); @@ -382,7 +374,6 @@ test "anon enum literal used in switch on union enum" { test "switch all prongs unreachable" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testAllProngsUnreachable(); try comptime testAllProngsUnreachable(); @@ -420,7 +411,6 @@ fn return_a_number() anyerror!i32 { test "switch on integer with else capturing expr" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -735,7 +725,6 @@ test "switch capture copies its payload" { test "capture of integer forwards the switch condition directly" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn 
foo(x: u8) !void { @@ -757,7 +746,6 @@ test "capture of integer forwards the switch condition directly" { test "enum value without tag name used as switch item" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const E = enum(u32) { a = 1, @@ -775,8 +763,6 @@ test "enum value without tag name used as switch item" { } test "switch item sizeof" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - const S = struct { fn doTheTest() !void { var a: usize = 0; @@ -873,8 +859,6 @@ test "switch pointer capture peer type resolution" { } test "inline switch range that includes the maximum value of the switched type" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - const inputs: [3]u8 = .{ 0, 254, 255 }; for (inputs) |input| { switch (input) { @@ -970,8 +954,6 @@ test "prong with inline call to unreachable" { } test "block error return trace index is reset between prongs" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - const S = struct { fn returnError() error{TestFailed} { return error.TestFailed; From fcafaae747c0d032401ca7936b667f5dfcf0466b Mon Sep 17 00:00:00 2001 From: David Rubin Date: Mon, 22 Apr 2024 20:40:55 -0700 Subject: [PATCH 02/24] riscv: get basic libc interop --- src/arch/riscv64/CodeGen.zig | 43 ++++++++++++++++++------ src/arch/riscv64/Emit.zig | 13 ++++++++ src/arch/riscv64/Encoding.zig | 3 ++ src/arch/riscv64/Lower.zig | 24 ++++++++++++- src/arch/riscv64/Mir.zig | 13 +++++--- src/link/Elf.zig | 3 +- src/link/Elf/Atom.zig | 63 +++++++++++++++++++++++++---------- 7 files changed, 128 insertions(+), 34 deletions(-) diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 56d2f062cc..d6f11ef8c9 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -3590,22 +3590,42 @@ fn genCall( const sym_index = try elf_file.zigObjectPtr().?.getOrCreateMetadataForDecl(elf_file, func.owner_decl); const sym = elf_file.symbol(sym_index); - _ = try sym.getOrCreateZigGotEntry(sym_index, elf_file); - const got_addr = sym.zigGotAddress(elf_file); - try self.genSetReg(Type.usize, .ra, .{ .memory = @intCast(got_addr) }); + if (self.mod.pic) { + return self.fail("TODO: genCall pic", .{}); + } else { + _ = try sym.getOrCreateZigGotEntry(sym_index, elf_file); + const got_addr = sym.zigGotAddress(elf_file); + try self.genSetReg(Type.usize, .ra, .{ .memory = @intCast(got_addr) }); + _ = try self.addInst(.{ + .tag = .jalr, + .ops = .rri, + .data = .{ .i_type = .{ + .rd = .ra, + .rs1 = .ra, + .imm12 = Immediate.s(0), + } }, + }); + } + } else unreachable; // not a valid riscv64 format + }, + .extern_func => |extern_func| { + const owner_decl = zcu.declPtr(extern_func.decl); + const lib_name = extern_func.lib_name.toSlice(&zcu.intern_pool); + const decl_name = owner_decl.name.toSlice(&zcu.intern_pool); + const atom_index = try self.symbolIndex(); + + if (self.bin_file.cast(link.File.Elf)) |elf_file| { _ = try self.addInst(.{ - .tag = .jalr, - .ops = .rri, - .data = .{ .i_type = .{ - .rd = .ra, - .rs1 = .ra, - .imm12 = Immediate.s(0), + .tag = .pseudo, + .ops = .pseudo_extern_fn_reloc, + .data = .{ .reloc = .{ + .atom_index = atom_index, + .sym_index = try elf_file.getGlobalSymbol(decl_name, lib_name), } }, }); - } else unreachable; + } else unreachable; // not a valid riscv64 format }, - .extern_func => return self.fail("TODO: extern func calls", .{}), else => return self.fail("TODO implement 
calling bitcasted functions", .{}),
         }
     } else {
         assert(ty.zigTypeTag(zcu) == .Pointer);
         const addr_reg, const addr_lock = try self.allocReg();
         defer self.register_manager.unlockReg(addr_lock);
         try self.genSetReg(Type.usize, addr_reg, .{ .air_ref = callee });
+
         _ = try self.addInst(.{
             .tag = .jalr,
             .ops = .rri,
diff --git a/src/arch/riscv64/Emit.zig b/src/arch/riscv64/Emit.zig
index ec256fefb3..6f136e7fb4 100644
--- a/src/arch/riscv64/Emit.zig
+++ b/src/arch/riscv64/Emit.zig
@@ -70,6 +70,19 @@ pub fn emitMir(emit: *Emit) Error!void {
                     });
                 } else return emit.fail("TODO: load_symbol_reloc non-ELF", .{});
             },
+            .call_extern_fn_reloc => |symbol| {
+                if (emit.lower.bin_file.cast(link.File.Elf)) |elf_file| {
+                    const atom_ptr = elf_file.symbol(symbol.atom_index).atom(elf_file).?;
+
+                    const r_type: u32 = @intFromEnum(std.elf.R_RISCV.CALL_PLT);
+
+                    try atom_ptr.addReloc(elf_file, .{
+                        .r_offset = start_offset,
+                        .r_info = (@as(u64, @intCast(symbol.sym_index)) << 32) | r_type,
+                        .r_addend = 0,
+                    });
+                } else return emit.fail("TODO: call_extern_fn_reloc non-ELF", .{});
+            },
         };
     }
     std.debug.assert(lowered_relocs.len == 0);
diff --git a/src/arch/riscv64/Encoding.zig b/src/arch/riscv64/Encoding.zig
index cccf0c8aac..3a0ef90584 100644
--- a/src/arch/riscv64/Encoding.zig
+++ b/src/arch/riscv64/Encoding.zig
@@ -22,6 +22,7 @@ pub const Mnemonic = enum {
 
     // U Type
     lui,
+    auipc,
 
     // S Type
     sd,
@@ -78,6 +79,7 @@ pub const Mnemonic = enum {
             .srai => .{ .opcode = 0b0010011, .funct3 = 0b101, .funct7 = null, .offset = 1 << 10 },
 
             .lui => .{ .opcode = 0b0110111, .funct3 = null, .funct7 = null },
+            .auipc => .{ .opcode = 0b0010111, .funct3 = null, .funct7 = null },
 
             .sd => .{ .opcode = 0b0100011, .funct3 = 0b011, .funct7 = null },
             .sw => .{ .opcode = 0b0100011, .funct3 = 0b010, .funct7 = null },
@@ -133,6 +135,7 @@ pub const InstEnc = enum {
         => .I,
 
         .lui,
+        .auipc,
         => .U,
 
         .sd,
diff --git a/src/arch/riscv64/Lower.zig b/src/arch/riscv64/Lower.zig
index 4b77f9cdee..dba902eaa4 100644
--- a/src/arch/riscv64/Lower.zig
+++ b/src/arch/riscv64/Lower.zig
@@ -32,8 +32,10 @@ pub const Reloc = struct {
     const Target = union(enum) {
         inst: Mir.Inst.Index,
 
-        /// Relocs the lowered_inst_index and the next one.
+        /// Relocs the lowered_inst_index and the next instruction.
         load_symbol_reloc: bits.Symbol,
+        /// Relocs the lowered_inst_index and the next instruction.
+        call_extern_fn_reloc: bits.Symbol,
     };
 };
 
@@ -247,6 +249,26 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct {
                 });
             },
 
+            .pseudo_extern_fn_reloc => {
+                const inst_reloc = inst.data.reloc;
+
+                try lower.emit(.auipc, &.{
+                    .{ .reg = .ra },
+                    .{ .imm = lower.reloc(
+                        .{ .call_extern_fn_reloc = .{
+                            .atom_index = inst_reloc.atom_index,
+                            .sym_index = inst_reloc.sym_index,
+                        } },
+                    ) },
+                });
+
+                try lower.emit(.jalr, &.{
+                    .{ .reg = .ra },
+                    .{ .reg = .ra },
+                    .{ .imm = Immediate.s(0) },
+                });
+            },
+
             else => return lower.fail("TODO lower: pseudo {s}", .{@tagName(inst.ops)}),
         },
     }
diff --git a/src/arch/riscv64/Mir.zig b/src/arch/riscv64/Mir.zig
index 064aff0415..5d21719da2 100644
--- a/src/arch/riscv64/Mir.zig
+++ b/src/arch/riscv64/Mir.zig
@@ -202,6 +202,11 @@ pub const Inst = struct {
                 lte,
             },
         },
+
+        reloc: struct {
+            atom_index: u32,
+            sym_index: u32,
+        },
     };
 
     pub const Ops = enum {
@@ -214,10 +219,7 @@ pub const Inst = struct {
         /// Two registers + immediate, uses the i_type payload.
         rri,
 
-        /// Two registers + Two Immediates
-        rrii,
-
         /// Two registers + another instruction.
rr_inst, /// Register + Memory @@ -283,6 +285,9 @@ pub const Inst = struct { pseudo_compare, pseudo_not, + + /// Generates an auipc + jalr pair, with a R_RISCV_CALL_PLT reloc + pseudo_extern_fn_reloc, }; // Make sure we don't accidentally make instructions bigger than expected. diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 770d483e98..6464ac73e2 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -5842,7 +5842,8 @@ pub fn tpAddress(self: *Elf) i64 { const addr = switch (self.getTarget().cpu.arch) { .x86_64 => mem.alignForward(u64, phdr.p_vaddr + phdr.p_memsz, phdr.p_align), .aarch64 => mem.alignBackward(u64, phdr.p_vaddr - 16, phdr.p_align), - else => @panic("TODO implement getTpAddress for this arch"), + .riscv64 => phdr.p_vaddr, + else => |arch| std.debug.panic("TODO implement getTpAddress for {s}", .{@tagName(arch)}), }; return @intCast(addr); } diff --git a/src/link/Elf/Atom.zig b/src/link/Elf/Atom.zig index 239186ffaa..1c303980c3 100644 --- a/src/link/Elf/Atom.zig +++ b/src/link/Elf/Atom.zig @@ -1409,11 +1409,11 @@ const x86_64 = struct { .GOTPC64 => try cwriter.writeInt(i64, GOT + A, .little), .SIZE32 => { const size = @as(i64, @intCast(target.elfSym(elf_file).st_size)); - try cwriter.writeInt(u32, @as(u32, @bitCast(@as(i32, @intCast(size + A)))), .little); + try cwriter.writeInt(u32, @bitCast(@as(i32, @intCast(size + A))), .little); }, .SIZE64 => { const size = @as(i64, @intCast(target.elfSym(elf_file).st_size)); - try cwriter.writeInt(i64, @as(i64, @intCast(size + A)), .little); + try cwriter.writeInt(i64, @intCast(size + A), .little); }, else => try atom.reportUnhandledRelocError(rel, elf_file), } @@ -2001,26 +2001,25 @@ const riscv = struct { const r_type: elf.R_RISCV = @enumFromInt(rel.r_type()); switch (r_type) { - .@"64" => { - try atom.scanReloc(symbol, rel, dynAbsRelocAction(symbol, elf_file), elf_file); - }, - - .HI20 => { - try atom.scanReloc(symbol, rel, absRelocAction(symbol, elf_file), elf_file); - }, + .@"32" => try atom.scanReloc(symbol, rel, absRelocAction(symbol, elf_file), elf_file), + .@"64" => try atom.scanReloc(symbol, rel, dynAbsRelocAction(symbol, elf_file), elf_file), + .HI20 => try atom.scanReloc(symbol, rel, absRelocAction(symbol, elf_file), elf_file), .CALL_PLT => if (symbol.flags.import) { symbol.flags.needs_plt = true; }, + .GOT_HI20 => symbol.flags.needs_got = true, - .GOT_HI20 => { - symbol.flags.needs_got = true; - }, + .TPREL_HI20, + .TPREL_LO12_I, + .TPREL_LO12_S, + .TPREL_ADD, .PCREL_HI20, .PCREL_LO12_I, .PCREL_LO12_S, .LO12_I, + .LO12_S, .ADD32, .SUB32, => {}, @@ -2058,6 +2057,8 @@ const riscv = struct { switch (r_type) { .NONE => unreachable, + .@"32" => try cwriter.writeInt(u32, @as(u32, @truncate(@as(u64, @intCast(S + A)))), .little), + .@"64" => { try atom.resolveDynAbsReloc( target, @@ -2076,11 +2077,6 @@ const riscv = struct { riscv_util.writeInstU(code[r_offset..][0..4], value); }, - .LO12_I => { - const value: u32 = @bitCast(math.cast(i32, S + A) orelse return error.Overflow); - riscv_util.writeInstI(code[r_offset..][0..4], value); - }, - .GOT_HI20 => { assert(target.flags.has_got); const disp: u32 = @bitCast(math.cast(i32, G + GOT + A - P) orelse return error.Overflow); @@ -2143,6 +2139,39 @@ const riscv = struct { } }, + .LO12_I, + .LO12_S, + => { + const disp: u32 = @bitCast(math.cast(i32, S + A) orelse return error.Overflow); + switch (r_type) { + .LO12_I => riscv_util.writeInstI(code[r_offset..][0..4], disp), + .LO12_S => riscv_util.writeInstS(code[r_offset..][0..4], disp), + else => unreachable, + } + }, + + .TPREL_HI20 
=> { + const target_addr: u32 = @intCast(target.address(.{}, elf_file)); + const val: i32 = @intCast(S + A - target_addr); + riscv_util.writeInstU(code[r_offset..][0..4], @bitCast(val)); + }, + + .TPREL_LO12_I, + .TPREL_LO12_S, + => { + const target_addr: u32 = @intCast(target.address(.{}, elf_file)); + const val: i32 = @intCast(S + A - target_addr); + switch (r_type) { + .TPREL_LO12_I => riscv_util.writeInstI(code[r_offset..][0..4], @bitCast(val)), + .TPREL_LO12_S => riscv_util.writeInstS(code[r_offset..][0..4], @bitCast(val)), + else => unreachable, + } + }, + + .TPREL_ADD => { + // TODO: annotates an ADD instruction that can be removed when TPREL is relaxed + }, + else => |x| switch (@intFromEnum(x)) { // Zig custom relocations Elf.R_ZIG_GOT_HI20 => { From f34dcd067b6e9783a53f007746309bb1f635fbf0 Mon Sep 17 00:00:00 2001 From: David Rubin Date: Mon, 22 Apr 2024 21:10:52 -0700 Subject: [PATCH 03/24] riscv: basic libc varargs --- src/arch/riscv64/CodeGen.zig | 46 +++++++++++++++++++++++++++++++++--- 1 file changed, 43 insertions(+), 3 deletions(-) diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index d6f11ef8c9..592c1941d6 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -774,7 +774,7 @@ pub fn generate( ); const fn_info = zcu.typeToFunc(fn_type).?; - var call_info = function.resolveCallingConventionValues(fn_info) catch |err| switch (err) { + var call_info = function.resolveCallingConventionValues(fn_info, &.{}) catch |err| switch (err) { error.CodegenFail => return Result{ .fail = function.err_msg.? }, error.OutOfRegisters => return Result{ .fail = try ErrorMsg.create(gpa, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}), @@ -3552,7 +3552,14 @@ fn genCall( }; const fn_info = zcu.typeToFunc(fn_ty).?; - var call_info = try self.resolveCallingConventionValues(fn_info); + + const allocator = self.gpa; + + const var_args = try allocator.alloc(Type, args.len - fn_info.param_types.len); + defer allocator.free(var_args); + for (var_args, arg_tys[fn_info.param_types.len..]) |*var_arg, arg_ty| var_arg.* = arg_ty; + + var call_info = try self.resolveCallingConventionValues(fn_info, var_args); defer call_info.deinit(self); // We need a properly aligned and sized call frame to be able to call this function. 
@@ -5318,16 +5325,19 @@ const CallMCValues = struct { fn resolveCallingConventionValues( self: *Self, fn_info: InternPool.Key.FuncType, + var_args: []const Type, ) !CallMCValues { const zcu = self.bin_file.comp.module.?; const ip = &zcu.intern_pool; - const param_types = try self.gpa.alloc(Type, fn_info.param_types.len); + const param_types = try self.gpa.alloc(Type, fn_info.param_types.len + var_args.len); defer self.gpa.free(param_types); for (param_types[0..fn_info.param_types.len], fn_info.param_types.get(ip)) |*dest, src| { dest.* = Type.fromInterned(src); } + for (param_types[fn_info.param_types.len..], var_args) |*param_ty, arg_ty| + param_ty.* = self.promoteVarArg(arg_ty); const cc = fn_info.cc; var result: CallMCValues = .{ @@ -5514,3 +5524,33 @@ pub fn errUnionErrorOffset(payload_ty: Type, zcu: *Module) u64 { return 0; } } + +fn promoteInt(self: *Self, ty: Type) Type { + const mod = self.bin_file.comp.module.?; + const int_info: InternPool.Key.IntType = switch (ty.toIntern()) { + .bool_type => .{ .signedness = .unsigned, .bits = 1 }, + else => if (ty.isAbiInt(mod)) ty.intInfo(mod) else return ty, + }; + for ([_]Type{ + Type.c_int, Type.c_uint, + Type.c_long, Type.c_ulong, + Type.c_longlong, Type.c_ulonglong, + }) |promote_ty| { + const promote_info = promote_ty.intInfo(mod); + if (int_info.signedness == .signed and promote_info.signedness == .unsigned) continue; + if (int_info.bits + @intFromBool(int_info.signedness == .unsigned and + promote_info.signedness == .signed) <= promote_info.bits) return promote_ty; + } + return ty; +} + +fn promoteVarArg(self: *Self, ty: Type) Type { + if (!ty.isRuntimeFloat()) return self.promoteInt(ty); + switch (ty.floatBits(self.target.*)) { + 32, 64 => return Type.f64, + else => |float_bits| { + assert(float_bits == self.target.c_type_bit_size(.longdouble)); + return Type.c_longdouble; + }, + } +} From c457f35da565e589071db9e55d0866f35cd095dc Mon Sep 17 00:00:00 2001 From: David Rubin Date: Sun, 28 Apr 2024 22:22:26 -0700 Subject: [PATCH 04/24] riscv: arbitrary sized arrays --- src/arch/riscv64/CodeGen.zig | 539 ++++++++++++++++++++++-------- src/arch/riscv64/abi.zig | 13 + src/arch/riscv64/bits.zig | 27 +- test/behavior/alignof.zig | 2 - test/behavior/array.zig | 4 - test/behavior/cast.zig | 3 - test/behavior/eval.zig | 1 - test/behavior/extern.zig | 2 - test/behavior/fn.zig | 2 - test/behavior/for.zig | 1 - test/behavior/generics.zig | 1 - test/behavior/maximum_minimum.zig | 1 - test/behavior/member_func.zig | 1 - test/behavior/memset.zig | 3 - test/behavior/packed-struct.zig | 1 - test/behavior/ptrcast.zig | 1 - test/behavior/ptrfromint.zig | 1 - test/behavior/return_address.zig | 1 - test/behavior/struct.zig | 10 - test/behavior/tuple.zig | 2 - test/behavior/union.zig | 5 - 21 files changed, 428 insertions(+), 193 deletions(-) diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 592c1941d6..f2432bc57d 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -666,10 +666,11 @@ fn restoreState(self: *Self, state: State, deaths: []const Air.Inst.Index, compt if (current_maybe_inst) |current_inst| { try self.inst_tracking.getPtr(current_inst).?.trackSpill(self, current_inst); } - { + blk: { + const inst = target_maybe_inst orelse break :blk; const reg = RegisterManager.regAtTrackedIndex(@intCast(index)); self.register_manager.freeReg(reg); - self.register_manager.getRegAssumeFree(reg, target_maybe_inst); + self.register_manager.getRegAssumeFree(reg, inst); } if (target_maybe_inst) |target_inst| { 
self.inst_tracking.getPtr(target_inst).?.trackMaterialize( @@ -2272,13 +2273,10 @@ fn airAddWithOverflow(self: *Self, inst: Air.Inst.Index) !void { }, }); - const add_result_frame: FrameAddr = .{ - .index = offset.index, - .off = offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(0, zcu))), - }; - try self.genSetStack( + try self.genSetMem( + .{ .frame = offset.index }, + offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(0, zcu))), lhs_ty, - add_result_frame, add_result, ); @@ -2289,14 +2287,10 @@ fn airAddWithOverflow(self: *Self, inst: Air.Inst.Index) !void { .{ .register = add_result_reg }, lhs_ty, ); - - const overflow_frame: FrameAddr = .{ - .index = offset.index, - .off = offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(1, zcu))), - }; - try self.genSetStack( + try self.genSetMem( + .{ .frame = offset.index }, + offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(1, zcu))), Type.u1, - overflow_frame, overflow_mcv, ); @@ -2340,14 +2334,16 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const tuple_ty = self.typeOfIndex(inst); - // TODO: optimization, set this to true. needs the other struct access stuff to support - // accessing registers. - const result_mcv = try self.allocRegOrMem(inst, false); + const result_mcv = try self.allocRegOrMem(inst, true); const result_off: i32 = @intCast(tuple_ty.structFieldOffset(0, zcu)); const overflow_off: i32 = @intCast(tuple_ty.structFieldOffset(1, zcu)); - try self.genSetStack(lhs_ty, result_mcv.offset(result_off).load_frame, dest); + try self.genCopy( + lhs_ty, + result_mcv.offset(result_off), + dest, + ); if (int_info.bits >= 8 and math.isPowerOfTwo(int_info.bits)) { if (int_info.signedness == .unsigned) { @@ -2385,9 +2381,9 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { lhs_ty, ); - try self.genSetStack( + try self.genCopy( lhs_ty, - result_mcv.offset(overflow_off).load_frame, + result_mcv.offset(overflow_off), overflow_mcv, ); @@ -2668,9 +2664,9 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(eu_ty, zcu)); const pl_off: i32 = @intCast(errUnionPayloadOffset(pl_ty, zcu)); const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, zcu)); - try self.genSetStack(pl_ty, .{ .index = frame_index, .off = pl_off }, .undef); + try self.genSetMem(.{ .frame = frame_index }, pl_off, pl_ty, .undef); const operand = try self.resolveInst(ty_op.operand); - try self.genSetStack(err_ty, .{ .index = frame_index, .off = err_off }, operand); + try self.genSetMem(.{ .frame = frame_index }, err_off, err_ty, operand); break :result .{ .load_frame = .{ .index = frame_index } }; }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); @@ -2854,7 +2850,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { switch (array_mcv) { .register => { const frame_index = try self.allocFrameIndex(FrameAlloc.initType(array_ty, zcu)); - try self.genSetStack(array_ty, .{ .index = frame_index }, array_mcv); + try self.genSetMem(.{ .frame = frame_index }, 0, array_ty, array_mcv); try self.genSetReg(Type.usize, addr_reg, .{ .lea_frame = .{ .index = frame_index } }); }, .load_frame => |frame_addr| { @@ -2891,9 +2887,48 @@ fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void { } fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void { + const zcu = self.bin_file.comp.module.?; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Bin, 
ty_pl.payload).data; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement ptr_elem_ptr for {}", .{self.target.cpu.arch}); + + const result = result: { + const elem_ptr_ty = self.typeOfIndex(inst); + const base_ptr_ty = self.typeOf(extra.lhs); + + const base_ptr_mcv = try self.resolveInst(extra.lhs); + const base_ptr_lock: ?RegisterLock = switch (base_ptr_mcv) { + .register => |reg| self.register_manager.lockRegAssumeUnused(reg), + else => null, + }; + defer if (base_ptr_lock) |lock| self.register_manager.unlockReg(lock); + + if (elem_ptr_ty.ptrInfo(zcu).flags.vector_index != .none) { + break :result if (self.reuseOperand(inst, extra.lhs, 0, base_ptr_mcv)) + base_ptr_mcv + else + try self.copyToNewRegister(inst, base_ptr_mcv); + } + + const elem_ty = base_ptr_ty.elemType2(zcu); + const elem_abi_size = elem_ty.abiSize(zcu); + const index_ty = self.typeOf(extra.rhs); + const index_mcv = try self.resolveInst(extra.rhs); + const index_lock: ?RegisterLock = switch (index_mcv) { + .register => |reg| self.register_manager.lockRegAssumeUnused(reg), + else => null, + }; + defer if (index_lock) |lock| self.register_manager.unlockReg(lock); + + const offset_reg = try self.elemOffset(index_ty, index_mcv, elem_abi_size); + const offset_reg_lock = self.register_manager.lockRegAssumeUnused(offset_reg); + defer self.register_manager.unlockReg(offset_reg_lock); + + if (true) return self.fail("TODO: airPtrElemPtr", .{}); + + // TODO: something is breaking here dunno + + break :result try self.binOp(.ptr_add, base_ptr_mcv, base_ptr_ty, .{ .register = offset_reg }, base_ptr_ty); + }; return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none }); } @@ -4563,22 +4598,18 @@ fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) !void { .off = -dst_reg_off.off, } }, }), - .indirect => |ro| { - const src_reg = try self.copyToTmpRegister(ty, src_mcv); - - _ = try self.addInst(.{ - .tag = .pseudo, - .ops = .pseudo_store_rm, - .data = .{ .rm = .{ - .r = src_reg, - .m = .{ - .base = .{ .reg = ro.reg }, - .mod = .{ .rm = .{ .disp = ro.off, .size = self.memSize(ty) } }, - }, - } }, - }); - }, - .load_frame => |frame| return self.genSetStack(ty, frame, src_mcv), + .indirect => |reg_off| try self.genSetMem( + .{ .reg = reg_off.reg }, + reg_off.off, + ty, + src_mcv, + ), + .load_frame => |frame_addr| try self.genSetMem( + .{ .frame = frame_addr.index }, + frame_addr.off, + ty, + src_mcv, + ), .memory => return self.fail("TODO: genCopy memory", .{}), .register_pair => |dst_regs| { const src_info: ?struct { addr_reg: Register, addr_lock: RegisterLock } = switch (src_mcv) { @@ -4617,88 +4648,6 @@ fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) !void { } } -fn genSetStack( - self: *Self, - ty: Type, - frame: FrameAddr, - src_mcv: MCValue, -) InnerError!void { - const zcu = self.bin_file.comp.module.?; - const abi_size: u32 = @intCast(ty.abiSize(zcu)); - - switch (src_mcv) { - .none => return, - .dead => unreachable, - .undef => { - if (!self.wantSafety()) return; - try self.genSetStack(ty, frame, .{ .immediate = 0xaaaaaaaaaaaaaaaa }); - }, - .immediate, - .lea_frame, - => { - // TODO: remove this lock in favor of a copyToTmpRegister when we load 64 bit immediates with - // a register allocation. 
- const reg, const reg_lock = try self.allocReg(); - defer self.register_manager.unlockReg(reg_lock); - - try self.genSetReg(ty, reg, src_mcv); - - return self.genSetStack(ty, frame, .{ .register = reg }); - }, - .register => |reg| { - switch (abi_size) { - 1, 2, 4, 8 => { - _ = try self.addInst(.{ - .tag = .pseudo, - .ops = .pseudo_store_rm, - .data = .{ .rm = .{ - .r = reg, - .m = .{ - .base = .{ .frame = frame.index }, - .mod = .{ - .rm = .{ - .size = self.memSize(ty), - .disp = frame.off, - }, - }, - }, - } }, - }); - }, - else => unreachable, // register can hold a max of 8 bytes - } - }, - .register_pair => |pair| { - var part_disp: i32 = frame.off; - for (try self.splitType(ty), pair) |src_ty, src_reg| { - try self.genSetStack( - src_ty, - .{ .index = frame.index, .off = part_disp }, - .{ .register = src_reg }, - ); - part_disp += @intCast(src_ty.abiSize(zcu)); - } - }, - .load_frame, - .indirect, - .load_symbol, - => { - if (abi_size <= 8) { - const reg = try self.copyToTmpRegister(ty, src_mcv); - return self.genSetStack(ty, frame, .{ .register = reg }); - } - - try self.genInlineMemcpy( - .{ .lea_frame = frame }, - src_mcv.address(), - .{ .immediate = abi_size }, - ); - }, - .air_ref => |ref| try self.genSetStack(ty, frame, try self.resolveInst(ref)), - else => return self.fail("TODO: genSetStack {s}", .{@tagName(src_mcv)}), - } -} - fn genInlineMemcpy( self: *Self, dst_ptr: MCValue, @@ -4805,6 +4754,86 @@ fn genInlineMemcpy( }); } +fn genInlineMemset( + self: *Self, + dst_ptr: MCValue, + src_value: MCValue, + len: MCValue, +) !void { + const regs = try self.register_manager.allocRegs(3, .{null} ** 3, tp); + const locks = self.register_manager.lockRegsAssumeUnused(3, regs); + defer for (locks) |lock| self.register_manager.unlockReg(lock); + + const count = regs[0]; + const src = regs[1]; + const dst = regs[2]; + + try self.genSetReg(Type.usize, count, len); + try self.genSetReg(Type.usize, src, src_value); + try self.genSetReg(Type.usize, dst, dst_ptr); + + // sb src, 0(dst) + const first_inst = try self.addInst(.{ + .tag = .sb, + .ops = .rri, + .data = .{ + .i_type = .{ + .rd = dst, + .rs1 = src, + .imm12 = Immediate.s(0), + }, + }, + }); + + // dec count by 1 + _ = try self.addInst(.{ + .tag = .addi, + .ops = .rri, + .data = .{ + .i_type = .{ + .rd = count, + .rs1 = count, + .imm12 = Immediate.s(-1), + }, + }, + }); + + // branch if count is 0 + _ = try self.addInst(.{ + .tag = .beq, + .ops = .rr_inst, + .data = .{ + .b_type = .{ + .inst = @intCast(self.mir_instructions.len + 4), // points after the last inst + .rs1 = count, + .rs2 = .zero, + }, + }, + }); + + // increment the pointers + _ = try self.addInst(.{ + .tag = .addi, + .ops = .rri, + .data = .{ + .i_type = .{ + .rd = dst, + .rs1 = dst, + .imm12 = Immediate.s(1), + }, + }, + }); + + // jump back to start of loop + _ = try self.addInst(.{ + .tag = .pseudo, + .ops = .pseudo_j, + .data = .{ + .inst = first_inst, + }, + }); +} + /// Sets the value of `src_mcv` into `reg`. Assumes you have a lock on it. fn genSetReg(self: *Self, ty: Type, reg: Register, src_mcv: MCValue) InnerError!void { const zcu = self.bin_file.comp.module.?; @@ -4965,7 +4994,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_mcv: MCValue) InnerError! 
2 => .lh, 4 => .lw, 8 => .ld, - else => return self.fail("TODO: genSetReg for size {d}", .{abi_size}), + else => return std.debug.panic("TODO: genSetReg for size {d}", .{abi_size}), }; _ = try self.addInst(.{ @@ -4998,6 +5027,126 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_mcv: MCValue) InnerError! } } +fn genSetMem( + self: *Self, + base: Memory.Base, + disp: i32, + ty: Type, + src_mcv: MCValue, +) InnerError!void { + const mod = self.bin_file.comp.module.?; + const abi_size: u32 = @intCast(ty.abiSize(mod)); + const dst_ptr_mcv: MCValue = switch (base) { + .reg => |base_reg| .{ .register_offset = .{ .reg = base_reg, .off = disp } }, + .frame => |base_frame_index| .{ .lea_frame = .{ .index = base_frame_index, .off = disp } }, + .reloc => |base_symbol| .{ .lea_symbol = .{ .sym = base_symbol.sym_index, .off = disp } }, + }; + switch (src_mcv) { + .none, + .unreach, + .dead, + .reserved_frame, + => unreachable, + .undef => try self.genInlineMemset( + dst_ptr_mcv, + src_mcv, + .{ .immediate = abi_size }, + ), + + .register_offset, + .memory, + .indirect, + .load_frame, + .lea_frame, + .load_symbol, + .lea_symbol, + => switch (abi_size) { + 0 => {}, + 1, 2, 4, 8 => { + const src_reg = try self.copyToTmpRegister(ty, src_mcv); + const src_lock = self.register_manager.lockRegAssumeUnused(src_reg); + defer self.register_manager.unlockReg(src_lock); + + try self.genSetMem(base, disp, ty, .{ .register = src_reg }); + }, + else => try self.genInlineMemcpy( + dst_ptr_mcv, + src_mcv.address(), + .{ .immediate = abi_size }, + ), + }, + .register => |reg| { + const mem_size = switch (base) { + .frame => |base_fi| mem_size: { + assert(disp >= 0); + const frame_abi_size = self.frame_allocs.items(.abi_size)[@intFromEnum(base_fi)]; + const frame_spill_pad = self.frame_allocs.items(.spill_pad)[@intFromEnum(base_fi)]; + assert(frame_abi_size - frame_spill_pad - disp >= abi_size); + break :mem_size if (frame_abi_size - frame_spill_pad - disp == abi_size) + frame_abi_size + else + abi_size; + }, + else => abi_size, + }; + const src_size = math.ceilPowerOfTwoAssert(u32, abi_size); + const src_align = Alignment.fromNonzeroByteUnits(math.ceilPowerOfTwoAssert(u32, src_size)); + if (src_size > mem_size) { + const frame_index = try self.allocFrameIndex(FrameAlloc.init(.{ + .size = src_size, + .alignment = src_align, + })); + const frame_mcv: MCValue = .{ .load_frame = .{ .index = frame_index } }; + _ = try self.addInst(.{ + .tag = .pseudo, + .ops = .pseudo_store_rm, + .data = .{ + .rm = .{ .r = reg, .m = .{ + .base = .{ .frame = frame_index }, + .mod = .{ .rm = .{ + .size = Memory.Size.fromByteSize(src_size), + } }, + } }, + }, + }); + try self.genSetMem(base, disp, ty, frame_mcv); + try self.freeValue(frame_mcv); + } else _ = try self.addInst(.{ + .tag = .pseudo, + .ops = .pseudo_store_rm, + .data = .{ .rm = .{ + .r = reg, + .m = .{ + .base = base, + .mod = .{ .rm = .{ + .size = self.memSize(ty), + .disp = disp, + } }, + }, + } }, + }); + }, + .register_pair => |src_regs| { + var part_disp: i32 = disp; + for (try self.splitType(ty), src_regs) |src_ty, src_reg| { + try self.genSetMem(base, part_disp, src_ty, .{ .register = src_reg }); + part_disp += @intCast(src_ty.abiSize(mod)); + } + }, + .immediate => { + // TODO: remove this lock in favor of a copyToTmpRegister when we load 64 bit immediates with + // a register allocation. 
+ const reg, const reg_lock = try self.allocReg(); + defer self.register_manager.unlockReg(reg_lock); + + try self.genSetReg(ty, reg, src_mcv); + + return self.genSetMem(base, disp, ty, .{ .register = reg }); + }, + .air_ref => |src_ref| try self.genSetMem(base, disp, ty, try self.resolveInst(src_ref)), + } +} + fn airIntFromPtr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const result = result: { @@ -5099,13 +5248,83 @@ fn airAtomicStore(self: *Self, inst: Air.Inst.Index, order: std.builtin.AtomicOr } fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void { - _ = inst; - if (safety) { - // TODO if the value is undef, write 0xaa bytes to dest - } else { - // TODO if the value is undef, don't lower this instruction + const zcu = self.bin_file.comp.module.?; + const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; + + result: { + if (!safety and (try self.resolveInst(bin_op.rhs)) == .undef) break :result; + + const dst_ptr = try self.resolveInst(bin_op.lhs); + const dst_ptr_ty = self.typeOf(bin_op.lhs); + const dst_ptr_lock: ?RegisterLock = switch (dst_ptr) { + .register => |reg| self.register_manager.lockRegAssumeUnused(reg), + else => null, + }; + defer if (dst_ptr_lock) |lock| self.register_manager.unlockReg(lock); + + const src_val = try self.resolveInst(bin_op.rhs); + const elem_ty = self.typeOf(bin_op.rhs); + const src_val_lock: ?RegisterLock = switch (src_val) { + .register => |reg| self.register_manager.lockRegAssumeUnused(reg), + else => null, + }; + defer if (src_val_lock) |lock| self.register_manager.unlockReg(lock); + + const elem_abi_size: u31 = @intCast(elem_ty.abiSize(zcu)); + + if (elem_abi_size == 1) { + const ptr: MCValue = switch (dst_ptr_ty.ptrSize(zcu)) { + // TODO: this only handles slices stored in the stack + .Slice => dst_ptr, + .One => dst_ptr, + .C, .Many => unreachable, + }; + const len: MCValue = switch (dst_ptr_ty.ptrSize(zcu)) { + // TODO: this only handles slices stored in the stack + .Slice => dst_ptr.address().offset(8).deref(), + .One => .{ .immediate = dst_ptr_ty.childType(zcu).arrayLen(zcu) }, + .C, .Many => unreachable, + }; + const len_lock: ?RegisterLock = switch (len) { + .register => |reg| self.register_manager.lockRegAssumeUnused(reg), + else => null, + }; + defer if (len_lock) |lock| self.register_manager.unlockReg(lock); + + try self.genInlineMemset(ptr, src_val, len); + break :result; + } + + // Store the first element, and then rely on memcpy copying forwards. + // Length zero requires a runtime check - so we handle arrays specially + // here to elide it. 
+ switch (dst_ptr_ty.ptrSize(zcu)) { + .Slice => return self.fail("TODO: airMemset Slices", .{}), + .One => { + const elem_ptr_ty = try zcu.singleMutPtrType(elem_ty); + + const len = dst_ptr_ty.childType(zcu).arrayLen(zcu); + + assert(len != 0); // prevented by Sema + try self.store(dst_ptr, src_val, elem_ptr_ty, elem_ty); + + const second_elem_ptr_reg, const second_elem_ptr_lock = try self.allocReg(); + defer self.register_manager.unlockReg(second_elem_ptr_lock); + + const second_elem_ptr_mcv: MCValue = .{ .register = second_elem_ptr_reg }; + + try self.genSetReg(Type.usize, second_elem_ptr_reg, .{ .register_offset = .{ + .reg = try self.copyToTmpRegister(Type.usize, dst_ptr), + .off = elem_abi_size, + } }); + + const bytes_to_copy: MCValue = .{ .immediate = elem_abi_size * (len - 1) }; + try self.genInlineMemcpy(second_elem_ptr_mcv, dst_ptr, bytes_to_copy); + }, + .C, .Many => unreachable, + } } - return self.fail("TODO implement airMemset for {}", .{self.target.cpu.arch}); + return self.finishAir(inst, .unreach, .{ bin_op.lhs, bin_op.rhs, .none }); } fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void { @@ -5190,32 +5409,66 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { const len: usize = @intCast(result_ty.arrayLen(zcu)); const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const elements: []const Air.Inst.Ref = @ptrCast(self.air.extra[ty_pl.payload..][0..len]); + const result: MCValue = result: { switch (result_ty.zigTypeTag(zcu)) { .Struct => { const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(result_ty, zcu)); + if (result_ty.containerLayout(zcu) == .@"packed") { + const struct_obj = zcu.typeToStruct(result_ty).?; + try self.genInlineMemset( + .{ .lea_frame = .{ .index = frame_index } }, + .{ .immediate = 0 }, + .{ .immediate = result_ty.abiSize(zcu) }, + ); - if (result_ty.containerLayout(zcu) == .@"packed") {} else for (elements, 0..) |elem, elem_i| { + for (elements, 0..) |elem, elem_i_usize| { + const elem_i: u32 = @intCast(elem_i_usize); + if ((try result_ty.structFieldValueComptime(zcu, elem_i)) != null) continue; + + const elem_ty = result_ty.structFieldType(elem_i, zcu); + const elem_bit_size: u32 = @intCast(elem_ty.bitSize(zcu)); + if (elem_bit_size > 64) { + return self.fail( + "TODO airAggregateInit implement packed structs with large fields", + .{}, + ); + } + + const elem_abi_size: u32 = @intCast(elem_ty.abiSize(zcu)); + const elem_abi_bits = elem_abi_size * 8; + const elem_off = zcu.structPackedFieldBitOffset(struct_obj, elem_i); + const elem_byte_off: i32 = @intCast(elem_off / elem_abi_bits * elem_abi_size); + const elem_bit_off = elem_off % elem_abi_bits; + const elem_mcv = try self.resolveInst(elem); + + _ = elem_byte_off; + _ = elem_bit_off; + + const elem_lock = switch (elem_mcv) { + .register => |reg| self.register_manager.lockReg(reg), + .immediate => |imm| lock: { + if (imm == 0) continue; + break :lock null; + }, + else => null, + }; + defer if (elem_lock) |lock| self.register_manager.unlockReg(lock); + + return self.fail("TODO: airAggregateInit packed structs", .{}); + } + } else for (elements, 0..) 
|elem, elem_i| { if ((try result_ty.structFieldValueComptime(zcu, elem_i)) != null) continue; const elem_ty = result_ty.structFieldType(elem_i, zcu); const elem_off: i32 = @intCast(result_ty.structFieldOffset(elem_i, zcu)); const elem_mcv = try self.resolveInst(elem); - - const elem_frame: FrameAddr = .{ - .index = frame_index, - .off = elem_off, - }; - try self.genSetStack( - elem_ty, - elem_frame, - elem_mcv, - ); + try self.genSetMem(.{ .frame = frame_index }, elem_off, elem_ty, elem_mcv); } + break :result .{ .load_frame = .{ .index = frame_index } }; }, - else => return self.fail("TODO: airAggregateInit {}", .{result_ty.fmt(zcu)}), + else => return self.fail("TODO: airAggregate {}", .{result_ty.fmt(zcu)}), } - break :result .{ .register = .zero }; }; if (elements.len <= Liveness.bpi - 1) { diff --git a/src/arch/riscv64/abi.zig b/src/arch/riscv64/abi.zig index 35f5659685..e9d0db7136 100644 --- a/src/arch/riscv64/abi.zig +++ b/src/arch/riscv64/abi.zig @@ -169,6 +169,19 @@ pub fn classifySystem(ty: Type, zcu: *Module) [8]Class { return memory_class; }, + .Array => { + const ty_size = ty.abiSize(zcu); + if (ty_size <= 8) { + result[0] = .integer; + return result; + } + if (ty_size <= 16) { + result[0] = .integer; + result[1] = .integer; + return result; + } + return memory_class; + }, else => |bad_ty| std.debug.panic("classifySystem {s}", .{@tagName(bad_ty)}), } } diff --git a/src/arch/riscv64/bits.zig b/src/arch/riscv64/bits.zig index eef0828cdb..26f275a157 100644 --- a/src/arch/riscv64/bits.zig +++ b/src/arch/riscv64/bits.zig @@ -35,10 +35,10 @@ pub const Memory = struct { pub fn fromByteSize(size: u64) Size { return switch (size) { - 1 => .byte, - 2 => .hword, - 4 => .word, - 8 => .dword, + 1...1 => .byte, + 2...2 => .hword, + 3...4 => .word, + 5...8 => .dword, else => unreachable, }; } @@ -149,10 +149,8 @@ pub const Immediate = union(enum) { pub const Register = enum(u6) { // zig fmt: off - x0, x1, x2, x3, x4, x5, x6, x7, - x8, x9, x10, x11, x12, x13, x14, x15, - x16, x17, x18, x19, x20, x21, x22, x23, - x24, x25, x26, x27, x28, x29, x30, x31, + + // general purpose registers zero, // zero ra, // return address. caller saved @@ -166,6 +164,13 @@ pub const Register = enum(u6) { a2, a3, a4, a5, a6, a7, // fn args. caller saved. s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, // saved registers. callee saved. 
t3, t4, t5, t6, // caller saved + + x0, x1, x2, x3, x4, x5, x6, x7, + x8, x9, x10, x11, x12, x13, x14, x15, + x16, x17, x18, x19, x20, x21, x22, x23, + x24, x25, x26, x27, x28, x29, x30, x31, + + // zig fmt: on /// Returns the unique 5-bit ID of this register which is used in @@ -177,6 +182,12 @@ pub const Register = enum(u6) { pub fn dwarfLocOp(reg: Register) u8 { return @as(u8, reg.id()); } + + pub fn bitSize(reg: Register) u32 { + return switch (@intFromEnum(reg)) { + @intFromEnum(Register.zero)...@intFromEnum(Register.x31) => 64, + }; + } }; pub const FrameIndex = enum(u32) { diff --git a/test/behavior/alignof.zig b/test/behavior/alignof.zig index a3e71a254f..e08a42cf19 100644 --- a/test/behavior/alignof.zig +++ b/test/behavior/alignof.zig @@ -29,8 +29,6 @@ test "comparison of @alignOf(T) against zero" { } test "correct alignment for elements and slices of aligned array" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - var buf: [1024]u8 align(64) = undefined; var start: usize = 1; var end: usize = undefined; diff --git a/test/behavior/array.zig b/test/behavior/array.zig index 49a03c05e2..a99b10cd3b 100644 --- a/test/behavior/array.zig +++ b/test/behavior/array.zig @@ -231,7 +231,6 @@ test "nested arrays of integers" { test "implicit comptime in array type size" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var arr: [plusOne(10)]bool = undefined; _ = &arr; @@ -505,7 +504,6 @@ test "anonymous literal in array" { test "access the null element of a null terminated array" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -778,8 +776,6 @@ test "slicing array of zero-sized values" { } test "array init with no result pointer sets field result types" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - const S = struct { // A function parameter has a result type, but no result pointer. 
fn f(arr: [1]u32) u32 { diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig index 858a14cea1..208f0a9c63 100644 --- a/test/behavior/cast.zig +++ b/test/behavior/cast.zig @@ -1695,7 +1695,6 @@ test "peer type resolution: const sentinel slice and mutable non-sentinel slice" if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest(comptime T: type, comptime s: T) !void { @@ -1866,7 +1865,6 @@ test "peer type resolution: C pointer and @TypeOf(null)" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var a: [*c]c_int = 0x1000; _ = &a; @@ -2240,7 +2238,6 @@ test "peer type resolution: C pointer and many pointer" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var buf = "hello".*; diff --git a/test/behavior/eval.zig b/test/behavior/eval.zig index d08743ff3d..945870ff24 100644 --- a/test/behavior/eval.zig +++ b/test/behavior/eval.zig @@ -73,7 +73,6 @@ fn constExprEvalOnSingleExprBlocksFn(x: i32, b: bool) i32 { test "constant expressions" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var array: [array_size]u8 = undefined; _ = &array; diff --git a/test/behavior/extern.zig b/test/behavior/extern.zig index 135f5e5648..9469b4dc21 100644 --- a/test/behavior/extern.zig +++ b/test/behavior/extern.zig @@ -20,7 +20,6 @@ test "function extern symbol" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const a = @extern(*const fn () callconv(.C) i32, .{ .name = "a_mystery_function" }); try expect(a() == 4567); @@ -34,7 +33,6 @@ test "function extern symbol matches extern decl" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { extern fn another_mystery_function() u32; diff --git a/test/behavior/fn.zig b/test/behavior/fn.zig index b6eafeefc1..befc5d509b 100644 --- a/test/behavior/fn.zig +++ b/test/behavior/fn.zig @@ -414,8 +414,6 @@ test "ability to give comptime types and non comptime types to same parameter" { } test "function with inferred error set but returning no error" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - 
const S = struct { fn foo() !void {} }; diff --git a/test/behavior/for.zig b/test/behavior/for.zig index 1eac03ec79..29fea7fe97 100644 --- a/test/behavior/for.zig +++ b/test/behavior/for.zig @@ -487,7 +487,6 @@ test "ref counter that starts at zero" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; for ([_]usize{ 0, 1, 2 }, 0..) |i, j| { try expectEqual(i, j); diff --git a/test/behavior/generics.zig b/test/behavior/generics.zig index 7ed75f0ead..efe568191c 100644 --- a/test/behavior/generics.zig +++ b/test/behavior/generics.zig @@ -117,7 +117,6 @@ test "function with return type type" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var list: List(i32) = undefined; var list2: List(i32) = undefined; diff --git a/test/behavior/maximum_minimum.zig b/test/behavior/maximum_minimum.zig index d08bc82828..5543201e74 100644 --- a/test/behavior/maximum_minimum.zig +++ b/test/behavior/maximum_minimum.zig @@ -300,7 +300,6 @@ test "@min/@max notices bounds from vector types when element of comptime-known if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .avx)) return error.SkipZigTest; diff --git a/test/behavior/member_func.zig b/test/behavior/member_func.zig index 1563ad7a4a..36fd7f3cdd 100644 --- a/test/behavior/member_func.zig +++ b/test/behavior/member_func.zig @@ -76,7 +76,6 @@ test "@field field calls" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect(@field(HasFuncs, "one")(0) == 1); try expect(@field(HasFuncs, "two")(0) == 2); diff --git a/test/behavior/memset.zig b/test/behavior/memset.zig index 185c6fafe1..4db69b8fce 100644 --- a/test/behavior/memset.zig +++ b/test/behavior/memset.zig @@ -73,7 +73,6 @@ test "memset with bool element" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var buf: [5]bool = undefined; @memset(&buf, true); @@ -86,7 +85,6 @@ test "memset with 1-byte struct element" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { x: bool }; var buf: [5]S = undefined; @@ -100,7 +98,6 @@ test "memset with 1-byte array element" { if 
(builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const A = [1]bool; var buf: [5]A = undefined; diff --git a/test/behavior/packed-struct.zig b/test/behavior/packed-struct.zig index 89289d6063..dd4086912b 100644 --- a/test/behavior/packed-struct.zig +++ b/test/behavior/packed-struct.zig @@ -435,7 +435,6 @@ test "nested packed struct field pointers" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // ubsan unaligned pointer access - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (native_endian != .little) return error.SkipZigTest; // Byte aligned packed struct field pointers have not been implemented yet const S2 = packed struct { diff --git a/test/behavior/ptrcast.zig b/test/behavior/ptrcast.zig index fc8a8b7482..b5a628e197 100644 --- a/test/behavior/ptrcast.zig +++ b/test/behavior/ptrcast.zig @@ -58,7 +58,6 @@ fn testReinterpretStructWrappedBytesAsInteger() !void { test "reinterpret bytes of an array into an extern struct" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testReinterpretBytesAsExternStruct(); try comptime testReinterpretBytesAsExternStruct(); diff --git a/test/behavior/ptrfromint.zig b/test/behavior/ptrfromint.zig index 5e4c6175c3..cc5edade80 100644 --- a/test/behavior/ptrfromint.zig +++ b/test/behavior/ptrfromint.zig @@ -47,7 +47,6 @@ test "@ptrFromInt creates allowzero zero pointer" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const ptr = @as(*allowzero u32, @ptrFromInt(0)); try expectEqual(@as(usize, 0), @intFromPtr(ptr)); diff --git a/test/behavior/return_address.zig b/test/behavior/return_address.zig index 675e0e6191..3e8c18c04a 100644 --- a/test/behavior/return_address.zig +++ b/test/behavior/return_address.zig @@ -10,7 +10,6 @@ test "return address" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; _ = retAddr(); // TODO: #14938 diff --git a/test/behavior/struct.zig b/test/behavior/struct.zig index 602be7e95e..9693201d22 100644 --- a/test/behavior/struct.zig +++ b/test/behavior/struct.zig @@ -111,7 +111,6 @@ fn testMutation(foo: *StructFoo) void { test "struct byval assign" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var foo1: StructFoo = undefined; var foo2: StructFoo = undefined; @@ -300,7 +299,6 @@ const Val = struct { test "struct point to self" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // 
TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var root: Node = undefined; root.val.x = 1; @@ -1023,7 +1021,6 @@ test "struct with 0-length union array field" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const U = union { a: u32, @@ -1732,7 +1729,6 @@ test "extern struct field pointer has correct alignment" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -1863,8 +1859,6 @@ test "comptimeness of optional and error union payload is analyzed properly" { } test "initializer uses own alignment" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - const S = struct { x: u32 = @alignOf(@This()) + 1, }; @@ -1876,8 +1870,6 @@ test "initializer uses own alignment" { } test "initializer uses own size" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - const S = struct { x: u32 = @sizeOf(@This()) + 1, }; @@ -1889,8 +1881,6 @@ test "initializer uses own size" { } test "initializer takes a pointer to a variable inside its struct" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - const namespace = struct { const S = struct { s: *S = &S.instance, diff --git a/test/behavior/tuple.zig b/test/behavior/tuple.zig index 736bbad806..8c683f2cac 100644 --- a/test/behavior/tuple.zig +++ b/test/behavior/tuple.zig @@ -579,8 +579,6 @@ test "comptime fields in tuple can be initialized" { } test "tuple default values" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - const T = struct { usize, usize = 123, diff --git a/test/behavior/union.zig b/test/behavior/union.zig index 004774bd17..a6a452bd09 100644 --- a/test/behavior/union.zig +++ b/test/behavior/union.zig @@ -1744,7 +1744,6 @@ test "union with 128 bit integer" { test "memset extern union" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const U = extern union { foo: u8, @@ -1766,7 +1765,6 @@ test "memset extern union" { test "memset packed union" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const U = packed union { a: u32, @@ -1977,8 +1975,6 @@ test "reinterpret packed union inside packed struct" { } test "inner struct initializer uses union layout" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - const namespace = struct { const U = union { a: struct { @@ -2004,7 +2000,6 @@ test "inner struct initializer uses union layout" { test "inner struct initializer uses packed union layout" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const namespace = struct { const U = packed union { From 55b28c7e4438b3b8404a0fd703aad45db9dfe2ff Mon Sep 17 00:00:00 2001 From: David Rubin Date: Thu, 2 May 2024 02:07:28 -0700 Subject: [PATCH 05/24] riscv: PRO member function calls this is enough 
progress for us to be able to call `stdout.write`! --- src/arch/riscv64/CodeGen.zig | 363 ++++++++++++++++++++++++++++------- src/arch/riscv64/abi.zig | 7 +- 2 files changed, 291 insertions(+), 79 deletions(-) diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index f2432bc57d..3aa01c3b1c 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -932,6 +932,25 @@ fn gen(self: *Self) !void { const backpatch_fp_add = try self.addPseudo(.pseudo_dead); const backpatch_spill_callee_preserved_regs = try self.addPseudo(.pseudo_dead); + switch (self.ret_mcv.long) { + .none, .unreach => {}, + .indirect => { + // The address where to store the return value for the caller is in a + // register which the callee is free to clobber. Therefore, we purposely + // spill it to stack immediately. + const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(Type.usize, mod)); + try self.genSetMem( + .{ .frame = frame_index }, + 0, + Type.usize, + self.ret_mcv.long.address().offset(-self.ret_mcv.short.indirect.off), + ); + self.ret_mcv.long = .{ .load_frame = .{ .index = frame_index } }; + tracking_log.debug("spill {} to {}", .{ self.ret_mcv.long, frame_index }); + }, + else => unreachable, + } + try self.genBody(self.air.getMainBody()); for (self.exitlude_jump_relocs.items) |jmp_reloc| { @@ -1306,12 +1325,13 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { var it = self.register_manager.free_registers.iterator(.{ .kind = .unset }); while (it.next()) |index| { const tracked_inst = self.register_manager.registers[index]; + tracking_log.debug("tracked inst: {}", .{tracked_inst}); const tracking = self.getResolvedInstValue(tracked_inst); for (tracking.getRegs()) |reg| { if (RegisterManager.indexOfRegIntoTracked(reg).? 
== index) break; - } else return self.fail( - \\%{} takes up these regs: {any}, however those regs don't use it - , .{ index, tracking.getRegs() }); + } else return std.debug.panic( + \\%{} takes up these regs: {any}, however these regs {any}, don't use it + , .{ tracked_inst, tracking.getRegs(), RegisterManager.regAtTrackedIndex(@intCast(index)) }); } } } @@ -1540,7 +1560,7 @@ fn symbolIndex(self: *Self) !u32 { const atom_index = try elf_file.zigObjectPtr().?.getOrCreateMetadataForDecl(elf_file, decl_index); break :blk atom_index; }, - else => return self.fail("TODO genSetReg load_symbol for {s}", .{@tagName(self.bin_file.tag)}), + else => return self.fail("TODO symbolIndex {s}", .{@tagName(self.bin_file.tag)}), }; } @@ -1961,7 +1981,7 @@ fn binOp( switch (lhs_ty.zigTypeTag(zcu)) { .Float => return self.fail("TODO binary operations on floats", .{}), .Vector => return self.fail("TODO binary operations on vectors", .{}), - .Int, .Enum => { + .Int, .Enum, .ErrorSet => { assert(lhs_ty.eql(rhs_ty, zcu)); const int_info = lhs_ty.intInfo(zcu); if (int_info.bits <= 64) { @@ -2304,8 +2324,127 @@ fn airAddWithOverflow(self: *Self, inst: Air.Inst.Index) !void { } fn airSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { - _ = inst; - return self.fail("TODO implement airSubWithOverflow for {}", .{self.target.cpu.arch}); + const zcu = self.bin_file.comp.module.?; + const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; + const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; + + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { + const lhs = try self.resolveInst(extra.lhs); + const rhs = try self.resolveInst(extra.rhs); + const lhs_ty = self.typeOf(extra.lhs); + const rhs_ty = self.typeOf(extra.rhs); + + const int_info = lhs_ty.intInfo(zcu); + + if (!math.isPowerOfTwo(int_info.bits) or !(int_info.bits >= 8)) { + return self.fail("TODO: airSubWithOverflow non-power of 2 and less than 8 bits", .{}); + } + + const tuple_ty = self.typeOfIndex(inst); + const result_mcv = try self.allocRegOrMem(inst, false); + const offset = result_mcv.load_frame; + + const lhs_reg, const lhs_lock = blk: { + if (lhs == .register) break :blk .{ lhs.register, null }; + + const lhs_reg, const lhs_lock = try self.allocReg(); + try self.genSetReg(lhs_ty, lhs_reg, lhs); + break :blk .{ lhs_reg, lhs_lock }; + }; + defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock); + + const rhs_reg, const rhs_lock = blk: { + if (rhs == .register) break :blk .{ rhs.register, null }; + + const rhs_reg, const rhs_lock = try self.allocReg(); + try self.genSetReg(rhs_ty, rhs_reg, rhs); + break :blk .{ rhs_reg, rhs_lock }; + }; + defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock); + + const dest_reg, const dest_lock = try self.allocReg(); + defer self.register_manager.unlockReg(dest_lock); + + switch (int_info.signedness) { + .unsigned => return self.fail("TODO: airSubWithOverflow unsigned", .{}), + .signed => { + switch (int_info.bits) { + 64 => { + // result + _ = try self.addInst(.{ + .tag = .sub, + .ops = .rrr, + .data = .{ .r_type = .{ + .rd = dest_reg, + .rs1 = lhs_reg, + .rs2 = rhs_reg, + } }, + }); + + try self.genSetMem( + .{ .frame = offset.index }, + offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(0, zcu))), + lhs_ty, + .{ .register = dest_reg }, + ); + + // overflow check + const overflow_reg = try self.copyToTmpRegister(Type.usize, .{ .immediate = 0 }); + + _ = try self.addInst(.{ + .tag = .slt, + .ops = .rrr, + .data = .{ .r_type = .{ 
+ .rd = overflow_reg, + .rs1 = overflow_reg, + .rs2 = rhs_reg, + } }, + }); + + _ = try self.addInst(.{ + .tag = .slt, + .ops = .rrr, + .data = .{ .r_type = .{ + .rd = rhs_reg, + .rs1 = rhs_reg, + .rs2 = lhs_reg, + } }, + }); + + _ = try self.addInst(.{ + .tag = .xor, + .ops = .rrr, + .data = .{ .r_type = .{ + .rd = lhs_reg, + .rs1 = overflow_reg, + .rs2 = rhs_reg, + } }, + }); + + const overflow_mcv = try self.binOp( + .cmp_neq, + .{ .register = overflow_reg }, + Type.usize, + .{ .register = rhs_reg }, + Type.usize, + ); + + try self.genSetMem( + .{ .frame = offset.index }, + offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(1, zcu))), + Type.u1, + overflow_mcv, + ); + + break :result result_mcv; + }, + else => |int_bits| return self.fail("TODO: airSubWithOverflow signed {}", .{int_bits}), + } + }, + } + }; + + return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none }); } fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { @@ -2644,8 +2783,25 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { /// T to E!T fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { + const zcu = self.bin_file.comp.module.?; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement wrap errunion payload for {}", .{self.target.cpu.arch}); + + const eu_ty = ty_op.ty.toType(); + const pl_ty = eu_ty.errorUnionPayload(zcu); + const err_ty = eu_ty.errorUnionSet(zcu); + const operand = try self.resolveInst(ty_op.operand); + + const result: MCValue = result: { + if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .{ .immediate = 0 }; + + const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(eu_ty, zcu)); + const pl_off: i32 = @intCast(errUnionPayloadOffset(pl_ty, zcu)); + const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, zcu)); + try self.genSetMem(.{ .frame = frame_index }, pl_off, pl_ty, operand); + try self.genSetMem(.{ .frame = frame_index }, err_off, err_ty, .{ .immediate = 0 }); + break :result .{ .load_frame = .{ .index = frame_index } }; + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } @@ -3361,8 +3517,6 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { .rs1 = dst_reg, } }, }); - - return self.fail("TODO: airStructFieldVal register with field_off > 0", .{}); } break :result if (field_off == 0) dst_mcv else try self.copyToNewRegister(inst, dst_mcv); @@ -3444,7 +3598,6 @@ fn genArgDbgInfo(self: Self, inst: Air.Inst.Index, mcv: MCValue) !void { } fn airArg(self: *Self, inst: Air.Inst.Index) !void { - const zcu = self.bin_file.comp.module.?; var arg_index = self.arg_index; // we skip over args that have no bits @@ -3453,31 +3606,10 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { const src_mcv = self.args[arg_index]; - const arg_ty = self.typeOfIndex(inst); - const dst_mcv = switch (src_mcv) { - .register => dst: { - const frame = try self.allocFrameIndex(FrameAlloc.init(.{ - .size = Type.usize.abiSize(zcu), - .alignment = Type.usize.abiAlignment(zcu), - })); - const dst_mcv: MCValue = .{ .load_frame = .{ .index = frame } }; - try self.genCopy(Type.usize, dst_mcv, src_mcv); - break :dst dst_mcv; - }, - .register_pair => dst: { - const frame = try self.allocFrameIndex(FrameAlloc.init(.{ - .size = Type.usize.abiSize(zcu) * 2, - .alignment = Type.usize.abiAlignment(zcu), - 
})); - const dst_mcv: MCValue = .{ .load_frame = .{ .index = frame } }; - try self.genCopy(arg_ty, dst_mcv, src_mcv); - break :dst dst_mcv; - }, - .load_frame => src_mcv, - else => return self.fail("TODO: airArg {s}", .{@tagName(src_mcv)}), - }; + const dst_mcv = try self.allocRegOrMem(inst, false); + try self.genCopy(arg_ty, dst_mcv, src_mcv); try self.genArgDbgInfo(inst, src_mcv); break :result dst_mcv; @@ -3612,7 +3744,68 @@ fn genCall( stack_frame_align.* = stack_frame_align.max(needed_call_frame.abi_align); } - for (call_info.args, 0..) |mc_arg, arg_i| try self.genCopy(arg_tys[arg_i], mc_arg, args[arg_i]); + var reg_locks = std.ArrayList(?RegisterLock).init(allocator); + defer reg_locks.deinit(); + try reg_locks.ensureTotalCapacity(8); + defer for (reg_locks.items) |reg_lock| if (reg_lock) |lock| self.register_manager.unlockReg(lock); + + const frame_indices = try allocator.alloc(FrameIndex, args.len); + defer allocator.free(frame_indices); + + switch (call_info.return_value.long) { + .none, .unreach => {}, + .indirect => |reg_off| try self.register_manager.getReg(reg_off.reg, null), + else => unreachable, + } + for (call_info.args, args, arg_tys, frame_indices) |dst_arg, src_arg, arg_ty, *frame_index| { + switch (dst_arg) { + .none => {}, + .register => |reg| { + try self.register_manager.getReg(reg, null); + try reg_locks.append(self.register_manager.lockReg(reg)); + }, + .register_pair => |regs| { + for (regs) |reg| try self.register_manager.getReg(reg, null); + try reg_locks.appendSlice(&self.register_manager.lockRegs(2, regs)); + }, + .indirect => |reg_off| { + frame_index.* = try self.allocFrameIndex(FrameAlloc.initType(arg_ty, zcu)); + try self.genSetMem(.{ .frame = frame_index.* }, 0, arg_ty, src_arg); + try self.register_manager.getReg(reg_off.reg, null); + try reg_locks.append(self.register_manager.lockReg(reg_off.reg)); + }, + else => return self.fail("TODO: genCall set arg {s}", .{@tagName(dst_arg)}), + } + } + + switch (call_info.return_value.long) { + .none, .unreach => {}, + .indirect => |reg_off| { + const ret_ty = Type.fromInterned(fn_info.return_type); + const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(ret_ty, zcu)); + try self.genSetReg(Type.usize, reg_off.reg, .{ + .lea_frame = .{ .index = frame_index, .off = -reg_off.off }, + }); + call_info.return_value.short = .{ .load_frame = .{ .index = frame_index } }; + try reg_locks.append(self.register_manager.lockReg(reg_off.reg)); + }, + else => unreachable, + } + + for (call_info.args, arg_tys, args, frame_indices) |dst_arg, arg_ty, src_arg, frame_index| { + switch (dst_arg) { + .register_pair => try self.genCopy(arg_ty, dst_arg, src_arg), + .register => |dst_reg| try self.genSetReg( + arg_ty, + dst_reg, + src_arg, + ), + .indirect => |reg_off| try self.genSetReg(Type.usize, reg_off.reg, .{ + .lea_frame = .{ .index = frame_index, .off = -reg_off.off }, + }), + else => return self.fail("TODO: genCall actual set {s}", .{@tagName(dst_arg)}), + } + } // Due to incremental compilation, how function calls are generated depends // on linking. 
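The hunks above stage call arguments in two passes (first reserving and locking every destination register and spilling indirect arguments to frame slots, then writing the registers and frame addresses), and reserve a frame slot whose address is handed to the callee when the return value is indirect. The airRet hunk below is the callee-side mirror: it stores the result through that pointer. For orientation, here is a minimal standalone Zig sketch of the kind of call this convention serves; the names `Big` and `makeBig` are invented for this example and do not appear in the patch. A 32-byte struct does not fit the a0/a1 return register pair, so the RISC-V calling convention returns it through a caller-provided pointer:

const std = @import("std");

/// 32 bytes: too large for the a0/a1 register pair, so the value is
/// returned by reference through a hidden pointer argument.
const Big = struct {
    values: [4]u64,
};

fn makeBig(seed: u64) Big {
    // Conceptually, the caller reserved a frame slot for the result and
    // passed its address; this return writes through that pointer, as the
    // airRet hunk below arranges.
    return .{ .values = .{ seed, seed + 1, seed + 2, seed + 3 } };
}

pub fn main() void {
    const b = makeBig(10);
    std.debug.print("{any}\n", .{b.values});
}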
@@ -3715,9 +3908,10 @@ fn airRet(self: *Self, inst: Air.Inst.Index, safety: bool) !void { defer self.register_manager.unlockReg(lock); try self.genSetReg(Type.usize, reg_off.reg, self.ret_mcv.long); - try self.genCopy( + try self.genSetMem( + .{ .reg = reg_off.reg }, + reg_off.off, ret_ty, - .{ .register_offset = reg_off }, .{ .air_ref = un_op }, ); }, @@ -3745,6 +3939,7 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { switch (self.ret_mcv.short) { .none => {}, .register, .register_pair => try self.load(self.ret_mcv.short, ptr, ptr_ty), + .indirect => |reg_off| try self.genSetReg(ptr_ty, reg_off.reg, ptr), else => unreachable, } self.ret_mcv.liveOut(self, inst); @@ -4058,7 +4253,7 @@ fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, eu_ty: Type, eu_mcv: MCValue) _ = maybe_inst; - const err_off = errUnionErrorOffset(eu_ty.errorUnionPayload(zcu), zcu); + const err_off: u31 = @intCast(errUnionErrorOffset(eu_ty.errorUnionPayload(zcu), zcu)); switch (eu_mcv) { .register => |reg| { @@ -4081,15 +4276,25 @@ fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, eu_ty: Type, eu_mcv: MCValue) ); } - return_mcv = try self.binOp( + return try self.binOp( .cmp_neq, return_mcv, Type.u16, .{ .immediate = 0 }, Type.u16, ); - - return return_mcv; + }, + .load_frame => |frame_addr| { + return self.binOp( + .cmp_neq, + .{ .load_frame = .{ + .index = frame_addr.index, + .off = frame_addr.off + err_off, + } }, + Type.anyerror, + .{ .immediate = 0 }, + Type.anyerror, + ); }, else => return self.fail("TODO implement isErr for {}", .{eu_mcv}), } @@ -4839,7 +5044,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_mcv: MCValue) InnerError! const zcu = self.bin_file.comp.module.?; const abi_size: u32 = @intCast(ty.abiSize(zcu)); - if (abi_size > 8) return self.fail("tried to set reg with size {}", .{abi_size}); + if (abi_size > 8) return std.debug.panic("tried to set reg with size {}", .{abi_size}); switch (src_mcv) { .dead => unreachable, @@ -4924,7 +5129,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_mcv: MCValue) InnerError! if (src_reg.id() == reg.id()) return; - // mov reg, src_reg + // mv reg, src_reg _ = try self.addInst(.{ .tag = .pseudo, .ops = .pseudo_mv, @@ -4934,20 +5139,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_mcv: MCValue) InnerError! } }, }); }, - .register_pair => |pair| try self.genSetReg(ty, reg, .{ .register = pair[0] }), - .memory => |addr| { - try self.genSetReg(ty, reg, .{ .immediate = addr }); - - _ = try self.addInst(.{ - .tag = .ld, - .ops = .rri, - .data = .{ .i_type = .{ - .rd = reg, - .rs1 = reg, - .imm12 = Immediate.s(0), - } }, - }); - }, + .register_pair => return self.fail("genSetReg should we allow reg -> reg_pair?", .{}), .load_frame => |frame| { _ = try self.addInst(.{ .tag = .pseudo, @@ -4966,28 +5158,49 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_mcv: MCValue) InnerError! 
} }, }); }, - .lea_frame => |frame| { + .memory => |addr| { + try self.genSetReg(ty, reg, .{ .immediate = addr }); + + _ = try self.addInst(.{ + .tag = .ld, + .ops = .rri, + .data = .{ .i_type = .{ + .rd = reg, + .rs1 = reg, + .imm12 = Immediate.s(0), + } }, + }); + }, + .lea_frame, .register_offset => { _ = try self.addInst(.{ .tag = .pseudo, .ops = .pseudo_lea_rm, .data = .{ .rm = .{ .r = reg, - .m = .{ - .base = .{ .frame = frame.index }, - .mod = .{ - .rm = .{ - .size = self.memSize(ty), - .disp = frame.off, + .m = switch (src_mcv) { + .register_offset => |reg_off| .{ + .base = .{ .reg = reg_off.reg }, + .mod = .{ + .rm = .{ + .size = self.memSize(ty), + .disp = reg_off.off, + }, }, }, + .lea_frame => |frame| .{ + .base = .{ .frame = frame.index }, + .mod = .{ + .rm = .{ + .size = self.memSize(ty), + .disp = frame.off, + }, + }, + }, + else => unreachable, }, } }, }); }, - .load_symbol => { - try self.genSetReg(ty, reg, src_mcv.address()); - try self.genSetReg(ty, reg, .{ .indirect = .{ .reg = reg } }); - }, .indirect => |reg_off| { const load_tag: Mir.Inst.Tag = switch (abi_size) { 1 => .lb, @@ -5022,6 +5235,10 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_mcv: MCValue) InnerError! }) }, }); }, + .load_symbol => { + try self.genSetReg(ty, reg, src_mcv.address()); + try self.genSetReg(ty, reg, .{ .indirect = .{ .reg = reg } }); + }, .air_ref => |ref| try self.genSetReg(ty, reg, try self.resolveInst(ref)), else => return self.fail("TODO: genSetReg {s}", .{@tagName(src_mcv)}), } @@ -5052,7 +5269,6 @@ fn genSetMem( src_mcv, .{ .immediate = abi_size }, ), - .register_offset, .memory, .indirect, @@ -5100,14 +5316,15 @@ fn genSetMem( _ = try self.addInst(.{ .tag = .pseudo, .ops = .pseudo_store_rm, - .data = .{ - .rm = .{ .r = reg, .m = .{ + .data = .{ .rm = .{ + .r = reg, + .m = .{ .base = .{ .frame = frame_index }, .mod = .{ .rm = .{ .size = Memory.Size.fromByteSize(src_size), } }, - } }, - }, + }, + } }, }); try self.genSetMem(base, disp, ty, frame_mcv); try self.freeValue(frame_mcv); diff --git a/src/arch/riscv64/abi.zig b/src/arch/riscv64/abi.zig index e9d0db7136..41edd60c67 100644 --- a/src/arch/riscv64/abi.zig +++ b/src/arch/riscv64/abi.zig @@ -149,12 +149,7 @@ pub fn classifySystem(ty: Type, zcu: *Module) [8]Class { // anyerror!void can fit into one register if (payload_bits == 0) return result; - if (payload_bits <= 64) { - result[1] = .integer; - return result; - } - - std.debug.panic("TODO: classifySystem ErrorUnion > 64 bit payload", .{}); + return memory_class; }, .Struct => { const layout = ty.containerLayout(zcu); From 05de6c279bb4b36bd8751659984a9918624f7108 Mon Sep 17 00:00:00 2001 From: David Rubin Date: Thu, 2 May 2024 03:33:56 -0700 Subject: [PATCH 06/24] riscv: `std.fmt.format` running - implements `airSlice`, `airBitAnd`, `airBitOr`, `airShr`. - got a basic design going for the `airErrorName` but for some reason it simply returns empty bytes. will investigate further. 
- only generating `.got.zig` entries when not compiling an object or shared library - reduced the total amount of ops a mnemonic can have to 3, simplifying the logic --- src/arch/riscv64/CodeGen.zig | 207 ++++++++++++++++-- src/arch/riscv64/Emit.zig | 8 +- src/arch/riscv64/Encoding.zig | 4 + src/arch/riscv64/Lower.zig | 4 +- src/arch/riscv64/bits.zig | 7 + src/arch/riscv64/encoder.zig | 3 +- test/behavior/align.zig | 3 - test/behavior/array.zig | 1 - test/behavior/basic.zig | 6 - test/behavior/bitcast.zig | 5 - test/behavior/call.zig | 6 - test/behavior/cast.zig | 7 - test/behavior/comptime_memory.zig | 4 - test/behavior/defer.zig | 1 - test/behavior/empty_union.zig | 4 - test/behavior/enum.zig | 2 - test/behavior/error.zig | 22 -- test/behavior/eval.zig | 5 - test/behavior/fn.zig | 11 - test/behavior/fn_delegation.zig | 1 - test/behavior/for.zig | 2 - test/behavior/generics.zig | 6 - test/behavior/incomplete_struct_param_tld.zig | 1 - test/behavior/inline_switch.zig | 1 - test/behavior/ir_block_deps.zig | 1 - test/behavior/math.zig | 2 - test/behavior/maximum_minimum.zig | 1 + test/behavior/member_func.zig | 1 - test/behavior/memset.zig | 2 - test/behavior/merge_error_sets.zig | 1 - test/behavior/optional.zig | 2 - test/behavior/packed-struct.zig | 3 +- test/behavior/ptrfromint.zig | 3 - test/behavior/sizeof_and_typeof.zig | 2 - test/behavior/slice.zig | 4 - test/behavior/struct.zig | 25 --- test/behavior/switch.zig | 2 - test/behavior/try.zig | 1 - test/behavior/tuple.zig | 6 - test/behavior/type.zig | 1 - test/behavior/underscore.zig | 1 - test/behavior/union.zig | 1 - test/behavior/var_args.zig | 10 - test/behavior/while.zig | 3 - 44 files changed, 216 insertions(+), 177 deletions(-) diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 3aa01c3b1c..13652a7bca 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -1609,10 +1609,7 @@ fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue { }; if (reg_ok) { - // Make sure the type can fit in a register before we try to allocate one. - const ptr_bits = self.target.ptrBitWidth(); - const ptr_bytes: u64 = @divExact(ptr_bits, 8); - if (abi_size <= ptr_bytes) { + if (abi_size <= 8) { if (self.register_manager.tryAllocReg(inst, gp)) |reg| { return .{ .register = reg }; } @@ -1625,7 +1622,7 @@ fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue { /// Allocates a register from the general purpose set and returns the Register and the Lock. /// -/// Up to the user to unlock the register later. +/// Up to the caller to unlock the register later. 
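+///
+/// Typical use, as seen at the call sites in this file:
+///
+///   const reg, const lock = try self.allocReg();
+///   defer self.register_manager.unlockReg(lock);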
fn allocReg(self: *Self) !struct { Register, RegisterLock } { const reg = try self.register_manager.allocReg(null, gp); const lock = self.register_manager.lockRegAssumeUnused(reg); @@ -1923,9 +1920,25 @@ fn airMinMax( } fn airSlice(self: *Self, inst: Air.Inst.Index) !void { + const zcu = self.bin_file.comp.module.?; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement slice for {}", .{self.target.cpu.arch}); + + const slice_ty = self.typeOfIndex(inst); + const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(slice_ty, zcu)); + + const ptr_ty = self.typeOf(bin_op.lhs); + try self.genSetMem(.{ .frame = frame_index }, 0, ptr_ty, .{ .air_ref = bin_op.lhs }); + + const len_ty = self.typeOf(bin_op.rhs); + try self.genSetMem( + .{ .frame = frame_index }, + @intCast(ptr_ty.abiSize(zcu)), + len_ty, + .{ .air_ref = bin_op.rhs }, + ); + + const result = MCValue{ .load_frame = .{ .index = frame_index } }; return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } @@ -2575,13 +2588,91 @@ fn airMod(self: *Self, inst: Air.Inst.Index) !void { fn airBitAnd(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement bitwise and for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { + const lhs = try self.resolveInst(bin_op.lhs); + const rhs = try self.resolveInst(bin_op.rhs); + + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); + + const lhs_reg, const lhs_lock = blk: { + if (lhs == .register) break :blk .{ lhs.register, null }; + + const lhs_reg, const lhs_lock = try self.allocReg(); + try self.genSetReg(lhs_ty, lhs_reg, lhs); + break :blk .{ lhs_reg, lhs_lock }; + }; + defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock); + + const rhs_reg, const rhs_lock = blk: { + if (rhs == .register) break :blk .{ rhs.register, null }; + + const rhs_reg, const rhs_lock = try self.allocReg(); + try self.genSetReg(rhs_ty, rhs_reg, rhs); + break :blk .{ rhs_reg, rhs_lock }; + }; + defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock); + + const dest_reg, const dest_lock = try self.allocReg(); + defer self.register_manager.unlockReg(dest_lock); + + _ = try self.addInst(.{ + .tag = .@"and", + .ops = .rrr, + .data = .{ .r_type = .{ + .rd = dest_reg, + .rs1 = lhs_reg, + .rs2 = rhs_reg, + } }, + }); + + break :result .{ .register = dest_reg }; + }; return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } fn airBitOr(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement bitwise or for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { + const lhs = try self.resolveInst(bin_op.lhs); + const rhs = try self.resolveInst(bin_op.rhs); + + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); + + const lhs_reg, const lhs_lock = blk: { + if (lhs == .register) break :blk .{ lhs.register, null }; + + const lhs_reg, const lhs_lock = try self.allocReg(); + try 
self.genSetReg(lhs_ty, lhs_reg, lhs); + break :blk .{ lhs_reg, lhs_lock }; + }; + defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock); + + const rhs_reg, const rhs_lock = blk: { + if (rhs == .register) break :blk .{ rhs.register, null }; + + const rhs_reg, const rhs_lock = try self.allocReg(); + try self.genSetReg(rhs_ty, rhs_reg, rhs); + break :blk .{ rhs_reg, rhs_lock }; + }; + defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock); + + const dest_reg, const dest_lock = try self.allocReg(); + defer self.register_manager.unlockReg(dest_lock); + + _ = try self.addInst(.{ + .tag = .@"or", + .ops = .rrr, + .data = .{ .r_type = .{ + .rd = dest_reg, + .rs1 = lhs_reg, + .rs2 = rhs_reg, + } }, + }); + + break :result .{ .register = dest_reg }; + }; return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } @@ -2612,7 +2703,14 @@ fn airShlSat(self: *Self, inst: Air.Inst.Index) !void { fn airShr(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement shr for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { + const lhs = try self.resolveInst(bin_op.lhs); + const rhs = try self.resolveInst(bin_op.rhs); + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); + + break :result try self.binOp(.shr, lhs, lhs_ty, rhs, rhs_ty); + }; return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } @@ -2671,6 +2769,10 @@ fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void { } break :result result; }, + .load_frame => |frame_addr| break :result .{ .load_frame = .{ + .index = frame_addr.index, + .off = frame_addr.off + @as(i32, @intCast(err_off)), + } }, else => return self.fail("TODO implement unwrap_err_err for {}", .{operand}), } }; @@ -3317,6 +3419,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { const zcu = self.bin_file.comp.module.?; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const elem_ty = self.typeOfIndex(inst); + const result: MCValue = result: { if (!elem_ty.hasRuntimeBits(zcu)) break :result .none; @@ -3326,8 +3429,11 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { if (self.liveness.isUnused(inst) and !is_volatile) break :result .unreach; + const elem_size = elem_ty.abiSize(zcu); + const dst_mcv: MCValue = blk: { - if (self.reuseOperand(inst, ty_op.operand, 0, ptr)) { + // Pointer is 8 bytes, and if the element is more than that, we cannot reuse it. + if (elem_size <= 8 and self.reuseOperand(inst, ty_op.operand, 0, ptr)) { // The MCValue that holds the pointer can be re-used as the value. 
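                // (reuse is gated on `elem_size <= 8` above: the loaded value
                // must fit in the register or frame slot that held the pointer)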
                break :blk ptr;
            } else {
@@ -3794,6 +3900,7 @@ fn genCall(
     for (call_info.args, arg_tys, args, frame_indices) |dst_arg, arg_ty, src_arg, frame_index| {
         switch (dst_arg) {
+            .none, .load_frame => {},
             .register_pair => try self.genCopy(arg_ty, dst_arg, src_arg),
             .register => |dst_reg| try self.genSetReg(
                 arg_ty,
                 dst_reg,
                 src_arg,
             ),
@@ -5573,6 +5680,7 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void {
     const addr_reg, const addr_lock = try self.allocReg();
     defer self.register_manager.unlockReg(addr_lock);

+    // this is now the base address of the error name table
     const lazy_sym = link.File.LazySymbol.initDecl(.const_data, null, zcu);
     if (self.bin_file.cast(link.File.Elf)) |elf_file| {
         const sym_index = elf_file.zigObjectPtr().?.getOrCreateMetadataForLazySymbol(elf_file, lazy_sym) catch |err|
@@ -5589,10 +5697,77 @@
     const end_reg, const end_lock = try self.allocReg();
     defer self.register_manager.unlockReg(end_lock);

-    _ = start_reg;
-    _ = end_reg;
+    // const tmp_reg, const tmp_lock = try self.allocReg();
+    // defer self.register_manager.unlockReg(tmp_lock);

-    return self.fail("TODO: airErrorName", .{});
+    // we move the base address forward by the following formula: base + (errno * 16)
+
+    // shifting left by 4 is the same as multiplying by 16
+    _ = try self.addInst(.{
+        .tag = .slli,
+        .ops = .rri,
+        .data = .{ .i_type = .{
+            .imm12 = Immediate.s(4),
+            .rd = err_reg,
+            .rs1 = err_reg,
+        } },
+    });
+
+    _ = try self.addInst(.{
+        .tag = .add,
+        .ops = .rrr,
+        .data = .{ .r_type = .{
+            .rd = addr_reg,
+            .rs1 = addr_reg,
+            .rs2 = err_reg,
+        } },
+    });
+
+    _ = try self.addInst(.{
+        .tag = .pseudo,
+        .ops = .pseudo_load_rm,
+        .data = .{
+            .rm = .{
+                .r = start_reg,
+                .m = .{
+                    .base = .{ .reg = addr_reg },
+                    .mod = .{ .off = 0 },
+                },
+            },
+        },
+    });
+
+    _ = try self.addInst(.{
+        .tag = .pseudo,
+        .ops = .pseudo_load_rm,
+        .data = .{
+            .rm = .{
+                .r = end_reg,
+                .m = .{
+                    .base = .{ .reg = addr_reg },
+                    .mod = .{ .off = 8 },
+                },
+            },
+        },
+    });
+
+    const dst_mcv = try self.allocRegOrMem(inst, false);
+    const frame = dst_mcv.load_frame;
+    try self.genSetMem(
+        .{ .frame = frame.index },
+        frame.off,
+        Type.usize,
+        .{ .register = start_reg },
+    );
+
+    try self.genSetMem(
+        .{ .frame = frame.index },
+        frame.off + 8,
+        Type.usize,
+        .{ .register = end_reg },
+    );
+
+    return self.finishAir(inst, dst_mcv, .{ un_op, .none, .none });
 }

 fn airSplat(self: *Self, inst: Air.Inst.Index) !void {
@@ -5881,7 +6056,11 @@ fn resolveCallingConventionValues(
     }

     for (param_types, result.args) |ty, *arg| {
-        assert(ty.hasRuntimeBitsIgnoreComptime(zcu));
+        if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+            assert(cc == .Unspecified);
+            arg.* = .none;
+            continue;
+        }

         var arg_mcv: [2]MCValue = undefined;
         var arg_mcv_i: usize = 0;
diff --git a/src/arch/riscv64/Emit.zig b/src/arch/riscv64/Emit.zig
index 6f136e7fb4..8107c6350f 100644
--- a/src/arch/riscv64/Emit.zig
+++ b/src/arch/riscv64/Emit.zig
@@ -42,6 +42,12 @@ pub fn emitMir(emit: *Emit) Error!void {
                 .enc = std.meta.activeTag(lowered_inst.encoding.data),
             }),
             .load_symbol_reloc => |symbol| {
+                const is_obj_or_static_lib = switch (emit.lower.output_mode) {
+                    .Exe => false,
+                    .Obj => true,
+                    .Lib => emit.lower.link_mode == .static,
+                };
+
                 if (emit.lower.bin_file.cast(link.File.Elf)) |elf_file| {
                     const atom_ptr = elf_file.symbol(symbol.atom_index).atom(elf_file).?;
                     const sym_index = elf_file.zigObjectPtr().?.symbol(symbol.sym_index);
@@ -50,7 +56,7 @@ pub fn emitMir(emit: *Emit) Error!void {
                     var hi_r_type: u32 =
@intFromEnum(std.elf.R_RISCV.HI20); var lo_r_type: u32 = @intFromEnum(std.elf.R_RISCV.LO12_I); - if (sym.flags.needs_zig_got) { + if (sym.flags.needs_zig_got and !is_obj_or_static_lib) { _ = try sym.getOrCreateZigGotEntry(sym_index, elf_file); hi_r_type = Elf.R_ZIG_GOT_HI20; diff --git a/src/arch/riscv64/Encoding.zig b/src/arch/riscv64/Encoding.zig index 3a0ef90584..46b6735084 100644 --- a/src/arch/riscv64/Encoding.zig +++ b/src/arch/riscv64/Encoding.zig @@ -16,6 +16,7 @@ pub const Mnemonic = enum { slli, srli, srai, + sllw, addi, jalr, @@ -77,6 +78,8 @@ pub const Mnemonic = enum { .slli => .{ .opcode = 0b0010011, .funct3 = 0b001, .funct7 = null }, .srli => .{ .opcode = 0b0010011, .funct3 = 0b101, .funct7 = null }, .srai => .{ .opcode = 0b0010011, .funct3 = 0b101, .funct7 = null, .offset = 1 << 10 }, + + .sllw => .{ .opcode = 0b0111011, .funct3 = 0b001, .funct7 = 0b0000000 }, .lui => .{ .opcode = 0b0110111, .funct3 = null, .funct7 = null }, .auipc => .{ .opcode = 0b0010111, .funct3 = null, .funct7 = null }, @@ -152,6 +155,7 @@ pub const InstEnc = enum { .slt, .sltu, + .sllw, .mul, .xor, .add, diff --git a/src/arch/riscv64/Lower.zig b/src/arch/riscv64/Lower.zig index dba902eaa4..d44c614b06 100644 --- a/src/arch/riscv64/Lower.zig +++ b/src/arch/riscv64/Lower.zig @@ -71,7 +71,7 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { switch (inst.ops) { .pseudo_load_rm => { - const tag: Encoding.Mnemonic = switch (rm.m.mod.rm.size) { + const tag: Encoding.Mnemonic = switch (rm.m.mod.size()) { .byte => .lb, .hword => .lh, .word => .lw, @@ -85,7 +85,7 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { }); }, .pseudo_store_rm => { - const tag: Encoding.Mnemonic = switch (rm.m.mod.rm.size) { + const tag: Encoding.Mnemonic = switch (rm.m.mod.size()) { .byte => .sb, .hword => .sh, .word => .sw, diff --git a/src/arch/riscv64/bits.zig b/src/arch/riscv64/bits.zig index 26f275a157..281ad80292 100644 --- a/src/arch/riscv64/bits.zig +++ b/src/arch/riscv64/bits.zig @@ -21,6 +21,13 @@ pub const Memory = struct { disp: i32 = 0, }, off: i32, + + pub fn size(mod: Mod) Size { + return switch (mod) { + .rm => |rm| rm.size, + .off => Size.dword, // assumed to be a register size + }; + } }; pub const Size = enum(u4) { diff --git a/src/arch/riscv64/encoder.zig b/src/arch/riscv64/encoder.zig index ddd4f5f437..5be753c426 100644 --- a/src/arch/riscv64/encoder.zig +++ b/src/arch/riscv64/encoder.zig @@ -11,12 +11,11 @@ pub const Instruction = struct { pub fn new(mnemonic: Encoding.Mnemonic, ops: []const Operand) !Instruction { const encoding = (try Encoding.findByMnemonic(mnemonic, ops)) orelse { - std.log.err("no encoding found for: {s} {s} {s} {s} {s}", .{ + std.log.err("no encoding found for: {s} [{s} {s} {s}]", .{ @tagName(mnemonic), @tagName(if (ops.len > 0) ops[0] else .none), @tagName(if (ops.len > 1) ops[1] else .none), @tagName(if (ops.len > 2) ops[2] else .none), - @tagName(if (ops.len > 3) ops[3] else .none), }); return error.InvalidInstruction; }; diff --git a/test/behavior/align.zig b/test/behavior/align.zig index 6497734993..3e22d8e175 100644 --- a/test/behavior/align.zig +++ b/test/behavior/align.zig @@ -54,7 +54,6 @@ fn addUnaligned(a: *align(1) const u32, b: *align(1) const u32) u32 { } test "@alignCast pointers" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO var x: u32 align(4) = 1; expectsOnly1(&x); try expect(x == 2); @@ -426,7 +425,6 @@ test "function callconv expression depends on generic parameter" { } test "runtime-known 
array index has best alignment possible" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO // take full advantage of over-alignment @@ -562,7 +560,6 @@ test "align(@alignOf(T)) T does not force resolution of T" { } test "align(N) on functions" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO diff --git a/test/behavior/array.zig b/test/behavior/array.zig index a99b10cd3b..81a34ca1a8 100644 --- a/test/behavior/array.zig +++ b/test/behavior/array.zig @@ -1047,7 +1047,6 @@ test "union that needs padding bytes inside an array" { test "runtime index of array of zero-bit values" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var runtime: struct { array: [1]void, index: usize } = undefined; runtime = .{ .array = .{{}}, .index = 0 }; diff --git a/test/behavior/basic.zig b/test/behavior/basic.zig index eabac35787..643f1141d1 100644 --- a/test/behavior/basic.zig +++ b/test/behavior/basic.zig @@ -513,7 +513,6 @@ var global_foo: *i32 = undefined; test "peer result location with typed parent, runtime condition, comptime prongs" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest(arg: i32) i32 { @@ -593,7 +592,6 @@ test "equality compare fn ptrs" { test "self reference through fn ptr field" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const A = struct { @@ -838,7 +836,6 @@ test "labeled block implicitly ends in a break" { test "catch in block has correct result location" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn open() error{A}!@This() { @@ -870,7 +867,6 @@ test "labeled block with runtime branch forwards its result location type to bre test "try in labeled block doesn't cast to wrong type" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { a: u32, @@ -1246,8 +1242,6 @@ test "pointer to tuple field can be dereferenced at comptime" { } test "proper value is returned from labeled block" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - const S = struct { fn hash(v: *u32, key: anytype) void { const Key = @TypeOf(key); diff --git a/test/behavior/bitcast.zig b/test/behavior/bitcast.zig index 28c797cef3..6d513a4ac7 100644 --- a/test/behavior/bitcast.zig +++ b/test/behavior/bitcast.zig @@ -250,7 +250,6 @@ test "bitcast packed struct to integer and back" { test "implicit cast to error union by returning" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) 
return error.SkipZigTest; const S = struct { fn entry() !void { @@ -280,8 +279,6 @@ test "comptime bitcast used in expression has the correct type" { } test "bitcast passed as tuple element" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - const S = struct { fn foo(args: anytype) !void { comptime assert(@TypeOf(args[0]) == f32); @@ -292,8 +289,6 @@ test "bitcast passed as tuple element" { } test "triple level result location with bitcast sandwich passed as tuple element" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - const S = struct { fn foo(args: anytype) !void { comptime assert(@TypeOf(args[0]) == f64); diff --git a/test/behavior/call.zig b/test/behavior/call.zig index 2f737f098c..8636955215 100644 --- a/test/behavior/call.zig +++ b/test/behavior/call.zig @@ -60,7 +60,6 @@ test "tuple parameters" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const add = struct { fn add(a: i32, b: i32) i32 { @@ -94,7 +93,6 @@ test "result location of function call argument through runtime condition and st if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const E = enum { a, b }; const S = struct { @@ -411,7 +409,6 @@ test "recursive inline call with comptime known argument" { test "inline while with @call" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn inc(a: *u32) void { @@ -427,8 +424,6 @@ test "inline while with @call" { } test "method call as parameter type" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - const S = struct { fn foo(x: anytype, y: @TypeOf(x).Inner()) @TypeOf(y) { return y; @@ -477,7 +472,6 @@ test "argument to generic function has correct result type" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn foo(_: anytype, e: enum { a, b }) bool { diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig index 208f0a9c63..a671010740 100644 --- a/test/behavior/cast.zig +++ b/test/behavior/cast.zig @@ -483,7 +483,6 @@ fn castToOptionalTypeError(z: i32) !void { test "implicitly cast from [0]T to anyerror![]T" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testCastZeroArrayToErrSliceMut(); try comptime testCastZeroArrayToErrSliceMut(); @@ -558,7 +557,6 @@ fn testCastConstArrayRefToConstSlice() !void { test "peer type resolution: error and [N]T" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return 
error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect(mem.eql(u8, try testPeerErrorAndArray(0), "OK")); comptime assert(mem.eql(u8, try testPeerErrorAndArray(0), "OK")); @@ -1157,7 +1155,6 @@ fn foobar(func: PFN_void) !void { test "cast function with an opaque parameter" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) { // https://github.com/ziglang/zig/issues/16845 @@ -1309,7 +1306,6 @@ fn incrementVoidPtrValue(value: ?*anyopaque) void { test "implicit cast *[0]T to E![]const u8" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var x = @as(anyerror![]const u8, &[0]u8{}); _ = &x; @@ -1496,7 +1492,6 @@ test "cast compatible optional types" { test "coerce undefined single-item pointer of array to error union of slice" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const a = @as([*]u8, undefined)[0..0]; var b: error{a}![]const u8 = a; @@ -2206,7 +2201,6 @@ test "peer type resolution: tuples with comptime fields" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const a = .{ 1, 2 }; const b = .{ @as(u32, 3), @as(i16, 4) }; @@ -2361,7 +2355,6 @@ test "cast builtins can wrap result in error union" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const MyEnum = enum(u32) { _ }; diff --git a/test/behavior/comptime_memory.zig b/test/behavior/comptime_memory.zig index 597ba62dd4..968b7be79d 100644 --- a/test/behavior/comptime_memory.zig +++ b/test/behavior/comptime_memory.zig @@ -408,8 +408,6 @@ test "mutate entire slice at comptime" { } test "dereference undefined pointer to zero-bit type" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - const p0: *void = undefined; try testing.expectEqual({}, p0.*); @@ -515,7 +513,5 @@ fn fieldPtrTest() u32 { return a.value; } test "pointer in aggregate field can mutate comptime state" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - try comptime std.testing.expect(fieldPtrTest() == 2); } diff --git a/test/behavior/defer.zig b/test/behavior/defer.zig index ba0d949a7d..4ea6f54787 100644 --- a/test/behavior/defer.zig +++ b/test/behavior/defer.zig @@ -162,7 +162,6 @@ test "reference to errdefer payload" { test "simple else prong doesn't emit an error for unreachable else prong" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn foo() error{Foo}!void { diff --git 
a/test/behavior/empty_union.zig b/test/behavior/empty_union.zig index a42dfda7e1..f05feacfaf 100644 --- a/test/behavior/empty_union.zig +++ b/test/behavior/empty_union.zig @@ -48,8 +48,6 @@ test "empty extern union" { } test "empty union passed as argument" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - const U = union(enum) { fn f(u: @This()) void { switch (u) {} @@ -59,8 +57,6 @@ test "empty union passed as argument" { } test "empty enum passed as argument" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - const E = enum { fn f(e: @This()) void { switch (e) {} diff --git a/test/behavior/enum.zig b/test/behavior/enum.zig index 28d6dccf29..cf899ed3ca 100644 --- a/test/behavior/enum.zig +++ b/test/behavior/enum.zig @@ -856,8 +856,6 @@ fn doALoopThing(id: EnumWithOneMember) void { } test "comparison operator on enum with one member is comptime-known" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - doALoopThing(EnumWithOneMember.Eof); } diff --git a/test/behavior/error.zig b/test/behavior/error.zig index 8db9703f51..fde116ffb1 100644 --- a/test/behavior/error.zig +++ b/test/behavior/error.zig @@ -31,7 +31,6 @@ fn shouldBeNotEqual(a: anyerror, b: anyerror) void { test "error binary operator" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const a = errBinaryOperatorG(true) catch 3; const b = errBinaryOperatorG(false) catch 3; @@ -63,14 +62,12 @@ pub fn baz() anyerror!i32 { test "error wrapping" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect((baz() catch unreachable) == 15); } test "unwrap simple value from error" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const i = unwrapSimpleValueFromErrorDo() catch unreachable; try expect(i == 13); @@ -81,7 +78,6 @@ fn unwrapSimpleValueFromErrorDo() anyerror!isize { test "error return in assignment" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; doErrReturnInAssignment() catch unreachable; } @@ -104,7 +100,6 @@ test "syntax: optional operator in front of error union operator" { test "widen cast integer payload of error union function call" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn errorable() !u64 { @@ -241,8 +236,6 @@ fn testExplicitErrorSetCast(set1: Set1) !void { } test "@errorCast on error unions" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - const S = struct { fn doTheTest() !void { { @@ -270,7 +263,6 @@ test "@errorCast on error unions" { test "comptime test error for empty error set" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testComptimeTestErrorEmptySet(1234); try comptime testComptimeTestErrorEmptySet(1234); @@ -306,8 +298,6 @@ test "inferred empty error set comptime catch" { } test "error inference with an empty set" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - const S = 
struct { const Struct = struct { pub fn func() (error{})!usize { @@ -362,7 +352,6 @@ fn quux_1() !i32 { test "error: Zero sized error set returned with value payload crash" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; _ = try foo3(0); _ = try comptime foo3(0); @@ -376,7 +365,6 @@ fn foo3(b: usize) Error!usize { test "error: Infer error set from literals" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; _ = nullLiteral("n") catch |err| handleErrors(err); _ = floatLiteral("n") catch |err| handleErrors(err); @@ -498,7 +486,6 @@ test "optional error set is the same size as error set" { test "nested catch" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn entry() !void { @@ -524,7 +511,6 @@ test "nested catch" { test "function pointer with return type that is error union with payload which is pointer of parent struct" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const Foo = struct { @@ -582,7 +568,6 @@ test "error payload type is correctly resolved" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const MyIntWrapper = struct { const Self = @This(); @@ -1039,7 +1024,6 @@ test "function called at runtime is properly analyzed for inferred error set" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn foo() !void { @@ -1063,7 +1047,6 @@ test "generic type constructed from inferred error set of unresolved function" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn write(_: void, bytes: []const u8) !usize { @@ -1079,8 +1062,6 @@ test "generic type constructed from inferred error set of unresolved function" { } test "errorCast to adhoc inferred error set" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - const S = struct { inline fn baz() !i32 { return @errorCast(err()); @@ -1093,8 +1074,6 @@ test "errorCast to adhoc inferred error set" { } test "errorCast from error sets to error unions" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - const err_union: Set1!void = @errorCast(error.A); try expectError(error.A, err_union); } @@ -1103,7 +1082,6 @@ test "result location initialization of error union with OPV 
payload" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; const S = struct { diff --git a/test/behavior/eval.zig b/test/behavior/eval.zig index 945870ff24..1e7af5f871 100644 --- a/test/behavior/eval.zig +++ b/test/behavior/eval.zig @@ -505,7 +505,6 @@ test "comptime shlWithOverflow" { test "const ptr to variable data changes at runtime" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect(foo_ref.name[0] == 'a'); foo_ref.name = "b"; @@ -720,8 +719,6 @@ fn loopNTimes(comptime n: usize) void { } test "variable inside inline loop that has different types on different iterations" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - try testVarInsideInlineLoop(.{ true, @as(u32, 42) }); } @@ -1643,8 +1640,6 @@ test "result of nested switch assigned to variable" { } test "inline for loop of functions returning error unions" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - const T1 = struct { fn v() error{}!usize { return 1; diff --git a/test/behavior/fn.zig b/test/behavior/fn.zig index befc5d509b..433e24af00 100644 --- a/test/behavior/fn.zig +++ b/test/behavior/fn.zig @@ -71,7 +71,6 @@ fn outer(y: u32) *const fn (u32) u32 { test "return inner function which references comptime variable of outer function" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const func = outer(10); try expect(func(3) == 7); @@ -81,7 +80,6 @@ test "discard the result of a function that returns a struct" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn entry() void { @@ -191,7 +189,6 @@ test "function with complex callconv and return type expressions" { test "pass by non-copying value" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect(addPointCoords(Point{ .x = 1, .y = 2 }) == 3); } @@ -207,7 +204,6 @@ fn addPointCoords(pt: Point) i32 { test "pass by non-copying value through var arg" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect((try addPointCoordsVar(Point{ .x = 1, .y = 2 })) == 3); } @@ -219,7 +215,6 @@ fn addPointCoordsVar(pt: anytype) !i32 { test "pass by non-copying value as method" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var pt = Point2{ .x = 1, .y = 2 }; try expect(pt.addPointCoords() == 3); @@ -236,7 +231,6 @@ const Point2 = struct { test "pass by non-copying value as method, which is generic" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - 
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var pt = Point3{ .x = 1, .y = 2 }; try expect(pt.addPointCoords(i32) == 3); @@ -292,7 +286,6 @@ test "implicit cast fn call result to optional in field result" { test "void parameters" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try voidFun(1, void{}, 2, {}); } @@ -424,7 +417,6 @@ test "function with inferred error set but returning no error" { test "import passed byref to function in return type" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn get() @import("std").ArrayListUnmanaged(i32) { @@ -541,7 +533,6 @@ test "function returns function returning type" { test "peer type resolution of inferred error set with non-void payload" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn openDataFile(mode: enum { read, write }) !u32 { @@ -584,8 +575,6 @@ test "lazy values passed to anytype parameter" { } test "pass and return comptime-only types" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - const S = struct { fn returnNull(comptime x: @Type(.Null)) @Type(.Null) { return x; diff --git a/test/behavior/fn_delegation.zig b/test/behavior/fn_delegation.zig index 6a3d46c15d..95dbfeb4b2 100644 --- a/test/behavior/fn_delegation.zig +++ b/test/behavior/fn_delegation.zig @@ -34,7 +34,6 @@ fn custom(comptime T: type, comptime num: u64) fn (T) u64 { test "fn delegation" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const foo = Foo{}; try expect(foo.one() == 11); diff --git a/test/behavior/for.zig b/test/behavior/for.zig index 29fea7fe97..337e7b6767 100644 --- a/test/behavior/for.zig +++ b/test/behavior/for.zig @@ -438,7 +438,6 @@ test "inline for with counter as the comptime-known" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var runtime_slice = "hello"; var runtime_i: usize = 3; @@ -471,7 +470,6 @@ test "inline for on tuple pointer" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { u32, u32, u32 }; var s: S = .{ 100, 200, 300 }; diff --git a/test/behavior/generics.zig b/test/behavior/generics.zig index efe568191c..6cbb2f0786 100644 --- a/test/behavior/generics.zig +++ b/test/behavior/generics.zig @@ -286,7 +286,6 @@ test "generic function instantiation turns into comptime call" { test "generic function with void and comptime parameter" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { x: i32 }; 
const namespace = struct { @@ -303,7 +302,6 @@ test "generic function with void and comptime parameter" { test "anonymous struct return type referencing comptime parameter" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { pub fn extraData(comptime T: type, index: usize) struct { data: T, end: usize } { @@ -394,7 +392,6 @@ test "extern function used as generic parameter" { test "generic struct as parameter type" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest(comptime Int: type, thing: struct { int: Int }) !void { @@ -435,7 +432,6 @@ test "null sentinel pointer passed as generic argument" { test "generic function passed as comptime argument" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doMath(comptime f: fn (type, i32, i32) error{Overflow}!i32, a: i32, b: i32) !void { @@ -461,7 +457,6 @@ test "return type of generic function is function pointer" { test "coerced function body has inequal value with its uncoerced body" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const A = B(i32, c); @@ -546,7 +541,6 @@ test "call generic function with from function called by the generic function" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64 and builtin.os.tag == .windows) return error.SkipZigTest; diff --git a/test/behavior/incomplete_struct_param_tld.zig b/test/behavior/incomplete_struct_param_tld.zig index 485156de04..4edf974dab 100644 --- a/test/behavior/incomplete_struct_param_tld.zig +++ b/test/behavior/incomplete_struct_param_tld.zig @@ -23,7 +23,6 @@ fn foo(a: A) i32 { test "incomplete struct param top level declaration" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const a = A{ .b = B{ diff --git a/test/behavior/inline_switch.zig b/test/behavior/inline_switch.zig index 0ae05c3857..1c1654f3b0 100644 --- a/test/behavior/inline_switch.zig +++ b/test/behavior/inline_switch.zig @@ -89,7 +89,6 @@ test "inline else bool" { test "inline else error" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const Err = error{ a, b, c }; var a = Err.a; diff --git a/test/behavior/ir_block_deps.zig b/test/behavior/ir_block_deps.zig index e3bb57cf89..a46ad2d8a8 100644 --- a/test/behavior/ir_block_deps.zig +++ b/test/behavior/ir_block_deps.zig @@ -21,7 +21,6 @@ test "ir block deps" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == 
.stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect((foo(1) catch unreachable) == 0); try expect((foo(2) catch unreachable) == 0); diff --git a/test/behavior/math.zig b/test/behavior/math.zig index 66f86ede89..62df9c1b60 100644 --- a/test/behavior/math.zig +++ b/test/behavior/math.zig @@ -1471,8 +1471,6 @@ fn testShrExact(x: u8) !void { } test "shift left/right on u0 operand" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - const S = struct { fn doTheTest() !void { var x: u0 = 0; diff --git a/test/behavior/maximum_minimum.zig b/test/behavior/maximum_minimum.zig index 5543201e74..d08bc82828 100644 --- a/test/behavior/maximum_minimum.zig +++ b/test/behavior/maximum_minimum.zig @@ -300,6 +300,7 @@ test "@min/@max notices bounds from vector types when element of comptime-known if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .avx)) return error.SkipZigTest; diff --git a/test/behavior/member_func.zig b/test/behavior/member_func.zig index 36fd7f3cdd..bb1e1e1769 100644 --- a/test/behavior/member_func.zig +++ b/test/behavior/member_func.zig @@ -31,7 +31,6 @@ test "standard field calls" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect(HasFuncs.one(0) == 1); try expect(HasFuncs.two(0) == 2); diff --git a/test/behavior/memset.zig b/test/behavior/memset.zig index 4db69b8fce..2def7e6ee2 100644 --- a/test/behavior/memset.zig +++ b/test/behavior/memset.zig @@ -7,7 +7,6 @@ test "@memset on array pointers" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testMemsetArray(); try comptime testMemsetArray(); @@ -167,7 +166,6 @@ test "zero keys with @memset" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const Keys = struct { up: bool, diff --git a/test/behavior/merge_error_sets.zig b/test/behavior/merge_error_sets.zig index b1f7f69d56..492cb27699 100644 --- a/test/behavior/merge_error_sets.zig +++ b/test/behavior/merge_error_sets.zig @@ -13,7 +13,6 @@ fn foo() C!void { test "merge error sets" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (foo()) { @panic("unexpected"); diff --git a/test/behavior/optional.zig b/test/behavior/optional.zig index 7884fec6cd..5af4e42b5c 100644 --- a/test/behavior/optional.zig +++ b/test/behavior/optional.zig @@ -615,8 +615,6 @@ test "cast slice to const slice nested in error union and 
optional" { } test "variable of optional of noreturn" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - var null_opv: ?noreturn = null; _ = &null_opv; try std.testing.expectEqual(@as(?noreturn, null), null_opv); diff --git a/test/behavior/packed-struct.zig b/test/behavior/packed-struct.zig index dd4086912b..4c54ce730e 100644 --- a/test/behavior/packed-struct.zig +++ b/test/behavior/packed-struct.zig @@ -238,7 +238,6 @@ test "regular in irregular packed struct" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const Irregular = packed struct { bar: Regular = Regular{}, @@ -435,6 +434,7 @@ test "nested packed struct field pointers" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // ubsan unaligned pointer access + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (native_endian != .little) return error.SkipZigTest; // Byte aligned packed struct field pointers have not been implemented yet const S2 = packed struct { @@ -1190,7 +1190,6 @@ test "packed struct field pointer aligned properly" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const Foo = packed struct { a: i32, diff --git a/test/behavior/ptrfromint.zig b/test/behavior/ptrfromint.zig index cc5edade80..0ff54c9416 100644 --- a/test/behavior/ptrfromint.zig +++ b/test/behavior/ptrfromint.zig @@ -3,8 +3,6 @@ const builtin = @import("builtin"); const expectEqual = std.testing.expectEqual; test "casting integer address to function pointer" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - addressToFunction(); comptime addressToFunction(); } @@ -19,7 +17,6 @@ test "mutate through ptr initialized with constant ptrFromInt value" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; forceCompilerAnalyzeBranchHardCodedPtrDereference(false); } diff --git a/test/behavior/sizeof_and_typeof.zig b/test/behavior/sizeof_and_typeof.zig index 506baa2666..2f714f69d3 100644 --- a/test/behavior/sizeof_and_typeof.zig +++ b/test/behavior/sizeof_and_typeof.zig @@ -158,7 +158,6 @@ test "@TypeOf() has no runtime side effects" { test "branching logic inside @TypeOf" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { var data: i32 = 0; @@ -412,7 +411,6 @@ test "Extern function calls, dereferences and field access in @TypeOf" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return 
error.SkipZigTest; const Test = struct { fn test_fn_1(a: c_long) @TypeOf(c_fopen("test", "r").*) { diff --git a/test/behavior/slice.zig b/test/behavior/slice.zig index a1f38b1dfe..8614c78804 100644 --- a/test/behavior/slice.zig +++ b/test/behavior/slice.zig @@ -124,7 +124,6 @@ test "slice of type" { test "generic malloc free" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const a = memAlloc(u8, 10) catch unreachable; memFree(u8, a); @@ -874,8 +873,6 @@ test "slice of void" { } test "slice with dereferenced value" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - var a: usize = 0; const idx: *usize = &a; _ = blk: { @@ -1004,7 +1001,6 @@ test "sentinel-terminated 0-length slices" { if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const u32s: [4]u32 = [_]u32{ 0, 1, 2, 3 }; diff --git a/test/behavior/struct.zig b/test/behavior/struct.zig index 9693201d22..eaf795f861 100644 --- a/test/behavior/struct.zig +++ b/test/behavior/struct.zig @@ -92,7 +92,6 @@ test "structs" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var foo: StructFoo = undefined; @memset(@as([*]u8, @ptrCast(&foo))[0..@sizeOf(StructFoo)], 0); @@ -175,7 +174,6 @@ const MemberFnTestFoo = struct { test "call member function directly" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const instance = MemberFnTestFoo{ .x = 1234 }; const result = MemberFnTestFoo.member(instance); @@ -184,7 +182,6 @@ test "call member function directly" { test "store member function in variable" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const instance = MemberFnTestFoo{ .x = 1234 }; const memberFn = MemberFnTestFoo.member; @@ -206,7 +203,6 @@ const MemberFnRand = struct { test "return struct byval from function" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const Bar = struct { x: i32, @@ -255,7 +251,6 @@ test "usingnamespace within struct scope" { test "struct field init with catch" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -631,7 +626,6 @@ fn getC(data: *const BitField1) u2 { test "default struct initialization fields" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { a: i32 = 1234, @@ -807,7 +801,6 @@ test "fn with C calling convention returns struct by 
value" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn entry() !void { @@ -907,8 +900,6 @@ test "anonymous struct literal syntax" { } test "fully anonymous struct" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - const S = struct { fn doTheTest() !void { try dump(.{ @@ -931,8 +922,6 @@ test "fully anonymous struct" { } test "fully anonymous list literal" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - const S = struct { fn doTheTest() !void { try dump(.{ @as(u32, 1234), @as(f64, 12.34), true, "hi" }); @@ -980,7 +969,6 @@ test "tuple element initialized with fn call" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -1041,7 +1029,6 @@ test "type coercion of anon struct literal to struct" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const S2 = struct { @@ -1081,7 +1068,6 @@ test "type coercion of pointer to anon struct literal to pointer to struct" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const S2 = struct { @@ -1296,7 +1282,6 @@ test "initialize struct with empty literal" { test "loading a struct pointer perfoms a copy" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { a: i32, @@ -1558,7 +1543,6 @@ test "discarded struct initialization works as expected" { test "function pointer in struct returns the struct" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const A = struct { const A = @This(); @@ -1766,8 +1750,6 @@ test "extern struct field pointer has correct alignment" { } test "packed struct field in anonymous struct" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - const T = packed struct { f1: bool = false, }; @@ -1779,8 +1761,6 @@ fn countFields(v: anytype) usize { } test "struct init with no result pointer sets field result types" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - const S = struct { // A function parameter has a result type, but no result pointer. 
fn f(s: struct { x: u32 }) u32 { @@ -1922,8 +1902,6 @@ test "circular dependency through pointer field of a struct" { } test "field calls do not force struct field init resolution" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - const S = struct { x: u32 = blk: { _ = @TypeOf(make().dummyFn()); // runtime field call - S not fully resolved - dummyFn call should not force field init resolution @@ -2057,7 +2035,6 @@ test "runtime value in nested initializer passed as pointer to function" { if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const Bar = struct { b: u32, @@ -2132,7 +2109,6 @@ test "assignment of field with padding" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const Mesh = extern struct { id: u32, @@ -2163,7 +2139,6 @@ test "initiate global variable with runtime value" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { field: i32, diff --git a/test/behavior/switch.zig b/test/behavior/switch.zig index 7fa4709d3b..b875caa94a 100644 --- a/test/behavior/switch.zig +++ b/test/behavior/switch.zig @@ -397,7 +397,6 @@ fn switchWithUnreachable(x: i32) i32 { test "capture value of switch with all unreachable prongs" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const x = return_a_number() catch |err| switch (err) { else => unreachable, @@ -503,7 +502,6 @@ test "switch prongs with error set cases make a new error set type for capture v test "return result loc and then switch with range implicit casted to error union" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { diff --git a/test/behavior/try.zig b/test/behavior/try.zig index e8ab96e5c9..53fdc48934 100644 --- a/test/behavior/try.zig +++ b/test/behavior/try.zig @@ -4,7 +4,6 @@ const expect = std.testing.expect; test "try on error union" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try tryOnErrorUnionImpl(); try comptime tryOnErrorUnionImpl(); diff --git a/test/behavior/tuple.zig b/test/behavior/tuple.zig index 8c683f2cac..142768454d 100644 --- a/test/behavior/tuple.zig +++ b/test/behavior/tuple.zig @@ -10,7 +10,6 @@ test "tuple concatenation" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -56,7 +55,6 @@ test "more tuple 
concatenation" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const T = struct { fn consume_tuple(tuple: anytype, len: usize) !void { @@ -326,8 +324,6 @@ test "tuple type with void field" { } test "zero sized struct in tuple handled correctly" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - const State = struct { const Self = @This(); data: @Type(.{ @@ -369,7 +365,6 @@ test "branching inside tuple literal" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn foo(a: anytype) !void { @@ -474,7 +469,6 @@ test "coerce anon tuple to tuple" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var x: u8 = 1; var y: u16 = 2; diff --git a/test/behavior/type.zig b/test/behavior/type.zig index 6150a490cf..c00d3de417 100644 --- a/test/behavior/type.zig +++ b/test/behavior/type.zig @@ -203,7 +203,6 @@ test "Type.Opaque" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const Opaque = @Type(.{ .Opaque = .{ diff --git a/test/behavior/underscore.zig b/test/behavior/underscore.zig index a53fec489b..66b49e52d5 100644 --- a/test/behavior/underscore.zig +++ b/test/behavior/underscore.zig @@ -8,7 +8,6 @@ test "ignore lval with underscore" { test "ignore lval with underscore (while loop)" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; while (optionalReturnError()) |_| { while (optionalReturnError()) |_| { diff --git a/test/behavior/union.zig b/test/behavior/union.zig index a6a452bd09..4497031b87 100644 --- a/test/behavior/union.zig +++ b/test/behavior/union.zig @@ -418,7 +418,6 @@ test "tagged union initialization with runtime void" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect(testTaggedUnionInit({})); } diff --git a/test/behavior/var_args.zig b/test/behavior/var_args.zig index a3d4f09d2e..8445df14d0 100644 --- a/test/behavior/var_args.zig +++ b/test/behavior/var_args.zig @@ -14,8 +14,6 @@ fn add(args: anytype) i32 { } test "add arbitrary args" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - try expect(add(.{ @as(i32, 1), @as(i32, 2), @as(i32, 3), @as(i32, 4) }) == 10); try expect(add(.{@as(i32, 1234)}) == 1234); try expect(add(.{}) == 0); @@ -26,15 +24,12 @@ fn readFirstVarArg(args: anytype) void 
{ } test "send void arg to var args" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - readFirstVarArg(.{{}}); } test "pass args directly" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect(addSomeStuff(.{ @as(i32, 1), @as(i32, 2), @as(i32, 3), @as(i32, 4) }) == 10); try expect(addSomeStuff(.{@as(i32, 1234)}) == 1234); @@ -48,7 +43,6 @@ fn addSomeStuff(args: anytype) i32 { test "runtime parameter before var args" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect((try extraFn(10, .{})) == 0); try expect((try extraFn(10, .{false})) == 1); @@ -87,15 +81,11 @@ fn foo2(args: anytype) bool { } test "array of var args functions" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - try expect(foos[0](.{})); try expect(!foos[1](.{})); } test "pass zero length array to var args param" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - doNothingWithFirstArg(.{""}); } diff --git a/test/behavior/while.zig b/test/behavior/while.zig index e1e5ebbfb3..32bae6aeb3 100644 --- a/test/behavior/while.zig +++ b/test/behavior/while.zig @@ -258,7 +258,6 @@ fn returnWithImplicitCastFromWhileLoopTest() anyerror!void { test "while on error union with else result follow else prong" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const result = while (returnError()) |value| { break value; @@ -268,7 +267,6 @@ test "while on error union with else result follow else prong" { test "while on error union with else result follow break prong" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const result = while (returnSuccess(10)) |value| { break value; @@ -315,7 +313,6 @@ test "while error 2 break statements and an else" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn entry(opt_t: anyerror!bool, f: bool) !void { From 39c95e89303be370bc3df0d5fb8f5ef7f4ef8e97 Mon Sep 17 00:00:00 2001 From: David Rubin Date: Sat, 4 May 2024 17:34:05 -0700 Subject: [PATCH 07/24] riscv: switch the test runner to `mainSimple` --- lib/compiler/test_runner.zig | 29 ++++++----------------------- 1 file changed, 6 insertions(+), 23 deletions(-) diff --git a/lib/compiler/test_runner.zig b/lib/compiler/test_runner.zig index dc82545e54..c02ee0996a 100644 --- a/lib/compiler/test_runner.zig +++ b/lib/compiler/test_runner.zig @@ -12,10 +12,8 @@ var cmdline_buffer: [4096]u8 = undefined; var fba = std.heap.FixedBufferAllocator.init(&cmdline_buffer); pub fn main() void { - if (builtin.zig_backend == .stage2_riscv64) return mainExtraSimple() catch @panic("test failure"); - - if (builtin.zig_backend == .stage2_aarch64) { - return mainSimple() catch @panic("test failure"); + if (builtin.zig_backend == .stage2_riscv64) { + return mainSimple() 
catch @panic("test failure\n"); } const args = std.process.argsAlloc(fba.allocator()) catch @@ -221,8 +219,9 @@ pub fn log( /// Simpler main(), exercising fewer language features, so that /// work-in-progress backends can handle it. pub fn mainSimple() anyerror!void { - const enable_print = false; - const print_all = false; + const enable_print = true; + const print_all = true; + const print_summary = false; var passed: u64 = 0; var skipped: u64 = 0; @@ -251,24 +250,8 @@ pub fn mainSimple() anyerror!void { if (enable_print and print_all) stderr.writeAll("PASS\n") catch {}; passed += 1; } - if (enable_print) { + if (print_summary) { stderr.writer().print("{} passed, {} skipped, {} failed\n", .{ passed, skipped, failed }) catch {}; if (failed != 0) std.process.exit(1); } } - -pub fn mainExtraSimple() !void { - var fail_count: u8 = 0; - - for (builtin.test_functions) |test_fn| { - test_fn.func() catch |err| { - if (err != error.SkipZigTest) { - fail_count += 1; - continue; - } - continue; - }; - } - - if (fail_count != 0) std.process.exit(1); -} From 6603a9c26cda297e9c0baaa48e2fb263de60484d Mon Sep 17 00:00:00 2001 From: David Rubin Date: Sun, 5 May 2024 15:18:00 -0700 Subject: [PATCH 08/24] testing: fix test runner --- lib/compiler/test_runner.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/compiler/test_runner.zig b/lib/compiler/test_runner.zig index c02ee0996a..27c5761f9f 100644 --- a/lib/compiler/test_runner.zig +++ b/lib/compiler/test_runner.zig @@ -252,6 +252,6 @@ pub fn mainSimple() anyerror!void { } if (print_summary) { stderr.writer().print("{} passed, {} skipped, {} failed\n", .{ passed, skipped, failed }) catch {}; - if (failed != 0) std.process.exit(1); } + if (failed != 0) std.process.exit(1); } From 381a1043eb53971b9fe5bde088211d04e63dfd58 Mon Sep 17 00:00:00 2001 From: David Rubin Date: Sun, 5 May 2024 19:41:23 -0700 Subject: [PATCH 09/24] ZigObject: enforce min function alignement on riscv --- src/link/Elf/ZigObject.zig | 5 ++++- src/target.zig | 7 +++++++ test/behavior/align.zig | 1 - test/behavior/defer.zig | 2 -- test/behavior/optional.zig | 1 - test/behavior/switch.zig | 2 -- test/behavior/try.zig | 1 - 7 files changed, 11 insertions(+), 8 deletions(-) diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig index b27601b420..451c363f56 100644 --- a/src/link/Elf/ZigObject.zig +++ b/src/link/Elf/ZigObject.zig @@ -906,7 +906,9 @@ fn updateDeclCode( log.debug("updateDeclCode {}{*}", .{ decl_name.fmt(&mod.intern_pool), decl }); - const required_alignment = decl.getAlignment(mod); + const required_alignment = decl.getAlignment(mod).max( + target_util.minFunctionAlignment(mod.getTarget()), + ); const sym = elf_file.symbol(sym_index); const esym = &self.local_esyms.items(.elf_sym)[sym.esym_index]; @@ -1634,6 +1636,7 @@ const log = std.log.scoped(.link); const mem = std.mem; const relocation = @import("relocation.zig"); const trace = @import("../../tracy.zig").trace; +const target_util = @import("../../target.zig"); const std = @import("std"); const Air = @import("../../Air.zig"); diff --git a/src/target.zig b/src/target.zig index 6af301e001..8f6473ba8e 100644 --- a/src/target.zig +++ b/src/target.zig @@ -431,6 +431,13 @@ pub fn defaultFunctionAlignment(target: std.Target) Alignment { }; } +pub fn minFunctionAlignment(target: std.Target) Alignment { + return switch (target.cpu.arch) { + .riscv64 => .@"2", + else => .@"1", + }; +} + pub fn supportsFunctionAlignment(target: std.Target) bool { return switch (target.cpu.arch) { .wasm32, .wasm64 => 
false, diff --git a/test/behavior/align.zig b/test/behavior/align.zig index 3e22d8e175..533b5dce77 100644 --- a/test/behavior/align.zig +++ b/test/behavior/align.zig @@ -311,7 +311,6 @@ test "function alignment" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // function alignment is a compile error on wasm32/wasm64 if (native_arch == .wasm32 or native_arch == .wasm64) return error.SkipZigTest; diff --git a/test/behavior/defer.zig b/test/behavior/defer.zig index 4ea6f54787..fc764f55e3 100644 --- a/test/behavior/defer.zig +++ b/test/behavior/defer.zig @@ -116,7 +116,6 @@ test "errdefer with payload" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn foo() !i32 { @@ -139,7 +138,6 @@ test "reference to errdefer payload" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn foo() !i32 { diff --git a/test/behavior/optional.zig b/test/behavior/optional.zig index 5af4e42b5c..ad90d5fd0a 100644 --- a/test/behavior/optional.zig +++ b/test/behavior/optional.zig @@ -601,7 +601,6 @@ test "cast slice to const slice nested in error union and optional" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn inner() !?[]u8 { diff --git a/test/behavior/switch.zig b/test/behavior/switch.zig index b875caa94a..8db6989b98 100644 --- a/test/behavior/switch.zig +++ b/test/behavior/switch.zig @@ -431,7 +431,6 @@ test "else prong of switch on error set excludes other cases" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -467,7 +466,6 @@ test "switch prongs with error set cases make a new error set type for capture v if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { diff --git a/test/behavior/try.zig b/test/behavior/try.zig index 53fdc48934..cc76658e93 100644 --- a/test/behavior/try.zig +++ b/test/behavior/try.zig @@ -51,7 +51,6 @@ test "`try`ing an if/else expression" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if 
(builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn getError() !void { From 7ed2f2156f4e93fa6463fb81f134c0d5e7e7cc89 Mon Sep 17 00:00:00 2001 From: David Rubin Date: Fri, 10 May 2024 23:21:53 -0700 Subject: [PATCH 10/24] riscv: fix register clobber in certain edge cases --- src/arch/riscv64/CodeGen.zig | 24 ++++++++++++------------ test/behavior/cast.zig | 1 - test/behavior/enum.zig | 2 -- test/behavior/switch.zig | 1 - 4 files changed, 12 insertions(+), 16 deletions(-) diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 13652a7bca..af620ef365 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -1841,7 +1841,7 @@ fn airMinMax( if (int_info.bits > 64) return self.fail("TODO: > 64 bit @min", .{}); const lhs_reg, const lhs_lock = blk: { - if (lhs == .register) break :blk .{ lhs.register, null }; + if (lhs == .register) break :blk .{ lhs.register, self.register_manager.lockReg(lhs.register) }; const lhs_reg, const lhs_lock = try self.allocReg(); try self.genSetReg(lhs_ty, lhs_reg, lhs); @@ -1850,7 +1850,7 @@ fn airMinMax( defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock); const rhs_reg, const rhs_lock = blk: { - if (rhs == .register) break :blk .{ rhs.register, null }; + if (rhs == .register) break :blk .{ rhs.register, self.register_manager.lockReg(rhs.register) }; const rhs_reg, const rhs_lock = try self.allocReg(); try self.genSetReg(rhs_ty, rhs_reg, rhs); @@ -2088,7 +2088,7 @@ fn binOpRegister( rhs_ty: Type, ) !MCValue { const lhs_reg, const lhs_lock = blk: { - if (lhs == .register) break :blk .{ lhs.register, null }; + if (lhs == .register) break :blk .{ lhs.register, self.register_manager.lockReg(lhs.register) }; const lhs_reg, const lhs_lock = try self.allocReg(); try self.genSetReg(lhs_ty, lhs_reg, lhs); @@ -2097,7 +2097,7 @@ fn binOpRegister( defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock); const rhs_reg, const rhs_lock = blk: { - if (rhs == .register) break :blk .{ rhs.register, null }; + if (rhs == .register) break :blk .{ rhs.register, self.register_manager.lockReg(rhs.register) }; const rhs_reg, const rhs_lock = try self.allocReg(); try self.genSetReg(rhs_ty, rhs_reg, rhs); @@ -2358,7 +2358,7 @@ fn airSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const offset = result_mcv.load_frame; const lhs_reg, const lhs_lock = blk: { - if (lhs == .register) break :blk .{ lhs.register, null }; + if (lhs == .register) break :blk .{ lhs.register, self.register_manager.lockReg(lhs.register) }; const lhs_reg, const lhs_lock = try self.allocReg(); try self.genSetReg(lhs_ty, lhs_reg, lhs); @@ -2367,7 +2367,7 @@ fn airSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock); const rhs_reg, const rhs_lock = blk: { - if (rhs == .register) break :blk .{ rhs.register, null }; + if (rhs == .register) break :blk .{ rhs.register, self.register_manager.lockReg(rhs.register) }; const rhs_reg, const rhs_lock = try self.allocReg(); try self.genSetReg(rhs_ty, rhs_reg, rhs); @@ -2596,7 +2596,7 @@ fn airBitAnd(self: *Self, inst: Air.Inst.Index) !void { const rhs_ty = self.typeOf(bin_op.rhs); const lhs_reg, const lhs_lock = blk: { - if (lhs == .register) break :blk .{ lhs.register, null }; + if (lhs == .register) break :blk .{ lhs.register, self.register_manager.lockReg(lhs.register) }; const lhs_reg, const lhs_lock = try self.allocReg(); try 
self.genSetReg(lhs_ty, lhs_reg, lhs); @@ -2605,7 +2605,7 @@ fn airBitAnd(self: *Self, inst: Air.Inst.Index) !void { defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock); const rhs_reg, const rhs_lock = blk: { - if (rhs == .register) break :blk .{ rhs.register, null }; + if (rhs == .register) break :blk .{ rhs.register, self.register_manager.lockReg(rhs.register) }; const rhs_reg, const rhs_lock = try self.allocReg(); try self.genSetReg(rhs_ty, rhs_reg, rhs); @@ -2641,7 +2641,7 @@ fn airBitOr(self: *Self, inst: Air.Inst.Index) !void { const rhs_ty = self.typeOf(bin_op.rhs); const lhs_reg, const lhs_lock = blk: { - if (lhs == .register) break :blk .{ lhs.register, null }; + if (lhs == .register) break :blk .{ lhs.register, self.register_manager.lockReg(lhs.register) }; const lhs_reg, const lhs_lock = try self.allocReg(); try self.genSetReg(lhs_ty, lhs_reg, lhs); @@ -2650,7 +2650,7 @@ fn airBitOr(self: *Self, inst: Air.Inst.Index) !void { defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock); const rhs_reg, const rhs_lock = blk: { - if (rhs == .register) break :blk .{ rhs.register, null }; + if (rhs == .register) break :blk .{ rhs.register, self.register_manager.lockReg(rhs.register) }; const rhs_reg, const rhs_lock = try self.allocReg(); try self.genSetReg(rhs_ty, rhs_reg, rhs); @@ -4717,7 +4717,7 @@ fn airBoolOp(self: *Self, inst: Air.Inst.Index) !void { const rhs_ty = Type.bool; const lhs_reg, const lhs_lock = blk: { - if (lhs == .register) break :blk .{ lhs.register, null }; + if (lhs == .register) break :blk .{ lhs.register, self.register_manager.lockReg(lhs.register) }; const lhs_reg, const lhs_lock = try self.allocReg(); try self.genSetReg(lhs_ty, lhs_reg, lhs); @@ -4726,7 +4726,7 @@ fn airBoolOp(self: *Self, inst: Air.Inst.Index) !void { defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock); const rhs_reg, const rhs_lock = blk: { - if (rhs == .register) break :blk .{ rhs.register, null }; + if (rhs == .register) break :blk .{ rhs.register, self.register_manager.lockReg(rhs.register) }; const rhs_reg, const rhs_lock = try self.allocReg(); try self.genSetReg(rhs_ty, rhs_reg, rhs); diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig index a671010740..8ea564ad13 100644 --- a/test/behavior/cast.zig +++ b/test/behavior/cast.zig @@ -677,7 +677,6 @@ test "@floatCast cast down" { test "peer type resolution: unreachable, error set, unreachable" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const Error = error{ FileDescriptorAlreadyPresentInSet, diff --git a/test/behavior/enum.zig b/test/behavior/enum.zig index cf899ed3ca..42138c1c15 100644 --- a/test/behavior/enum.zig +++ b/test/behavior/enum.zig @@ -618,7 +618,6 @@ test "enum with specified tag values" { test "non-exhaustive enum" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const E = enum(u8) { a, b, _ }; @@ -683,7 +682,6 @@ test "empty non-exhaustive enum" { test "single field non-exhaustive enum" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = 
struct { const E = enum(u8) { a, _ }; diff --git a/test/behavior/switch.zig b/test/behavior/switch.zig index 8db6989b98..6d82392958 100644 --- a/test/behavior/switch.zig +++ b/test/behavior/switch.zig @@ -216,7 +216,6 @@ fn poll() void { test "switch on global mutable var isn't constant-folded" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; while (state < 2) { poll(); From 031d8248e02f019a4c689dfa2913b30ec796dfa5 Mon Sep 17 00:00:00 2001 From: David Rubin Date: Fri, 10 May 2024 23:11:27 -0700 Subject: [PATCH 11/24] riscv: first sign of floats! --- src/arch/riscv64/CodeGen.zig | 559 +++++++++++++++++----------------- src/arch/riscv64/Encoding.zig | 347 +++++++++++++++------ src/arch/riscv64/Lower.zig | 60 +++- src/arch/riscv64/Mir.zig | 59 ++-- src/arch/riscv64/abi.zig | 117 ++++--- src/arch/riscv64/bits.zig | 65 +++- test/behavior/byteswap.zig | 1 + test/behavior/fn.zig | 2 - test/behavior/globals.zig | 1 - test/behavior/slice.zig | 1 + test/behavior/vector.zig | 1 + 11 files changed, 734 insertions(+), 479 deletions(-) diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index af620ef365..368ceb08be 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -38,23 +38,9 @@ const Memory = bits.Memory; const FrameIndex = bits.FrameIndex; const RegisterManager = abi.RegisterManager; const RegisterLock = RegisterManager.RegisterLock; -const callee_preserved_regs = abi.callee_preserved_regs; -/// General Purpose -const gp = abi.RegisterClass.gp; -/// Function Args -const fa = abi.RegisterClass.fa; -/// Function Returns -const fr = abi.RegisterClass.fr; -/// Temporary Use -const tp = abi.RegisterClass.tp; const InnerError = CodeGenError || error{OutOfRegisters}; -const RegisterView = enum(u1) { - caller, - callee, -}; - gpa: Allocator, air: Air, mod: *Package.Module, @@ -919,10 +905,24 @@ pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 { return result; } +const required_features = [_]Target.riscv.Feature{ + .d, + .m, +}; + fn gen(self: *Self) !void { const mod = self.bin_file.comp.module.?; const fn_info = mod.typeToFunc(self.fn_type).?; + inline for (required_features) |feature| { + if (!self.hasFeature(feature)) { + return self.fail( + "target missing required feature {s}", + .{@tagName(feature)}, + ); + } + } + if (fn_info.cc != .Naked) { try self.addPseudoNone(.pseudo_dbg_prologue_end); @@ -1454,9 +1454,9 @@ fn computeFrameLayout(self: *Self) !FrameLayout { } var save_reg_list = Mir.RegisterList{}; - for (callee_preserved_regs) |reg| { + for (abi.Registers.all_preserved) |reg| { if (self.register_manager.isRegAllocated(reg)) { - save_reg_list.push(&callee_preserved_regs, reg); + save_reg_list.push(&abi.Registers.all_preserved, reg); } } @@ -1600,6 +1600,33 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !FrameIndex { })); } +fn typeRegClass(self: *Self, ty: Type) abi.RegisterClass { + const zcu = self.bin_file.comp.module.?; + return switch (ty.zigTypeTag(zcu)) { + .Float => .float, + .Vector => @panic("TODO: typeRegClass for Vectors"), + inline else => .int, + }; +} + +fn regGeneralClassForType(self: *Self, ty: Type) RegisterManager.RegisterBitSet { + const zcu = self.bin_file.comp.module.?; + return switch (ty.zigTypeTag(zcu)) { + .Float => abi.Registers.Float.general_purpose, + .Vector => @panic("TODO: regGeneralClassForType for Vectors"), + else => abi.Registers.Integer.general_purpose, + }; +} + +fn 
regTempClassForType(self: *Self, ty: Type) RegisterManager.RegisterBitSet { + const zcu = self.bin_file.comp.module.?; + return switch (ty.zigTypeTag(zcu)) { + .Float => abi.Registers.Float.temporary, + .Vector => @panic("TODO: regTempClassForType for Vectors"), + else => abi.Registers.Integer.temporary, + }; +} + fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue { const zcu = self.bin_file.comp.module.?; const elem_ty = self.typeOfIndex(inst); @@ -1608,11 +1635,15 @@ fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue { return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(zcu)}); }; - if (reg_ok) { - if (abi_size <= 8) { - if (self.register_manager.tryAllocReg(inst, gp)) |reg| { - return .{ .register = reg }; - } + const min_size: u32 = switch (elem_ty.zigTypeTag(zcu)) { + .Float => 4, + .Vector => @panic("allocRegOrMem Vector"), + else => 8, + }; + + if (reg_ok and abi_size <= min_size) { + if (self.register_manager.tryAllocReg(inst, self.regGeneralClassForType(elem_ty))) |reg| { + return .{ .register = reg }; } } @@ -1623,19 +1654,37 @@ fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue { /// Allocates a register from the general purpose set and returns the Register and the Lock. /// /// Up to the caller to unlock the register later. -fn allocReg(self: *Self) !struct { Register, RegisterLock } { - const reg = try self.register_manager.allocReg(null, gp); +fn allocReg(self: *Self, reg_class: abi.RegisterClass) !struct { Register, RegisterLock } { + if (reg_class == .float and !self.hasFeature(.f)) + std.debug.panic("allocReg class == float where F isn't enabled", .{}); + + const class = switch (reg_class) { + .int => abi.Registers.Integer.general_purpose, + .float => abi.Registers.Float.general_purpose, + }; + + const reg = try self.register_manager.allocReg(null, class); const lock = self.register_manager.lockRegAssumeUnused(reg); return .{ reg, lock }; } +/// Similar to `allocReg` but will copy the MCValue into the Register unless `operand` is already +/// a register, in which case it will return a possible lock to that register. +fn promoteReg(self: *Self, ty: Type, operand: MCValue) !struct { Register, ?RegisterLock } { + if (operand == .register) return .{ operand.register, self.register_manager.lockReg(operand.register) }; + + const reg, const lock = try self.allocReg(self.typeRegClass(ty)); + try self.genSetReg(ty, reg, operand); + return .{ reg, lock }; +} + fn elemOffset(self: *Self, index_ty: Type, index: MCValue, elem_size: u64) !Register { const reg: Register = blk: { switch (index) { .immediate => |imm| { // Optimisation: if index MCValue is an immediate, we can multiply in `comptime` // and set the register directly to the scaled offset as an immediate. - const reg = try self.register_manager.allocReg(null, gp); + const reg = try self.register_manager.allocReg(null, self.regGeneralClassForType(index_ty)); try self.genSetReg(index_ty, reg, .{ .immediate = imm * elem_size }); break :blk reg; }, @@ -1671,7 +1720,8 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void /// allocated. A second call to `copyToTmpRegister` may return the same register. /// This can have a side effect of spilling instructions to the stack to free up a register. 
fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register { - const reg = try self.register_manager.allocReg(null, tp); + log.debug("copyToTmpRegister ty: {}", .{ty.fmt(self.bin_file.comp.module.?)}); + const reg = try self.register_manager.allocReg(null, self.regTempClassForType(ty)); try self.genSetReg(ty, reg, mcv); return reg; } @@ -1680,7 +1730,8 @@ fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register { /// `reg_owner` is the instruction that gets associated with the register in the register table. /// This can have a side effect of spilling instructions to the stack to free up a register. fn copyToNewRegister(self: *Self, reg_owner: Air.Inst.Index, mcv: MCValue) !MCValue { - const reg = try self.register_manager.allocReg(reg_owner, gp); + const ty = self.typeOfIndex(reg_owner); + const reg = try self.register_manager.allocReg(reg_owner, self.regGeneralClassForType(ty)); try self.genSetReg(self.typeOfIndex(reg_owner), reg, mcv); return MCValue{ .register = reg }; } @@ -1797,7 +1848,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void { if (self.reuseOperand(inst, ty_op.operand, 0, operand) and operand == .register) operand.register else - try self.register_manager.allocReg(inst, gp); + (try self.allocRegOrMem(inst, true)).register; _ = try self.addInst(.{ .tag = .pseudo, @@ -1843,7 +1894,7 @@ fn airMinMax( const lhs_reg, const lhs_lock = blk: { if (lhs == .register) break :blk .{ lhs.register, self.register_manager.lockReg(lhs.register) }; - const lhs_reg, const lhs_lock = try self.allocReg(); + const lhs_reg, const lhs_lock = try self.allocReg(.int); try self.genSetReg(lhs_ty, lhs_reg, lhs); break :blk .{ lhs_reg, lhs_lock }; }; @@ -1852,16 +1903,16 @@ fn airMinMax( const rhs_reg, const rhs_lock = blk: { if (rhs == .register) break :blk .{ rhs.register, self.register_manager.lockReg(rhs.register) }; - const rhs_reg, const rhs_lock = try self.allocReg(); + const rhs_reg, const rhs_lock = try self.allocReg(.int); try self.genSetReg(rhs_ty, rhs_reg, rhs); break :blk .{ rhs_reg, rhs_lock }; }; defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock); - const mask_reg, const mask_lock = try self.allocReg(); + const mask_reg, const mask_lock = try self.allocReg(.int); defer self.register_manager.unlockReg(mask_lock); - const result_reg, const result_lock = try self.allocReg(); + const result_reg, const result_lock = try self.allocReg(.int); defer self.register_manager.unlockReg(result_lock); _ = try self.addInst(.{ @@ -1955,20 +2006,6 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } -/// For all your binary operation needs, this function will generate -/// the corresponding Mir instruction(s). Returns the location of the -/// result. -/// -/// If the binary operation itself happens to be an Air instruction, -/// pass the corresponding index in the inst parameter. That helps -/// this function do stuff like reusing operands. -/// -/// This function does not do any lowering to Mir itself, but instead -/// looks at the lhs and rhs and determines which kind of lowering -/// would be best suitable and then delegates the lowering to other -/// functions. -/// -/// `maybe_inst` **needs** to be a bin_op, make sure of that. 
fn binOp( self: *Self, tag: Air.Inst.Tag, @@ -1991,11 +2028,18 @@ fn binOp( .cmp_lt, .cmp_lte, => { + assert(lhs_ty.eql(rhs_ty, zcu)); switch (lhs_ty.zigTypeTag(zcu)) { - .Float => return self.fail("TODO binary operations on floats", .{}), + .Float => { + const float_bits = lhs_ty.floatBits(zcu.getTarget()); + if (float_bits <= 32) { + return self.binOpFloat(tag, lhs, lhs_ty, rhs, rhs_ty); + } else { + return self.fail("TODO: binary operations for floats with bits > 32", .{}); + } + }, .Vector => return self.fail("TODO binary operations on vectors", .{}), .Int, .Enum, .ErrorSet => { - assert(lhs_ty.eql(rhs_ty, zcu)); const int_info = lhs_ty.intInfo(zcu); if (int_info.bits <= 64) { return self.binOpRegister(tag, lhs, lhs_ty, rhs, rhs_ty); @@ -2071,14 +2115,7 @@ fn binOp( else => return self.fail("TODO binOp {}", .{tag}), } } -/// Don't call this function directly. Use binOp instead. -/// -/// Calling this function signals an intention to generate a Mir -/// instruction of the form -/// -/// op dest, lhs, rhs -/// -/// Asserts that generating an instruction of that form is possible. + fn binOpRegister( self: *Self, tag: Air.Inst.Tag, @@ -2087,25 +2124,13 @@ fn binOpRegister( rhs: MCValue, rhs_ty: Type, ) !MCValue { - const lhs_reg, const lhs_lock = blk: { - if (lhs == .register) break :blk .{ lhs.register, self.register_manager.lockReg(lhs.register) }; - - const lhs_reg, const lhs_lock = try self.allocReg(); - try self.genSetReg(lhs_ty, lhs_reg, lhs); - break :blk .{ lhs_reg, lhs_lock }; - }; + const lhs_reg, const lhs_lock = try self.promoteReg(lhs_ty, lhs); defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock); - const rhs_reg, const rhs_lock = blk: { - if (rhs == .register) break :blk .{ rhs.register, self.register_manager.lockReg(rhs.register) }; - - const rhs_reg, const rhs_lock = try self.allocReg(); - try self.genSetReg(rhs_ty, rhs_reg, rhs); - break :blk .{ rhs_reg, rhs_lock }; - }; + const rhs_reg, const rhs_lock = try self.promoteReg(rhs_ty, rhs); defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock); - const dest_reg, const dest_lock = try self.allocReg(); + const dest_reg, const dest_lock = try self.allocReg(.int); defer self.register_manager.unlockReg(dest_lock); const mir_tag: Mir.Inst.Tag = switch (tag) { @@ -2184,7 +2209,50 @@ fn binOpRegister( else => unreachable, } - // generate the struct for OF checks + return MCValue{ .register = dest_reg }; +} + +fn binOpFloat( + self: *Self, + tag: Air.Inst.Tag, + lhs: MCValue, + lhs_ty: Type, + rhs: MCValue, + rhs_ty: Type, +) !MCValue { + const zcu = self.bin_file.comp.module.?; + const float_bits = lhs_ty.floatBits(zcu.getTarget()); + + const lhs_reg, const lhs_lock = try self.promoteReg(lhs_ty, lhs); + defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock); + + const rhs_reg, const rhs_lock = try self.promoteReg(rhs_ty, rhs); + defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock); + + const mir_tag: Mir.Inst.Tag = switch (tag) { + .add => if (float_bits == 32) .fadds else .faddd, + .cmp_eq => if (float_bits == 32) .feqs else .feqd, + else => return self.fail("TODO: binOpFloat mir_tag {s}", .{@tagName(tag)}), + }; + + const return_class: abi.RegisterClass = switch (tag) { + .add => .float, + .cmp_eq => .int, + else => unreachable, + }; + + const dest_reg, const dest_lock = try self.allocReg(return_class); + defer self.register_manager.unlockReg(dest_lock); + + _ = try self.addInst(.{ + .tag = mir_tag, + .ops = .rrr, + .data = .{ .r_type = .{ + .rd = dest_reg, + .rs1 = lhs_reg, + .rs2 = 
rhs_reg, + } }, + }); return MCValue{ .register = dest_reg }; } @@ -2279,7 +2347,7 @@ fn airAddWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const shift_amount: u6 = @intCast(Type.usize.bitSize(zcu) - int_info.bits); - const shift_reg, const shift_lock = try self.allocReg(); + const shift_reg, const shift_lock = try self.allocReg(.int); defer self.register_manager.unlockReg(shift_lock); _ = try self.addInst(.{ @@ -2357,25 +2425,13 @@ fn airSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const result_mcv = try self.allocRegOrMem(inst, false); const offset = result_mcv.load_frame; - const lhs_reg, const lhs_lock = blk: { - if (lhs == .register) break :blk .{ lhs.register, self.register_manager.lockReg(lhs.register) }; - - const lhs_reg, const lhs_lock = try self.allocReg(); - try self.genSetReg(lhs_ty, lhs_reg, lhs); - break :blk .{ lhs_reg, lhs_lock }; - }; + const lhs_reg, const lhs_lock = try self.promoteReg(lhs_ty, lhs); defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock); - const rhs_reg, const rhs_lock = blk: { - if (rhs == .register) break :blk .{ rhs.register, self.register_manager.lockReg(rhs.register) }; - - const rhs_reg, const rhs_lock = try self.allocReg(); - try self.genSetReg(rhs_ty, rhs_reg, rhs); - break :blk .{ rhs_reg, rhs_lock }; - }; + const rhs_reg, const rhs_lock = try self.promoteReg(rhs_ty, rhs); defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock); - const dest_reg, const dest_lock = try self.allocReg(); + const dest_reg, const dest_lock = try self.allocReg(.int); defer self.register_manager.unlockReg(dest_lock); switch (int_info.signedness) { @@ -2503,18 +2559,12 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { 1...8 => { const max_val = std.math.pow(u16, 2, int_info.bits) - 1; - const overflow_reg, const overflow_lock = try self.allocReg(); - defer self.register_manager.unlockReg(overflow_lock); - - const add_reg, const add_lock = blk: { - if (dest == .register) break :blk .{ dest.register, null }; - - const add_reg, const add_lock = try self.allocReg(); - try self.genSetReg(lhs_ty, add_reg, dest); - break :blk .{ add_reg, add_lock }; - }; + const add_reg, const add_lock = try self.promoteReg(lhs_ty, lhs); defer if (add_lock) |lock| self.register_manager.unlockReg(lock); + const overflow_reg, const overflow_lock = try self.allocReg(.int); + defer self.register_manager.unlockReg(overflow_lock); + _ = try self.addInst(.{ .tag = .andi, .ops = .rri, @@ -2595,25 +2645,13 @@ fn airBitAnd(self: *Self, inst: Air.Inst.Index) !void { const lhs_ty = self.typeOf(bin_op.lhs); const rhs_ty = self.typeOf(bin_op.rhs); - const lhs_reg, const lhs_lock = blk: { - if (lhs == .register) break :blk .{ lhs.register, self.register_manager.lockReg(lhs.register) }; - - const lhs_reg, const lhs_lock = try self.allocReg(); - try self.genSetReg(lhs_ty, lhs_reg, lhs); - break :blk .{ lhs_reg, lhs_lock }; - }; + const lhs_reg, const lhs_lock = try self.promoteReg(lhs_ty, lhs); defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock); - const rhs_reg, const rhs_lock = blk: { - if (rhs == .register) break :blk .{ rhs.register, self.register_manager.lockReg(rhs.register) }; - - const rhs_reg, const rhs_lock = try self.allocReg(); - try self.genSetReg(rhs_ty, rhs_reg, rhs); - break :blk .{ rhs_reg, rhs_lock }; - }; + const rhs_reg, const rhs_lock = try self.promoteReg(rhs_ty, rhs); defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock); - const dest_reg, const dest_lock = try self.allocReg(); + const dest_reg, const 
dest_lock = try self.allocReg(.int); defer self.register_manager.unlockReg(dest_lock); _ = try self.addInst(.{ @@ -2640,25 +2678,13 @@ fn airBitOr(self: *Self, inst: Air.Inst.Index) !void { const lhs_ty = self.typeOf(bin_op.lhs); const rhs_ty = self.typeOf(bin_op.rhs); - const lhs_reg, const lhs_lock = blk: { - if (lhs == .register) break :blk .{ lhs.register, self.register_manager.lockReg(lhs.register) }; - - const lhs_reg, const lhs_lock = try self.allocReg(); - try self.genSetReg(lhs_ty, lhs_reg, lhs); - break :blk .{ lhs_reg, lhs_lock }; - }; + const lhs_reg, const lhs_lock = try self.promoteReg(lhs_ty, lhs); defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock); - const rhs_reg, const rhs_lock = blk: { - if (rhs == .register) break :blk .{ rhs.register, self.register_manager.lockReg(rhs.register) }; - - const rhs_reg, const rhs_lock = try self.allocReg(); - try self.genSetReg(rhs_ty, rhs_reg, rhs); - break :blk .{ rhs_reg, rhs_lock }; - }; + const rhs_reg, const rhs_lock = try self.promoteReg(rhs_ty, rhs); defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock); - const dest_reg, const dest_lock = try self.allocReg(); + const dest_reg, const dest_lock = try self.allocReg(.int); defer self.register_manager.unlockReg(dest_lock); _ = try self.addInst(.{ @@ -3102,7 +3128,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { const elem_ty = array_ty.childType(zcu); const elem_abi_size = elem_ty.abiSize(zcu); - const addr_reg, const addr_reg_lock = try self.allocReg(); + const addr_reg, const addr_reg_lock = try self.allocReg(.int); defer self.register_manager.unlockReg(addr_reg_lock); switch (array_mcv) { @@ -3211,46 +3237,7 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void { fn airCtz(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { - const operand = try self.resolveInst(ty_op.operand); - const operand_ty = self.typeOf(ty_op.operand); - - const dest_reg = try self.register_manager.allocReg(inst, gp); - - const source_reg, const source_lock = blk: { - if (operand == .register) break :blk .{ operand.register, null }; - - const source_reg, const source_lock = try self.allocReg(); - try self.genSetReg(operand_ty, source_reg, operand); - break :blk .{ source_reg, source_lock }; - }; - defer if (source_lock) |lock| self.register_manager.unlockReg(lock); - - // TODO: the B extension for RISCV should have the ctz instruction, and we should use it. 
- - try self.ctz(source_reg, dest_reg, operand_ty); - - break :result .{ .register = dest_reg }; - }; - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); -} - -fn ctz(self: *Self, src: Register, dst: Register, ty: Type) !void { - const zcu = self.bin_file.comp.module.?; - const length = (ty.abiSize(zcu) * 8) - 1; - - const count_reg, const count_lock = try self.allocReg(); - defer self.register_manager.unlockReg(count_lock); - - const len_reg, const len_lock = try self.allocReg(); - defer self.register_manager.unlockReg(len_lock); - - try self.genSetReg(Type.usize, count_reg, .{ .immediate = 0 }); - try self.genSetReg(Type.usize, len_reg, .{ .immediate = length }); - - _ = src; - _ = dst; - + _ = ty_op; return self.fail("TODO: finish ctz", .{}); } @@ -3267,38 +3254,18 @@ fn airAbs(self: *Self, inst: Air.Inst.Index) !void { const ty = self.typeOf(ty_op.operand); const scalar_ty = ty.scalarType(zcu); const operand = try self.resolveInst(ty_op.operand); + _ = operand; switch (scalar_ty.zigTypeTag(zcu)) { .Int => if (ty.zigTypeTag(zcu) == .Vector) { return self.fail("TODO implement airAbs for {}", .{ty.fmt(zcu)}); } else { - const int_bits = ty.intInfo(zcu).bits; - - if (int_bits > 32) { - return self.fail("TODO: airAbs for larger than 32 bits", .{}); - } - - // promote the src into a register - const src_mcv = try self.copyToNewRegister(inst, operand); - // temp register for shift - const temp_reg = try self.register_manager.allocReg(inst, gp); - - _ = try self.addInst(.{ - .tag = .abs, - .ops = .rri, - .data = .{ - .i_type = .{ - .rs1 = src_mcv.register, - .rd = temp_reg, - .imm12 = Immediate.s(int_bits - 1), - }, - }, - }); - - break :result src_mcv; + return self.fail("TODO: implement airAbs for Int", .{}); }, else => return self.fail("TODO: implement airAbs {}", .{scalar_ty.fmt(zcu)}), } + + break :result .unreach; }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } @@ -3317,15 +3284,24 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, operand, .{ ty_op.operand, .none, .none }); } - const dest_reg = try self.register_manager.allocReg(null, gp); - try self.genSetReg(ty, dest_reg, operand); - - const dest_mcv: MCValue = .{ .register = dest_reg }; + const dest_mcv = try self.copyToNewRegister(inst, operand); + const dest_reg = dest_mcv.register; switch (int_bits) { 16 => { - const temp = try self.binOp(.shr, dest_mcv, ty, .{ .immediate = 8 }, Type.u8); - assert(temp == .register); + const temp_reg, const temp_lock = try self.allocReg(.int); + defer self.register_manager.unlockReg(temp_lock); + + _ = try self.addInst(.{ + .tag = .srli, + .ops = .rri, + .data = .{ .i_type = .{ + .imm12 = Immediate.s(8), + .rd = temp_reg, + .rs1 = dest_reg, + } }, + }); + _ = try self.addInst(.{ .tag = .slli, .ops = .rri, @@ -3341,7 +3317,7 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void { .data = .{ .r_type = .{ .rd = dest_reg, .rs1 = dest_reg, - .rs2 = temp.register, + .rs2 = temp_reg, } }, }); }, @@ -3360,11 +3336,12 @@ fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void { } fn airUnaryMath(self: *Self, inst: Air.Inst.Index) !void { + const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)]; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else - return self.fail("TODO implement airUnaryMath for {}", .{self.target.cpu.arch}); + return self.fail("TODO implement airUnaryMath {s} for {}", .{ @tagName(tag), 
self.target.cpu.arch }); return self.finishAir(inst, result, .{ un_op, .none, .none }); } @@ -3640,7 +3617,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { @intCast(field_bit_size), ); - const dst_reg, const dst_lock = try self.allocReg(); + const dst_reg, const dst_lock = try self.allocReg(.int); const dst_mcv = MCValue{ .register = dst_reg }; defer self.register_manager.unlockReg(dst_lock); @@ -3658,7 +3635,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { break :dst dst_mcv; }; if (field_abi_size * 8 > field_bit_size and dst_mcv.isMemory()) { - const tmp_reg, const tmp_lock = try self.allocReg(); + const tmp_reg, const tmp_lock = try self.allocReg(.int); defer self.register_manager.unlockReg(tmp_lock); const hi_mcv = @@ -3972,7 +3949,7 @@ fn genCall( } } else { assert(self.typeOf(callee).zigTypeTag(zcu) == .Pointer); - const addr_reg, const addr_lock = try self.allocReg(); + const addr_reg, const addr_lock = try self.allocReg(.int); defer self.register_manager.unlockReg(addr_lock); try self.genSetReg(Type.usize, addr_reg, .{ .air_ref = callee }); @@ -4072,32 +4049,49 @@ fn airCmp(self: *Self, inst: Air.Inst.Index) !void { const rhs = try self.resolveInst(bin_op.rhs); const lhs_ty = self.typeOf(bin_op.lhs); - const int_ty = switch (lhs_ty.zigTypeTag(zcu)) { - .Vector => unreachable, // Handled by cmp_vector. - .Enum => lhs_ty.intTagType(zcu), - .Int => lhs_ty, - .Bool => Type.u1, - .Pointer => Type.usize, - .ErrorSet => Type.u16, - .Optional => blk: { - const payload_ty = lhs_ty.optionalChild(zcu); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) { - break :blk Type.u1; - } else if (lhs_ty.isPtrLikeOptional(zcu)) { - break :blk Type.usize; + switch (lhs_ty.zigTypeTag(zcu)) { + .Int, + .Enum, + .Bool, + .Pointer, + .ErrorSet, + .Optional, + => { + const int_ty = switch (lhs_ty.zigTypeTag(zcu)) { + .Enum => lhs_ty.intTagType(zcu), + .Int => lhs_ty, + .Bool => Type.u1, + .Pointer => Type.usize, + .ErrorSet => Type.u16, + .Optional => blk: { + const payload_ty = lhs_ty.optionalChild(zcu); + if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) { + break :blk Type.u1; + } else if (lhs_ty.isPtrLikeOptional(zcu)) { + break :blk Type.usize; + } else { + return self.fail("TODO riscv cmp non-pointer optionals", .{}); + } + }, + else => unreachable, + }; + + const int_info = int_ty.intInfo(zcu); + if (int_info.bits <= 64) { + break :result try self.binOp(tag, lhs, int_ty, rhs, int_ty); } else { - return self.fail("TODO riscv cmp non-pointer optionals", .{}); + return self.fail("TODO riscv cmp for ints > 64 bits", .{}); } }, - .Float => return self.fail("TODO riscv cmp floats", .{}), - else => unreachable, - }; + .Float => { + const float_bits = lhs_ty.floatBits(self.target.*); + if (float_bits > 32) { + return self.fail("TODO: airCmp float > 32 bits", .{}); + } - const int_info = int_ty.intInfo(zcu); - if (int_info.bits <= 64) { - break :result try self.binOp(tag, lhs, int_ty, rhs, int_ty); - } else { - return self.fail("TODO riscv cmp for ints > 64 bits", .{}); + break :result try self.binOpFloat(tag, lhs, lhs_ty, rhs, lhs_ty); + }, + else => unreachable, } }; @@ -4716,25 +4710,13 @@ fn airBoolOp(self: *Self, inst: Air.Inst.Index) !void { const lhs_ty = Type.bool; const rhs_ty = Type.bool; - const lhs_reg, const lhs_lock = blk: { - if (lhs == .register) break :blk .{ lhs.register, self.register_manager.lockReg(lhs.register) }; - - const lhs_reg, const lhs_lock = try self.allocReg(); - try self.genSetReg(lhs_ty, lhs_reg, lhs); - break :blk .{ lhs_reg, 
lhs_lock }; - }; + const lhs_reg, const lhs_lock = try self.promoteReg(lhs_ty, lhs); defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock); - const rhs_reg, const rhs_lock = blk: { - if (rhs == .register) break :blk .{ rhs.register, self.register_manager.lockReg(rhs.register) }; - - const rhs_reg, const rhs_lock = try self.allocReg(); - try self.genSetReg(rhs_ty, rhs_reg, rhs); - break :blk .{ rhs_reg, rhs_lock }; - }; + const rhs_reg, const rhs_lock = try self.promoteReg(rhs_ty, rhs); defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock); - const result_reg, const result_lock = try self.allocReg(); + const result_reg, const result_lock = try self.allocReg(.int); defer self.register_manager.unlockReg(result_lock); _ = try self.addInst(.{ @@ -4881,7 +4863,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { return self.finishAirResult(inst, result); } -/// Sets the value without any modifications to register allocation metadata or stack allocation metadata. +/// Sets the value of `dst_mcv` to the value of `src_mcv`. fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) !void { const zcu = self.bin_file.comp.module.?; @@ -4890,7 +4872,7 @@ fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) !void { if (!dst_mcv.isMutable()) { // panic so we can see the trace - return self.fail("tried to genCopy immutable: {s}", .{@tagName(dst_mcv)}); + return std.debug.panic("tried to genCopy immutable: {s}", .{@tagName(dst_mcv)}); } switch (dst_mcv) { @@ -4924,13 +4906,12 @@ fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) !void { ), .memory => return self.fail("TODO: genCopy memory", .{}), .register_pair => |dst_regs| { - const src_info: ?struct { addr_reg: Register, addr_lock: RegisterLock } = switch (src_mcv) { + const src_info: ?struct { addr_reg: Register, addr_lock: ?RegisterLock } = switch (src_mcv) { .register_pair, .memory, .indirect, .load_frame => null, .load_symbol => src: { - const src_addr_reg, const src_addr_lock = try self.allocReg(); + const src_addr_reg, const src_addr_lock = try self.promoteReg(Type.usize, src_mcv.address()); errdefer self.register_manager.unlockReg(src_addr_lock); - try self.genSetReg(Type.usize, src_addr_reg, src_mcv.address()); break :src .{ .addr_reg = src_addr_reg, .addr_lock = src_addr_lock }; }, .air_ref => |src_ref| return self.genCopy( @@ -4940,7 +4921,12 @@ fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) !void { ), else => unreachable, }; - defer if (src_info) |info| self.register_manager.unlockReg(info.addr_lock); + + defer if (src_info) |info| { + if (info.addr_lock) |lock| { + self.register_manager.unlockReg(lock); + } + }; var part_disp: i32 = 0; for (dst_regs, try self.splitType(ty), 0..) 
|dst_reg, dst_ty, part_i| { @@ -4966,7 +4952,7 @@ fn genInlineMemcpy( src_ptr: MCValue, len: MCValue, ) !void { - const regs = try self.register_manager.allocRegs(4, .{null} ** 4, tp); + const regs = try self.register_manager.allocRegs(4, .{null} ** 4, abi.Registers.Integer.temporary); const locks = self.register_manager.lockRegsAssumeUnused(4, regs); defer for (locks) |lock| self.register_manager.unlockReg(lock); @@ -5060,9 +5046,7 @@ fn genInlineMemcpy( _ = try self.addInst(.{ .tag = .pseudo, .ops = .pseudo_j, - .data = .{ - .inst = first_inst, - }, + .data = .{ .inst = first_inst }, }); } @@ -5072,7 +5056,7 @@ fn genInlineMemset( src_value: MCValue, len: MCValue, ) !void { - const regs = try self.register_manager.allocRegs(3, .{null} ** 3, tp); + const regs = try self.register_manager.allocRegs(3, .{null} ** 3, abi.Registers.Integer.temporary); const locks = self.register_manager.lockRegsAssumeUnused(3, regs); defer for (locks) |lock| self.register_manager.unlockReg(lock); @@ -5153,6 +5137,8 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_mcv: MCValue) InnerError! if (abi_size > 8) return std.debug.panic("tried to set reg with size {}", .{abi_size}); + const dst_reg_class = reg.class(); + switch (src_mcv) { .dead => unreachable, .unreach, .none => return, // Nothing to do. @@ -5163,6 +5149,8 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_mcv: MCValue) InnerError! return self.genSetReg(ty, reg, .{ .immediate = 0xaaaaaaaaaaaaaaaa }); }, .immediate => |unsigned_x| { + assert(dst_reg_class == .int); + const x: i64 = @bitCast(unsigned_x); if (math.minInt(i12) <= x and x <= math.maxInt(i12)) { _ = try self.addInst(.{ @@ -5200,7 +5188,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_mcv: MCValue) InnerError! // TODO: use a more advanced myriad seq to do this without a reg. // see: https://github.com/llvm/llvm-project/blob/081a66ffacfe85a37ff775addafcf3371e967328/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp#L224 - const temp, const temp_lock = try self.allocReg(); + const temp, const temp_lock = try self.allocReg(.int); defer self.register_manager.unlockReg(temp_lock); const lo32: i32 = @truncate(x); @@ -5236,6 +5224,19 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_mcv: MCValue) InnerError! if (src_reg.id() == reg.id()) return; + const src_reg_class = src_reg.class(); + + if (src_reg_class == .float) { + if (dst_reg_class == .float) { + return self.fail("TODO: genSetReg float -> float", .{}); + } + + assert(dst_reg_class == .int); // a bit of future proofing + + // to move from float -> int, we use FMV.X.W + return self.fail("TODO: genSetReg float -> int", .{}); + } + // mv reg, src_reg _ = try self.addInst(.{ .tag = .pseudo, @@ -5309,11 +5310,19 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_mcv: MCValue) InnerError! }); }, .indirect => |reg_off| { + const float_class = dst_reg_class == .float; + const load_tag: Mir.Inst.Tag = switch (abi_size) { - 1 => .lb, - 2 => .lh, - 4 => .lw, - 8 => .ld, + 1 => if (float_class) + unreachable // Zig does not support 8-bit floats + else + .lb, + 2 => if (float_class) + return self.fail("TODO: genSetReg indirect 16-bit float", .{}) + else + .lh, + 4 => if (float_class) .flw else .lw, + 8 => if (float_class) .fld else .ld, else => return std.debug.panic("TODO: genSetReg for size {d}", .{abi_size}), }; @@ -5336,15 +5345,18 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_mcv: MCValue) InnerError! 
.tag = .pseudo, .ops = .pseudo_load_symbol, .data = .{ .payload = try self.addExtra(Mir.LoadSymbolPayload{ - .register = reg.id(), + .register = reg.encodeId(), .atom_index = atom_index, .sym_index = sym_off.sym, }) }, }); }, .load_symbol => { - try self.genSetReg(ty, reg, src_mcv.address()); - try self.genSetReg(ty, reg, .{ .indirect = .{ .reg = reg } }); + const addr_reg, const addr_lock = try self.allocReg(.int); + defer self.register_manager.unlockReg(addr_lock); + + try self.genSetReg(ty, addr_reg, src_mcv.address()); + try self.genSetReg(ty, reg, .{ .indirect = .{ .reg = addr_reg } }); }, .air_ref => |ref| try self.genSetReg(ty, reg, try self.resolveInst(ref)), else => return self.fail("TODO: genSetReg {s}", .{@tagName(src_mcv)}), @@ -5386,7 +5398,8 @@ fn genSetMem( => switch (abi_size) { 0 => {}, 1, 2, 4, 8 => { - const src_reg = try self.copyToTmpRegister(ty, src_mcv); + // no matter what type, it should use an integer register + const src_reg = try self.copyToTmpRegister(Type.usize, src_mcv); const src_lock = self.register_manager.lockRegAssumeUnused(src_reg); defer self.register_manager.unlockReg(src_lock); @@ -5460,10 +5473,8 @@ fn genSetMem( .immediate => { // TODO: remove this lock in favor of a copyToTmpRegister when we load 64 bit immediates with // a register allocation. - const reg, const reg_lock = try self.allocReg(); - defer self.register_manager.unlockReg(reg_lock); - - try self.genSetReg(ty, reg, src_mcv); + const reg, const reg_lock = try self.promoteReg(ty, src_mcv); + defer if (reg_lock) |lock| self.register_manager.unlockReg(lock); return self.genSetMem(base, disp, ty, .{ .register = reg }); }, @@ -5632,7 +5643,7 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void { assert(len != 0); // prevented by Sema try self.store(dst_ptr, src_val, elem_ptr_ty, elem_ty); - const second_elem_ptr_reg, const second_elem_ptr_lock = try self.allocReg(); + const second_elem_ptr_reg, const second_elem_ptr_lock = try self.allocReg(.int); defer self.register_manager.unlockReg(second_elem_ptr_lock); const second_elem_ptr_mcv: MCValue = .{ .register = second_elem_ptr_reg }; @@ -5677,7 +5688,7 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void { const err_lock = self.register_manager.lockRegAssumeUnused(err_reg); defer self.register_manager.unlockReg(err_lock); - const addr_reg, const addr_lock = try self.allocReg(); + const addr_reg, const addr_lock = try self.allocReg(.int); defer self.register_manager.unlockReg(addr_lock); // this is now the base address of the error name table @@ -5691,13 +5702,13 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void { return self.fail("TODO: riscv non-elf", .{}); } - const start_reg, const start_lock = try self.allocReg(); + const start_reg, const start_lock = try self.allocReg(.int); defer self.register_manager.unlockReg(start_lock); - const end_reg, const end_lock = try self.allocReg(); + const end_reg, const end_lock = try self.allocReg(.int); defer self.register_manager.unlockReg(end_lock); - // const tmp_reg, const tmp_lock = try self.allocReg(); + // const tmp_reg, const tmp_lock = try self.allocReg(.int); // defer self.register_manager.unlockReg(tmp_lock); // we move the base address forward by the following formula: base + (errno * 8) @@ -6025,16 +6036,16 @@ fn resolveCallingConventionValues( for (classes) |class| switch (class) { .integer => { - const ret_int_reg = abi.function_ret_regs[ret_int_reg_i]; + const ret_int_reg = abi.Registers.Integer.function_ret_regs[ret_int_reg_i]; ret_int_reg_i += 1; 
ret_tracking[ret_tracking_i] = InstTracking.init(.{ .register = ret_int_reg }); ret_tracking_i += 1; }, .memory => { - const ret_int_reg = abi.function_ret_regs[ret_int_reg_i]; + const ret_int_reg = abi.Registers.Integer.function_ret_regs[ret_int_reg_i]; ret_int_reg_i += 1; - const ret_indirect_reg = abi.function_arg_regs[param_int_reg_i]; + const ret_indirect_reg = abi.Registers.Integer.function_arg_regs[param_int_reg_i]; param_int_reg_i += 1; ret_tracking[ret_tracking_i] = .{ @@ -6069,7 +6080,7 @@ fn resolveCallingConventionValues( for (classes) |class| switch (class) { .integer => { - const param_int_regs = abi.function_arg_regs; + const param_int_regs = abi.Registers.Integer.function_arg_regs; if (param_int_reg_i >= param_int_regs.len) break; const param_int_reg = param_int_regs[param_int_reg_i]; @@ -6079,7 +6090,7 @@ fn resolveCallingConventionValues( arg_mcv_i += 1; }, .memory => { - const param_int_regs = abi.function_arg_regs; + const param_int_regs = abi.Registers.Integer.function_arg_regs; const param_int_reg = param_int_regs[param_int_reg_i]; param_int_reg_i += 1; diff --git a/src/arch/riscv64/Encoding.zig b/src/arch/riscv64/Encoding.zig index 46b6735084..eec4320753 100644 --- a/src/arch/riscv64/Encoding.zig +++ b/src/arch/riscv64/Encoding.zig @@ -1,7 +1,65 @@ mnemonic: Mnemonic, data: Data, +const OpCode = enum(u7) { + OP = 0b0110011, + OP_IMM = 0b0010011, + OP_32 = 0b0111011, + + BRANCH = 0b1100011, + LOAD = 0b0000011, + STORE = 0b0100011, + SYSTEM = 0b1110011, + + OP_FP = 0b1010011, + LOAD_FP = 0b0000111, + STORE_FP = 0b0100111, + + JALR = 0b1100111, + AUIPC = 0b0010111, + LUI = 0b0110111, + JAL = 0b1101111, + NONE = 0b0000000, +}; + +const Fmt = enum(u2) { + /// 32-bit single-precision + S = 0b00, + /// 64-bit double-precision + D = 0b01, + _reserved = 0b10, + /// 128-bit quad-precision + Q = 0b11, +}; + +const Enc = struct { + opcode: OpCode, + + data: union(enum) { + /// funct3 + funct7 + ff: struct { + funct3: u3, + funct7: u7, + }, + /// funct3 + offset + fo: struct { + funct3: u3, + offset: u12 = 0, + }, + /// funct5 + rm + fmt + fmt: struct { + funct5: u5, + rm: u3, + fmt: Fmt, + }, + /// U-type + none, + }, +}; + pub const Mnemonic = enum { + // base mnemonics + // I Type ld, lw, @@ -10,6 +68,7 @@ pub const Mnemonic = enum { lhu, lb, lbu, + sltiu, xori, andi, @@ -52,56 +111,130 @@ pub const Mnemonic = enum { ebreak, unimp, + // float mnemonics + fadds, + faddd, + + feqs, + feqd, + + fld, + flw, + + fsd, + fsw, + pub fn encoding(mnem: Mnemonic) Enc { return switch (mnem) { // zig fmt: off - .add => .{ .opcode = 0b0110011, .funct3 = 0b000, .funct7 = 0b0000000 }, - .sltu => .{ .opcode = 0b0110011, .funct3 = 0b011, .funct7 = 0b0000000 }, - .@"and" => .{ .opcode = 0b0110011, .funct3 = 0b111, .funct7 = 0b0000000 }, - .@"or" => .{ .opcode = 0b0110011, .funct3 = 0b110, .funct7 = 0b0000000 }, - .sub => .{ .opcode = 0b0110011, .funct3 = 0b000, .funct7 = 0b0100000 }, - .ld => .{ .opcode = 0b0000011, .funct3 = 0b011, .funct7 = null }, - .lw => .{ .opcode = 0b0000011, .funct3 = 0b010, .funct7 = null }, - .lwu => .{ .opcode = 0b0000011, .funct3 = 0b110, .funct7 = null }, - .lh => .{ .opcode = 0b0000011, .funct3 = 0b001, .funct7 = null }, - .lhu => .{ .opcode = 0b0000011, .funct3 = 0b101, .funct7 = null }, - .lb => .{ .opcode = 0b0000011, .funct3 = 0b000, .funct7 = null }, - .lbu => .{ .opcode = 0b0000011, .funct3 = 0b100, .funct7 = null }, + // OP - .sltiu => .{ .opcode = 0b0010011, .funct3 = 0b011, .funct7 = null }, + .add => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b000, 
.funct7 = 0b0000000 } } }, + .sub => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b000, .funct7 = 0b0100000 } } }, - .addi => .{ .opcode = 0b0010011, .funct3 = 0b000, .funct7 = null }, - .andi => .{ .opcode = 0b0010011, .funct3 = 0b111, .funct7 = null }, - .xori => .{ .opcode = 0b0010011, .funct3 = 0b100, .funct7 = null }, - .jalr => .{ .opcode = 0b1100111, .funct3 = 0b000, .funct7 = null }, - .slli => .{ .opcode = 0b0010011, .funct3 = 0b001, .funct7 = null }, - .srli => .{ .opcode = 0b0010011, .funct3 = 0b101, .funct7 = null }, - .srai => .{ .opcode = 0b0010011, .funct3 = 0b101, .funct7 = null, .offset = 1 << 10 }, + .@"and" => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b111, .funct7 = 0b0000000 } } }, + .@"or" => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b110, .funct7 = 0b0000000 } } }, + .xor => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b100, .funct7 = 0b0000000 } } }, + + .sltu => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b011, .funct7 = 0b0000000 } } }, + .slt => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b010, .funct7 = 0b0000000 } } }, + + .mul => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b000, .funct7 = 0b0000001 } } }, + + + // OP_IMM + + .addi => .{ .opcode = .OP_IMM, .data = .{ .fo = .{ .funct3 = 0b000 } } }, + .andi => .{ .opcode = .OP_IMM, .data = .{ .fo = .{ .funct3 = 0b111 } } }, + .xori => .{ .opcode = .OP_IMM, .data = .{ .fo = .{ .funct3 = 0b100 } } }, - .sllw => .{ .opcode = 0b0111011, .funct3 = 0b001, .funct7 = 0b0000000 }, + .sltiu => .{ .opcode = .OP_IMM, .data = .{ .fo = .{ .funct3 = 0b011 } } }, - .lui => .{ .opcode = 0b0110111, .funct3 = null, .funct7 = null }, - .auipc => .{ .opcode = 0b0010111, .funct3 = null, .funct7 = null }, + .slli => .{ .opcode = .OP_IMM, .data = .{ .fo = .{ .funct3 = 0b001 } } }, + .srli => .{ .opcode = .OP_IMM, .data = .{ .fo = .{ .funct3 = 0b101 } } }, + .srai => .{ .opcode = .OP_IMM, .data = .{ .fo = .{ .funct3 = 0b101, .offset = 1 << 10 } } }, - .sd => .{ .opcode = 0b0100011, .funct3 = 0b011, .funct7 = null }, - .sw => .{ .opcode = 0b0100011, .funct3 = 0b010, .funct7 = null }, - .sh => .{ .opcode = 0b0100011, .funct3 = 0b001, .funct7 = null }, - .sb => .{ .opcode = 0b0100011, .funct3 = 0b000, .funct7 = null }, - .jal => .{ .opcode = 0b1101111, .funct3 = null, .funct7 = null }, + // OP_FP - .beq => .{ .opcode = 0b1100011, .funct3 = 0b000, .funct7 = null }, + .fadds => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00000, .fmt = .S, .rm = 0b111 } } }, + .faddd => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00000, .fmt = .D, .rm = 0b111 } } }, - .slt => .{ .opcode = 0b0110011, .funct3 = 0b010, .funct7 = 0b0000000 }, + .feqs => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b10100, .fmt = .S, .rm = 0b010 } } }, + .feqd => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b10100, .fmt = .D, .rm = 0b010 } } }, - .xor => .{ .opcode = 0b0110011, .funct3 = 0b100, .funct7 = 0b0000000 }, + // LOAD + + .ld => .{ .opcode = .LOAD, .data = .{ .fo = .{ .funct3 = 0b011 } } }, + .lw => .{ .opcode = .LOAD, .data = .{ .fo = .{ .funct3 = 0b010 } } }, + .lwu => .{ .opcode = .LOAD, .data = .{ .fo = .{ .funct3 = 0b110 } } }, + .lh => .{ .opcode = .LOAD, .data = .{ .fo = .{ .funct3 = 0b001 } } }, + .lhu => .{ .opcode = .LOAD, .data = .{ .fo = .{ .funct3 = 0b101 } } }, + .lb => .{ .opcode = .LOAD, .data = .{ .fo = .{ .funct3 = 0b000 } } }, + .lbu => .{ .opcode = .LOAD, .data = .{ .fo = .{ .funct3 = 0b100 } } }, + + + // STORE + + .sd => .{ .opcode = .STORE, .data = .{ .fo = .{ .funct3 = 0b011 } 
} }, + .sw => .{ .opcode = .STORE, .data = .{ .fo = .{ .funct3 = 0b010 } } }, + .sh => .{ .opcode = .STORE, .data = .{ .fo = .{ .funct3 = 0b001 } } }, + .sb => .{ .opcode = .STORE, .data = .{ .fo = .{ .funct3 = 0b000 } } }, + + + // LOAD_FP + + .fld => .{ .opcode = .LOAD_FP, .data = .{ .fo = .{ .funct3 = 0b011 } } }, + .flw => .{ .opcode = .LOAD_FP, .data = .{ .fo = .{ .funct3 = 0b010 } } }, + + // STORE_FP + + .fsd => .{ .opcode = .STORE_FP, .data = .{ .fo = .{ .funct3 = 0b011 } } }, + .fsw => .{ .opcode = .STORE_FP, .data = .{ .fo = .{ .funct3 = 0b010 } } }, + + + // JALR + + .jalr => .{ .opcode = .JALR, .data = .{ .fo = .{ .funct3 = 0b000 } } }, + + + // OP_32 + + .sllw => .{ .opcode = .OP_32, .data = .{ .ff = .{ .funct3 = 0b001, .funct7 = 0b0000000 } } }, + + + // LUI + + .lui => .{ .opcode = .LUI, .data = .{ .none = {} } }, + + + // AUIPC + + .auipc => .{ .opcode = .AUIPC, .data = .{ .none = {} } }, + + + // JAL + + .jal => .{ .opcode = .JAL, .data = .{ .none = {} } }, + + + // BRANCH + + .beq => .{ .opcode = .BRANCH, .data = .{ .fo = .{ .funct3 = 0b000 } } }, + + + // SYSTEM + + .ecall => .{ .opcode = .SYSTEM, .data = .{ .fo = .{ .funct3 = 0b000 } } }, + .ebreak => .{ .opcode = .SYSTEM, .data = .{ .fo = .{ .funct3 = 0b000 } } }, + + + // NONE + + .unimp => .{ .opcode = .NONE, .data = .{ .fo = .{ .funct3 = 0b000 } } }, - .mul => .{ .opcode = 0b0110011, .funct3 = 0b000, .funct7 = 0b0000001 }, - .ecall => .{ .opcode = 0b1110011, .funct3 = 0b000, .funct7 = null }, - .ebreak => .{ .opcode = 0b1110011, .funct3 = 0b000, .funct7 = null }, - .unimp => .{ .opcode = 0b0000000, .funct3 = 0b000, .funct7 = null }, // zig fmt: on }; } @@ -109,6 +242,7 @@ pub const Mnemonic = enum { pub const InstEnc = enum { R, + R4, I, S, B, @@ -121,13 +255,6 @@ pub const InstEnc = enum { pub fn fromMnemonic(mnem: Mnemonic) InstEnc { return switch (mnem) { .addi, - .ld, - .lw, - .lwu, - .lh, - .lhu, - .lb, - .lbu, .jalr, .sltiu, .xori, @@ -135,6 +262,17 @@ pub const InstEnc = enum { .slli, .srli, .srai, + + .ld, + .lw, + .lwu, + .lh, + .lhu, + .lb, + .lbu, + + .flw, + .fld, => .I, .lui, @@ -145,6 +283,9 @@ pub const InstEnc = enum { .sw, .sh, .sb, + + .fsd, + .fsw, => .S, .jal, @@ -162,6 +303,11 @@ pub const InstEnc = enum { .sub, .@"and", .@"or", + + .fadds, + .faddd, + .feqs, + .feqd, => .R, .ecall, @@ -171,16 +317,17 @@ pub const InstEnc = enum { }; } - pub fn opsList(enc: InstEnc) [3]std.meta.FieldEnum(Operand) { + pub fn opsList(enc: InstEnc) [4]std.meta.FieldEnum(Operand) { return switch (enc) { // zig fmt: off - .R => .{ .reg, .reg, .reg, }, - .I => .{ .reg, .reg, .imm, }, - .S => .{ .reg, .reg, .imm, }, - .B => .{ .reg, .reg, .imm, }, - .U => .{ .reg, .imm, .none, }, - .J => .{ .reg, .imm, .none, }, - .system => .{ .none, .none, .none, }, + .R => .{ .reg, .reg, .reg, .none }, + .R4 => .{ .reg, .reg, .reg, .reg }, + .I => .{ .reg, .reg, .imm, .none }, + .S => .{ .reg, .reg, .imm, .none }, + .B => .{ .reg, .reg, .imm, .none }, + .U => .{ .reg, .imm, .none, .none }, + .J => .{ .reg, .imm, .none, .none }, + .system => .{ .none, .none, .none, .none }, // zig fmt: on }; } @@ -195,6 +342,15 @@ pub const Data = union(InstEnc) { rs2: u5, funct7: u7, }, + R4: packed struct { + opcode: u7, + rd: u5, + funct3: u3, + rs1: u5, + rs2: u5, + funct2: u2, + rs3: u5, + }, I: packed struct { opcode: u7, rd: u5, @@ -237,19 +393,21 @@ pub const Data = union(InstEnc) { pub fn toU32(self: Data) u32 { return switch (self) { - .R => |v| @as(u32, @bitCast(v)), - .I => |v| @as(u32, @bitCast(v)), - .S => |v| @as(u32, @bitCast(v)), - 
.B => |v| @as(u32, @intCast(v.opcode)) + (@as(u32, @intCast(v.imm11)) << 7) + (@as(u32, @intCast(v.imm1_4)) << 8) + (@as(u32, @intCast(v.funct3)) << 12) + (@as(u32, @intCast(v.rs1)) << 15) + (@as(u32, @intCast(v.rs2)) << 20) + (@as(u32, @intCast(v.imm5_10)) << 25) + (@as(u32, @intCast(v.imm12)) << 31), - .U => |v| @as(u32, @bitCast(v)), - .J => |v| @as(u32, @bitCast(v)), + // zig fmt: off + .R => |v| @bitCast(v), + .R4 => |v| @bitCast(v), + .I => |v| @bitCast(v), + .S => |v| @bitCast(v), + .B => |v| @as(u32, @intCast(v.opcode)) + (@as(u32, @intCast(v.imm11)) << 7) + (@as(u32, @intCast(v.imm1_4)) << 8) + (@as(u32, @intCast(v.funct3)) << 12) + (@as(u32, @intCast(v.rs1)) << 15) + (@as(u32, @intCast(v.rs2)) << 20) + (@as(u32, @intCast(v.imm5_10)) << 25) + (@as(u32, @intCast(v.imm12)) << 31), + .U => |v| @bitCast(v), + .J => |v| @bitCast(v), .system => unreachable, + // zig fmt: on }; } pub fn construct(mnem: Mnemonic, ops: []const Operand) !Data { const inst_enc = InstEnc.fromMnemonic(mnem); - const enc = mnem.encoding(); // special mnemonics @@ -261,8 +419,8 @@ pub const Data = union(InstEnc) { assert(ops.len == 0); return .{ .I = .{ - .rd = Register.zero.id(), - .rs1 = Register.zero.id(), + .rd = Register.zero.encodeId(), + .rs1 = Register.zero.encodeId(), .imm0_11 = switch (mnem) { .ecall => 0x000, .ebreak => 0x001, @@ -270,8 +428,8 @@ pub const Data = union(InstEnc) { else => unreachable, }, - .opcode = enc.opcode, - .funct3 = enc.funct3.?, + .opcode = @intFromEnum(enc.opcode), + .funct3 = enc.data.fo.funct3, }, }; }, @@ -282,14 +440,26 @@ pub const Data = union(InstEnc) { .R => { assert(ops.len == 3); return .{ - .R = .{ - .rd = ops[0].reg.id(), - .rs1 = ops[1].reg.id(), - .rs2 = ops[2].reg.id(), + .R = switch (enc.data) { + .ff => |ff| .{ + .rd = ops[0].reg.encodeId(), + .rs1 = ops[1].reg.encodeId(), + .rs2 = ops[2].reg.encodeId(), - .opcode = enc.opcode, - .funct3 = enc.funct3.?, - .funct7 = enc.funct7.?, + .opcode = @intFromEnum(enc.opcode), + .funct3 = ff.funct3, + .funct7 = ff.funct7, + }, + .fmt => |fmt| .{ + .rd = ops[0].reg.encodeId(), + .rs1 = ops[1].reg.encodeId(), + .rs2 = ops[2].reg.encodeId(), + + .opcode = @intFromEnum(enc.opcode), + .funct3 = fmt.rm, + .funct7 = (@as(u7, fmt.funct5) << 2) | @intFromEnum(fmt.fmt), + }, + else => unreachable, }, }; }, @@ -300,12 +470,12 @@ pub const Data = union(InstEnc) { return .{ .S = .{ .imm0_4 = @truncate(umm), - .rs1 = ops[0].reg.id(), - .rs2 = ops[1].reg.id(), + .rs1 = ops[0].reg.encodeId(), + .rs2 = ops[1].reg.encodeId(), .imm5_11 = @truncate(umm >> 5), - .opcode = enc.opcode, - .funct3 = enc.funct3.?, + .opcode = @intFromEnum(enc.opcode), + .funct3 = enc.data.fo.funct3, }, }; }, @@ -313,12 +483,12 @@ pub const Data = union(InstEnc) { assert(ops.len == 3); return .{ .I = .{ - .rd = ops[0].reg.id(), - .rs1 = ops[1].reg.id(), - .imm0_11 = ops[2].imm.asBits(u12) + enc.offset, + .rd = ops[0].reg.encodeId(), + .rs1 = ops[1].reg.encodeId(), + .imm0_11 = ops[2].imm.asBits(u12) + enc.data.fo.offset, - .opcode = enc.opcode, - .funct3 = enc.funct3.?, + .opcode = @intFromEnum(enc.opcode), + .funct3 = enc.data.fo.funct3, }, }; }, @@ -326,10 +496,10 @@ pub const Data = union(InstEnc) { assert(ops.len == 2); return .{ .U = .{ - .rd = ops[0].reg.id(), + .rd = ops[0].reg.encodeId(), .imm12_31 = ops[1].imm.asBits(u20), - .opcode = enc.opcode, + .opcode = @intFromEnum(enc.opcode), }, }; }, @@ -341,13 +511,13 @@ pub const Data = union(InstEnc) { return .{ .J = .{ - .rd = ops[0].reg.id(), + .rd = ops[0].reg.encodeId(), .imm1_10 = @truncate(umm >> 1), 
.imm11 = @truncate(umm >> 11), .imm12_19 = @truncate(umm >> 12), .imm20 = @truncate(umm >> 20), - .opcode = enc.opcode, + .opcode = @intFromEnum(enc.opcode), }, }; }, @@ -359,15 +529,15 @@ pub const Data = union(InstEnc) { return .{ .B = .{ - .rs1 = ops[0].reg.id(), - .rs2 = ops[1].reg.id(), + .rs1 = ops[0].reg.encodeId(), + .rs2 = ops[1].reg.encodeId(), .imm1_4 = @truncate(umm >> 1), .imm5_10 = @truncate(umm >> 5), .imm11 = @truncate(umm >> 11), .imm12 = @truncate(umm >> 12), - .opcode = enc.opcode, - .funct3 = enc.funct3.?, + .opcode = @intFromEnum(enc.opcode), + .funct3 = enc.data.fo.funct3, }, }; }, @@ -386,13 +556,6 @@ pub fn findByMnemonic(mnem: Mnemonic, ops: []const Operand) !?Encoding { }; } -const Enc = struct { - opcode: u7, - funct3: ?u3, - funct7: ?u7, - offset: u12 = 0, -}; - fn verifyOps(mnem: Mnemonic, ops: []const Operand) bool { const inst_enc = InstEnc.fromMnemonic(mnem); const list = std.mem.sliceTo(&inst_enc.opsList(), .none); diff --git a/src/arch/riscv64/Lower.zig b/src/arch/riscv64/Lower.zig index d44c614b06..659d4eb605 100644 --- a/src/arch/riscv64/Lower.zig +++ b/src/arch/riscv64/Lower.zig @@ -14,7 +14,7 @@ result_relocs_len: u8 = undefined, result_insts: [ @max( 1, // non-pseudo instruction - abi.callee_preserved_regs.len, // spill / restore regs, + abi.Registers.all_preserved.len, // spill / restore regs, ) ]Instruction = undefined, result_relocs: [1]Reloc = undefined, @@ -71,11 +71,24 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { switch (inst.ops) { .pseudo_load_rm => { - const tag: Encoding.Mnemonic = switch (rm.m.mod.size()) { - .byte => .lb, - .hword => .lh, - .word => .lw, - .dword => .ld, + const dest_reg = rm.r; + const dest_reg_class = dest_reg.class(); + const float = dest_reg_class == .float; + + const src_size = rm.m.mod.size(); + + const tag: Encoding.Mnemonic = if (!float) + switch (src_size) { + .byte => .lb, + .hword => .lh, + .word => .lw, + .dword => .ld, + } + else switch (src_size) { + .byte => unreachable, // Zig does not support 8-bit floats + .hword => return lower.fail("TODO: lowerMir pseudo_load_rm support 16-bit floats", .{}), + .word => .flw, + .dword => .fld, }; try lower.emit(tag, &.{ @@ -85,11 +98,25 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { }); }, .pseudo_store_rm => { - const tag: Encoding.Mnemonic = switch (rm.m.mod.size()) { - .byte => .sb, - .hword => .sh, - .word => .sw, - .dword => .sd, + const src_reg = rm.r; + const src_reg_class = src_reg.class(); + const float = src_reg_class == .float; + + // TODO: do we actually need this? are all stores not usize? 
+ const dest_size = rm.m.mod.size(); + + const tag: Encoding.Mnemonic = if (!float) + switch (dest_size) { + .byte => .sb, + .hword => .sh, + .word => .sw, + .dword => .sd, + } + else switch (dest_size) { + .byte => unreachable, // Zig does not support 8-bit floats + .hword => return lower.fail("TODO: lowerMir pseudo_store_rm support 16-bit floats", .{}), + .word => .fsw, + .dword => .fsd, }; try lower.emit(tag, &.{ @@ -336,16 +363,19 @@ fn pushPopRegList(lower: *Lower, comptime spilling: bool, reg_list: Mir.Register var reg_i: u31 = 0; while (it.next()) |i| { const frame = lower.mir.frame_locs.get(@intFromEnum(bits.FrameIndex.spill_frame)); + const reg = abi.Registers.all_preserved[i]; + const reg_class = reg.class(); + const is_float_reg = reg_class == .float; if (spilling) { - try lower.emit(.sd, &.{ + try lower.emit(if (is_float_reg) .fsd else .sd, &.{ .{ .reg = frame.base }, - .{ .reg = abi.callee_preserved_regs[i] }, + .{ .reg = abi.Registers.all_preserved[i] }, .{ .imm = Immediate.s(frame.disp + reg_i) }, }); } else { - try lower.emit(.ld, &.{ - .{ .reg = abi.callee_preserved_regs[i] }, + try lower.emit(if (is_float_reg) .fld else .ld, &.{ + .{ .reg = abi.Registers.all_preserved[i] }, .{ .reg = frame.base }, .{ .imm = Immediate.s(frame.disp + reg_i) }, }); diff --git a/src/arch/riscv64/Mir.zig b/src/arch/riscv64/Mir.zig index 5d21719da2..e26f811ff5 100644 --- a/src/arch/riscv64/Mir.zig +++ b/src/arch/riscv64/Mir.zig @@ -20,87 +20,68 @@ pub const Inst = struct { pub const Index = u32; pub const Tag = enum(u16) { - /// Add immediate. Uses i_type payload. - addi, - /// Add immediate and produce a sign-extended result. - /// - /// Uses i-type payload. + // base extension + addi, addiw, jalr, lui, - mv, @"and", + andi, + xor, + @"or", ebreak, ecall, unimp, - /// OR instruction. Uses r_type payload. - @"or", - - /// Addition add, - /// Subtraction sub, - /// Multiply, uses r_type. Needs the M extension. - mul, - - /// Absolute Value, uses i_type payload. - abs, sltu, slt, - /// Immediate Logical Right Shift, uses i_type payload srli, - /// Immediate Logical Left Shift, uses i_type payload slli, - /// Immediate Arithmetic Right Shift, uses i_type payload. srai, - /// Register Logical Left Shift, uses r_type payload sllw, - /// Register Logical Right Shit, uses r_type payload srlw, - /// Jumps, but stores the address of the instruction following the - /// jump in `rd`. - /// - /// Uses j_type payload. jal, - /// Immediate AND, uses i_type payload - andi, - - /// Branch if equal, Uses b_type beq, - /// Branch if not equal, Uses b_type bne, - /// Generates a NO-OP, uses nop payload nop, - /// Load double (64 bits), uses i_type payload ld, - /// Load word (32 bits), uses i_type payload lw, - /// Load half (16 bits), uses i_type payload lh, - /// Load byte (8 bits), uses i_type payload lb, - /// Store double (64 bits), uses s_type payload sd, - /// Store word (32 bits), uses s_type payload sw, - /// Store half (16 bits), uses s_type payload sh, - /// Store byte (8 bits), uses s_type payload sb, + // M extension + mul, + + // F extension (32-bit float) + fadds, + flw, + fsw, + feqs, + + // D extension (64-bit float) + faddd, + fld, + fsd, + feqd, + /// A pseudo-instruction. Used for anything that isn't 1:1 with an /// assembly instruction. 
pseudo, diff --git a/src/arch/riscv64/abi.zig b/src/arch/riscv64/abi.zig index 41edd60c67..88a09d2a50 100644 --- a/src/arch/riscv64/abi.zig +++ b/src/arch/riscv64/abi.zig @@ -238,62 +238,81 @@ fn classifyStruct( } } -pub const callee_preserved_regs = [_]Register{ - // .s0 is ommited to be used as a frame pointer - .s1, .s2, .s3, .s4, .s5, .s6, .s7, .s8, .s9, .s10, .s11, -}; - -pub const function_arg_regs = [_]Register{ - .a0, .a1, .a2, .a3, .a4, .a5, .a6, .a7, -}; - -pub const function_ret_regs = [_]Register{ - .a0, .a1, -}; - -pub const temporary_regs = [_]Register{ - .t0, .t1, .t2, .t3, .t4, .t5, .t6, -}; - -const allocatable_registers = callee_preserved_regs ++ function_arg_regs ++ temporary_regs; +const allocatable_registers = Registers.Integer.all_regs ++ Registers.Float.all_regs; pub const RegisterManager = RegisterManagerFn(@import("CodeGen.zig"), Register, &allocatable_registers); // Register classes const RegisterBitSet = RegisterManager.RegisterBitSet; -pub const RegisterClass = struct { - pub const gp: RegisterBitSet = blk: { - var set = RegisterBitSet.initEmpty(); - set.setRangeValue(.{ - .start = 0, - .end = callee_preserved_regs.len, - }, true); - break :blk set; + +pub const RegisterClass = enum { + int, + float, +}; + +pub const Registers = struct { + pub const all_preserved = Integer.callee_preserved_regs ++ Float.callee_preserved_regs; + + pub const Integer = struct { + // zig fmt: off + pub const general_purpose = initRegBitSet(0, callee_preserved_regs.len); + pub const function_arg = initRegBitSet(callee_preserved_regs.len, function_arg_regs.len); + pub const function_ret = initRegBitSet(callee_preserved_regs.len, function_ret_regs.len); + pub const temporary = initRegBitSet(callee_preserved_regs.len + function_arg_regs.len, temporary_regs.len); + // zig fmt: on + + pub const callee_preserved_regs = [_]Register{ + // .s0 is omitted to be used as the frame pointer register + .s1, .s2, .s3, .s4, .s5, .s6, .s7, .s8, .s9, .s10, .s11, + }; + + pub const function_arg_regs = [_]Register{ + .a0, .a1, .a2, .a3, .a4, .a5, .a6, .a7, + }; + + pub const function_ret_regs = [_]Register{ + .a0, .a1, + }; + + pub const temporary_regs = [_]Register{ + .t0, .t1, .t2, .t3, .t4, .t5, .t6, + }; + + pub const all_regs = callee_preserved_regs ++ function_arg_regs ++ temporary_regs; }; - pub const fa: RegisterBitSet = blk: { - var set = RegisterBitSet.initEmpty(); - set.setRangeValue(.{ - .start = callee_preserved_regs.len, - .end = callee_preserved_regs.len + function_arg_regs.len, - }, true); - break :blk set; - }; + pub const Float = struct { + // zig fmt: off + pub const general_purpose = initRegBitSet(Integer.all_regs.len, callee_preserved_regs.len); + pub const function_arg = initRegBitSet(Integer.all_regs.len + callee_preserved_regs.len, function_arg_regs.len); + pub const function_ret = initRegBitSet(Integer.all_regs.len + callee_preserved_regs.len, function_ret_regs.len); + pub const temporary = initRegBitSet(Integer.all_regs.len + callee_preserved_regs.len + function_arg_regs.len, temporary_regs.len); + // zig fmt: on - pub const fr: RegisterBitSet = blk: { - var set = RegisterBitSet.initEmpty(); - set.setRangeValue(.{ - .start = callee_preserved_regs.len, - .end = callee_preserved_regs.len + function_ret_regs.len, - }, true); - break :blk set; - }; + pub const callee_preserved_regs = [_]Register{ + .fs0, .fs1, .fs2, .fs3, .fs4, .fs5, .fs6, .fs7, .fs8, .fs9, .fs10, .fs11, + }; - pub const tp: RegisterBitSet = blk: { - var set = RegisterBitSet.initEmpty(); - set.setRangeValue(.{ - 
.start = callee_preserved_regs.len + function_arg_regs.len, - .end = callee_preserved_regs.len + function_arg_regs.len + temporary_regs.len, - }, true); - break :blk set; + pub const function_arg_regs = [_]Register{ + .fa0, .fa1, .fa2, .fa3, .fa4, .fa5, .fa6, .fa7, + }; + + pub const function_ret_regs = [_]Register{ + .fa0, .fa1, + }; + + pub const temporary_regs = [_]Register{ + .ft0, .ft1, .ft2, .ft3, .ft4, .ft5, .ft6, .ft7, .ft8, .ft9, .ft10, .ft11, + }; + + pub const all_regs = callee_preserved_regs ++ function_arg_regs ++ temporary_regs; }; }; + +fn initRegBitSet(start: usize, length: usize) RegisterBitSet { + var set = RegisterBitSet.initEmpty(); + set.setRangeValue(.{ + .start = start, + .end = start + length, + }, true); + return set; +} diff --git a/src/arch/riscv64/bits.zig b/src/arch/riscv64/bits.zig index 281ad80292..6264ec1854 100644 --- a/src/arch/riscv64/bits.zig +++ b/src/arch/riscv64/bits.zig @@ -4,6 +4,7 @@ const assert = std.debug.assert; const testing = std.testing; const Encoding = @import("Encoding.zig"); const Mir = @import("Mir.zig"); +const abi = @import("abi.zig"); pub const Memory = struct { base: Base, @@ -154,10 +155,10 @@ pub const Immediate = union(enum) { } }; -pub const Register = enum(u6) { +pub const Register = enum(u8) { // zig fmt: off - // general purpose registers + // base extension registers zero, // zero ra, // return address. caller saved @@ -178,12 +179,48 @@ pub const Register = enum(u6) { x24, x25, x26, x27, x28, x29, x30, x31, + // F extension registers + + ft0, ft1, ft2, ft3, ft4, ft5, ft6, ft7, // float temporaries. caller saved. + fs0, fs1, // float saved. callee saved. + fa0, fa1, // float arg/ret. caller saved. + fa2, fa3, fa4, fa5, fa6, fa7, // float arg. caller saved. + fs2, fs3, fs4, fs5, fs6, fs7, fs8, fs9, fs10, fs11, // float saved. callee saved. + ft8, ft9, ft10, ft11, // float temporaries. caller saved. + + // this register is accessed only through CSR instructions, never directly + // fcsr, + + f0, f1, f2, f3, f4, f5, f6, f7, + f8, f9, f10, f11, f12, f13, f14, f15, + f16, f17, f18, f19, f20, f21, f22, f23, + f24, f25, f26, f27, f28, f29, f30, f31, + // zig fmt: on - /// Returns the unique 5-bit ID of this register which is used in - /// the machine code - pub fn id(self: Register) u5 { - return @as(u5, @truncate(@intFromEnum(self))); + /// In RISC-V, registers are encoded as 5-bit IDs, and a register can have + /// two names: `zero` and `x0`, for example, are the same register and have the + /// same ID, but are two different entries in the enum. We store floating-point + /// registers in the same enum. RISC-V reuses the same 5-bit IDs for `f0` and `x0`, + /// inferring which register file is meant from the instruction it appears in. + /// + /// The goal of this function is to return the same ID for `zero` and `x0` but two + /// separate IDs for `x0` and `f0`. We assume that each register set has 32 registers + /// and is repeated twice, once for the named version, once for the numbered version. + pub fn id(reg: Register) u7 { + const base = switch (@intFromEnum(reg)) { + // zig fmt: off + @intFromEnum(Register.zero) ... @intFromEnum(Register.x31) => @intFromEnum(Register.zero), + @intFromEnum(Register.ft0) ... 
@intFromEnum(Register.f31) => @intFromEnum(Register.ft0), + else => unreachable, + // zig fmt: on + }; + + return @intCast(base + reg.encodeId()); + } + + pub fn encodeId(reg: Register) u5 { + return @truncate(@intFromEnum(reg)); } pub fn dwarfLocOp(reg: Register) u8 { @@ -192,7 +229,21 @@ pub const Register = enum(u6) { pub fn bitSize(reg: Register) u32 { return switch (@intFromEnum(reg)) { - @intFromEnum(Register.zero)...@intFromEnum(Register.x31) => 64, + // zig fmt: off + @intFromEnum(Register.zero) ... @intFromEnum(Register.x31) => 64, + @intFromEnum(Register.ft0) ... @intFromEnum(Register.f31) => 32, + else => unreachable, + // zig fmt: on + }; + } + + pub fn class(reg: Register) abi.RegisterClass { + return switch (@intFromEnum(reg)) { + // zig fmt: off + @intFromEnum(Register.zero) ... @intFromEnum(Register.x31) => .int, + @intFromEnum(Register.ft0) ... @intFromEnum(Register.f31) => .float, + else => unreachable, + // zig fmt: on }; } }; diff --git a/test/behavior/byteswap.zig b/test/behavior/byteswap.zig index fd7e2af850..0c6e655b25 100644 --- a/test/behavior/byteswap.zig +++ b/test/behavior/byteswap.zig @@ -100,6 +100,7 @@ test "@byteSwap vectors u8" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try comptime vector8(); try vector8(); diff --git a/test/behavior/fn.zig b/test/behavior/fn.zig index 433e24af00..78ca6d0fd1 100644 --- a/test/behavior/fn.zig +++ b/test/behavior/fn.zig @@ -349,7 +349,6 @@ test "function call with anon list literal" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -370,7 +369,6 @@ test "function call with anon list literal - 2D" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { diff --git a/test/behavior/globals.zig b/test/behavior/globals.zig index 0c988450c0..17b8c4b823 100644 --- a/test/behavior/globals.zig +++ b/test/behavior/globals.zig @@ -7,7 +7,6 @@ test "store to global array" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect(pos[1] == 0.0); pos = [2]f32{ 0.0, 1.0 }; diff --git a/test/behavior/slice.zig b/test/behavior/slice.zig index 8614c78804..97b6e88f27 100644 --- a/test/behavior/slice.zig +++ b/test/behavior/slice.zig @@ -1001,6 +1001,7 @@ test "sentinel-terminated 0-length slices" { if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; 
const u32s: [4]u32 = [_]u32{ 0, 1, 2, 3 }; diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig index 2e860e1001..e32f5af9a3 100644 --- a/test/behavior/vector.zig +++ b/test/behavior/vector.zig @@ -434,6 +434,7 @@ test "load vector elements via runtime index" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { From b2cb090c3790dd10a784f9e291230cf6052b514c Mon Sep 17 00:00:00 2001 From: David Rubin Date: Sat, 11 May 2024 02:04:18 -0700 Subject: [PATCH 12/24] riscv: float args --- src/arch/riscv64/CodeGen.zig | 31 ++++++++---- src/arch/riscv64/Encoding.zig | 91 ++++++++++++++++++----------------- src/arch/riscv64/Lower.zig | 26 ++++++++-- src/arch/riscv64/abi.zig | 22 +++++++-- src/arch/riscv64/bits.zig | 9 +++- src/codegen/llvm.zig | 2 - 6 files changed, 116 insertions(+), 65 deletions(-) diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 368ceb08be..7b44bcfc7c 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -5226,13 +5226,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_mcv: MCValue) InnerError! const src_reg_class = src_reg.class(); - if (src_reg_class == .float) { - if (dst_reg_class == .float) { - return self.fail("TODO: genSetReg float -> float", .{}); - } - - assert(dst_reg_class == .int); // a bit of future proofing - + if (src_reg_class == .float and dst_reg_class == .int) { // to move from float -> int, we use FMV.X.W return self.fail("TODO: genSetReg float -> int", .{}); } @@ -6031,6 +6025,7 @@ fn resolveCallingConventionValues( } else { var ret_tracking: [2]InstTracking = undefined; var ret_tracking_i: usize = 0; + var ret_float_reg_i: usize = 0; const classes = mem.sliceTo(&abi.classifySystem(ret_ty, zcu), .none); @@ -6042,6 +6037,13 @@ fn resolveCallingConventionValues( ret_tracking[ret_tracking_i] = InstTracking.init(.{ .register = ret_int_reg }); ret_tracking_i += 1; }, + .float => { + const ret_float_reg = abi.Registers.Float.function_ret_regs[ret_float_reg_i]; + ret_float_reg_i += 1; + + ret_tracking[ret_tracking_i] = InstTracking.init(.{ .register = ret_float_reg }); + ret_tracking_i += 1; + }, .memory => { const ret_int_reg = abi.Registers.Integer.function_ret_regs[ret_int_reg_i]; ret_int_reg_i += 1; @@ -6076,6 +6078,8 @@ fn resolveCallingConventionValues( var arg_mcv: [2]MCValue = undefined; var arg_mcv_i: usize = 0; + var param_float_reg_i: usize = 0; + const classes = mem.sliceTo(&abi.classifySystem(ty, zcu), .none); for (classes) |class| switch (class) { @@ -6089,6 +6093,16 @@ fn resolveCallingConventionValues( arg_mcv[arg_mcv_i] = .{ .register = param_int_reg }; arg_mcv_i += 1; }, + .float => { + const param_float_regs = abi.Registers.Float.function_arg_regs; + if (param_float_reg_i >= param_float_regs.len) break; + + const param_float_reg = param_float_regs[param_float_reg_i]; + param_float_reg_i += 1; + + arg_mcv[arg_mcv_i] = .{ .register = param_float_reg }; + arg_mcv_i += 1; + }, .memory => { const param_int_regs = abi.Registers.Integer.function_arg_regs; @@ -6118,9 +6132,8 @@ fn resolveCallingConventionValues( return result; } -/// TODO support scope overrides. Also note this logic is duplicated with `Module.wantSafety`. 
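+/// Returns whether runtime safety checks should be emitted, based on the optimization mode.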
fn wantSafety(self: *Self) bool { - return switch (self.bin_file.comp.root_mod.optimize_mode) { + return switch (self.mod.optimize_mode) { .Debug => true, .ReleaseSafe => true, .ReleaseFast => false, diff --git a/src/arch/riscv64/Encoding.zig b/src/arch/riscv64/Encoding.zig index eec4320753..bbea73aac9 100644 --- a/src/arch/riscv64/Encoding.zig +++ b/src/arch/riscv64/Encoding.zig @@ -124,115 +124,119 @@ pub const Mnemonic = enum { fsd, fsw, + fsgnjns, + pub fn encoding(mnem: Mnemonic) Enc { return switch (mnem) { // zig fmt: off // OP - .add => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b000, .funct7 = 0b0000000 } } }, - .sub => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b000, .funct7 = 0b0100000 } } }, + .add => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b000, .funct7 = 0b0000000 } } }, + .sub => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b000, .funct7 = 0b0100000 } } }, - .@"and" => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b111, .funct7 = 0b0000000 } } }, - .@"or" => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b110, .funct7 = 0b0000000 } } }, - .xor => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b100, .funct7 = 0b0000000 } } }, + .@"and" => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b111, .funct7 = 0b0000000 } } }, + .@"or" => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b110, .funct7 = 0b0000000 } } }, + .xor => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b100, .funct7 = 0b0000000 } } }, - .sltu => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b011, .funct7 = 0b0000000 } } }, - .slt => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b010, .funct7 = 0b0000000 } } }, + .sltu => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b011, .funct7 = 0b0000000 } } }, + .slt => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b010, .funct7 = 0b0000000 } } }, - .mul => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b000, .funct7 = 0b0000001 } } }, + .mul => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b000, .funct7 = 0b0000001 } } }, // OP_IMM - .addi => .{ .opcode = .OP_IMM, .data = .{ .fo = .{ .funct3 = 0b000 } } }, - .andi => .{ .opcode = .OP_IMM, .data = .{ .fo = .{ .funct3 = 0b111 } } }, - .xori => .{ .opcode = .OP_IMM, .data = .{ .fo = .{ .funct3 = 0b100 } } }, + .addi => .{ .opcode = .OP_IMM, .data = .{ .fo = .{ .funct3 = 0b000 } } }, + .andi => .{ .opcode = .OP_IMM, .data = .{ .fo = .{ .funct3 = 0b111 } } }, + .xori => .{ .opcode = .OP_IMM, .data = .{ .fo = .{ .funct3 = 0b100 } } }, - .sltiu => .{ .opcode = .OP_IMM, .data = .{ .fo = .{ .funct3 = 0b011 } } }, + .sltiu => .{ .opcode = .OP_IMM, .data = .{ .fo = .{ .funct3 = 0b011 } } }, - .slli => .{ .opcode = .OP_IMM, .data = .{ .fo = .{ .funct3 = 0b001 } } }, - .srli => .{ .opcode = .OP_IMM, .data = .{ .fo = .{ .funct3 = 0b101 } } }, - .srai => .{ .opcode = .OP_IMM, .data = .{ .fo = .{ .funct3 = 0b101, .offset = 1 << 10 } } }, + .slli => .{ .opcode = .OP_IMM, .data = .{ .fo = .{ .funct3 = 0b001 } } }, + .srli => .{ .opcode = .OP_IMM, .data = .{ .fo = .{ .funct3 = 0b101 } } }, + .srai => .{ .opcode = .OP_IMM, .data = .{ .fo = .{ .funct3 = 0b101, .offset = 1 << 10 } } }, // OP_FP - .fadds => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00000, .fmt = .S, .rm = 0b111 } } }, - .faddd => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00000, .fmt = .D, .rm = 0b111 } } }, + .fadds => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00000, .fmt = .S, .rm = 0b111 } } }, + .faddd => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00000, .fmt = .D, .rm = 0b111 } 
} }, - .feqs => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b10100, .fmt = .S, .rm = 0b010 } } }, - .feqd => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b10100, .fmt = .D, .rm = 0b010 } } }, + .feqs => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b10100, .fmt = .S, .rm = 0b010 } } }, + .feqd => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b10100, .fmt = .D, .rm = 0b010 } } }, + + .fsgnjns => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00100, .fmt = .S, .rm = 0b000 } } }, // LOAD - .ld => .{ .opcode = .LOAD, .data = .{ .fo = .{ .funct3 = 0b011 } } }, - .lw => .{ .opcode = .LOAD, .data = .{ .fo = .{ .funct3 = 0b010 } } }, - .lwu => .{ .opcode = .LOAD, .data = .{ .fo = .{ .funct3 = 0b110 } } }, - .lh => .{ .opcode = .LOAD, .data = .{ .fo = .{ .funct3 = 0b001 } } }, - .lhu => .{ .opcode = .LOAD, .data = .{ .fo = .{ .funct3 = 0b101 } } }, - .lb => .{ .opcode = .LOAD, .data = .{ .fo = .{ .funct3 = 0b000 } } }, - .lbu => .{ .opcode = .LOAD, .data = .{ .fo = .{ .funct3 = 0b100 } } }, + .ld => .{ .opcode = .LOAD, .data = .{ .fo = .{ .funct3 = 0b011 } } }, + .lw => .{ .opcode = .LOAD, .data = .{ .fo = .{ .funct3 = 0b010 } } }, + .lwu => .{ .opcode = .LOAD, .data = .{ .fo = .{ .funct3 = 0b110 } } }, + .lh => .{ .opcode = .LOAD, .data = .{ .fo = .{ .funct3 = 0b001 } } }, + .lhu => .{ .opcode = .LOAD, .data = .{ .fo = .{ .funct3 = 0b101 } } }, + .lb => .{ .opcode = .LOAD, .data = .{ .fo = .{ .funct3 = 0b000 } } }, + .lbu => .{ .opcode = .LOAD, .data = .{ .fo = .{ .funct3 = 0b100 } } }, // STORE - .sd => .{ .opcode = .STORE, .data = .{ .fo = .{ .funct3 = 0b011 } } }, - .sw => .{ .opcode = .STORE, .data = .{ .fo = .{ .funct3 = 0b010 } } }, - .sh => .{ .opcode = .STORE, .data = .{ .fo = .{ .funct3 = 0b001 } } }, - .sb => .{ .opcode = .STORE, .data = .{ .fo = .{ .funct3 = 0b000 } } }, + .sd => .{ .opcode = .STORE, .data = .{ .fo = .{ .funct3 = 0b011 } } }, + .sw => .{ .opcode = .STORE, .data = .{ .fo = .{ .funct3 = 0b010 } } }, + .sh => .{ .opcode = .STORE, .data = .{ .fo = .{ .funct3 = 0b001 } } }, + .sb => .{ .opcode = .STORE, .data = .{ .fo = .{ .funct3 = 0b000 } } }, // LOAD_FP - .fld => .{ .opcode = .LOAD_FP, .data = .{ .fo = .{ .funct3 = 0b011 } } }, - .flw => .{ .opcode = .LOAD_FP, .data = .{ .fo = .{ .funct3 = 0b010 } } }, + .fld => .{ .opcode = .LOAD_FP, .data = .{ .fo = .{ .funct3 = 0b011 } } }, + .flw => .{ .opcode = .LOAD_FP, .data = .{ .fo = .{ .funct3 = 0b010 } } }, // STORE_FP - .fsd => .{ .opcode = .STORE_FP, .data = .{ .fo = .{ .funct3 = 0b011 } } }, - .fsw => .{ .opcode = .STORE_FP, .data = .{ .fo = .{ .funct3 = 0b010 } } }, + .fsd => .{ .opcode = .STORE_FP, .data = .{ .fo = .{ .funct3 = 0b011 } } }, + .fsw => .{ .opcode = .STORE_FP, .data = .{ .fo = .{ .funct3 = 0b010 } } }, // JALR - .jalr => .{ .opcode = .JALR, .data = .{ .fo = .{ .funct3 = 0b000 } } }, + .jalr => .{ .opcode = .JALR, .data = .{ .fo = .{ .funct3 = 0b000 } } }, // OP_32 - .sllw => .{ .opcode = .OP_32, .data = .{ .ff = .{ .funct3 = 0b001, .funct7 = 0b0000000 } } }, + .sllw => .{ .opcode = .OP_32, .data = .{ .ff = .{ .funct3 = 0b001, .funct7 = 0b0000000 } } }, // LUI - .lui => .{ .opcode = .LUI, .data = .{ .none = {} } }, + .lui => .{ .opcode = .LUI, .data = .{ .none = {} } }, // AUIPC - .auipc => .{ .opcode = .AUIPC, .data = .{ .none = {} } }, + .auipc => .{ .opcode = .AUIPC, .data = .{ .none = {} } }, // JAL - .jal => .{ .opcode = .JAL, .data = .{ .none = {} } }, + .jal => .{ .opcode = .JAL, .data = .{ .none = {} } }, // BRANCH - .beq => .{ .opcode = .BRANCH, .data = .{ .fo = 
.{ .funct3 = 0b000 } } }, + .beq => .{ .opcode = .BRANCH, .data = .{ .fo = .{ .funct3 = 0b000 } } }, // SYSTEM - .ecall => .{ .opcode = .SYSTEM, .data = .{ .fo = .{ .funct3 = 0b000 } } }, - .ebreak => .{ .opcode = .SYSTEM, .data = .{ .fo = .{ .funct3 = 0b000 } } }, + .ecall => .{ .opcode = .SYSTEM, .data = .{ .fo = .{ .funct3 = 0b000 } } }, + .ebreak => .{ .opcode = .SYSTEM, .data = .{ .fo = .{ .funct3 = 0b000 } } }, // NONE - .unimp => .{ .opcode = .NONE, .data = .{ .fo = .{ .funct3 = 0b000 } } }, + .unimp => .{ .opcode = .NONE, .data = .{ .fo = .{ .funct3 = 0b000 } } }, // zig fmt: on @@ -308,6 +312,7 @@ pub const InstEnc = enum { .faddd, .feqs, .feqd, + .fsgnjns, => .R, .ecall, diff --git a/src/arch/riscv64/Lower.zig b/src/arch/riscv64/Lower.zig index 659d4eb605..6e2fc125cf 100644 --- a/src/arch/riscv64/Lower.zig +++ b/src/arch/riscv64/Lower.zig @@ -132,11 +132,27 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { .pseudo_mv => { const rr = inst.data.rr; - try lower.emit(.addi, &.{ - .{ .reg = rr.rd }, - .{ .reg = rr.rs }, - .{ .imm = Immediate.s(0) }, - }); + const dst_class = rr.rd.class(); + const src_class = rr.rs.class(); + + assert(dst_class == src_class); + + switch (dst_class) { + .float => { + try lower.emit(.fsgnjns, &.{ + .{ .reg = rr.rd }, + .{ .reg = rr.rs }, + .{ .reg = rr.rs }, + }); + }, + .int => { + try lower.emit(.addi, &.{ + .{ .reg = rr.rd }, + .{ .reg = rr.rs }, + .{ .imm = Immediate.s(0) }, + }); + }, + } }, .pseudo_ret => { diff --git a/src/arch/riscv64/abi.zig b/src/arch/riscv64/abi.zig index 88a09d2a50..b8254b68a5 100644 --- a/src/arch/riscv64/abi.zig +++ b/src/arch/riscv64/abi.zig @@ -7,7 +7,7 @@ const InternPool = @import("../../InternPool.zig"); const Module = @import("../../Module.zig"); const assert = std.debug.assert; -pub const Class = enum { memory, byval, integer, double_integer, fields, none }; +pub const Class = enum { memory, byval, integer, double_integer, fields }; pub fn classifyType(ty: Type, mod: *Module) Class { const target = mod.getTarget(); @@ -93,11 +93,13 @@ pub fn classifyType(ty: Type, mod: *Module) Class { } } +pub const SystemClass = enum { integer, float, memory, none }; + /// There are a maximum of 8 possible return slots. Returned values are in /// the beginning of the array; unused slots are filled with .none. 
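+/// For example, an `f32` argument or return value is classified as `.float` in slot 0, with the remaining slots left as `.none`.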
-pub fn classifySystem(ty: Type, zcu: *Module) [8]Class { - var result = [1]Class{.none} ** 8; - const memory_class = [_]Class{ +pub fn classifySystem(ty: Type, zcu: *Module) [8]SystemClass { + var result = [1]SystemClass{.none} ** 8; + const memory_class = [_]SystemClass{ .memory, .none, .none, .none, .none, .none, .none, .none, }; @@ -139,6 +141,18 @@ pub fn classifySystem(ty: Type, zcu: *Module) [8]Class { } unreachable; // support > 128 bit int arguments }, + .Float => { + const target = zcu.getTarget(); + const features = target.cpu.features; + + const float_bits = ty.floatBits(zcu.getTarget()); + const float_reg_size: u32 = if (std.Target.riscv.featureSetHas(features, .d)) 64 else 32; + if (float_bits <= float_reg_size) { + result[0] = .float; + return result; + } + unreachable; // support split float args + }, .ErrorUnion => { const payload_ty = ty.errorUnionPayload(zcu); const payload_bits = payload_ty.bitSize(zcu); diff --git a/src/arch/riscv64/bits.zig b/src/arch/riscv64/bits.zig index 6264ec1854..6b3a61578f 100644 --- a/src/arch/riscv64/bits.zig +++ b/src/arch/riscv64/bits.zig @@ -2,6 +2,9 @@ const std = @import("std"); const DW = std.dwarf; const assert = std.debug.assert; const testing = std.testing; +const Target = std.Target; + +const Module = @import("../../Module.zig"); const Encoding = @import("Encoding.zig"); const Mir = @import("Mir.zig"); const abi = @import("abi.zig"); @@ -227,11 +230,13 @@ pub const Register = enum(u8) { return @as(u8, reg.id()); } - pub fn bitSize(reg: Register) u32 { + pub fn bitSize(reg: Register, zcu: Module) u32 { + const features = zcu.getTarget().cpu.features; + return switch (@intFromEnum(reg)) { // zig fmt: off @intFromEnum(Register.zero) ... @intFromEnum(Register.x31) => 64, - @intFromEnum(Register.ft0) ... @intFromEnum(Register.f31) => 32, + @intFromEnum(Register.ft0) ... 
@intFromEnum(Register.f31) => if (Target.riscv.featureSetHas(features, .d)) 64 else 32, else => unreachable, // zig fmt: on }; diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index fe7f98bc8d..edf0891a69 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -11148,7 +11148,6 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Bu } return o.builder.structType(.normal, types[0..types_len]); }, - .none => unreachable, } }, // TODO investigate C ABI for other architectures @@ -11406,7 +11405,6 @@ const ParamTypeIterator = struct { it.llvm_index += it.types_len - 1; return .multiple_llvm_types; }, - .none => unreachable, } }, // TODO investigate C ABI for other architectures From b67995689df424a0cab9186fcaf7b09bb04ffc1a Mon Sep 17 00:00:00 2001 From: David Rubin Date: Sun, 12 May 2024 11:01:23 -0700 Subject: [PATCH 13/24] riscv: add `airAggregateInit` for arrays --- src/arch/riscv64/CodeGen.zig | 27 ++++++++++++++++++++---- test/behavior/align.zig | 1 - test/behavior/array.zig | 3 --- test/behavior/basic.zig | 6 ------ test/behavior/eval.zig | 1 - test/behavior/fn.zig | 1 - test/behavior/for.zig | 6 ------ test/behavior/lower_strlit_to_vector.zig | 1 - test/behavior/pointers.zig | 1 - test/behavior/prefetch.zig | 1 - test/behavior/sizeof_and_typeof.zig | 1 - test/behavior/slice.zig | 4 ---- test/behavior/struct.zig | 1 - 13 files changed, 23 insertions(+), 31 deletions(-) diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 7b44bcfc7c..71489c0eb6 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -3207,10 +3207,6 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void { const offset_reg_lock = self.register_manager.lockRegAssumeUnused(offset_reg); defer self.register_manager.unlockReg(offset_reg_lock); - if (true) return self.fail("TODO: airPtrElemPtr", .{}); - - // TODO: something is breaking here dunno - break :result try self.binOp(.ptr_add, base_ptr_mcv, base_ptr_ty, .{ .register = offset_reg }, base_ptr_ty); }; return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none }); @@ -5864,6 +5860,29 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { } break :result .{ .load_frame = .{ .index = frame_index } }; }, + .Array => { + const elem_ty = result_ty.childType(zcu); + const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(result_ty, zcu)); + const elem_size: u32 = @intCast(elem_ty.abiSize(zcu)); + + for (elements, 0..) 
|elem, elem_i| { + const elem_mcv = try self.resolveInst(elem); + const elem_off: i32 = @intCast(elem_size * elem_i); + try self.genSetMem( + .{ .frame = frame_index }, + elem_off, + elem_ty, + elem_mcv, + ); + } + if (result_ty.sentinel(zcu)) |sentinel| try self.genSetMem( + .{ .frame = frame_index }, + @intCast(elem_size * elements.len), + elem_ty, + try self.genTypedValue(sentinel), + ); + break :result .{ .load_frame = .{ .index = frame_index } }; + }, else => return self.fail("TODO: airAggregate {}", .{result_ty.fmt(zcu)}), } }; diff --git a/test/behavior/align.zig b/test/behavior/align.zig index 533b5dce77..674d375438 100644 --- a/test/behavior/align.zig +++ b/test/behavior/align.zig @@ -237,7 +237,6 @@ fn addUnalignedSlice(a: []align(1) const u32, b: []align(1) const u32) u32 { } test "specifying alignment allows pointer cast" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO diff --git a/test/behavior/array.zig b/test/behavior/array.zig index 81a34ca1a8..b442048b95 100644 --- a/test/behavior/array.zig +++ b/test/behavior/array.zig @@ -346,7 +346,6 @@ test "read/write through global variable array of struct fields initialized via test "implicit cast single-item pointer" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testImplicitCastSingleItemPtr(); try comptime testImplicitCastSingleItemPtr(); @@ -714,7 +713,6 @@ test "array of array agregate init" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var a = [1]u32{11} ** 10; var b = [1][10]u32{a} ** 2; @@ -765,7 +763,6 @@ test "slicing array of zero-sized values" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; var arr: [32]u0 = undefined; diff --git a/test/behavior/basic.zig b/test/behavior/basic.zig index 643f1141d1..7c0c6cc4ab 100644 --- a/test/behavior/basic.zig +++ b/test/behavior/basic.zig @@ -688,7 +688,6 @@ test "string concatenation" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const a = "OK" ++ " IT " ++ "WORKED"; const b = "OK IT WORKED"; @@ -728,7 +727,6 @@ fn maybe(x: bool) anyerror!?u32 { test "auto created variables have correct alignment" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn foo(str: [*]const u8) u32 { @@ -893,7 +891,6 @@ test "weird array and tuple initializations" { if 
(builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const E = enum { a, b }; const S = struct { e: E }; @@ -1012,7 +1009,6 @@ comptime { test "switch inside @as gets correct type" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var a: u32 = 0; _ = &a; @@ -1379,8 +1375,6 @@ test "allocation and looping over 3-byte integer" { } test "loading array from struct is not optimized away" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - const S = struct { arr: [1]u32 = .{0}, fn doTheTest(self: *@This()) !void { diff --git a/test/behavior/eval.zig b/test/behavior/eval.zig index 1e7af5f871..966882b6a8 100644 --- a/test/behavior/eval.zig +++ b/test/behavior/eval.zig @@ -547,7 +547,6 @@ test "static eval list init" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect(static_vec3.data[2] == 1.0); try expect(vec3(0.0, 0.0, 3.0).data[2] == 3.0); diff --git a/test/behavior/fn.zig b/test/behavior/fn.zig index 78ca6d0fd1..1c7adc38e8 100644 --- a/test/behavior/fn.zig +++ b/test/behavior/fn.zig @@ -493,7 +493,6 @@ test "using @ptrCast on function pointers" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const A = struct { data: [4]u8 }; diff --git a/test/behavior/for.zig b/test/behavior/for.zig index 337e7b6767..66c89d1b8c 100644 --- a/test/behavior/for.zig +++ b/test/behavior/for.zig @@ -69,7 +69,6 @@ test "basic for loop" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const expected_result = [_]u8{ 9, 8, 7, 6, 0, 1, 2, 3 } ** 3; @@ -134,7 +133,6 @@ test "for with null and T peer types and inferred result location type" { test "2 break statements and an else" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn entry(t: bool, f: bool) !void { @@ -183,7 +181,6 @@ fn mangleString(s: []u8) void { test "for copies its payload" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -283,7 +280,6 @@ test "two counters" { test "1-based counter and ptr to array" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) 
return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var ok: usize = 0; @@ -367,7 +363,6 @@ test "raw pointer and slice" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var buf: [10]u8 = undefined; const slice: []const u8 = "blah"; @@ -387,7 +382,6 @@ test "raw pointer and counter" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var buf: [10]u8 = undefined; const ptr: [*]u8 = &buf; diff --git a/test/behavior/lower_strlit_to_vector.zig b/test/behavior/lower_strlit_to_vector.zig index 79315e7a53..948d708aa7 100644 --- a/test/behavior/lower_strlit_to_vector.zig +++ b/test/behavior/lower_strlit_to_vector.zig @@ -6,7 +6,6 @@ test "strlit to vector" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const strlit = "0123456789abcdef0123456789ABCDEF"; const vec_from_strlit: @Vector(32, u8) = strlit.*; diff --git a/test/behavior/pointers.zig b/test/behavior/pointers.zig index ffeeca3986..891ae311dd 100644 --- a/test/behavior/pointers.zig +++ b/test/behavior/pointers.zig @@ -541,7 +541,6 @@ test "pointer alignment and element type include call expression" { test "pointer to array has explicit alignment" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const Base = extern struct { a: u8 }; diff --git a/test/behavior/prefetch.zig b/test/behavior/prefetch.zig index 1f21d23001..e98e848393 100644 --- a/test/behavior/prefetch.zig +++ b/test/behavior/prefetch.zig @@ -3,7 +3,6 @@ const std = @import("std"); test "@prefetch()" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var a: [2]u32 = .{ 42, 42 }; var a_len = a.len; diff --git a/test/behavior/sizeof_and_typeof.zig b/test/behavior/sizeof_and_typeof.zig index 2f714f69d3..11d74c43d9 100644 --- a/test/behavior/sizeof_and_typeof.zig +++ b/test/behavior/sizeof_and_typeof.zig @@ -272,7 +272,6 @@ test "runtime instructions inside typeof in comptime only scope" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; { var y: i8 = 2; diff --git a/test/behavior/slice.zig b/test/behavior/slice.zig index 97b6e88f27..606f6db9a3 100644 --- a/test/behavior/slice.zig +++ b/test/behavior/slice.zig @@ -185,8 +185,6 @@ test "slicing zero length array" { } test "slicing pointer by length" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - const array = [_]u8{ 1, 2, 3, 4, 5, 6, 7, 8 }; const ptr: [*]const 
u8 = @as([*]const u8, @ptrCast(&array)); const slice = ptr[1..][0..5]; @@ -306,7 +304,6 @@ test "slice type with custom alignment" { test "obtaining a null terminated slice" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // here we have a normal array var buf: [50]u8 = undefined; @@ -772,7 +769,6 @@ test "slice sentinel access at comptime" { test "slicing array with sentinel as end index" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn do() !void { diff --git a/test/behavior/struct.zig b/test/behavior/struct.zig index eaf795f861..3d9ddd7714 100644 --- a/test/behavior/struct.zig +++ b/test/behavior/struct.zig @@ -348,7 +348,6 @@ test "self-referencing struct via array member" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const T = struct { children: [1]*@This(), From 083b7b483e1ad83d62ed7029822a82fff14953c5 Mon Sep 17 00:00:00 2001 From: David Rubin Date: Sun, 12 May 2024 12:24:59 -0700 Subject: [PATCH 14/24] riscv: zero registers when using register-wide operations instructions like `lb` were only affecting the lower byte of the register and leaving the upper bits dirty. this led to situations where `cmp_eq`, for example, was lowered to `xor` and produced wrong results because of the leftover bits in the top of the register. with this commit, we now zero out or truncate depending on the context (for example, a `u8` operand is truncated by shifting left and then right by `64 - 8 = 56` bits), to ensure register-wide instructions like `xor` produce correct results. --- src/arch/riscv64/CodeGen.zig | 88 ++++++++++++++++++++++++++------- src/arch/riscv64/bits.zig | 2 +- test/behavior/array.zig | 1 - test/behavior/bitcast.zig | 3 -- test/behavior/cast.zig | 2 - test/behavior/error.zig | 1 - test/behavior/eval.zig | 2 - test/behavior/math.zig | 4 -- test/behavior/packed-struct.zig | 2 - test/behavior/reflection.zig | 1 - test/behavior/struct.zig | 5 -- test/behavior/this.zig | 1 - test/behavior/union.zig | 4 -- 13 files changed, 72 insertions(+), 44 deletions(-) diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 71489c0eb6..bc9c75f3de 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -1668,12 +1668,66 @@ fn allocReg(self: *Self, reg_class: abi.RegisterClass) !struct { Register, Regis return .{ reg, lock }; } +const PromoteOptions = struct { + /// zeroes out the register before loading the operand into it + /// + /// if the operand is already a register, its upper bits are truncated to zero instead + zero: bool = false, +}; /// Similar to `allocReg` but will copy the MCValue into the Register unless `operand` is already /// a register, in which case it will return a possible lock to that register.
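+/// Pass `.{ .zero = true }` to zero the register (or truncate an existing register operand) first; binOpRegister uses this to clear stale upper bits before register-wide operations.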
-fn promoteReg(self: *Self, ty: Type, operand: MCValue) !struct { Register, ?RegisterLock } { - if (operand == .register) return .{ operand.register, self.register_manager.lockReg(operand.register) }; +fn promoteReg(self: *Self, ty: Type, operand: MCValue, options: PromoteOptions) !struct { Register, ?RegisterLock } { + const zcu = self.bin_file.comp.module.?; + const bit_size = ty.bitSize(zcu); + + if (operand == .register) { + const op_reg = operand.register; + if (options.zero and op_reg.class() == .int) { + // we make sure to emit the truncate manually because binOp will call this function + // and it could cause an infinite loop + + _ = try self.addInst(.{ + .tag = .slli, + .ops = .rri, + .data = .{ + .i_type = .{ + .imm12 = Immediate.u(64 - bit_size), + .rd = op_reg, + .rs1 = op_reg, + }, + }, + }); + + _ = try self.addInst(.{ + .tag = .srli, + .ops = .rri, + .data = .{ + .i_type = .{ + .imm12 = Immediate.u(64 - bit_size), + .rd = op_reg, + .rs1 = op_reg, + }, + }, + }); + } + + return .{ op_reg, self.register_manager.lockReg(operand.register) }; + } const reg, const lock = try self.allocReg(self.typeRegClass(ty)); + + if (options.zero and reg.class() == .int) { + _ = try self.addInst(.{ + .tag = .pseudo, + .ops = .pseudo_mv, + .data = .{ .rr = .{ + .rd = reg, + .rs = .zero, + } }, + }); + } + try self.genSetReg(ty, reg, operand); return .{ reg, lock }; } @@ -2124,10 +2178,10 @@ fn binOpRegister( rhs: MCValue, rhs_ty: Type, ) !MCValue { - const lhs_reg, const lhs_lock = try self.promoteReg(lhs_ty, lhs); + const lhs_reg, const lhs_lock = try self.promoteReg(lhs_ty, lhs, .{ .zero = true }); defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock); - const rhs_reg, const rhs_lock = try self.promoteReg(rhs_ty, rhs); + const rhs_reg, const rhs_lock = try self.promoteReg(rhs_ty, rhs, .{ .zero = true }); defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock); const dest_reg, const dest_lock = try self.allocReg(.int); @@ -2223,10 +2277,10 @@ fn binOpFloat( const zcu = self.bin_file.comp.module.?; const float_bits = lhs_ty.floatBits(zcu.getTarget()); - const lhs_reg, const lhs_lock = try self.promoteReg(lhs_ty, lhs); + const lhs_reg, const lhs_lock = try self.promoteReg(lhs_ty, lhs, .{}); defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock); - const rhs_reg, const rhs_lock = try self.promoteReg(rhs_ty, rhs); + const rhs_reg, const rhs_lock = try self.promoteReg(rhs_ty, rhs, .{}); defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock); const mir_tag: Mir.Inst.Tag = switch (tag) { @@ -2425,10 +2479,10 @@ fn airSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const result_mcv = try self.allocRegOrMem(inst, false); const offset = result_mcv.load_frame; - const lhs_reg, const lhs_lock = try self.promoteReg(lhs_ty, lhs); + const lhs_reg, const lhs_lock = try self.promoteReg(lhs_ty, lhs, .{}); defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock); - const rhs_reg, const rhs_lock = try self.promoteReg(rhs_ty, rhs); + const rhs_reg, const rhs_lock = try self.promoteReg(rhs_ty, rhs, .{}); defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock); const dest_reg, const dest_lock = try self.allocReg(.int); @@ -2559,7 +2613,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { 1...8 => { const max_val = std.math.pow(u16, 2, int_info.bits) - 1; - const add_reg, const add_lock = try self.promoteReg(lhs_ty, lhs); + const add_reg, const add_lock = try self.promoteReg(lhs_ty, lhs, .{}); defer if (add_lock) |lock| 
self.register_manager.unlockReg(lock); const overflow_reg, const overflow_lock = try self.allocReg(.int); @@ -2645,10 +2699,10 @@ fn airBitAnd(self: *Self, inst: Air.Inst.Index) !void { const lhs_ty = self.typeOf(bin_op.lhs); const rhs_ty = self.typeOf(bin_op.rhs); - const lhs_reg, const lhs_lock = try self.promoteReg(lhs_ty, lhs); + const lhs_reg, const lhs_lock = try self.promoteReg(lhs_ty, lhs, .{}); defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock); - const rhs_reg, const rhs_lock = try self.promoteReg(rhs_ty, rhs); + const rhs_reg, const rhs_lock = try self.promoteReg(rhs_ty, rhs, .{}); defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock); const dest_reg, const dest_lock = try self.allocReg(.int); @@ -2678,10 +2732,10 @@ fn airBitOr(self: *Self, inst: Air.Inst.Index) !void { const lhs_ty = self.typeOf(bin_op.lhs); const rhs_ty = self.typeOf(bin_op.rhs); - const lhs_reg, const lhs_lock = try self.promoteReg(lhs_ty, lhs); + const lhs_reg, const lhs_lock = try self.promoteReg(lhs_ty, lhs, .{}); defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock); - const rhs_reg, const rhs_lock = try self.promoteReg(rhs_ty, rhs); + const rhs_reg, const rhs_lock = try self.promoteReg(rhs_ty, rhs, .{}); defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock); const dest_reg, const dest_lock = try self.allocReg(.int); @@ -4706,10 +4760,10 @@ fn airBoolOp(self: *Self, inst: Air.Inst.Index) !void { const lhs_ty = Type.bool; const rhs_ty = Type.bool; - const lhs_reg, const lhs_lock = try self.promoteReg(lhs_ty, lhs); + const lhs_reg, const lhs_lock = try self.promoteReg(lhs_ty, lhs, .{}); defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock); - const rhs_reg, const rhs_lock = try self.promoteReg(rhs_ty, rhs); + const rhs_reg, const rhs_lock = try self.promoteReg(rhs_ty, rhs, .{}); defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock); const result_reg, const result_lock = try self.allocReg(.int); @@ -4905,7 +4959,7 @@ fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) !void { const src_info: ?struct { addr_reg: Register, addr_lock: ?RegisterLock } = switch (src_mcv) { .register_pair, .memory, .indirect, .load_frame => null, .load_symbol => src: { - const src_addr_reg, const src_addr_lock = try self.promoteReg(Type.usize, src_mcv.address()); + const src_addr_reg, const src_addr_lock = try self.promoteReg(Type.usize, src_mcv.address(), .{}); errdefer self.register_manager.unlockReg(src_addr_lock); break :src .{ .addr_reg = src_addr_reg, .addr_lock = src_addr_lock }; @@ -5463,7 +5517,7 @@ fn genSetMem( .immediate => { // TODO: remove this lock in favor of a copyToTmpRegister when we load 64 bit immediates with // a register allocation. 
- const reg, const reg_lock = try self.promoteReg(ty, src_mcv); + const reg, const reg_lock = try self.promoteReg(ty, src_mcv, .{}); defer if (reg_lock) |lock| self.register_manager.unlockReg(lock); return self.genSetMem(base, disp, ty, .{ .register = reg }); diff --git a/src/arch/riscv64/bits.zig b/src/arch/riscv64/bits.zig index 6b3a61578f..034fe49450 100644 --- a/src/arch/riscv64/bits.zig +++ b/src/arch/riscv64/bits.zig @@ -102,7 +102,7 @@ pub const Memory = struct { pub const Immediate = union(enum) { signed: i32, - unsigned: u32, + unsigned: u64, pub fn u(x: u64) Immediate { return .{ .unsigned = x }; diff --git a/test/behavior/array.zig b/test/behavior/array.zig index b442048b95..9b3c66f1ba 100644 --- a/test/behavior/array.zig +++ b/test/behavior/array.zig @@ -542,7 +542,6 @@ test "sentinel element count towards the ABI size calculation" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { diff --git a/test/behavior/bitcast.zig b/test/behavior/bitcast.zig index 6d513a4ac7..779fbfc795 100644 --- a/test/behavior/bitcast.zig +++ b/test/behavior/bitcast.zig @@ -165,7 +165,6 @@ test "@bitCast packed structs at runtime and comptime" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const Full = packed struct { number: u16, @@ -192,7 +191,6 @@ test "@bitCast packed structs at runtime and comptime" { test "@bitCast extern structs at runtime and comptime" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const Full = extern struct { number: u16, @@ -227,7 +225,6 @@ test "bitcast packed struct to integer and back" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const LevelUpMove = packed struct { move_id: u9, diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig index 8ea564ad13..f33930f215 100644 --- a/test/behavior/cast.zig +++ b/test/behavior/cast.zig @@ -57,8 +57,6 @@ test "@intCast to comptime_int" { } test "implicit cast comptime numbers to any type when the value fits" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - const a: u64 = 255; var b: u8 = a; _ = &b; diff --git a/test/behavior/error.zig b/test/behavior/error.zig index fde116ffb1..4e7fe949f1 100644 --- a/test/behavior/error.zig +++ b/test/behavior/error.zig @@ -740,7 +740,6 @@ test "ret_ptr doesn't cause own inferred error set to be resolved" { test "simple else prong allowed even when all errors handled" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn foo() !u8 { diff --git a/test/behavior/eval.zig b/test/behavior/eval.zig index 
966882b6a8..7b01c8d25f 100644 --- a/test/behavior/eval.zig +++ b/test/behavior/eval.zig @@ -395,7 +395,6 @@ test "return 0 from function that has u0 return type" { test "statically initialized struct" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; st_init_str_foo.x += 1; try expect(st_init_str_foo.x == 14); @@ -787,7 +786,6 @@ test "array concatenation peer resolves element types - pointer" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var a = [2]u3{ 1, 7 }; var b = [3]u8{ 200, 225, 255 }; diff --git a/test/behavior/math.zig b/test/behavior/math.zig index 62df9c1b60..5e8f1c192a 100644 --- a/test/behavior/math.zig +++ b/test/behavior/math.zig @@ -605,8 +605,6 @@ fn testSignedNegationWrappingEval(x: i16) !void { } test "unsigned negation wrapping" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - try testUnsignedNegationWrappingEval(1); try comptime testUnsignedNegationWrappingEval(1); } @@ -1436,8 +1434,6 @@ test "quad hex float literal parsing accurate" { } test "truncating shift left" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - try testShlTrunc(maxInt(u16)); try comptime testShlTrunc(maxInt(u16)); } diff --git a/test/behavior/packed-struct.zig b/test/behavior/packed-struct.zig index 4c54ce730e..be69bf8213 100644 --- a/test/behavior/packed-struct.zig +++ b/test/behavior/packed-struct.zig @@ -258,7 +258,6 @@ test "nested packed struct unaligned" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (native_endian != .little) return error.SkipZigTest; // Byte aligned packed struct field pointers have not been implemented yet const S1 = packed struct { @@ -331,7 +330,6 @@ test "byte-aligned field pointer offsets" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const A = packed struct { diff --git a/test/behavior/reflection.zig b/test/behavior/reflection.zig index f07b5a512e..aea84bc45a 100644 --- a/test/behavior/reflection.zig +++ b/test/behavior/reflection.zig @@ -28,7 +28,6 @@ fn dummy(a: bool, b: i32, c: f32) i32 { test "reflection: @field" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var f = Foo{ .one = 42, diff --git a/test/behavior/struct.zig b/test/behavior/struct.zig index 3d9ddd7714..44911378db 100644 --- a/test/behavior/struct.zig +++ b/test/behavior/struct.zig @@ -68,7 +68,6 @@ const SmallStruct = struct { test "lower unnamed constants" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == 
.stage2_riscv64) return error.SkipZigTest; var foo = SmallStruct{ .a = 1, .b = 255 }; try expect(foo.first() == 1); @@ -395,7 +394,6 @@ test "packed struct" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var foo = APackedStruct{ .x = 1, @@ -876,7 +874,6 @@ test "packed struct field passed to generic function" { test "anonymous struct literal syntax" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const Point = struct { @@ -1106,7 +1103,6 @@ test "packed struct with undefined initializers" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const P = packed struct { @@ -1369,7 +1365,6 @@ test "store to comptime field" { test "struct field init value is size of the struct" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const namespace = struct { const S = extern struct { diff --git a/test/behavior/this.zig b/test/behavior/this.zig index 330f9a714d..fadb21023e 100644 --- a/test/behavior/this.zig +++ b/test/behavior/this.zig @@ -27,7 +27,6 @@ test "this refer to module call private fn" { test "this refer to container" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var pt: Point(i32) = undefined; pt.x = 12; diff --git a/test/behavior/union.zig b/test/behavior/union.zig index 4497031b87..73bfe4a7cb 100644 --- a/test/behavior/union.zig +++ b/test/behavior/union.zig @@ -2025,7 +2025,6 @@ test "inner struct initializer uses packed union layout" { test "extern union initialized via reintepreted struct field initializer" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const bytes = [_]u8{ 0xaa, 0xbb, 0xcc, 0xdd }; @@ -2045,7 +2044,6 @@ test "extern union initialized via reintepreted struct field initializer" { test "packed union initialized via reintepreted struct field initializer" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const bytes = [_]u8{ 0xaa, 0xbb, 0xcc, 0xdd }; @@ -2066,7 +2064,6 @@ test "packed union initialized via reintepreted struct field initializer" { test "store of comptime reinterpreted memory to extern union" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const bytes = [_]u8{ 0xaa, 0xbb, 0xcc, 0xdd }; @@ -2089,7 +2086,6 @@ test "store of comptime reinterpreted memory to extern union" { test "store of comptime reinterpreted memory to packed union" { if 
(builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const bytes = [_]u8{ 0xaa, 0xbb, 0xcc, 0xdd }; From c10d1c6a75f818eda092ef7912fd3c2b996632f0 Mon Sep 17 00:00:00 2001 From: David Rubin Date: Sun, 12 May 2024 13:46:49 -0700 Subject: [PATCH 15/24] riscv: implement more arithmetic instructions --- src/arch/riscv64/CodeGen.zig | 134 ++++++++++++++++++----- src/arch/riscv64/Encoding.zig | 115 +++++++++++++++++--- src/arch/riscv64/Lower.zig | 199 +++++++++++++++++++++++----------- src/arch/riscv64/Mir.zig | 25 +++++ test/behavior/cast.zig | 2 - test/behavior/floatop.zig | 12 -- test/behavior/math.zig | 1 - 7 files changed, 363 insertions(+), 125 deletions(-) diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index bc9c75f3de..31767e01cc 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -2075,6 +2075,7 @@ fn binOp( .add, .sub, .mul, + .div_float, .cmp_eq, .cmp_neq, .cmp_gt, @@ -2086,10 +2087,11 @@ fn binOp( switch (lhs_ty.zigTypeTag(zcu)) { .Float => { const float_bits = lhs_ty.floatBits(zcu.getTarget()); - if (float_bits <= 32) { + const float_reg_bits: u32 = if (self.hasFeature(.d)) 64 else 32; + if (float_bits <= float_reg_bits) { return self.binOpFloat(tag, lhs, lhs_ty, rhs, rhs_ty); } else { - return self.fail("TODO: binary operations for floats with bits > 32", .{}); + return self.fail("TODO: binary operations for floats with bits > {d}", .{float_reg_bits}); } }, .Vector => return self.fail("TODO binary operations on vectors", .{}), @@ -2255,6 +2257,7 @@ fn binOpRegister( .cmp_lte => .lte, else => unreachable, }, + .size = self.memSize(lhs_ty), }, }, }); @@ -2285,28 +2288,90 @@ fn binOpFloat( const mir_tag: Mir.Inst.Tag = switch (tag) { .add => if (float_bits == 32) .fadds else .faddd, - .cmp_eq => if (float_bits == 32) .feqs else .feqd, + .sub => if (float_bits == 32) .fsubs else .fsubd, + .mul => if (float_bits == 32) .fmuls else .fmuld, + .div_float => if (float_bits == 32) .fdivs else .fdivd, + + .cmp_eq, + .cmp_neq, + .cmp_gt, + .cmp_gte, + .cmp_lt, + .cmp_lte, + => .pseudo, + else => return self.fail("TODO: binOpFloat mir_tag {s}", .{@tagName(tag)}), }; const return_class: abi.RegisterClass = switch (tag) { - .add => .float, - .cmp_eq => .int, + .add, + .sub, + .mul, + .div_float, + => .float, + + .cmp_eq, + .cmp_neq, + .cmp_gt, + .cmp_gte, + .cmp_lt, + .cmp_lte, + => .int, else => unreachable, }; const dest_reg, const dest_lock = try self.allocReg(return_class); defer self.register_manager.unlockReg(dest_lock); - _ = try self.addInst(.{ - .tag = mir_tag, - .ops = .rrr, - .data = .{ .r_type = .{ - .rd = dest_reg, - .rs1 = lhs_reg, - .rs2 = rhs_reg, - } }, - }); + switch (tag) { + .add, + .sub, + .mul, + .div_float, + => { + _ = try self.addInst(.{ + .tag = mir_tag, + .ops = .rrr, + .data = .{ .r_type = .{ + .rd = dest_reg, + .rs1 = lhs_reg, + .rs2 = rhs_reg, + } }, + }); + }, + + .cmp_eq, + .cmp_neq, + .cmp_gt, + .cmp_gte, + .cmp_lt, + .cmp_lte, + => { + _ = try self.addInst(.{ + .tag = .pseudo, + .ops = .pseudo_compare, + .data = .{ + .compare = .{ + .rd = dest_reg, + .rs1 = lhs_reg, + .rs2 = rhs_reg, + .op = switch (tag) { + .cmp_eq => .eq, + .cmp_neq => .neq, + .cmp_gt => .gt, + .cmp_gte => .gte, + .cmp_lt => .lt, + .cmp_lte => .lte, + else => unreachable, + }, + .size = self.memSize(lhs_ty), + }, + }, + }); + }, + + else => unreachable, + } return MCValue{ .register = dest_reg }; } @@ -2360,7 +2425,27 @@ fn airSubSat(self: *Self, inst: 
Air.Inst.Index) !void { fn airMul(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement mul for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { + const lhs = try self.resolveInst(bin_op.lhs); + const rhs = try self.resolveInst(bin_op.rhs); + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); + + break :result try self.binOp(.mul, lhs, lhs_ty, rhs, rhs_ty); + }; + return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); +} + +fn airDiv(self: *Self, inst: Air.Inst.Index) !void { + const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { + const lhs = try self.resolveInst(bin_op.lhs); + const rhs = try self.resolveInst(bin_op.rhs); + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); + + break :result try self.binOp(.div_float, lhs, lhs_ty, rhs, rhs_ty); + }; return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } @@ -2672,12 +2757,6 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { return self.fail("TODO implement airShlWithOverflow for {}", .{self.target.cpu.arch}); } -fn airDiv(self: *Self, inst: Air.Inst.Index) !void { - const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement div for {}", .{self.target.cpu.arch}); - return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); -} - fn airRem(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement rem for {}", .{self.target.cpu.arch}); @@ -3742,6 +3821,9 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void { const arg_ty = self.typeOfIndex(inst); const dst_mcv = try self.allocRegOrMem(inst, false); + + log.debug("airArg {} -> {}", .{ src_mcv, dst_mcv }); + try self.genCopy(arg_ty, dst_mcv, src_mcv); try self.genArgDbgInfo(inst, src_mcv); @@ -4135,10 +4217,10 @@ fn airCmp(self: *Self, inst: Air.Inst.Index) !void { }, .Float => { const float_bits = lhs_ty.floatBits(self.target.*); - if (float_bits > 32) { - return self.fail("TODO: airCmp float > 32 bits", .{}); + const float_reg_size: u32 = if (self.hasFeature(.d)) 64 else 32; + if (float_bits > float_reg_size) { + return self.fail("TODO: airCmp float > 64/32 bits", .{}); } - break :result try self.binOpFloat(tag, lhs, lhs_ty, rhs, lhs_ty); }, else => unreachable, @@ -6141,6 +6223,8 @@ fn resolveCallingConventionValues( }; } + var param_float_reg_i: usize = 0; + for (param_types, result.args) |ty, *arg| { if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) { assert(cc == .Unspecified); @@ -6151,8 +6235,6 @@ fn resolveCallingConventionValues( var arg_mcv: [2]MCValue = undefined; var arg_mcv_i: usize = 0; - var param_float_reg_i: usize = 0; - const classes = mem.sliceTo(&abi.classifySystem(ty, zcu), .none); for (classes) |class| switch (class) { diff --git a/src/arch/riscv64/Encoding.zig b/src/arch/riscv64/Encoding.zig index bbea73aac9..a944cac634 100644 --- a/src/arch/riscv64/Encoding.zig +++ b/src/arch/riscv64/Encoding.zig @@ -111,21 +111,46 
@@ pub const Mnemonic = enum { ebreak, unimp, - // float mnemonics + // F extension (32-bit float) fadds, - faddd, + fsubs, + fmuls, + fdivs, - feqs, - feqd, + fmins, + fmaxs, + + fsqrts, - fld, flw, - - fsd, fsw, + feqs, + flts, + fles, + fsgnjns, + // D extension (64-bit float) + faddd, + fsubd, + fmuld, + fdivd, + + fmind, + fmaxd, + + fsqrtd, + + fld, + fsd, + + feqd, + fltd, + fled, + + fsgnjnd, + pub fn encoding(mnem: Mnemonic) Enc { return switch (mnem) { // zig fmt: off @@ -163,39 +188,66 @@ pub const Mnemonic = enum { .fadds => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00000, .fmt = .S, .rm = 0b111 } } }, .faddd => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00000, .fmt = .D, .rm = 0b111 } } }, + .fsubs => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00001, .fmt = .S, .rm = 0b111 } } }, + .fsubd => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00001, .fmt = .D, .rm = 0b111 } } }, + + .fmuls => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00010, .fmt = .S, .rm = 0b111 } } }, + .fmuld => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00010, .fmt = .D, .rm = 0b111 } } }, + + .fdivs => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00011, .fmt = .S, .rm = 0b111 } } }, + .fdivd => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00011, .fmt = .D, .rm = 0b111 } } }, + + .fmins => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00101, .fmt = .S, .rm = 0b000 } } }, + .fmind => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00101, .fmt = .D, .rm = 0b000 } } }, + + .fmaxs => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00101, .fmt = .S, .rm = 0b001 } } }, + .fmaxd => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00101, .fmt = .D, .rm = 0b001 } } }, + + .fsqrts => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b01011, .fmt = .S, .rm = 0b111 } } }, + .fsqrtd => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b01011, .fmt = .D, .rm = 0b111 } } }, + + .fles => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b10100, .fmt = .S, .rm = 0b000 } } }, + .fled => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b10100, .fmt = .D, .rm = 0b000 } } }, + + .flts => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b10100, .fmt = .S, .rm = 0b001 } } }, + .fltd => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b10100, .fmt = .D, .rm = 0b001 } } }, + .feqs => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b10100, .fmt = .S, .rm = 0b010 } } }, .feqd => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b10100, .fmt = .D, .rm = 0b010 } } }, .fsgnjns => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00100, .fmt = .S, .rm = 0b000 } } }, + .fsgnjnd => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00100, .fmt = .D, .rm = 0b000 } } }, + // LOAD - .ld => .{ .opcode = .LOAD, .data = .{ .fo = .{ .funct3 = 0b011 } } }, - .lw => .{ .opcode = .LOAD, .data = .{ .fo = .{ .funct3 = 0b010 } } }, - .lwu => .{ .opcode = .LOAD, .data = .{ .fo = .{ .funct3 = 0b110 } } }, - .lh => .{ .opcode = .LOAD, .data = .{ .fo = .{ .funct3 = 0b001 } } }, - .lhu => .{ .opcode = .LOAD, .data = .{ .fo = .{ .funct3 = 0b101 } } }, .lb => .{ .opcode = .LOAD, .data = .{ .fo = .{ .funct3 = 0b000 } } }, + .lh => .{ .opcode = .LOAD, .data = .{ .fo = .{ .funct3 = 0b001 } } }, + .lw => .{ .opcode = .LOAD, .data = .{ .fo = .{ .funct3 = 0b010 } } }, + .ld => .{ .opcode = .LOAD, .data = .{ .fo = .{ .funct3 = 0b011 } } }, .lbu => .{ .opcode = .LOAD, .data = .{ .fo = .{ .funct3 = 0b100 } } }, + .lhu => .{ .opcode 
= .LOAD, .data = .{ .fo = .{ .funct3 = 0b101 } } }, + .lwu => .{ .opcode = .LOAD, .data = .{ .fo = .{ .funct3 = 0b110 } } }, // STORE - - .sd => .{ .opcode = .STORE, .data = .{ .fo = .{ .funct3 = 0b011 } } }, - .sw => .{ .opcode = .STORE, .data = .{ .fo = .{ .funct3 = 0b010 } } }, - .sh => .{ .opcode = .STORE, .data = .{ .fo = .{ .funct3 = 0b001 } } }, + .sb => .{ .opcode = .STORE, .data = .{ .fo = .{ .funct3 = 0b000 } } }, + .sh => .{ .opcode = .STORE, .data = .{ .fo = .{ .funct3 = 0b001 } } }, + .sw => .{ .opcode = .STORE, .data = .{ .fo = .{ .funct3 = 0b010 } } }, + .sd => .{ .opcode = .STORE, .data = .{ .fo = .{ .funct3 = 0b011 } } }, // LOAD_FP - .fld => .{ .opcode = .LOAD_FP, .data = .{ .fo = .{ .funct3 = 0b011 } } }, .flw => .{ .opcode = .LOAD_FP, .data = .{ .fo = .{ .funct3 = 0b010 } } }, + .fld => .{ .opcode = .LOAD_FP, .data = .{ .fo = .{ .funct3 = 0b011 } } }, + // STORE_FP - .fsd => .{ .opcode = .STORE_FP, .data = .{ .fo = .{ .funct3 = 0b011 } } }, .fsw => .{ .opcode = .STORE_FP, .data = .{ .fo = .{ .funct3 = 0b010 } } }, + .fsd => .{ .opcode = .STORE_FP, .data = .{ .fo = .{ .funct3 = 0b011 } } }, // JALR @@ -310,9 +362,36 @@ pub const InstEnc = enum { .fadds, .faddd, + + .fsubs, + .fsubd, + + .fmuls, + .fmuld, + + .fdivs, + .fdivd, + + .fmins, + .fmind, + + .fmaxs, + .fmaxd, + + .fsqrts, + .fsqrtd, + + .fles, + .fled, + + .flts, + .fltd, + .feqs, .feqd, + .fsgnjns, + .fsgnjnd, => .R, .ecall, diff --git a/src/arch/riscv64/Lower.zig b/src/arch/riscv64/Lower.zig index 6e2fc125cf..99270c8a7e 100644 --- a/src/arch/riscv64/Lower.zig +++ b/src/arch/riscv64/Lower.zig @@ -139,7 +139,7 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { switch (dst_class) { .float => { - try lower.emit(.fsgnjns, &.{ + try lower.emit(if (lower.hasFeature(.d)) .fsgnjnd else .fsgnjns, &.{ .{ .reg = rr.rd }, .{ .reg = rr.rs }, .{ .reg = rr.rs }, @@ -176,9 +176,11 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { .pseudo_load_symbol => { const payload = inst.data.payload; const data = lower.mir.extraData(Mir.LoadSymbolPayload, payload).data; + const dst_reg: bits.Register = @enumFromInt(data.register); + assert(dst_reg.class() == .int); try lower.emit(.lui, &.{ - .{ .reg = @enumFromInt(data.register) }, + .{ .reg = dst_reg }, .{ .imm = lower.reloc(.{ .load_symbol_reloc = .{ .atom_index = data.atom_index, .sym_index = data.sym_index, @@ -187,14 +189,16 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { // the above reloc implies this one try lower.emit(.addi, &.{ - .{ .reg = @enumFromInt(data.register) }, - .{ .reg = @enumFromInt(data.register) }, + .{ .reg = dst_reg }, + .{ .reg = dst_reg }, .{ .imm = Immediate.s(0) }, }); }, .pseudo_lea_rm => { const rm = inst.data.rm; + assert(rm.r.class() == .int); + const frame = rm.m.toFrameLoc(lower.mir); try lower.emit(.addi, &.{ @@ -212,78 +216,135 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { const rs1 = compare.rs1; const rs2 = compare.rs2; - switch (op) { - .eq => { - try lower.emit(.xor, &.{ - .{ .reg = rd }, - .{ .reg = rs1 }, - .{ .reg = rs2 }, - }); + const class = rs1.class(); + const size = compare.size.bitSize(); - try lower.emit(.sltiu, &.{ - .{ .reg = rd }, - .{ .reg = rd }, - .{ .imm = Immediate.s(1) }, - }); - }, - .neq => { - try lower.emit(.xor, &.{ - .{ .reg = rd }, - .{ .reg = rs1 }, - .{ .reg = rs2 }, - }); + switch (class) { + .int => switch (op) { + .eq => { + try lower.emit(.xor, &.{ + .{ .reg = rd }, + .{ .reg = rs1 }, + .{ .reg = rs2 }, + }); - try 
lower.emit(.sltu, &.{ - .{ .reg = rd }, - .{ .reg = .zero }, - .{ .reg = rd }, - }); - }, - .gt => { - try lower.emit(.sltu, &.{ - .{ .reg = rd }, - .{ .reg = rs1 }, - .{ .reg = rs2 }, - }); - }, - .gte => { - try lower.emit(.sltu, &.{ - .{ .reg = rd }, - .{ .reg = rs1 }, - .{ .reg = rs2 }, - }); + try lower.emit(.sltiu, &.{ + .{ .reg = rd }, + .{ .reg = rd }, + .{ .imm = Immediate.s(1) }, + }); + }, + .neq => { + try lower.emit(.xor, &.{ + .{ .reg = rd }, + .{ .reg = rs1 }, + .{ .reg = rs2 }, + }); - try lower.emit(.xori, &.{ - .{ .reg = rd }, - .{ .reg = rd }, - .{ .imm = Immediate.s(1) }, - }); - }, - .lt => { - try lower.emit(.slt, &.{ - .{ .reg = rd }, - .{ .reg = rs1 }, - .{ .reg = rs2 }, - }); - }, - .lte => { - try lower.emit(.slt, &.{ - .{ .reg = rd }, - .{ .reg = rs2 }, - .{ .reg = rs1 }, - }); + try lower.emit(.sltu, &.{ + .{ .reg = rd }, + .{ .reg = .zero }, + .{ .reg = rd }, + }); + }, + .gt => { + try lower.emit(.sltu, &.{ + .{ .reg = rd }, + .{ .reg = rs1 }, + .{ .reg = rs2 }, + }); + }, + .gte => { + try lower.emit(.sltu, &.{ + .{ .reg = rd }, + .{ .reg = rs1 }, + .{ .reg = rs2 }, + }); - try lower.emit(.xori, &.{ - .{ .reg = rd }, - .{ .reg = rd }, - .{ .imm = Immediate.s(1) }, - }); + try lower.emit(.xori, &.{ + .{ .reg = rd }, + .{ .reg = rd }, + .{ .imm = Immediate.s(1) }, + }); + }, + .lt => { + try lower.emit(.slt, &.{ + .{ .reg = rd }, + .{ .reg = rs1 }, + .{ .reg = rs2 }, + }); + }, + .lte => { + try lower.emit(.slt, &.{ + .{ .reg = rd }, + .{ .reg = rs2 }, + .{ .reg = rs1 }, + }); + + try lower.emit(.xori, &.{ + .{ .reg = rd }, + .{ .reg = rd }, + .{ .imm = Immediate.s(1) }, + }); + }, + }, + .float => switch (op) { + // eq + .eq => { + try lower.emit(if (size == 64) .feqd else .feqs, &.{ + .{ .reg = rd }, + .{ .reg = rs1 }, + .{ .reg = rs2 }, + }); + }, + // !(eq) + .neq => { + try lower.emit(if (size == 64) .feqd else .feqs, &.{ + .{ .reg = rd }, + .{ .reg = rs1 }, + .{ .reg = rs2 }, + }); + try lower.emit(.xori, &.{ + .{ .reg = rd }, + .{ .reg = rd }, + .{ .imm = Immediate.s(1) }, + }); + }, + .lt => { + try lower.emit(if (size == 64) .fltd else .flts, &.{ + .{ .reg = rd }, + .{ .reg = rs1 }, + .{ .reg = rs2 }, + }); + }, + .lte => { + try lower.emit(if (size == 64) .fled else .fles, &.{ + .{ .reg = rd }, + .{ .reg = rs1 }, + .{ .reg = rs2 }, + }); + }, + .gt => { + try lower.emit(if (size == 64) .fltd else .flts, &.{ + .{ .reg = rd }, + .{ .reg = rs2 }, + .{ .reg = rs1 }, + }); + }, + .gte => { + try lower.emit(if (size == 64) .fled else .fles, &.{ + .{ .reg = rd }, + .{ .reg = rs2 }, + .{ .reg = rs1 }, + }); + }, }, } }, .pseudo_not => { const rr = inst.data.rr; + assert(rr.rs.class() == .int and rr.rd.class() == .int); try lower.emit(.xori, &.{ .{ .reg = rr.rd }, @@ -408,6 +469,12 @@ pub fn fail(lower: *Lower, comptime format: []const u8, args: anytype) Error { return error.LowerFail; } +fn hasFeature(lower: *Lower, feature: std.Target.riscv.Feature) bool { + const target = lower.bin_file.comp.module.?.getTarget(); + const features = target.cpu.features; + return std.Target.riscv.featureSetHas(features, feature); +} + const Lower = @This(); const abi = @import("abi.zig"); diff --git a/src/arch/riscv64/Mir.zig b/src/arch/riscv64/Mir.zig index e26f811ff5..dd79a8dd83 100644 --- a/src/arch/riscv64/Mir.zig +++ b/src/arch/riscv64/Mir.zig @@ -72,15 +72,39 @@ pub const Inst = struct { // F extension (32-bit float) fadds, + fsubs, + fmuls, + fdivs, + + fmins, + fmaxs, + + fsqrts, + flw, fsw, + feqs, + flts, + fles, // D extension (64-bit float) faddd, + fsubd, + 
fmuld, + fdivd, + + fmind, + fmaxd, + + fsqrtd, + fld, fsd, + feqd, + fltd, + fled, /// A pseudo-instruction. Used for anything that isn't 1:1 with an /// assembly instruction. @@ -182,6 +206,7 @@ pub const Inst = struct { lt, lte, }, + size: Memory.Size, }, reloc: struct { diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig index f33930f215..1c5fbe09cd 100644 --- a/test/behavior/cast.zig +++ b/test/behavior/cast.zig @@ -1717,7 +1717,6 @@ test "peer type resolution: float and comptime-known fixed-width integer" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const i: u8 = 100; var f: f32 = 1.234; @@ -2586,7 +2585,6 @@ test "@intFromBool on vector" { test "numeric coercions with undefined" { if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const from: i32 = undefined; var to: f32 = from; diff --git a/test/behavior/floatop.zig b/test/behavior/floatop.zig index d32319c644..670a7a01ec 100644 --- a/test/behavior/floatop.zig +++ b/test/behavior/floatop.zig @@ -22,8 +22,6 @@ test "add f16" { } test "add f32/f64" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - try testAdd(f32); try comptime testAdd(f32); try testAdd(f64); @@ -60,8 +58,6 @@ test "sub f16" { } test "sub f32/f64" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - try testSub(f32); try comptime testSub(f32); try testSub(f64); @@ -98,8 +94,6 @@ test "mul f16" { } test "mul f32/f64" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - try testMul(f32); try comptime testMul(f32); try testMul(f64); @@ -1622,7 +1616,6 @@ test "comptime inf >= runtime 1" { test "comptime isNan(nan * 1)" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const nan_times_one = comptime std.math.nan(f64) * 1; try std.testing.expect(std.math.isNan(nan_times_one)); @@ -1630,7 +1623,6 @@ test "comptime isNan(nan * 1)" { test "runtime isNan(nan * 1)" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const nan_times_one = std.math.nan(f64) * 1; try std.testing.expect(std.math.isNan(nan_times_one)); @@ -1638,7 +1630,6 @@ test "runtime isNan(nan * 1)" { test "comptime isNan(nan * 0)" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const nan_times_zero = comptime std.math.nan(f64) * 0; try std.testing.expect(std.math.isNan(nan_times_zero)); @@ -1648,7 +1639,6 @@ test "comptime isNan(nan * 0)" { test "runtime isNan(nan * 0)" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == 
.stage2_riscv64) return error.SkipZigTest; const nan_times_zero = std.math.nan(f64) * 0; try std.testing.expect(std.math.isNan(nan_times_zero)); @@ -1658,7 +1648,6 @@ test "runtime isNan(nan * 0)" { test "comptime isNan(inf * 0)" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const inf_times_zero = comptime std.math.inf(f64) * 0; try std.testing.expect(std.math.isNan(inf_times_zero)); @@ -1668,7 +1657,6 @@ test "comptime isNan(inf * 0)" { test "runtime isNan(inf * 0)" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const inf_times_zero = std.math.inf(f64) * 0; try std.testing.expect(std.math.isNan(inf_times_zero)); diff --git a/test/behavior/math.zig b/test/behavior/math.zig index 5e8f1c192a..ea03d3f89d 100644 --- a/test/behavior/math.zig +++ b/test/behavior/math.zig @@ -236,7 +236,6 @@ test "float equality" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const x: f64 = 0.012; const y: f64 = x + 1.0; From 206e66858c69cd667aad3779837493968bfcd228 Mon Sep 17 00:00:00 2001 From: David Rubin Date: Mon, 13 May 2024 11:38:46 -0700 Subject: [PATCH 16/24] riscv: rename `Self` to `Func` The reasoning is very similar to the Wasm backend's: "Self" is not the most descriptive possible name here, and "Func" explains it better. Code generation happens for one function at a time, so accessing "Func" is accessing the context of the function currently being generated.
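For example, the receiver of every method changes along these lines (excerpted from the diff below, not new code; context lines are unchanged):

    -fn addNop(self: *Self) error{OutOfMemory}!Mir.Inst.Index {
    -    return self.addInst(.{
    +fn addNop(func: *Func) error{OutOfMemory}!Mir.Inst.Index {
    +    return func.addInst(.{
             .tag = .nop,
             .ops = .none,
             .data = undefined,
         });
     }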
--- src/arch/riscv64/CodeGen.zig | 3725 +++++++++++++++++----------------- 1 file changed, 1858 insertions(+), 1867 deletions(-) diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 31767e01cc..f252c9e6b7 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -281,9 +281,9 @@ const MCValue = union(enum) { const Branch = struct { inst_table: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, MCValue) = .{}, - fn deinit(self: *Branch, gpa: Allocator) void { - self.inst_table.deinit(gpa); - self.* = undefined; + fn deinit(func: *Branch, gpa: Allocator) void { + func.inst_table.deinit(gpa); + func.* = undefined; } }; @@ -317,33 +317,33 @@ const InstTracking = struct { }, .short = result }; } - fn getReg(self: InstTracking) ?Register { - return self.short.getReg(); + fn getReg(func: InstTracking) ?Register { + return func.short.getReg(); } - fn getRegs(self: *const InstTracking) []const Register { - return self.short.getRegs(); + fn getRegs(func: *const InstTracking) []const Register { + return func.short.getRegs(); } - fn spill(self: *InstTracking, function: *Self, inst: Air.Inst.Index) !void { - if (std.meta.eql(self.long, self.short)) return; // Already spilled + fn spill(func: *InstTracking, function: *Func, inst: Air.Inst.Index) !void { + if (std.meta.eql(func.long, func.short)) return; // Already spilled // Allocate or reuse frame index - switch (self.long) { - .none => self.long = try function.allocRegOrMem(inst, false), + switch (func.long) { + .none => func.long = try function.allocRegOrMem(inst, false), .load_frame => {}, - .reserved_frame => |index| self.long = .{ .load_frame = .{ .index = index } }, + .reserved_frame => |index| func.long = .{ .load_frame = .{ .index = index } }, else => unreachable, } - tracking_log.debug("spill %{d} from {} to {}", .{ inst, self.short, self.long }); - try function.genCopy(function.typeOfIndex(inst), self.long, self.short); + tracking_log.debug("spill %{d} from {} to {}", .{ inst, func.short, func.long }); + try function.genCopy(function.typeOfIndex(inst), func.long, func.short); } - fn reuseFrame(self: *InstTracking) void { - switch (self.long) { - .reserved_frame => |index| self.long = .{ .load_frame = .{ .index = index } }, + fn reuseFrame(func: *InstTracking) void { + switch (func.long) { + .reserved_frame => |index| func.long = .{ .load_frame = .{ .index = index } }, else => {}, } - self.short = switch (self.long) { + func.short = switch (func.long) { .none, .unreach, .undef, @@ -353,7 +353,7 @@ const InstTracking = struct { .lea_frame, .load_symbol, .lea_symbol, - => self.long, + => func.long, .dead, .register, .register_pair, @@ -365,14 +365,14 @@ const InstTracking = struct { }; } - fn trackSpill(self: *InstTracking, function: *Self, inst: Air.Inst.Index) !void { - try function.freeValue(self.short); - self.reuseFrame(); - tracking_log.debug("%{d} => {} (spilled)", .{ inst, self.* }); + fn trackSpill(func: *InstTracking, function: *Func, inst: Air.Inst.Index) !void { + try function.freeValue(func.short); + func.reuseFrame(); + tracking_log.debug("%{d} => {} (spilled)", .{ inst, func.* }); } - fn verifyMaterialize(self: InstTracking, target: InstTracking) void { - switch (self.long) { + fn verifyMaterialize(func: InstTracking, target: InstTracking) void { + switch (func.long) { .none, .unreach, .undef, @@ -381,7 +381,7 @@ const InstTracking = struct { .lea_frame, .load_symbol, .lea_symbol, - => assert(std.meta.eql(self.long, target.long)), + => assert(std.meta.eql(func.long, target.long)), 
.load_frame, .reserved_frame, => switch (target.long) { @@ -402,73 +402,73 @@ const InstTracking = struct { } fn materialize( - self: *InstTracking, - function: *Self, + func: *InstTracking, + function: *Func, inst: Air.Inst.Index, target: InstTracking, ) !void { - self.verifyMaterialize(target); - try self.materializeUnsafe(function, inst, target); + func.verifyMaterialize(target); + try func.materializeUnsafe(function, inst, target); } fn materializeUnsafe( - self: InstTracking, - function: *Self, + func: InstTracking, + function: *Func, inst: Air.Inst.Index, target: InstTracking, ) !void { const ty = function.typeOfIndex(inst); - if ((self.long == .none or self.long == .reserved_frame) and target.long == .load_frame) - try function.genCopy(ty, target.long, self.short); - try function.genCopy(ty, target.short, self.short); + if ((func.long == .none or func.long == .reserved_frame) and target.long == .load_frame) + try function.genCopy(ty, target.long, func.short); + try function.genCopy(ty, target.short, func.short); } - fn trackMaterialize(self: *InstTracking, inst: Air.Inst.Index, target: InstTracking) void { - self.verifyMaterialize(target); + fn trackMaterialize(func: *InstTracking, inst: Air.Inst.Index, target: InstTracking) void { + func.verifyMaterialize(target); // Don't clobber reserved frame indices - self.long = if (target.long == .none) switch (self.long) { + func.long = if (target.long == .none) switch (func.long) { .load_frame => |addr| .{ .reserved_frame = addr.index }, - .reserved_frame => self.long, + .reserved_frame => func.long, else => target.long, } else target.long; - self.short = target.short; - tracking_log.debug("%{d} => {} (materialize)", .{ inst, self.* }); + func.short = target.short; + tracking_log.debug("%{d} => {} (materialize)", .{ inst, func.* }); } - fn resurrect(self: *InstTracking, inst: Air.Inst.Index, scope_generation: u32) void { - switch (self.short) { + fn resurrect(func: *InstTracking, inst: Air.Inst.Index, scope_generation: u32) void { + switch (func.short) { .dead => |die_generation| if (die_generation >= scope_generation) { - self.reuseFrame(); - tracking_log.debug("%{d} => {} (resurrect)", .{ inst, self.* }); + func.reuseFrame(); + tracking_log.debug("%{d} => {} (resurrect)", .{ inst, func.* }); }, else => {}, } } - fn die(self: *InstTracking, function: *Self, inst: Air.Inst.Index) !void { - if (self.short == .dead) return; - try function.freeValue(self.short); - self.short = .{ .dead = function.scope_generation }; - tracking_log.debug("%{d} => {} (death)", .{ inst, self.* }); + fn die(func: *InstTracking, function: *Func, inst: Air.Inst.Index) !void { + if (func.short == .dead) return; + try function.freeValue(func.short); + func.short = .{ .dead = function.scope_generation }; + tracking_log.debug("%{d} => {} (death)", .{ inst, func.* }); } fn reuse( - self: *InstTracking, - function: *Self, + func: *InstTracking, + function: *Func, new_inst: ?Air.Inst.Index, old_inst: Air.Inst.Index, ) void { - self.short = .{ .dead = function.scope_generation }; + func.short = .{ .dead = function.scope_generation }; if (new_inst) |inst| - tracking_log.debug("%{d} => {} (reuse %{d})", .{ inst, self.*, old_inst }) + tracking_log.debug("%{d} => {} (reuse %{d})", .{ inst, func.*, old_inst }) else - tracking_log.debug("tmp => {} (reuse %{d})", .{ self.*, old_inst }); + tracking_log.debug("tmp => {} (reuse %{d})", .{ func.*, old_inst }); } - fn liveOut(self: *InstTracking, function: *Self, inst: Air.Inst.Index) void { - for (self.getRegs()) |reg| { + fn 
liveOut(func: *InstTracking, function: *Func, inst: Air.Inst.Index) void { + for (func.getRegs()) |reg| { if (function.register_manager.isRegFree(reg)) { - tracking_log.debug("%{d} => {} (live-out)", .{ inst, self.* }); + tracking_log.debug("%{d} => {} (live-out)", .{ inst, func.* }); continue; } @@ -495,18 +495,18 @@ const InstTracking = struct { // Perform side-effects of freeValue manually. function.register_manager.freeReg(reg); - tracking_log.debug("%{d} => {} (live-out %{d})", .{ inst, self.*, tracked_inst }); + tracking_log.debug("%{d} => {} (live-out %{d})", .{ inst, func.*, tracked_inst }); } } pub fn format( - self: InstTracking, + func: InstTracking, comptime _: []const u8, _: std.fmt.FormatOptions, writer: anytype, ) @TypeOf(writer).Error!void { - if (!std.meta.eql(self.long, self.short)) try writer.print("|{}| ", .{self.long}); - try writer.print("{}", .{self.short}); + if (!std.meta.eql(func.long, func.short)) try writer.print("|{}| ", .{func.long}); + try writer.print("{}", .{func.short}); } }; @@ -546,19 +546,13 @@ const FrameAlloc = struct { } }; -const StackAllocation = struct { - inst: ?Air.Inst.Index, - /// TODO: make the size inferred from the bits of the inst - size: u32, -}; - const BlockData = struct { relocs: std.ArrayListUnmanaged(Mir.Inst.Index) = .{}, state: State, - fn deinit(self: *BlockData, gpa: Allocator) void { - self.relocs.deinit(gpa); - self.* = undefined; + fn deinit(bd: *BlockData, gpa: Allocator) void { + bd.relocs.deinit(gpa); + bd.* = undefined; } }; @@ -570,31 +564,31 @@ const State = struct { scope_generation: u32, }; -fn initRetroactiveState(self: *Self) State { +fn initRetroactiveState(func: *Func) State { var state: State = undefined; - state.inst_tracking_len = @intCast(self.inst_tracking.count()); - state.scope_generation = self.scope_generation; + state.inst_tracking_len = @intCast(func.inst_tracking.count()); + state.scope_generation = func.scope_generation; return state; } -fn saveRetroactiveState(self: *Self, state: *State) !void { - const free_registers = self.register_manager.free_registers; +fn saveRetroactiveState(func: *Func, state: *State) !void { + const free_registers = func.register_manager.free_registers; var it = free_registers.iterator(.{ .kind = .unset }); while (it.next()) |index| { - const tracked_inst = self.register_manager.registers[index]; + const tracked_inst = func.register_manager.registers[index]; state.registers[index] = tracked_inst; - state.reg_tracking[index] = self.inst_tracking.get(tracked_inst).?; + state.reg_tracking[index] = func.inst_tracking.get(tracked_inst).?; } state.free_registers = free_registers; } -fn saveState(self: *Self) !State { - var state = self.initRetroactiveState(); - try self.saveRetroactiveState(&state); +fn saveState(func: *Func) !State { + var state = func.initRetroactiveState(); + try func.saveRetroactiveState(&state); return state; } -fn restoreState(self: *Self, state: State, deaths: []const Air.Inst.Index, comptime opts: struct { +fn restoreState(func: *Func, state: State, deaths: []const Air.Inst.Index, comptime opts: struct { emit_instructions: bool, update_tracking: bool, resurrect: bool, @@ -602,81 +596,81 @@ fn restoreState(self: *Self, state: State, deaths: []const Air.Inst.Index, compt }) !void { if (opts.close_scope) { for ( - self.inst_tracking.keys()[state.inst_tracking_len..], - self.inst_tracking.values()[state.inst_tracking_len..], - ) |inst, *tracking| try tracking.die(self, inst); - self.inst_tracking.shrinkRetainingCapacity(state.inst_tracking_len); + 
func.inst_tracking.keys()[state.inst_tracking_len..], + func.inst_tracking.values()[state.inst_tracking_len..], + ) |inst, *tracking| try tracking.die(func, inst); + func.inst_tracking.shrinkRetainingCapacity(state.inst_tracking_len); } if (opts.resurrect) for ( - self.inst_tracking.keys()[0..state.inst_tracking_len], - self.inst_tracking.values()[0..state.inst_tracking_len], + func.inst_tracking.keys()[0..state.inst_tracking_len], + func.inst_tracking.values()[0..state.inst_tracking_len], ) |inst, *tracking| tracking.resurrect(inst, state.scope_generation); - for (deaths) |death| try self.processDeath(death); + for (deaths) |death| try func.processDeath(death); const ExpectedContents = [@typeInfo(RegisterManager.TrackedRegisters).Array.len]RegisterLock; var stack align(@max(@alignOf(ExpectedContents), @alignOf(std.heap.StackFallbackAllocator(0)))) = if (opts.update_tracking) - {} else std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa); + {} else std.heap.stackFallback(@sizeOf(ExpectedContents), func.gpa); var reg_locks = if (opts.update_tracking) {} else try std.ArrayList(RegisterLock).initCapacity( stack.get(), @typeInfo(ExpectedContents).Array.len, ); defer if (!opts.update_tracking) { - for (reg_locks.items) |lock| self.register_manager.unlockReg(lock); + for (reg_locks.items) |lock| func.register_manager.unlockReg(lock); reg_locks.deinit(); }; for (0..state.registers.len) |index| { - const current_maybe_inst = if (self.register_manager.free_registers.isSet(index)) + const current_maybe_inst = if (func.register_manager.free_registers.isSet(index)) null else - self.register_manager.registers[index]; + func.register_manager.registers[index]; const target_maybe_inst = if (state.free_registers.isSet(index)) null else state.registers[index]; if (std.debug.runtime_safety) if (target_maybe_inst) |target_inst| - assert(self.inst_tracking.getIndex(target_inst).? < state.inst_tracking_len); + assert(func.inst_tracking.getIndex(target_inst).? 
< state.inst_tracking_len); if (opts.emit_instructions) { if (current_maybe_inst) |current_inst| { - try self.inst_tracking.getPtr(current_inst).?.spill(self, current_inst); + try func.inst_tracking.getPtr(current_inst).?.spill(func, current_inst); } if (target_maybe_inst) |target_inst| { - const target_tracking = self.inst_tracking.getPtr(target_inst).?; - try target_tracking.materialize(self, target_inst, state.reg_tracking[index]); + const target_tracking = func.inst_tracking.getPtr(target_inst).?; + try target_tracking.materialize(func, target_inst, state.reg_tracking[index]); } } if (opts.update_tracking) { if (current_maybe_inst) |current_inst| { - try self.inst_tracking.getPtr(current_inst).?.trackSpill(self, current_inst); + try func.inst_tracking.getPtr(current_inst).?.trackSpill(func, current_inst); } blk: { const inst = target_maybe_inst orelse break :blk; const reg = RegisterManager.regAtTrackedIndex(@intCast(index)); - self.register_manager.freeReg(reg); - self.register_manager.getRegAssumeFree(reg, inst); + func.register_manager.freeReg(reg); + func.register_manager.getRegAssumeFree(reg, inst); } if (target_maybe_inst) |target_inst| { - self.inst_tracking.getPtr(target_inst).?.trackMaterialize( + func.inst_tracking.getPtr(target_inst).?.trackMaterialize( target_inst, state.reg_tracking[index], ); } } else if (target_maybe_inst) |_| - try reg_locks.append(self.register_manager.lockRegIndexAssumeUnused(@intCast(index))); + try reg_locks.append(func.register_manager.lockRegIndexAssumeUnused(@intCast(index))); } if (opts.update_tracking and std.debug.runtime_safety) { - assert(self.register_manager.free_registers.eql(state.free_registers)); + assert(func.register_manager.free_registers.eql(state.free_registers)); var used_reg_it = state.free_registers.iterator(.{ .kind = .unset }); while (used_reg_it.next()) |index| - assert(self.register_manager.registers[index] == state.registers[index]); + assert(func.register_manager.registers[index] == state.registers[index]); } } -const Self = @This(); +const Func = @This(); const CallView = enum(u1) { callee, @@ -712,7 +706,7 @@ pub fn generate( } try branch_stack.append(.{}); - var function = Self{ + var function = Func{ .gpa = gpa, .air = air, .mod = mod, @@ -852,51 +846,51 @@ pub fn generate( } } -fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index { - const gpa = self.gpa; +fn addInst(func: *Func, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index { + const gpa = func.gpa; - try self.mir_instructions.ensureUnusedCapacity(gpa, 1); + try func.mir_instructions.ensureUnusedCapacity(gpa, 1); - const result_index: Mir.Inst.Index = @intCast(self.mir_instructions.len); - self.mir_instructions.appendAssumeCapacity(inst); + const result_index: Mir.Inst.Index = @intCast(func.mir_instructions.len); + func.mir_instructions.appendAssumeCapacity(inst); return result_index; } -fn addNop(self: *Self) error{OutOfMemory}!Mir.Inst.Index { - return self.addInst(.{ +fn addNop(func: *Func) error{OutOfMemory}!Mir.Inst.Index { + return func.addInst(.{ .tag = .nop, .ops = .none, .data = undefined, }); } -fn addPseudoNone(self: *Self, ops: Mir.Inst.Ops) !void { - _ = try self.addInst(.{ +fn addPseudoNone(func: *Func, ops: Mir.Inst.Ops) !void { + _ = try func.addInst(.{ .tag = .pseudo, .ops = ops, .data = undefined, }); } -fn addPseudo(self: *Self, ops: Mir.Inst.Ops) !Mir.Inst.Index { - return self.addInst(.{ +fn addPseudo(func: *Func, ops: Mir.Inst.Ops) !Mir.Inst.Index { + return func.addInst(.{ .tag = .pseudo, .ops = ops, .data = undefined, 
}); } -pub fn addExtra(self: *Self, extra: anytype) Allocator.Error!u32 { +pub fn addExtra(func: *Func, extra: anytype) Allocator.Error!u32 { const fields = std.meta.fields(@TypeOf(extra)); - try self.mir_extra.ensureUnusedCapacity(self.gpa, fields.len); - return self.addExtraAssumeCapacity(extra); + try func.mir_extra.ensureUnusedCapacity(func.gpa, fields.len); + return func.addExtraAssumeCapacity(extra); } -pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 { +pub fn addExtraAssumeCapacity(func: *Func, extra: anytype) u32 { const fields = std.meta.fields(@TypeOf(extra)); - const result: u32 = @intCast(self.mir_extra.items.len); + const result: u32 = @intCast(func.mir_extra.items.len); inline for (fields) |field| { - self.mir_extra.appendAssumeCapacity(switch (field.type) { + func.mir_extra.appendAssumeCapacity(switch (field.type) { u32 => @field(extra, field.name), i32 => @bitCast(@field(extra, field.name)), else => @compileError("bad field type"), @@ -910,13 +904,13 @@ const required_features = [_]Target.riscv.Feature{ .m, }; -fn gen(self: *Self) !void { - const mod = self.bin_file.comp.module.?; - const fn_info = mod.typeToFunc(self.fn_type).?; +fn gen(func: *Func) !void { + const mod = func.bin_file.comp.module.?; + const fn_info = mod.typeToFunc(func.fn_type).?; inline for (required_features) |feature| { - if (!self.hasFeature(feature)) { - return self.fail( + if (!func.hasFeature(feature)) { + return func.fail( "target missing required feature {s}", .{@tagName(feature)}, ); @@ -924,52 +918,52 @@ fn gen(self: *Self) !void { } if (fn_info.cc != .Naked) { - try self.addPseudoNone(.pseudo_dbg_prologue_end); + try func.addPseudoNone(.pseudo_dbg_prologue_end); - const backpatch_stack_alloc = try self.addPseudo(.pseudo_dead); - const backpatch_ra_spill = try self.addPseudo(.pseudo_dead); - const backpatch_fp_spill = try self.addPseudo(.pseudo_dead); - const backpatch_fp_add = try self.addPseudo(.pseudo_dead); - const backpatch_spill_callee_preserved_regs = try self.addPseudo(.pseudo_dead); + const backpatch_stack_alloc = try func.addPseudo(.pseudo_dead); + const backpatch_ra_spill = try func.addPseudo(.pseudo_dead); + const backpatch_fp_spill = try func.addPseudo(.pseudo_dead); + const backpatch_fp_add = try func.addPseudo(.pseudo_dead); + const backpatch_spill_callee_preserved_regs = try func.addPseudo(.pseudo_dead); - switch (self.ret_mcv.long) { + switch (func.ret_mcv.long) { .none, .unreach => {}, .indirect => { // The address where to store the return value for the caller is in a // register which the callee is free to clobber. Therefore, we purposely // spill it to stack immediately. 
- const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(Type.usize, mod)); - try self.genSetMem( + const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(Type.usize, mod)); + try func.genSetMem( .{ .frame = frame_index }, 0, Type.usize, - self.ret_mcv.long.address().offset(-self.ret_mcv.short.indirect.off), + func.ret_mcv.long.address().offset(-func.ret_mcv.short.indirect.off), ); - self.ret_mcv.long = .{ .load_frame = .{ .index = frame_index } }; - tracking_log.debug("spill {} to {}", .{ self.ret_mcv.long, frame_index }); + func.ret_mcv.long = .{ .load_frame = .{ .index = frame_index } }; + tracking_log.debug("spill {} to {}", .{ func.ret_mcv.long, frame_index }); }, else => unreachable, } - try self.genBody(self.air.getMainBody()); + try func.genBody(func.air.getMainBody()); - for (self.exitlude_jump_relocs.items) |jmp_reloc| { - self.mir_instructions.items(.data)[jmp_reloc].inst = - @intCast(self.mir_instructions.len); + for (func.exitlude_jump_relocs.items) |jmp_reloc| { + func.mir_instructions.items(.data)[jmp_reloc].inst = + @intCast(func.mir_instructions.len); } - try self.addPseudoNone(.pseudo_dbg_epilogue_begin); + try func.addPseudoNone(.pseudo_dbg_epilogue_begin); - const backpatch_restore_callee_preserved_regs = try self.addPseudo(.pseudo_dead); - const backpatch_ra_restore = try self.addPseudo(.pseudo_dead); - const backpatch_fp_restore = try self.addPseudo(.pseudo_dead); - const backpatch_stack_alloc_restore = try self.addPseudo(.pseudo_dead); - try self.addPseudoNone(.pseudo_ret); + const backpatch_restore_callee_preserved_regs = try func.addPseudo(.pseudo_dead); + const backpatch_ra_restore = try func.addPseudo(.pseudo_dead); + const backpatch_fp_restore = try func.addPseudo(.pseudo_dead); + const backpatch_stack_alloc_restore = try func.addPseudo(.pseudo_dead); + try func.addPseudoNone(.pseudo_ret); - const frame_layout = try self.computeFrameLayout(); + const frame_layout = try func.computeFrameLayout(); const need_save_reg = frame_layout.save_reg_list.count() > 0; - self.mir_instructions.set(backpatch_stack_alloc, .{ + func.mir_instructions.set(backpatch_stack_alloc, .{ .tag = .addi, .ops = .rri, .data = .{ .i_type = .{ @@ -978,7 +972,7 @@ fn gen(self: *Self) !void { .imm12 = Immediate.s(-@as(i32, @intCast(frame_layout.stack_adjust))), } }, }); - self.mir_instructions.set(backpatch_ra_spill, .{ + func.mir_instructions.set(backpatch_ra_spill, .{ .tag = .pseudo, .ops = .pseudo_store_rm, .data = .{ .rm = .{ @@ -989,7 +983,7 @@ fn gen(self: *Self) !void { }, } }, }); - self.mir_instructions.set(backpatch_ra_restore, .{ + func.mir_instructions.set(backpatch_ra_restore, .{ .tag = .pseudo, .ops = .pseudo_load_rm, .data = .{ .rm = .{ @@ -1000,7 +994,7 @@ fn gen(self: *Self) !void { }, } }, }); - self.mir_instructions.set(backpatch_fp_spill, .{ + func.mir_instructions.set(backpatch_fp_spill, .{ .tag = .pseudo, .ops = .pseudo_store_rm, .data = .{ .rm = .{ @@ -1011,7 +1005,7 @@ fn gen(self: *Self) !void { }, } }, }); - self.mir_instructions.set(backpatch_fp_restore, .{ + func.mir_instructions.set(backpatch_fp_restore, .{ .tag = .pseudo, .ops = .pseudo_load_rm, .data = .{ .rm = .{ @@ -1022,7 +1016,7 @@ fn gen(self: *Self) !void { }, } }, }); - self.mir_instructions.set(backpatch_fp_add, .{ + func.mir_instructions.set(backpatch_fp_add, .{ .tag = .addi, .ops = .rri, .data = .{ .i_type = .{ @@ -1031,7 +1025,7 @@ fn gen(self: *Self) !void { .imm12 = Immediate.s(@intCast(frame_layout.stack_adjust)), } }, }); - 
self.mir_instructions.set(backpatch_stack_alloc_restore, .{ + func.mir_instructions.set(backpatch_stack_alloc_restore, .{ .tag = .addi, .ops = .rri, .data = .{ .i_type = .{ @@ -1042,72 +1036,72 @@ fn gen(self: *Self) !void { }); if (need_save_reg) { - self.mir_instructions.set(backpatch_spill_callee_preserved_regs, .{ + func.mir_instructions.set(backpatch_spill_callee_preserved_regs, .{ .tag = .pseudo, .ops = .pseudo_spill_regs, .data = .{ .reg_list = frame_layout.save_reg_list }, }); - self.mir_instructions.set(backpatch_restore_callee_preserved_regs, .{ + func.mir_instructions.set(backpatch_restore_callee_preserved_regs, .{ .tag = .pseudo, .ops = .pseudo_restore_regs, .data = .{ .reg_list = frame_layout.save_reg_list }, }); } } else { - try self.addPseudoNone(.pseudo_dbg_prologue_end); - try self.genBody(self.air.getMainBody()); - try self.addPseudoNone(.pseudo_dbg_epilogue_begin); + try func.addPseudoNone(.pseudo_dbg_prologue_end); + try func.genBody(func.air.getMainBody()); + try func.addPseudoNone(.pseudo_dbg_epilogue_begin); } // Drop them off at the rbrace. - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = .pseudo, .ops = .pseudo_dbg_line_column, .data = .{ .pseudo_dbg_line_column = .{ - .line = self.end_di_line, - .column = self.end_di_column, + .line = func.end_di_line, + .column = func.end_di_column, } }, }); } -fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { - const zcu = self.bin_file.comp.module.?; +fn genBody(func: *Func, body: []const Air.Inst.Index) InnerError!void { + const zcu = func.bin_file.comp.module.?; const ip = &zcu.intern_pool; - const air_tags = self.air.instructions.items(.tag); + const air_tags = func.air.instructions.items(.tag); for (body) |inst| { - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) continue; + if (func.liveness.isUnused(inst) and !func.air.mustLower(inst, ip)) continue; - const old_air_bookkeeping = self.air_bookkeeping; - try self.inst_tracking.ensureUnusedCapacity(self.gpa, 1); + const old_air_bookkeeping = func.air_bookkeeping; + try func.inst_tracking.ensureUnusedCapacity(func.gpa, 1); switch (air_tags[@intFromEnum(inst)]) { // zig fmt: off - .ptr_add => try self.airPtrArithmetic(inst, .ptr_add), - .ptr_sub => try self.airPtrArithmetic(inst, .ptr_sub), + .ptr_add => try func.airPtrArithmetic(inst, .ptr_add), + .ptr_sub => try func.airPtrArithmetic(inst, .ptr_sub), - .add => try self.airBinOp(inst, .add), - .sub => try self.airBinOp(inst, .sub), + .add => try func.airBinOp(inst, .add), + .sub => try func.airBinOp(inst, .sub), .add_safe, .sub_safe, .mul_safe, - => return self.fail("TODO implement safety_checked_instructions", .{}), + => return func.fail("TODO implement safety_checked_instructions", .{}), - .add_wrap => try self.airAddWrap(inst), - .add_sat => try self.airAddSat(inst), - .sub_wrap => try self.airSubWrap(inst), - .sub_sat => try self.airSubSat(inst), - .mul => try self.airMul(inst), - .mul_wrap => try self.airMulWrap(inst), - .mul_sat => try self.airMulSat(inst), - .rem => try self.airRem(inst), - .mod => try self.airMod(inst), - .shl, .shl_exact => try self.airShl(inst), - .shl_sat => try self.airShlSat(inst), - .min => try self.airMinMax(inst, .min), - .max => try self.airMinMax(inst, .max), - .slice => try self.airSlice(inst), + .add_wrap => try func.airAddWrap(inst), + .add_sat => try func.airAddSat(inst), + .sub_wrap => try func.airSubWrap(inst), + .sub_sat => try func.airSubSat(inst), + .mul => try func.airMul(inst), + .mul_wrap => try func.airMulWrap(inst), + .mul_sat 
=> try func.airMulSat(inst), + .rem => try func.airRem(inst), + .mod => try func.airMod(inst), + .shl, .shl_exact => try func.airShl(inst), + .shl_sat => try func.airShlSat(inst), + .min => try func.airMinMax(inst, .min), + .max => try func.airMinMax(inst, .max), + .slice => try func.airSlice(inst), .sqrt, .sin, @@ -1123,157 +1117,157 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .round, .trunc_float, .neg, - => try self.airUnaryMath(inst), + => try func.airUnaryMath(inst), - .add_with_overflow => try self.airAddWithOverflow(inst), - .sub_with_overflow => try self.airSubWithOverflow(inst), - .mul_with_overflow => try self.airMulWithOverflow(inst), - .shl_with_overflow => try self.airShlWithOverflow(inst), + .add_with_overflow => try func.airAddWithOverflow(inst), + .sub_with_overflow => try func.airSubWithOverflow(inst), + .mul_with_overflow => try func.airMulWithOverflow(inst), + .shl_with_overflow => try func.airShlWithOverflow(inst), - .div_float, .div_trunc, .div_floor, .div_exact => try self.airDiv(inst), + .div_float, .div_trunc, .div_floor, .div_exact => try func.airDiv(inst), - .cmp_lt => try self.airCmp(inst), - .cmp_lte => try self.airCmp(inst), - .cmp_eq => try self.airCmp(inst), - .cmp_gte => try self.airCmp(inst), - .cmp_gt => try self.airCmp(inst), - .cmp_neq => try self.airCmp(inst), + .cmp_lt => try func.airCmp(inst), + .cmp_lte => try func.airCmp(inst), + .cmp_eq => try func.airCmp(inst), + .cmp_gte => try func.airCmp(inst), + .cmp_gt => try func.airCmp(inst), + .cmp_neq => try func.airCmp(inst), - .cmp_vector => try self.airCmpVector(inst), - .cmp_lt_errors_len => try self.airCmpLtErrorsLen(inst), + .cmp_vector => try func.airCmpVector(inst), + .cmp_lt_errors_len => try func.airCmpLtErrorsLen(inst), - .bool_and => try self.airBoolOp(inst), - .bool_or => try self.airBoolOp(inst), - .bit_and => try self.airBitAnd(inst), - .bit_or => try self.airBitOr(inst), - .xor => try self.airXor(inst), - .shr, .shr_exact => try self.airShr(inst), + .bool_and => try func.airBoolOp(inst), + .bool_or => try func.airBoolOp(inst), + .bit_and => try func.airBitAnd(inst), + .bit_or => try func.airBitOr(inst), + .xor => try func.airXor(inst), + .shr, .shr_exact => try func.airShr(inst), - .alloc => try self.airAlloc(inst), - .ret_ptr => try self.airRetPtr(inst), - .arg => try self.airArg(inst), - .assembly => try self.airAsm(inst), - .bitcast => try self.airBitCast(inst), - .block => try self.airBlock(inst), - .br => try self.airBr(inst), - .trap => try self.airTrap(), - .breakpoint => try self.airBreakpoint(), - .ret_addr => try self.airRetAddr(inst), - .frame_addr => try self.airFrameAddress(inst), - .fence => try self.airFence(), - .cond_br => try self.airCondBr(inst), - .dbg_stmt => try self.airDbgStmt(inst), - .fptrunc => try self.airFptrunc(inst), - .fpext => try self.airFpext(inst), - .intcast => try self.airIntCast(inst), - .trunc => try self.airTrunc(inst), - .int_from_bool => try self.airIntFromBool(inst), - .is_non_null => try self.airIsNonNull(inst), - .is_non_null_ptr => try self.airIsNonNullPtr(inst), - .is_null => try self.airIsNull(inst), - .is_null_ptr => try self.airIsNullPtr(inst), - .is_non_err => try self.airIsNonErr(inst), - .is_non_err_ptr => try self.airIsNonErrPtr(inst), - .is_err => try self.airIsErr(inst), - .is_err_ptr => try self.airIsErrPtr(inst), - .load => try self.airLoad(inst), - .loop => try self.airLoop(inst), - .not => try self.airNot(inst), - .int_from_ptr => try self.airIntFromPtr(inst), - .ret => try self.airRet(inst, false), 
- .ret_safe => try self.airRet(inst, true), - .ret_load => try self.airRetLoad(inst), - .store => try self.airStore(inst, false), - .store_safe => try self.airStore(inst, true), - .struct_field_ptr=> try self.airStructFieldPtr(inst), - .struct_field_val=> try self.airStructFieldVal(inst), - .array_to_slice => try self.airArrayToSlice(inst), - .float_from_int => try self.airFloatFromInt(inst), - .int_from_float => try self.airIntFromFloat(inst), - .cmpxchg_strong => try self.airCmpxchg(inst), - .cmpxchg_weak => try self.airCmpxchg(inst), - .atomic_rmw => try self.airAtomicRmw(inst), - .atomic_load => try self.airAtomicLoad(inst), - .memcpy => try self.airMemcpy(inst), - .memset => try self.airMemset(inst, false), - .memset_safe => try self.airMemset(inst, true), - .set_union_tag => try self.airSetUnionTag(inst), - .get_union_tag => try self.airGetUnionTag(inst), - .clz => try self.airClz(inst), - .ctz => try self.airCtz(inst), - .popcount => try self.airPopcount(inst), - .abs => try self.airAbs(inst), - .byte_swap => try self.airByteSwap(inst), - .bit_reverse => try self.airBitReverse(inst), - .tag_name => try self.airTagName(inst), - .error_name => try self.airErrorName(inst), - .splat => try self.airSplat(inst), - .select => try self.airSelect(inst), - .shuffle => try self.airShuffle(inst), - .reduce => try self.airReduce(inst), - .aggregate_init => try self.airAggregateInit(inst), - .union_init => try self.airUnionInit(inst), - .prefetch => try self.airPrefetch(inst), - .mul_add => try self.airMulAdd(inst), - .addrspace_cast => return self.fail("TODO: addrspace_cast", .{}), + .alloc => try func.airAlloc(inst), + .ret_ptr => try func.airRetPtr(inst), + .arg => try func.airArg(inst), + .assembly => try func.airAsm(inst), + .bitcast => try func.airBitCast(inst), + .block => try func.airBlock(inst), + .br => try func.airBr(inst), + .trap => try func.airTrap(), + .breakpoint => try func.airBreakpoint(), + .ret_addr => try func.airRetAddr(inst), + .frame_addr => try func.airFrameAddress(inst), + .fence => try func.airFence(), + .cond_br => try func.airCondBr(inst), + .dbg_stmt => try func.airDbgStmt(inst), + .fptrunc => try func.airFptrunc(inst), + .fpext => try func.airFpext(inst), + .intcast => try func.airIntCast(inst), + .trunc => try func.airTrunc(inst), + .int_from_bool => try func.airIntFromBool(inst), + .is_non_null => try func.airIsNonNull(inst), + .is_non_null_ptr => try func.airIsNonNullPtr(inst), + .is_null => try func.airIsNull(inst), + .is_null_ptr => try func.airIsNullPtr(inst), + .is_non_err => try func.airIsNonErr(inst), + .is_non_err_ptr => try func.airIsNonErrPtr(inst), + .is_err => try func.airIsErr(inst), + .is_err_ptr => try func.airIsErrPtr(inst), + .load => try func.airLoad(inst), + .loop => try func.airLoop(inst), + .not => try func.airNot(inst), + .int_from_ptr => try func.airIntFromPtr(inst), + .ret => try func.airRet(inst, false), + .ret_safe => try func.airRet(inst, true), + .ret_load => try func.airRetLoad(inst), + .store => try func.airStore(inst, false), + .store_safe => try func.airStore(inst, true), + .struct_field_ptr=> try func.airStructFieldPtr(inst), + .struct_field_val=> try func.airStructFieldVal(inst), + .array_to_slice => try func.airArrayToSlice(inst), + .float_from_int => try func.airFloatFromInt(inst), + .int_from_float => try func.airIntFromFloat(inst), + .cmpxchg_strong => try func.airCmpxchg(inst), + .cmpxchg_weak => try func.airCmpxchg(inst), + .atomic_rmw => try func.airAtomicRmw(inst), + .atomic_load => try func.airAtomicLoad(inst), + .memcpy 
=> try func.airMemcpy(inst), + .memset => try func.airMemset(inst, false), + .memset_safe => try func.airMemset(inst, true), + .set_union_tag => try func.airSetUnionTag(inst), + .get_union_tag => try func.airGetUnionTag(inst), + .clz => try func.airClz(inst), + .ctz => try func.airCtz(inst), + .popcount => try func.airPopcount(inst), + .abs => try func.airAbs(inst), + .byte_swap => try func.airByteSwap(inst), + .bit_reverse => try func.airBitReverse(inst), + .tag_name => try func.airTagName(inst), + .error_name => try func.airErrorName(inst), + .splat => try func.airSplat(inst), + .select => try func.airSelect(inst), + .shuffle => try func.airShuffle(inst), + .reduce => try func.airReduce(inst), + .aggregate_init => try func.airAggregateInit(inst), + .union_init => try func.airUnionInit(inst), + .prefetch => try func.airPrefetch(inst), + .mul_add => try func.airMulAdd(inst), + .addrspace_cast => return func.fail("TODO: addrspace_cast", .{}), - .@"try" => try self.airTry(inst), - .try_ptr => return self.fail("TODO: try_ptr", .{}), + .@"try" => try func.airTry(inst), + .try_ptr => return func.fail("TODO: try_ptr", .{}), .dbg_var_ptr, .dbg_var_val, - => try self.airDbgVar(inst), + => try func.airDbgVar(inst), - .dbg_inline_block => try self.airDbgInlineBlock(inst), + .dbg_inline_block => try func.airDbgInlineBlock(inst), - .call => try self.airCall(inst, .auto), - .call_always_tail => try self.airCall(inst, .always_tail), - .call_never_tail => try self.airCall(inst, .never_tail), - .call_never_inline => try self.airCall(inst, .never_inline), + .call => try func.airCall(inst, .auto), + .call_always_tail => try func.airCall(inst, .always_tail), + .call_never_tail => try func.airCall(inst, .never_tail), + .call_never_inline => try func.airCall(inst, .never_inline), - .atomic_store_unordered => try self.airAtomicStore(inst, .unordered), - .atomic_store_monotonic => try self.airAtomicStore(inst, .monotonic), - .atomic_store_release => try self.airAtomicStore(inst, .release), - .atomic_store_seq_cst => try self.airAtomicStore(inst, .seq_cst), + .atomic_store_unordered => try func.airAtomicStore(inst, .unordered), + .atomic_store_monotonic => try func.airAtomicStore(inst, .monotonic), + .atomic_store_release => try func.airAtomicStore(inst, .release), + .atomic_store_seq_cst => try func.airAtomicStore(inst, .seq_cst), - .struct_field_ptr_index_0 => try self.airStructFieldPtrIndex(inst, 0), - .struct_field_ptr_index_1 => try self.airStructFieldPtrIndex(inst, 1), - .struct_field_ptr_index_2 => try self.airStructFieldPtrIndex(inst, 2), - .struct_field_ptr_index_3 => try self.airStructFieldPtrIndex(inst, 3), + .struct_field_ptr_index_0 => try func.airStructFieldPtrIndex(inst, 0), + .struct_field_ptr_index_1 => try func.airStructFieldPtrIndex(inst, 1), + .struct_field_ptr_index_2 => try func.airStructFieldPtrIndex(inst, 2), + .struct_field_ptr_index_3 => try func.airStructFieldPtrIndex(inst, 3), - .field_parent_ptr => try self.airFieldParentPtr(inst), + .field_parent_ptr => try func.airFieldParentPtr(inst), - .switch_br => try self.airSwitchBr(inst), - .slice_ptr => try self.airSlicePtr(inst), - .slice_len => try self.airSliceLen(inst), + .switch_br => try func.airSwitchBr(inst), + .slice_ptr => try func.airSlicePtr(inst), + .slice_len => try func.airSliceLen(inst), - .ptr_slice_len_ptr => try self.airPtrSliceLenPtr(inst), - .ptr_slice_ptr_ptr => try self.airPtrSlicePtrPtr(inst), + .ptr_slice_len_ptr => try func.airPtrSliceLenPtr(inst), + .ptr_slice_ptr_ptr => try func.airPtrSlicePtrPtr(inst), - 
.array_elem_val => try self.airArrayElemVal(inst),
-            .slice_elem_val => try self.airSliceElemVal(inst),
-            .slice_elem_ptr => try self.airSliceElemPtr(inst),
-            .ptr_elem_val => try self.airPtrElemVal(inst),
-            .ptr_elem_ptr => try self.airPtrElemPtr(inst),
+            .array_elem_val => try func.airArrayElemVal(inst),
+            .slice_elem_val => try func.airSliceElemVal(inst),
+            .slice_elem_ptr => try func.airSliceElemPtr(inst),
+            .ptr_elem_val => try func.airPtrElemVal(inst),
+            .ptr_elem_ptr => try func.airPtrElemPtr(inst),
             .inferred_alloc, .inferred_alloc_comptime => unreachable,
-            .unreach => self.finishAirBookkeeping(),
+            .unreach => func.finishAirBookkeeping(),
-            .optional_payload => try self.airOptionalPayload(inst),
-            .optional_payload_ptr => try self.airOptionalPayloadPtr(inst),
-            .optional_payload_ptr_set => try self.airOptionalPayloadPtrSet(inst),
-            .unwrap_errunion_err => try self.airUnwrapErrErr(inst),
-            .unwrap_errunion_payload => try self.airUnwrapErrPayload(inst),
-            .unwrap_errunion_err_ptr => try self.airUnwrapErrErrPtr(inst),
-            .unwrap_errunion_payload_ptr => try self.airUnwrapErrPayloadPtr(inst),
-            .errunion_payload_ptr_set => try self.airErrUnionPayloadPtrSet(inst),
-            .err_return_trace => try self.airErrReturnTrace(inst),
-            .set_err_return_trace => try self.airSetErrReturnTrace(inst),
-            .save_err_return_trace_index => try self.airSaveErrReturnTraceIndex(inst),
+            .optional_payload => try func.airOptionalPayload(inst),
+            .optional_payload_ptr => try func.airOptionalPayloadPtr(inst),
+            .optional_payload_ptr_set => try func.airOptionalPayloadPtrSet(inst),
+            .unwrap_errunion_err => try func.airUnwrapErrErr(inst),
+            .unwrap_errunion_payload => try func.airUnwrapErrPayload(inst),
+            .unwrap_errunion_err_ptr => try func.airUnwrapErrErrPtr(inst),
+            .unwrap_errunion_payload_ptr => try func.airUnwrapErrPayloadPtr(inst),
+            .errunion_payload_ptr_set => try func.airErrUnionPayloadPtrSet(inst),
+            .err_return_trace => try func.airErrReturnTrace(inst),
+            .set_err_return_trace => try func.airSetErrReturnTrace(inst),
+            .save_err_return_trace_index => try func.airSaveErrReturnTraceIndex(inst),
-            .wrap_optional => try self.airWrapOptional(inst),
-            .wrap_errunion_payload => try self.airWrapErrUnionPayload(inst),
-            .wrap_errunion_err => try self.airWrapErrUnionErr(inst),
+            .wrap_optional => try func.airWrapOptional(inst),
+            .wrap_errunion_payload => try func.airWrapErrUnionPayload(inst),
+            .wrap_errunion_err => try func.airWrapErrUnionErr(inst),
             .add_optimized,
             .sub_optimized,
@@ -1294,16 +1288,16 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
             .cmp_vector_optimized,
             .reduce_optimized,
             .int_from_float_optimized,
-            => return self.fail("TODO implement optimized float mode", .{}),
+            => return func.fail("TODO implement optimized float mode", .{}),
-            .is_named_enum_value => return self.fail("TODO implement is_named_enum_value", .{}),
-            .error_set_has_value => return self.fail("TODO implement error_set_has_value", .{}),
-            .vector_store_elem => return self.fail("TODO implement vector_store_elem", .{}),
+            .is_named_enum_value => return func.fail("TODO implement is_named_enum_value", .{}),
+            .error_set_has_value => return func.fail("TODO implement error_set_has_value", .{}),
+            .vector_store_elem => return func.fail("TODO implement vector_store_elem", .{}),
-            .c_va_arg => return self.fail("TODO implement c_va_arg", .{}),
-            .c_va_copy => return self.fail("TODO implement c_va_copy", .{}),
-            .c_va_end => return self.fail("TODO implement c_va_end", .{}),
-            .c_va_start => return self.fail("TODO implement c_va_start", .{}),
+            .c_va_arg => return func.fail("TODO implement c_va_arg", .{}),
+            .c_va_copy => return func.fail("TODO implement c_va_copy", .{}),
+            .c_va_end => return func.fail("TODO implement c_va_end", .{}),
+            .c_va_start => return func.fail("TODO implement c_va_start", .{}),
             .wasm_memory_size => unreachable,
             .wasm_memory_grow => unreachable,
@@ -1314,19 +1308,19 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
             // zig fmt: on
         }
-        assert(!self.register_manager.lockedRegsExist());
+        assert(!func.register_manager.lockedRegsExist());
         if (std.debug.runtime_safety) {
-            if (self.air_bookkeeping < old_air_bookkeeping + 1) {
+            if (func.air_bookkeeping < old_air_bookkeeping + 1) {
                 std.debug.panic("in codegen.zig, handling of AIR instruction %{d} ('{}') did not do proper bookkeeping. Look for a missing call to finishAir.", .{ inst, air_tags[@intFromEnum(inst)] });
             }
             {
                 // check consistency of tracked registers
-                var it = self.register_manager.free_registers.iterator(.{ .kind = .unset });
+                var it = func.register_manager.free_registers.iterator(.{ .kind = .unset });
                 while (it.next()) |index| {
-                    const tracked_inst = self.register_manager.registers[index];
+                    const tracked_inst = func.register_manager.registers[index];
                     tracking_log.debug("tracked inst: {}", .{tracked_inst});
-                    const tracking = self.getResolvedInstValue(tracked_inst);
+                    const tracking = func.getResolvedInstValue(tracked_inst);
                     for (tracking.getRegs()) |reg| {
                         if (RegisterManager.indexOfRegIntoTracked(reg).? == index) break;
                     } else return std.debug.panic(
@@ -1338,72 +1332,72 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
         }
     }
 }
-fn getValue(self: *Self, value: MCValue, inst: ?Air.Inst.Index) !void {
-    for (value.getRegs()) |reg| try self.register_manager.getReg(reg, inst);
+fn getValue(func: *Func, value: MCValue, inst: ?Air.Inst.Index) !void {
+    for (value.getRegs()) |reg| try func.register_manager.getReg(reg, inst);
 }
-fn getValueIfFree(self: *Self, value: MCValue, inst: ?Air.Inst.Index) void {
-    for (value.getRegs()) |reg| if (self.register_manager.isRegFree(reg))
-        self.register_manager.getRegAssumeFree(reg, inst);
+fn getValueIfFree(func: *Func, value: MCValue, inst: ?Air.Inst.Index) void {
+    for (value.getRegs()) |reg| if (func.register_manager.isRegFree(reg))
+        func.register_manager.getRegAssumeFree(reg, inst);
 }
-fn freeValue(self: *Self, value: MCValue) !void {
+fn freeValue(func: *Func, value: MCValue) !void {
     switch (value) {
-        .register => |reg| self.register_manager.freeReg(reg),
-        .register_pair => |regs| for (regs) |reg| self.register_manager.freeReg(reg),
-        .register_offset => |reg_off| self.register_manager.freeReg(reg_off.reg),
+        .register => |reg| func.register_manager.freeReg(reg),
+        .register_pair => |regs| for (regs) |reg| func.register_manager.freeReg(reg),
+        .register_offset => |reg_off| func.register_manager.freeReg(reg_off.reg),
         else => {}, // TODO process stack allocation death
     }
 }
-fn feed(self: *Self, bt: *Liveness.BigTomb, operand: Air.Inst.Ref) !void {
+fn feed(func: *Func, bt: *Liveness.BigTomb, operand: Air.Inst.Ref) !void {
     if (bt.feed()) if (operand.toIndex()) |inst| {
         log.debug("feed inst: %{}", .{inst});
-        try self.processDeath(inst);
+        try func.processDeath(inst);
     };
 }
 /// Asserts there is already capacity to insert into top branch inst_table.
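+/// "Death" here means the value's tracking entry is released so its registers
+/// and frame slots become reusable.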
-fn processDeath(self: *Self, inst: Air.Inst.Index) !void {
-    try self.inst_tracking.getPtr(inst).?.die(self, inst);
+fn processDeath(func: *Func, inst: Air.Inst.Index) !void {
+    try func.inst_tracking.getPtr(inst).?.die(func, inst);
 }
 /// Called when there are no operands, and the instruction is always unreferenced.
-fn finishAirBookkeeping(self: *Self) void {
+fn finishAirBookkeeping(func: *Func) void {
     if (std.debug.runtime_safety) {
-        self.air_bookkeeping += 1;
+        func.air_bookkeeping += 1;
     }
 }
-fn finishAirResult(self: *Self, inst: Air.Inst.Index, result: MCValue) void {
-    if (self.liveness.isUnused(inst)) switch (result) {
+fn finishAirResult(func: *Func, inst: Air.Inst.Index, result: MCValue) void {
+    if (func.liveness.isUnused(inst)) switch (result) {
         .none, .dead, .unreach => {},
         else => unreachable, // Why didn't the result die?
     } else {
         tracking_log.debug("%{d} => {} (birth)", .{ inst, result });
-        self.inst_tracking.putAssumeCapacityNoClobber(inst, InstTracking.init(result));
+        func.inst_tracking.putAssumeCapacityNoClobber(inst, InstTracking.init(result));
         // In some cases, an operand may be reused as the result.
         // If that operand died and was a register, it was freed by
        // processDeath, so we have to "re-allocate" the register.
-        self.getValueIfFree(result, inst);
+        func.getValueIfFree(result, inst);
     }
-    self.finishAirBookkeeping();
+    func.finishAirBookkeeping();
 }
 fn finishAir(
-    self: *Self,
+    func: *Func,
     inst: Air.Inst.Index,
     result: MCValue,
     operands: [Liveness.bpi - 1]Air.Inst.Ref,
 ) !void {
-    var tomb_bits = self.liveness.getTombBits(inst);
+    var tomb_bits = func.liveness.getTombBits(inst);
     for (operands) |op| {
         const dies = @as(u1, @truncate(tomb_bits)) != 0;
         tomb_bits >>= 1;
         if (!dies) continue;
-        try self.processDeath(op.toIndexAllowNone() orelse continue);
+        try func.processDeath(op.toIndexAllowNone() orelse continue);
     }
-    self.finishAirResult(inst, result);
+    func.finishAirResult(inst, result);
 }
 const FrameLayout = struct {
@@ -1412,7 +1406,7 @@ const FrameLayout = struct {
 };
 fn setFrameLoc(
-    self: *Self,
+    func: *Func,
     frame_index: FrameIndex,
     base: Register,
     offset: *i32,
@@ -1420,24 +1414,24 @@ fn setFrameLoc(
 ) void {
     const frame_i = @intFromEnum(frame_index);
     if (aligned) {
-        const alignment: InternPool.Alignment = self.frame_allocs.items(.abi_align)[frame_i];
+        const alignment: InternPool.Alignment = func.frame_allocs.items(.abi_align)[frame_i];
         offset.* = if (math.sign(offset.*) < 0)
             -1 * @as(i32, @intCast(alignment.backward(@intCast(@abs(offset.*)))))
         else
            @intCast(alignment.forward(@intCast(@abs(offset.*))));
     }
-    self.frame_locs.set(frame_i, .{ .base = base, .disp = offset.* });
-    offset.* += self.frame_allocs.items(.abi_size)[frame_i];
+    func.frame_locs.set(frame_i, .{ .base = base, .disp = offset.* });
+    offset.* += func.frame_allocs.items(.abi_size)[frame_i];
 }
-fn computeFrameLayout(self: *Self) !FrameLayout {
-    const frame_allocs_len = self.frame_allocs.len;
-    try self.frame_locs.resize(self.gpa, frame_allocs_len);
-    const stack_frame_order = try self.gpa.alloc(FrameIndex, frame_allocs_len - FrameIndex.named_count);
-    defer self.gpa.free(stack_frame_order);
+fn computeFrameLayout(func: *Func) !FrameLayout {
+    const frame_allocs_len = func.frame_allocs.len;
+    try func.frame_locs.resize(func.gpa, frame_allocs_len);
+    const stack_frame_order = try func.gpa.alloc(FrameIndex, frame_allocs_len - FrameIndex.named_count);
+    defer func.gpa.free(stack_frame_order);
-    const frame_size = self.frame_allocs.items(.abi_size);
-    const frame_align = self.frame_allocs.items(.abi_align);
+    const frame_size = func.frame_allocs.items(.abi_size);
+    const frame_align = func.frame_allocs.items(.abi_align);
     for (stack_frame_order, FrameIndex.named_count..) |*frame_order, frame_index|
         frame_order.* = @enumFromInt(frame_index);
@@ -1455,7 +1449,7 @@ fn computeFrameLayout(self: *Self) !FrameLayout {
     var save_reg_list = Mir.RegisterList{};
     for (abi.Registers.all_preserved) |reg| {
-        if (self.register_manager.isRegAllocated(reg)) {
+        if (func.register_manager.isRegAllocated(reg)) {
             save_reg_list.push(&abi.Registers.all_preserved, reg);
         }
     }
@@ -1489,11 +1483,11 @@ fn computeFrameLayout(self: *Self) !FrameLayout {
     // store the ra at total_size - 8, so it's the very first thing in the stack
     // relative to the fp
-    self.frame_locs.set(
+    func.frame_locs.set(
         @intFromEnum(FrameIndex.ret_addr),
         .{ .base = .sp, .disp = acc_frame_size - 8 },
     );
-    self.frame_locs.set(
+    func.frame_locs.set(
         @intFromEnum(FrameIndex.base_ptr),
         .{ .base = .sp, .disp = acc_frame_size - 16 },
     );
@@ -1502,11 +1496,11 @@ fn computeFrameLayout(self: *Self) !FrameLayout {
     // not need to know the size of the first allocation. Stack offsets point at the "bottom"
     // of variables.
     var s0_offset: i32 = -acc_frame_size;
-    self.setFrameLoc(.stack_frame, .s0, &s0_offset, true);
-    for (stack_frame_order) |frame_index| self.setFrameLoc(frame_index, .s0, &s0_offset, true);
-    self.setFrameLoc(.args_frame, .s0, &s0_offset, true);
-    self.setFrameLoc(.call_frame, .s0, &s0_offset, true);
-    self.setFrameLoc(.spill_frame, .s0, &s0_offset, true);
+    func.setFrameLoc(.stack_frame, .s0, &s0_offset, true);
+    for (stack_frame_order) |frame_index| func.setFrameLoc(frame_index, .s0, &s0_offset, true);
+    func.setFrameLoc(.args_frame, .s0, &s0_offset, true);
+    func.setFrameLoc(.call_frame, .s0, &s0_offset, true);
+    func.setFrameLoc(.spill_frame, .s0, &s0_offset, true);
     return .{
         .stack_adjust = @intCast(acc_frame_size),
@@ -1514,21 +1508,21 @@ fn computeFrameLayout(self: *Self) !FrameLayout {
     };
 }
-fn ensureProcessDeathCapacity(self: *Self, additional_count: usize) !void {
-    const table = &self.branch_stack.items[self.branch_stack.items.len - 1].inst_table;
-    try table.ensureUnusedCapacity(self.gpa, additional_count);
+fn ensureProcessDeathCapacity(func: *Func, additional_count: usize) !void {
+    const table = &func.branch_stack.items[func.branch_stack.items.len - 1].inst_table;
+    try table.ensureUnusedCapacity(func.gpa, additional_count);
 }
-fn memSize(self: *Self, ty: Type) Memory.Size {
-    const mod = self.bin_file.comp.module.?;
+fn memSize(func: *Func, ty: Type) Memory.Size {
+    const mod = func.bin_file.comp.module.?;
     return switch (ty.zigTypeTag(mod)) {
-        .Float => Memory.Size.fromBitSize(ty.floatBits(self.target.*)),
+        .Float => Memory.Size.fromBitSize(ty.floatBits(func.target.*)),
         else => Memory.Size.fromByteSize(ty.abiSize(mod)),
     };
 }
-fn splitType(self: *Self, ty: Type) ![2]Type {
-    const zcu = self.bin_file.comp.module.?;
+fn splitType(func: *Func, ty: Type) ![2]Type {
+    const zcu = func.bin_file.comp.module.?;
     const classes = mem.sliceTo(&abi.classifySystem(ty, zcu), .none);
     var parts: [2]Type = undefined;
     if (classes.len == 2) for (&parts, classes, 0..) |*part, class, part_i| {
@@ -1545,63 +1539,63 @@ fn splitType(self: *Self, ty: Type) ![2]Type {
             },
             else => unreachable,
         },
-        else => return self.fail("TODO: splitType class {}", .{class}),
+        else => return func.fail("TODO: splitType class {}", .{class}),
     };
 } else if (parts[0].abiSize(zcu) + parts[1].abiSize(zcu) == ty.abiSize(zcu)) return parts;
-    return self.fail("TODO implement splitType for {}", .{ty.fmt(zcu)});
+    return func.fail("TODO implement splitType for {}", .{ty.fmt(zcu)});
 }
-fn symbolIndex(self: *Self) !u32 {
-    const zcu = self.bin_file.comp.module.?;
-    const decl_index = zcu.funcOwnerDeclIndex(self.func_index);
-    return switch (self.bin_file.tag) {
+fn symbolIndex(func: *Func) !u32 {
+    const zcu = func.bin_file.comp.module.?;
+    const decl_index = zcu.funcOwnerDeclIndex(func.func_index);
+    return switch (func.bin_file.tag) {
         .elf => blk: {
-            const elf_file = self.bin_file.cast(link.File.Elf).?;
+            const elf_file = func.bin_file.cast(link.File.Elf).?;
             const atom_index = try elf_file.zigObjectPtr().?.getOrCreateMetadataForDecl(elf_file, decl_index);
             break :blk atom_index;
         },
-        else => return self.fail("TODO symbolIndex {s}", .{@tagName(self.bin_file.tag)}),
+        else => return func.fail("TODO symbolIndex {s}", .{@tagName(func.bin_file.tag)}),
     };
 }
-fn allocFrameIndex(self: *Self, alloc: FrameAlloc) !FrameIndex {
-    const frame_allocs_slice = self.frame_allocs.slice();
+fn allocFrameIndex(func: *Func, alloc: FrameAlloc) !FrameIndex {
+    const frame_allocs_slice = func.frame_allocs.slice();
     const frame_size = frame_allocs_slice.items(.abi_size);
     const frame_align = frame_allocs_slice.items(.abi_align);
     const stack_frame_align = &frame_align[@intFromEnum(FrameIndex.stack_frame)];
     stack_frame_align.* = stack_frame_align.max(alloc.abi_align);
-    for (self.free_frame_indices.keys(), 0..) |frame_index, free_i| {
+    for (func.free_frame_indices.keys(), 0..) |frame_index, free_i| {
         const abi_size = frame_size[@intFromEnum(frame_index)];
         if (abi_size != alloc.abi_size) continue;
         const abi_align = &frame_align[@intFromEnum(frame_index)];
         abi_align.* = abi_align.max(alloc.abi_align);
-        _ = self.free_frame_indices.swapRemoveAt(free_i);
+        _ = func.free_frame_indices.swapRemoveAt(free_i);
         return frame_index;
     }
-    const frame_index: FrameIndex = @enumFromInt(self.frame_allocs.len);
-    try self.frame_allocs.append(self.gpa, alloc);
+    const frame_index: FrameIndex = @enumFromInt(func.frame_allocs.len);
+    try func.frame_allocs.append(func.gpa, alloc);
     log.debug("allocated frame {}", .{frame_index});
     return frame_index;
 }
 /// Use a pointer instruction as the basis for allocating stack memory.
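+/// The frame slot is sized for the pointer's child type and aligned to the
+/// pointer's alignment (at least 1).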
-fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !FrameIndex {
-    const zcu = self.bin_file.comp.module.?;
-    const ptr_ty = self.typeOfIndex(inst);
+fn allocMemPtr(func: *Func, inst: Air.Inst.Index) !FrameIndex {
+    const zcu = func.bin_file.comp.module.?;
+    const ptr_ty = func.typeOfIndex(inst);
     const val_ty = ptr_ty.childType(zcu);
-    return self.allocFrameIndex(FrameAlloc.init(.{
+    return func.allocFrameIndex(FrameAlloc.init(.{
         .size = math.cast(u32, val_ty.abiSize(zcu)) orelse {
-            return self.fail("type '{}' too big to fit into stack frame", .{val_ty.fmt(zcu)});
+            return func.fail("type '{}' too big to fit into stack frame", .{val_ty.fmt(zcu)});
         },
         .alignment = ptr_ty.ptrAlignment(zcu).max(.@"1"),
     }));
 }
-fn typeRegClass(self: *Self, ty: Type) abi.RegisterClass {
-    const zcu = self.bin_file.comp.module.?;
+fn typeRegClass(func: *Func, ty: Type) abi.RegisterClass {
+    const zcu = func.bin_file.comp.module.?;
     return switch (ty.zigTypeTag(zcu)) {
         .Float => .float,
         .Vector => @panic("TODO: typeRegClass for Vectors"),
@@ -1609,8 +1603,8 @@ fn typeRegClass(self: *Self, ty: Type) abi.RegisterClass {
     };
 }
-fn regGeneralClassForType(self: *Self, ty: Type) RegisterManager.RegisterBitSet {
-    const zcu = self.bin_file.comp.module.?;
+fn regGeneralClassForType(func: *Func, ty: Type) RegisterManager.RegisterBitSet {
+    const zcu = func.bin_file.comp.module.?;
     return switch (ty.zigTypeTag(zcu)) {
         .Float => abi.Registers.Float.general_purpose,
         .Vector => @panic("TODO: regGeneralClassForType for Vectors"),
@@ -1618,8 +1612,8 @@ fn regGeneralClassForType(self: *Self, ty: Type) RegisterManager.RegisterBitSet
     };
 }
-fn regTempClassForType(self: *Self, ty: Type) RegisterManager.RegisterBitSet {
-    const zcu = self.bin_file.comp.module.?;
+fn regTempClassForType(func: *Func, ty: Type) RegisterManager.RegisterBitSet {
+    const zcu = func.bin_file.comp.module.?;
     return switch (ty.zigTypeTag(zcu)) {
         .Float => abi.Registers.Float.temporary,
         .Vector => @panic("TODO: regTempClassForType for Vectors"),
@@ -1627,12 +1621,12 @@ fn regTempClassForType(self: *Self, ty: Type) RegisterManager.RegisterBitSet {
     };
 }
-fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
-    const zcu = self.bin_file.comp.module.?;
-    const elem_ty = self.typeOfIndex(inst);
+fn allocRegOrMem(func: *Func, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
+    const zcu = func.bin_file.comp.module.?;
+    const elem_ty = func.typeOfIndex(inst);
     const abi_size = math.cast(u32, elem_ty.abiSize(zcu)) orelse {
-        return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(zcu)});
+        return func.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(zcu)});
     };
     const min_size: u32 = switch (elem_ty.zigTypeTag(zcu)) {
@@ -1642,20 +1636,20 @@ fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
     };
     if (reg_ok and abi_size <= min_size) {
-        if (self.register_manager.tryAllocReg(inst, self.regGeneralClassForType(elem_ty))) |reg| {
+        if (func.register_manager.tryAllocReg(inst, func.regGeneralClassForType(elem_ty))) |reg| {
            return .{ .register = reg };
        }
    }
-    const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(elem_ty, zcu));
+    const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(elem_ty, zcu));
     return .{ .load_frame = .{ .index = frame_index } };
 }
 /// Allocates a register from the general purpose set and returns the Register and the Lock.
 ///
 /// Up to the caller to unlock the register later.
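+/// Typical usage (the pattern used throughout this file):
+/// `const reg, const lock = try func.allocReg(.int); defer func.register_manager.unlockReg(lock);`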
-fn allocReg(self: *Self, reg_class: abi.RegisterClass) !struct { Register, RegisterLock } {
-    if (reg_class == .float and !self.hasFeature(.f))
+fn allocReg(func: *Func, reg_class: abi.RegisterClass) !struct { Register, RegisterLock } {
+    if (reg_class == .float and !func.hasFeature(.f))
         std.debug.panic("allocReg class == float where F isn't enabled", .{});
     const class = switch (reg_class) {
@@ -1663,8 +1657,8 @@ fn allocReg(self: *Self, reg_class: abi.RegisterClass) !struct { Register, RegisterLock } {
         .float => abi.Registers.Float.general_purpose,
     };
-    const reg = try self.register_manager.allocReg(null, class);
-    const lock = self.register_manager.lockRegAssumeUnused(reg);
+    const reg = try func.register_manager.allocReg(null, class);
+    const lock = func.register_manager.lockRegAssumeUnused(reg);
     return .{ reg, lock };
 }
@@ -1677,8 +1671,8 @@ const PromoteOptions = struct {
 /// Similar to `allocReg` but will copy the MCValue into the Register unless `operand` is already
 /// a register, in which case it will return a possible lock to that register.
-fn promoteReg(self: *Self, ty: Type, operand: MCValue, options: PromoteOptions) !struct { Register, ?RegisterLock } {
-    const zcu = self.bin_file.comp.module.?;
+fn promoteReg(func: *Func, ty: Type, operand: MCValue, options: PromoteOptions) !struct { Register, ?RegisterLock } {
+    const zcu = func.bin_file.comp.module.?;
     const bit_size = ty.bitSize(zcu);
     if (operand == .register) {
@@ -1687,7 +1681,7 @@ fn promoteReg(self: *Self, ty: Type, operand: MCValue, options: PromoteOptions)
             // we make sure to emit the truncate manually because binOp will call this function
             // and it could cause an infinite loop
-            _ = try self.addInst(.{
+            _ = try func.addInst(.{
                 .tag = .slli,
                 .ops = .rri,
                 .data = .{
@@ -1699,7 +1693,7 @@ fn promoteReg(self: *Self, ty: Type, operand: MCValue, options: PromoteOptions)
                 },
             });
-            _ = try self.addInst(.{
+            _ = try func.addInst(.{
                 .tag = .srli,
                 .ops = .rri,
                 .data = .{
@@ -1712,13 +1706,13 @@ fn promoteReg(self: *Self, ty: Type, operand: MCValue, options: PromoteOptions)
             });
         }
-        return .{ op_reg, self.register_manager.lockReg(operand.register) };
+        return .{ op_reg, func.register_manager.lockReg(operand.register) };
     }
-    const reg, const lock = try self.allocReg(self.typeRegClass(ty));
+    const reg, const lock = try func.allocReg(func.typeRegClass(ty));
     if (options.zero and reg.class() == .int) {
-        _ = try self.addInst(.{
+        _ = try func.addInst(.{
             .tag = .pseudo,
             .ops = .pseudo_mv,
             .data = .{ .rr = .{
@@ -1728,26 +1722,26 @@ fn promoteReg(self: *Self, ty: Type, operand: MCValue, options: PromoteOptions)
         });
     }
-    try self.genSetReg(ty, reg, operand);
+    try func.genSetReg(ty, reg, operand);
     return .{ reg, lock };
 }
-fn elemOffset(self: *Self, index_ty: Type, index: MCValue, elem_size: u64) !Register {
+fn elemOffset(func: *Func, index_ty: Type, index: MCValue, elem_size: u64) !Register {
     const reg: Register = blk: {
         switch (index) {
             .immediate => |imm| {
                // Optimisation: if index MCValue is an immediate, we can multiply in `comptime`
                // and set the register directly to the scaled offset as an immediate.
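+                // e.g. an immediate index of 3 with 4-byte elements folds to the immediate 12,
+                // so no runtime multiply is emitted.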
-                const reg = try self.register_manager.allocReg(null, self.regGeneralClassForType(index_ty));
-                try self.genSetReg(index_ty, reg, .{ .immediate = imm * elem_size });
+                const reg = try func.register_manager.allocReg(null, func.regGeneralClassForType(index_ty));
+                try func.genSetReg(index_ty, reg, .{ .immediate = imm * elem_size });
                 break :blk reg;
             },
             else => {
-                const reg = try self.copyToTmpRegister(index_ty, index);
-                const lock = self.register_manager.lockRegAssumeUnused(reg);
-                defer self.register_manager.unlockReg(lock);
+                const reg = try func.copyToTmpRegister(index_ty, index);
+                const lock = func.register_manager.lockRegAssumeUnused(reg);
+                defer func.register_manager.unlockReg(lock);
-                const result = try self.binOp(
+                const result = try func.binOp(
                     .mul,
                     .{ .register = reg },
                     index_ty,
@@ -1761,72 +1755,72 @@ fn elemOffset(self: *Self, index_ty: Type, index: MCValue, elem_size: u64) !Register {
     return reg;
 }
-pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void {
-    const tracking = self.inst_tracking.getPtr(inst) orelse return;
+pub fn spillInstruction(func: *Func, reg: Register, inst: Air.Inst.Index) !void {
+    const tracking = func.inst_tracking.getPtr(inst) orelse return;
     for (tracking.getRegs()) |tracked_reg| {
         if (tracked_reg.id() == reg.id()) break;
     } else unreachable; // spilled reg not tracked with spilled instruction
-    try tracking.spill(self, inst);
-    try tracking.trackSpill(self, inst);
+    try tracking.spill(func, inst);
+    try tracking.trackSpill(func, inst);
 }
 /// Copies a value to a register without tracking the register. The register is not considered
 /// allocated. A second call to `copyToTmpRegister` may return the same register.
 /// This can have a side effect of spilling instructions to the stack to free up a register.
-fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register {
-    log.debug("copyToTmpRegister ty: {}", .{ty.fmt(self.bin_file.comp.module.?)});
-    const reg = try self.register_manager.allocReg(null, self.regTempClassForType(ty));
-    try self.genSetReg(ty, reg, mcv);
+fn copyToTmpRegister(func: *Func, ty: Type, mcv: MCValue) !Register {
+    log.debug("copyToTmpRegister ty: {}", .{ty.fmt(func.bin_file.comp.module.?)});
+    const reg = try func.register_manager.allocReg(null, func.regTempClassForType(ty));
+    try func.genSetReg(ty, reg, mcv);
     return reg;
 }
 /// Allocates a new register and copies `mcv` into it.
 /// `reg_owner` is the instruction that gets associated with the register in the register table.
 /// This can have a side effect of spilling instructions to the stack to free up a register.
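+/// Unlike `copyToTmpRegister`, the register is allocated to `reg_owner` and stays tracked.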
-fn copyToNewRegister(self: *Self, reg_owner: Air.Inst.Index, mcv: MCValue) !MCValue {
-    const ty = self.typeOfIndex(reg_owner);
-    const reg = try self.register_manager.allocReg(reg_owner, self.regGeneralClassForType(ty));
-    try self.genSetReg(self.typeOfIndex(reg_owner), reg, mcv);
+fn copyToNewRegister(func: *Func, reg_owner: Air.Inst.Index, mcv: MCValue) !MCValue {
+    const ty = func.typeOfIndex(reg_owner);
+    const reg = try func.register_manager.allocReg(reg_owner, func.regGeneralClassForType(ty));
+    try func.genSetReg(func.typeOfIndex(reg_owner), reg, mcv);
     return MCValue{ .register = reg };
 }
-fn airAlloc(self: *Self, inst: Air.Inst.Index) !void {
-    const result = MCValue{ .lea_frame = .{ .index = try self.allocMemPtr(inst) } };
-    return self.finishAir(inst, result, .{ .none, .none, .none });
+fn airAlloc(func: *Func, inst: Air.Inst.Index) !void {
+    const result = MCValue{ .lea_frame = .{ .index = try func.allocMemPtr(inst) } };
+    return func.finishAir(inst, result, .{ .none, .none, .none });
 }
-fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void {
-    const result: MCValue = switch (self.ret_mcv.long) {
-        .none => .{ .lea_frame = .{ .index = try self.allocMemPtr(inst) } },
+fn airRetPtr(func: *Func, inst: Air.Inst.Index) !void {
+    const result: MCValue = switch (func.ret_mcv.long) {
+        .none => .{ .lea_frame = .{ .index = try func.allocMemPtr(inst) } },
         .load_frame => .{ .register_offset = .{
-            .reg = (try self.copyToNewRegister(
+            .reg = (try func.copyToNewRegister(
                 inst,
-                self.ret_mcv.long,
+                func.ret_mcv.long,
             )).register,
-            .off = self.ret_mcv.short.indirect.off,
+            .off = func.ret_mcv.short.indirect.off,
         } },
-        else => |t| return self.fail("TODO: airRetPtr {s}", .{@tagName(t)}),
+        else => |t| return func.fail("TODO: airRetPtr {s}", .{@tagName(t)}),
     };
-    return self.finishAir(inst, result, .{ .none, .none, .none });
+    return func.finishAir(inst, result, .{ .none, .none, .none });
 }
-fn airFptrunc(self: *Self, inst: Air.Inst.Index) !void {
-    const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
-    const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement airFptrunc for {}", .{self.target.cpu.arch});
-    return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+fn airFptrunc(func: *Func, inst: Air.Inst.Index) !void {
+    const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+    const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement airFptrunc for {}", .{func.target.cpu.arch});
+    return func.finishAir(inst, result, .{ ty_op.operand, .none, .none });
 }
-fn airFpext(self: *Self, inst: Air.Inst.Index) !void {
-    const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
-    const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement airFpext for {}", .{self.target.cpu.arch});
-    return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+fn airFpext(func: *Func, inst: Air.Inst.Index) !void {
+    const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
    const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement airFpext for {}", .{func.target.cpu.arch});
+    return func.finishAir(inst, result, .{ ty_op.operand, .none, .none });
 }
-fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
-    const zcu = self.bin_file.comp.module.?;
-    const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
-    const src_ty = self.typeOf(ty_op.operand);
-    const dst_ty = self.typeOfIndex(inst);
+fn airIntCast(func: *Func, inst: Air.Inst.Index) !void {
+    const zcu = func.bin_file.comp.module.?;
+    const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+    const src_ty = func.typeOf(ty_op.operand);
+    const dst_ty = func.typeOfIndex(inst);
     const result: MCValue = result: {
         const src_int_info = src_ty.intInfo(zcu);
@@ -1834,20 +1828,20 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
         const min_ty = if (dst_int_info.bits < src_int_info.bits) dst_ty else src_ty;
-        const src_mcv = try self.resolveInst(ty_op.operand);
+        const src_mcv = try func.resolveInst(ty_op.operand);
         const src_storage_bits: u16 = switch (src_mcv) {
             .register => 64,
             .load_frame => src_int_info.bits,
-            else => return self.fail("airIntCast from {s}", .{@tagName(src_mcv)}),
+            else => return func.fail("airIntCast from {s}", .{@tagName(src_mcv)}),
         };
         const dst_mcv = if (dst_int_info.bits <= src_storage_bits and
             math.divCeil(u16, dst_int_info.bits, 64) catch unreachable ==
             math.divCeil(u32, src_storage_bits, 64) catch unreachable and
-            self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) src_mcv else dst: {
-            const dst_mcv = try self.allocRegOrMem(inst, true);
-            try self.genCopy(min_ty, dst_mcv, src_mcv);
+            func.reuseOperand(inst, ty_op.operand, 0, src_mcv)) src_mcv else dst: {
+            const dst_mcv = try func.allocRegOrMem(inst, true);
+            try func.genCopy(min_ty, dst_mcv, src_mcv);
             break :dst dst_mcv;
         };
@@ -1858,53 +1852,53 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
             break :result null; // TODO
         break :result dst_mcv;
-    } orelse return self.fail("TODO implement airIntCast from {} to {}", .{
+    } orelse return func.fail("TODO implement airIntCast from {} to {}", .{
         src_ty.fmt(zcu), dst_ty.fmt(zcu),
     });
-    return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+    return func.finishAir(inst, result, .{ ty_op.operand, .none, .none });
 }
-fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
-    const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
-    if (self.liveness.isUnused(inst))
-        return self.finishAir(inst, .unreach, .{ ty_op.operand, .none, .none });
+fn airTrunc(func: *Func, inst: Air.Inst.Index) !void {
+    const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+    if (func.liveness.isUnused(inst))
+        return func.finishAir(inst, .unreach, .{ ty_op.operand, .none, .none });
-    const operand = try self.resolveInst(ty_op.operand);
+    const operand = try func.resolveInst(ty_op.operand);
     _ = operand;
-    return self.fail("TODO implement trunc for {}", .{self.target.cpu.arch});
-    // return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+    return func.fail("TODO implement trunc for {}", .{func.target.cpu.arch});
+    // return func.finishAir(inst, result, .{ ty_op.operand, .none, .none });
 }
-fn airIntFromBool(self: *Self, inst: Air.Inst.Index) !void {
-    const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
-    const operand = try self.resolveInst(un_op);
-    const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else operand;
-    return self.finishAir(inst, result, .{ un_op, .none, .none });
+fn airIntFromBool(func: *Func, inst: Air.Inst.Index) !void {
+    const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
+    const operand = try func.resolveInst(un_op);
+    const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else operand;
+    return func.finishAir(inst, result, .{ un_op, .none, .none });
 }
-fn airNot(self: *Self, inst: Air.Inst.Index) !void {
-    const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
-    const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: {
-        const zcu = self.bin_file.comp.module.?;
+fn airNot(func: *Func, inst: Air.Inst.Index) !void {
+    const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+    const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: {
+        const zcu = func.bin_file.comp.module.?;
-        const operand = try self.resolveInst(ty_op.operand);
-        const ty = self.typeOf(ty_op.operand);
+        const operand = try func.resolveInst(ty_op.operand);
+        const ty = func.typeOf(ty_op.operand);
         switch (ty.zigTypeTag(zcu)) {
             .Bool => {
                 const operand_reg = blk: {
                     if (operand == .register) break :blk operand.register;
-                    break :blk try self.copyToTmpRegister(ty, operand);
+                    break :blk try func.copyToTmpRegister(ty, operand);
                 };
                 const dst_reg: Register =
-                    if (self.reuseOperand(inst, ty_op.operand, 0, operand) and operand == .register)
+                    if (func.reuseOperand(inst, ty_op.operand, 0, operand) and operand == .register)
                     operand.register
                 else
-                    (try self.allocRegOrMem(inst, true)).register;
+                    (try func.allocRegOrMem(inst, true)).register;
-                _ = try self.addInst(.{
+                _ = try func.addInst(.{
                     .tag = .pseudo,
                     .ops = .pseudo_not,
                     .data = .{
@@ -1917,59 +1911,59 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
                 break :result .{ .register = dst_reg };
             },
-            .Int => return self.fail("TODO: airNot ints", .{}),
+            .Int => return func.fail("TODO: airNot ints", .{}),
             else => unreachable,
         }
     };
-    return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+    return func.finishAir(inst, result, .{ ty_op.operand, .none, .none });
 }
 fn airMinMax(
-    self: *Self,
+    func: *Func,
     inst: Air.Inst.Index,
     comptime tag: enum {
         max,
         min,
     },
 ) !void {
-    const zcu = self.bin_file.comp.module.?;
-    const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+    const zcu = func.bin_file.comp.module.?;
+    const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
-    const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: {
-        const lhs = try self.resolveInst(bin_op.lhs);
-        const rhs = try self.resolveInst(bin_op.rhs);
-        const lhs_ty = self.typeOf(bin_op.lhs);
-        const rhs_ty = self.typeOf(bin_op.rhs);
+    const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: {
+        const lhs = try func.resolveInst(bin_op.lhs);
+        const rhs = try func.resolveInst(bin_op.rhs);
+        const lhs_ty = func.typeOf(bin_op.lhs);
+        const rhs_ty = func.typeOf(bin_op.rhs);
         const int_info = lhs_ty.intInfo(zcu);
-        if (int_info.bits > 64) return self.fail("TODO: > 64 bit @min", .{});
+        if (int_info.bits > 64) return func.fail("TODO: > 64 bit @min", .{});
        const lhs_reg, const lhs_lock = blk: {
-            if (lhs == .register) break :blk .{ lhs.register, self.register_manager.lockReg(lhs.register) };
+            if (lhs == .register) break :blk .{ lhs.register, func.register_manager.lockReg(lhs.register) };
-            const lhs_reg, const lhs_lock = try self.allocReg(.int);
-            try self.genSetReg(lhs_ty, lhs_reg, lhs);
+            const lhs_reg, const lhs_lock = try func.allocReg(.int);
+            try func.genSetReg(lhs_ty, lhs_reg, lhs);
             break :blk .{ lhs_reg, lhs_lock };
         };
-        defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock);
+        defer if (lhs_lock) |lock| func.register_manager.unlockReg(lock);
        const rhs_reg, const rhs_lock = blk: {
-            if (rhs == .register) break :blk .{ rhs.register, self.register_manager.lockReg(rhs.register) };
+            if (rhs == .register) break :blk .{ rhs.register, func.register_manager.lockReg(rhs.register) };
-            const rhs_reg, const rhs_lock = try self.allocReg(.int);
-            try self.genSetReg(rhs_ty, rhs_reg, rhs);
+            const rhs_reg, const rhs_lock = try func.allocReg(.int);
+            try func.genSetReg(rhs_ty, rhs_reg, rhs);
             break :blk .{ rhs_reg, rhs_lock };
         };
-        defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock);
+        defer if (rhs_lock) |lock| func.register_manager.unlockReg(lock);
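+        // Branchless select: the slt/sltu result below is expanded into a mask, and the
+        // xor/and/xor sequence blends lhs and rhs so no conditional branch is emitted
+        // (register operands are elided by the surrounding diff context).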
-        const mask_reg, const mask_lock = try self.allocReg(.int);
-        defer self.register_manager.unlockReg(mask_lock);
+        const mask_reg, const mask_lock = try func.allocReg(.int);
+        defer func.register_manager.unlockReg(mask_lock);
-        const result_reg, const result_lock = try self.allocReg(.int);
-        defer self.register_manager.unlockReg(result_lock);
+        const result_reg, const result_lock = try func.allocReg(.int);
+        defer func.register_manager.unlockReg(result_lock);
-        _ = try self.addInst(.{
+        _ = try func.addInst(.{
            .tag = if (int_info.signedness == .unsigned) .sltu else .slt,
            .ops = .rrr,
            .data = .{ .r_type = .{
@@ -1979,7 +1973,7 @@ fn airMinMax(
            } },
        });
-        _ = try self.addInst(.{
+        _ = try func.addInst(.{
            .tag = .sub,
            .ops = .rrr,
            .data = .{ .r_type = .{
@@ -1989,7 +1983,7 @@ fn airMinMax(
            } },
        });
-        _ = try self.addInst(.{
+        _ = try func.addInst(.{
            .tag = .xor,
            .ops = .rrr,
            .data = .{ .r_type = .{
@@ -1999,7 +1993,7 @@ fn airMinMax(
            } },
        });
-        _ = try self.addInst(.{
+        _ = try func.addInst(.{
            .tag = .@"and",
            .ops = .rrr,
            .data = .{ .r_type = .{
@@ -2009,7 +2003,7 @@ fn airMinMax(
            } },
        });
-        _ = try self.addInst(.{
+        _ = try func.addInst(.{
            .tag = .xor,
            .ops = .rrr,
            .data = .{ .r_type = .{
@@ -2021,22 +2015,22 @@ fn airMinMax(
        break :result .{ .register = result_reg };
    };
-    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+    return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
 }
-fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
-    const zcu = self.bin_file.comp.module.?;
-    const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
-    const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
+fn airSlice(func: *Func, inst: Air.Inst.Index) !void {
+    const zcu = func.bin_file.comp.module.?;
+    const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+    const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data;
-    const slice_ty = self.typeOfIndex(inst);
-    const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(slice_ty, zcu));
+    const slice_ty = func.typeOfIndex(inst);
+    const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(slice_ty, zcu));
-    const ptr_ty = self.typeOf(bin_op.lhs);
-    try self.genSetMem(.{ .frame = frame_index }, 0, ptr_ty, .{ .air_ref = bin_op.lhs });
+    const ptr_ty = func.typeOf(bin_op.lhs);
+    try func.genSetMem(.{ .frame = frame_index }, 0, ptr_ty, .{ .air_ref = bin_op.lhs });
-    const len_ty = self.typeOf(bin_op.rhs);
-    try self.genSetMem(
+    const len_ty = func.typeOf(bin_op.rhs);
+    try func.genSetMem(
        .{ .frame = frame_index },
        @intCast(ptr_ty.abiSize(zcu)),
        len_ty,
@@ -2044,31 +2038,31 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
    );
    const result = MCValue{ .load_frame = .{ .index = frame_index } };
-    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+    return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
 }
-fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
-    const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
-    const lhs = try self.resolveInst(bin_op.lhs);
-    const rhs = try self.resolveInst(bin_op.rhs);
-    const lhs_ty = self.typeOf(bin_op.lhs);
-    const rhs_ty = self.typeOf(bin_op.rhs);
+fn airBinOp(func: *Func, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
+    const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+    const lhs = try func.resolveInst(bin_op.lhs);
+    const rhs = try func.resolveInst(bin_op.rhs);
+    const lhs_ty = func.typeOf(bin_op.lhs);
+    const rhs_ty = func.typeOf(bin_op.rhs);
-    const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: {
-        break :result try self.binOp(tag, lhs, lhs_ty, rhs, rhs_ty);
+    const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: {
+        break :result try func.binOp(tag, lhs, lhs_ty, rhs, rhs_ty);
    };
-    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+    return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
 }
 fn binOp(
-    self: *Self,
+    func: *Func,
    tag: Air.Inst.Tag,
    lhs: MCValue,
    lhs_ty: Type,
    rhs: MCValue,
    rhs_ty: Type,
 ) InnerError!MCValue {
-    const zcu = self.bin_file.comp.module.?;
+    const zcu = func.bin_file.comp.module.?;
    switch (tag) {
        // Arithmetic operations on integers and floats
@@ -2087,23 +2081,23 @@ fn binOp(
            switch (lhs_ty.zigTypeTag(zcu)) {
                .Float => {
                    const float_bits = lhs_ty.floatBits(zcu.getTarget());
-                    const float_reg_bits: u32 = if (self.hasFeature(.d)) 64 else 32;
+                    const float_reg_bits: u32 = if (func.hasFeature(.d)) 64 else 32;
                    if (float_bits <= float_reg_bits) {
-                        return self.binOpFloat(tag, lhs, lhs_ty, rhs, rhs_ty);
+                        return func.binOpFloat(tag, lhs, lhs_ty, rhs, rhs_ty);
                    } else {
-                        return self.fail("TODO: binary operations for floats with bits > {d}", .{float_reg_bits});
+                        return func.fail("TODO: binary operations for floats with bits > {d}", .{float_reg_bits});
                    }
                },
-                .Vector => return self.fail("TODO binary operations on vectors", .{}),
+                .Vector => return func.fail("TODO binary operations on vectors", .{}),
                .Int, .Enum, .ErrorSet => {
                    const int_info = lhs_ty.intInfo(zcu);
                    if (int_info.bits <= 64) {
-                        return self.binOpRegister(tag, lhs, lhs_ty, rhs, rhs_ty);
+                        return func.binOpRegister(tag, lhs, lhs_ty, rhs, rhs_ty);
                    } else {
-                        return self.fail("TODO binary operations on int with bits > 64", .{});
+                        return func.fail("TODO binary operations on int with bits > 64", .{});
                    }
                },
-                else => |x| return self.fail("TOOD: binOp {s}", .{@tagName(x)}),
+                else => |x| return func.fail("TODO: binOp {s}", .{@tagName(x)}),
            }
        },
@@ -2126,9 +2120,9 @@ fn binOp(
                    else => unreachable,
                };
-                return try self.binOpRegister(base_tag, lhs, lhs_ty, rhs, rhs_ty);
+                return try func.binOpRegister(base_tag, lhs, lhs_ty, rhs, rhs_ty);
            } else {
-                const offset = try self.binOp(
+                const offset = try func.binOp(
                    .mul,
                    rhs,
                    Type.usize,
@@ -2136,7 +2130,7 @@ fn binOp(
                    Type.usize,
                );
-                const addr = try self.binOp(
+                const addr = try func.binOp(
                    tag,
                    lhs,
                    Type.manyptr_u8,
@@ -2155,39 +2149,39 @@ fn binOp(
        .shl,
        => {
            switch (lhs_ty.zigTypeTag(zcu)) {
-                .Float => return self.fail("TODO binary operations on floats", .{}),
-                .Vector => return self.fail("TODO binary operations on vectors", .{}),
+                .Float => return func.fail("TODO binary operations on floats", .{}),
+                .Vector => return func.fail("TODO binary operations on vectors", .{}),
                .Int => {
                    const int_info = lhs_ty.intInfo(zcu);
                    if (int_info.bits <= 64) {
-                        return self.binOpRegister(tag, lhs, lhs_ty, rhs, rhs_ty);
+                        return func.binOpRegister(tag, lhs, lhs_ty, rhs, rhs_ty);
                    } else {
-                        return self.fail("TODO binary operations on int with bits > 64", .{});
+                        return func.fail("TODO binary operations on int with bits > 64", .{});
                    }
                },
                else => unreachable,
            }
        },
-        else => return self.fail("TODO binOp {}", .{tag}),
+        else => return func.fail("TODO binOp {}", .{tag}),
    }
 }
 fn binOpRegister(
-    self: *Self,
+    func: *Func,
    tag: Air.Inst.Tag,
    lhs: MCValue,
    lhs_ty: Type,
    rhs: MCValue,
    rhs_ty: Type,
 ) !MCValue {
-    const lhs_reg, const lhs_lock = try self.promoteReg(lhs_ty, lhs, .{ .zero = true });
-    defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock);
+    const lhs_reg, const lhs_lock = try func.promoteReg(lhs_ty, lhs, .{ .zero = true });
+    defer if (lhs_lock) |lock| func.register_manager.unlockReg(lock);
-    const rhs_reg, const rhs_lock = try self.promoteReg(rhs_ty, rhs, .{ .zero = true });
-    defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock);
+    const rhs_reg, const rhs_lock = try func.promoteReg(rhs_ty, rhs, .{ .zero = true });
+    defer if (rhs_lock) |lock| func.register_manager.unlockReg(lock);
-    const dest_reg, const dest_lock = try self.allocReg(.int);
-    defer self.register_manager.unlockReg(dest_lock);
+    const dest_reg, const dest_lock = try func.allocReg(.int);
+    defer func.register_manager.unlockReg(dest_lock);
    const mir_tag: Mir.Inst.Tag = switch (tag) {
        .add => .add,
@@ -2205,7 +2199,7 @@ fn binOpRegister(
        .cmp_lte,
        => .pseudo,
-        else => return self.fail("TODO: binOpRegister {s}", .{@tagName(tag)}),
+        else => return func.fail("TODO: binOpRegister {s}", .{@tagName(tag)}),
    };
    switch (mir_tag) {
@@ -2215,7 +2209,7 @@ fn binOpRegister(
        .sllw,
        .srlw,
        => {
-            _ = try self.addInst(.{
+            _ = try func.addInst(.{
                .tag = mir_tag,
                .ops = .rrr,
                .data = .{
@@ -2240,7 +2234,7 @@ fn binOpRegister(
                else => unreachable,
            };
-            _ = try self.addInst(.{
+            _ = try func.addInst(.{
                .tag = .pseudo,
                .ops = pseudo_op,
                .data = .{
@@ -2257,7 +2251,7 @@ fn binOpRegister(
                            .cmp_lte => .lte,
                            else => unreachable,
                        },
-                        .size = self.memSize(lhs_ty),
+                        .size = func.memSize(lhs_ty),
                    },
                },
            });
@@ -2270,21 +2264,21 @@ fn binOpRegister(
 }
 fn binOpFloat(
-    self: *Self,
+    func: *Func,
    tag: Air.Inst.Tag,
    lhs: MCValue,
    lhs_ty: Type,
    rhs: MCValue,
    rhs_ty: Type,
 ) !MCValue {
-    const zcu = self.bin_file.comp.module.?;
+    const zcu = func.bin_file.comp.module.?;
    const float_bits = lhs_ty.floatBits(zcu.getTarget());
-    const lhs_reg, const lhs_lock = try self.promoteReg(lhs_ty, lhs, .{});
-    defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock);
+    const lhs_reg, const lhs_lock = try func.promoteReg(lhs_ty, lhs, .{});
+    defer if (lhs_lock) |lock| func.register_manager.unlockReg(lock);
-    const rhs_reg, const rhs_lock = try self.promoteReg(rhs_ty, rhs, .{});
-    defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock);
+    const rhs_reg, const rhs_lock = try func.promoteReg(rhs_ty, rhs, .{});
+    defer if (rhs_lock) |lock| func.register_manager.unlockReg(lock);
    const mir_tag: Mir.Inst.Tag = switch (tag) {
        .add => if (float_bits == 32) .fadds else .faddd,
@@ -2300,7 +2294,7 @@ fn binOpFloat(
        .cmp_lte,
        => .pseudo,
-        else => return self.fail("TODO: binOpFloat mir_tag {s}", .{@tagName(tag)}),
+        else => return func.fail("TODO: binOpFloat mir_tag {s}", .{@tagName(tag)}),
    };
    const return_class: abi.RegisterClass = switch (tag) {
@@ -2320,8 +2314,8 @@ fn binOpFloat(
        else => unreachable,
    };
-    const dest_reg, const dest_lock = try self.allocReg(return_class);
-    defer self.register_manager.unlockReg(dest_lock);
+    const dest_reg, const dest_lock = try func.allocReg(return_class);
+    defer func.register_manager.unlockReg(dest_lock);
    switch (tag) {
        .add,
@@ -2329,7 +2323,7 @@ fn binOpFloat(
        .mul,
        .div_float,
        => {
-            _ = try self.addInst(.{
+            _ = try func.addInst(.{
                .tag = mir_tag,
                .ops = .rrr,
                .data = .{ .r_type = .{
@@ -2347,7 +2341,7 @@ fn binOpFloat(
        .cmp_lt,
        .cmp_lte,
        => {
-            _ = try self.addInst(.{
+            _ = try func.addInst(.{
                .tag = .pseudo,
                .ops = .pseudo_compare,
                .data = .{
@@ -2364,7 +2358,7 @@ fn binOpFloat(
                            .cmp_lte => .lte,
                            else => unreachable,
                        },
-                        .size = self.memSize(lhs_ty),
+                        .size = func.memSize(lhs_ty),
                    },
                },
            });
@@ -2376,120 +2370,120 @@ fn binOpFloat(
    return MCValue{ .register = dest_reg };
 }
-fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
-    const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
-    const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
-    const lhs = try self.resolveInst(bin_op.lhs);
-    const rhs = try self.resolveInst(bin_op.rhs);
-    const lhs_ty = self.typeOf(bin_op.lhs);
-    const rhs_ty = self.typeOf(bin_op.rhs);
+fn airPtrArithmetic(func: *Func, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
+    const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+    const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data;
+    const lhs = try func.resolveInst(bin_op.lhs);
+    const rhs = try func.resolveInst(bin_op.rhs);
+    const lhs_ty = func.typeOf(bin_op.lhs);
+    const rhs_ty = func.typeOf(bin_op.rhs);
-    const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: {
-        break :result try self.binOp(tag, lhs, lhs_ty, rhs, rhs_ty);
+    const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: {
+        break :result try func.binOp(tag, lhs, lhs_ty, rhs, rhs_ty);
    };
-    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+    return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
 }
-fn airAddWrap(self: *Self, inst: Air.Inst.Index) !void {
-    const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
-    const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement addwrap for {}", .{self.target.cpu.arch});
-    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+fn airAddWrap(func: *Func, inst: Air.Inst.Index) !void {
+    const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+    const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement addwrap for {}", .{func.target.cpu.arch});
+    return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
 }
-fn airAddSat(self: *Self, inst: Air.Inst.Index) !void {
-    const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
-    const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement add_sat for {}", .{self.target.cpu.arch});
-    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+fn airAddSat(func: *Func, inst: Air.Inst.Index) !void {
+    const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+    const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement add_sat for {}", .{func.target.cpu.arch});
+    return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
 }
-fn airSubWrap(self: *Self, inst: Air.Inst.Index) !void {
-    const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
-    const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: {
+fn airSubWrap(func: *Func, inst: Air.Inst.Index) !void {
+    const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+    const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: {
        // RISC-V arithmetic instructions already wrap, so this is simply a sub binOp with
        // no overflow checks.
-        const lhs = try self.resolveInst(bin_op.lhs);
-        const rhs = try self.resolveInst(bin_op.rhs);
-        const lhs_ty = self.typeOf(bin_op.lhs);
-        const rhs_ty = self.typeOf(bin_op.rhs);
+        const lhs = try func.resolveInst(bin_op.lhs);
+        const rhs = try func.resolveInst(bin_op.rhs);
+        const lhs_ty = func.typeOf(bin_op.lhs);
+        const rhs_ty = func.typeOf(bin_op.rhs);
-        break :result try self.binOp(.sub, lhs, lhs_ty, rhs, rhs_ty);
+        break :result try func.binOp(.sub, lhs, lhs_ty, rhs, rhs_ty);
    };
-    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+    return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
 }
-fn airSubSat(self: *Self, inst: Air.Inst.Index) !void {
-    const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
-    const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement sub_sat for {}", .{self.target.cpu.arch});
-    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+fn airSubSat(func: *Func, inst: Air.Inst.Index) !void {
+    const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+    const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement sub_sat for {}", .{func.target.cpu.arch});
+    return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
 }
-fn airMul(self: *Self, inst: Air.Inst.Index) !void {
-    const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
-    const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: {
-        const lhs = try self.resolveInst(bin_op.lhs);
-        const rhs = try self.resolveInst(bin_op.rhs);
-        const lhs_ty = self.typeOf(bin_op.lhs);
-        const rhs_ty = self.typeOf(bin_op.rhs);
+fn airMul(func: *Func, inst: Air.Inst.Index) !void {
+    const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+    const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: {
+        const lhs = try func.resolveInst(bin_op.lhs);
+        const rhs = try func.resolveInst(bin_op.rhs);
+        const lhs_ty = func.typeOf(bin_op.lhs);
+        const rhs_ty = func.typeOf(bin_op.rhs);
-        break :result try self.binOp(.mul, lhs, lhs_ty, rhs, rhs_ty);
+        break :result try func.binOp(.mul, lhs, lhs_ty, rhs, rhs_ty);
    };
-    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+    return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
 }
-fn airDiv(self: *Self, inst: Air.Inst.Index) !void {
-    const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
-    const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: {
-        const lhs = try self.resolveInst(bin_op.lhs);
-        const rhs = try self.resolveInst(bin_op.rhs);
-        const lhs_ty = self.typeOf(bin_op.lhs);
-        const rhs_ty = self.typeOf(bin_op.rhs);
+fn airDiv(func: *Func, inst: Air.Inst.Index) !void {
+    const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+    const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: {
+        const lhs = try func.resolveInst(bin_op.lhs);
+        const rhs = try func.resolveInst(bin_op.rhs);
+        const lhs_ty = func.typeOf(bin_op.lhs);
+        const rhs_ty = func.typeOf(bin_op.rhs);
-        break :result try self.binOp(.div_float, lhs, lhs_ty, rhs, rhs_ty);
+        break :result try func.binOp(.div_float, lhs, lhs_ty, rhs, rhs_ty);
    };
-    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+    return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
 }
-fn airMulWrap(self: *Self, inst: Air.Inst.Index) !void {
-    const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
-    const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement mulwrap for {}", .{self.target.cpu.arch});
-    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+fn airMulWrap(func: *Func, inst: Air.Inst.Index) !void {
+    const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+    const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement mulwrap for {}", .{func.target.cpu.arch});
+    return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
 }
-fn airMulSat(self: *Self, inst: Air.Inst.Index) !void {
-    const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
-    const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement mul_sat for {}", .{self.target.cpu.arch});
-    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+fn airMulSat(func: *Func, inst: Air.Inst.Index) !void {
+    const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+    const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement mul_sat for {}", .{func.target.cpu.arch});
+    return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
 }
-fn airAddWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
-    const zcu = self.bin_file.comp.module.?;
-    const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
-    const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
+fn airAddWithOverflow(func: *Func, inst: Air.Inst.Index) !void {
+    const zcu = func.bin_file.comp.module.?;
+    const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+    const extra = func.air.extraData(Air.Bin, ty_pl.payload).data;
-    const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: {
-        const lhs = try self.resolveInst(extra.lhs);
-        const rhs = try self.resolveInst(extra.rhs);
-        const lhs_ty = self.typeOf(extra.lhs);
-        const rhs_ty = self.typeOf(extra.rhs);
+    const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: {
+        const lhs = try func.resolveInst(extra.lhs);
+        const rhs = try func.resolveInst(extra.rhs);
+        const lhs_ty = func.typeOf(extra.lhs);
+        const rhs_ty = func.typeOf(extra.rhs);
        const int_info = lhs_ty.intInfo(zcu);
-        const tuple_ty = self.typeOfIndex(inst);
-        const result_mcv = try self.allocRegOrMem(inst, false);
+        const tuple_ty = func.typeOfIndex(inst);
+        const result_mcv = try func.allocRegOrMem(inst, false);
        const offset = result_mcv.load_frame;
        if (int_info.bits >= 8 and math.isPowerOfTwo(int_info.bits)) {
-            const add_result = try self.binOp(.add, lhs, lhs_ty, rhs, rhs_ty);
-            const add_result_reg = try self.copyToTmpRegister(lhs_ty, add_result);
-            const add_result_reg_lock = self.register_manager.lockRegAssumeUnused(add_result_reg);
-            defer self.register_manager.unlockReg(add_result_reg_lock);
+            const add_result = try func.binOp(.add, lhs, lhs_ty, rhs, rhs_ty);
+            const add_result_reg = try func.copyToTmpRegister(lhs_ty, add_result);
+            const add_result_reg_lock = func.register_manager.lockRegAssumeUnused(add_result_reg);
+            defer func.register_manager.unlockReg(add_result_reg_lock);
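+            // Overflow check: the slli then srli/srai pair below truncates the 64-bit sum back
+            // to the operand width (zero- or sign-extending it); if that extended value no
+            // longer equals the raw sum, the addition overflowed.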
const add_result_reg = try func.copyToTmpRegister(lhs_ty, add_result); + const add_result_reg_lock = func.register_manager.lockRegAssumeUnused(add_result_reg); + defer func.register_manager.unlockReg(add_result_reg_lock); const shift_amount: u6 = @intCast(Type.usize.bitSize(zcu) - int_info.bits); - const shift_reg, const shift_lock = try self.allocReg(.int); - defer self.register_manager.unlockReg(shift_lock); + const shift_reg, const shift_lock = try func.allocReg(.int); + defer func.register_manager.unlockReg(shift_lock); - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = .slli, .ops = .rri, .data = .{ @@ -2501,7 +2495,7 @@ fn airAddWithOverflow(self: *Self, inst: Air.Inst.Index) !void { }, }); - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = if (int_info.signedness == .unsigned) .srli else .srai, .ops = .rri, .data = .{ @@ -2513,21 +2507,21 @@ fn airAddWithOverflow(self: *Self, inst: Air.Inst.Index) !void { }, }); - try self.genSetMem( + try func.genSetMem( .{ .frame = offset.index }, offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(0, zcu))), lhs_ty, add_result, ); - const overflow_mcv = try self.binOp( + const overflow_mcv = try func.binOp( .cmp_neq, .{ .register = shift_reg }, lhs_ty, .{ .register = add_result_reg }, lhs_ty, ); - try self.genSetMem( + try func.genSetMem( .{ .frame = offset.index }, offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(1, zcu))), Type.u1, @@ -2536,50 +2530,50 @@ fn airAddWithOverflow(self: *Self, inst: Air.Inst.Index) !void { break :result result_mcv; } else { - return self.fail("TODO: less than 8 bit or non-pow 2 addition", .{}); + return func.fail("TODO: less than 8 bit or non-pow 2 addition", .{}); } }; - return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none }); + return func.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none }); } -fn airSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { - const zcu = self.bin_file.comp.module.?; - const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; - const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; +fn airSubWithOverflow(func: *Func, inst: Air.Inst.Index) !void { + const zcu = func.bin_file.comp.module.?; + const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; + const extra = func.air.extraData(Air.Bin, ty_pl.payload).data; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { - const lhs = try self.resolveInst(extra.lhs); - const rhs = try self.resolveInst(extra.rhs); - const lhs_ty = self.typeOf(extra.lhs); - const rhs_ty = self.typeOf(extra.rhs); + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { + const lhs = try func.resolveInst(extra.lhs); + const rhs = try func.resolveInst(extra.rhs); + const lhs_ty = func.typeOf(extra.lhs); + const rhs_ty = func.typeOf(extra.rhs); const int_info = lhs_ty.intInfo(zcu); if (!math.isPowerOfTwo(int_info.bits) or !(int_info.bits >= 8)) { - return self.fail("TODO: airSubWithOverflow non-power of 2 and less than 8 bits", .{}); + return func.fail("TODO: airSubWithOverflow non-power of 2 and less than 8 bits", .{}); } - const tuple_ty = self.typeOfIndex(inst); - const result_mcv = try self.allocRegOrMem(inst, false); + const tuple_ty = func.typeOfIndex(inst); + const result_mcv = try func.allocRegOrMem(inst, false); const offset = result_mcv.load_frame; - const lhs_reg, const lhs_lock = try self.promoteReg(lhs_ty, lhs, .{}); - defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock); + const 
lhs_reg, const lhs_lock = try func.promoteReg(lhs_ty, lhs, .{}); + defer if (lhs_lock) |lock| func.register_manager.unlockReg(lock); - const rhs_reg, const rhs_lock = try self.promoteReg(rhs_ty, rhs, .{}); - defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock); + const rhs_reg, const rhs_lock = try func.promoteReg(rhs_ty, rhs, .{}); + defer if (rhs_lock) |lock| func.register_manager.unlockReg(lock); - const dest_reg, const dest_lock = try self.allocReg(.int); - defer self.register_manager.unlockReg(dest_lock); + const dest_reg, const dest_lock = try func.allocReg(.int); + defer func.register_manager.unlockReg(dest_lock); switch (int_info.signedness) { - .unsigned => return self.fail("TODO: airSubWithOverflow unsigned", .{}), + .unsigned => return func.fail("TODO: airSubWithOverflow unsigned", .{}), .signed => { switch (int_info.bits) { 64 => { // result - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = .sub, .ops = .rrr, .data = .{ .r_type = .{ @@ -2589,7 +2583,7 @@ fn airSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { } }, }); - try self.genSetMem( + try func.genSetMem( .{ .frame = offset.index }, offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(0, zcu))), lhs_ty, @@ -2597,9 +2591,9 @@ fn airSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { ); // overflow check - const overflow_reg = try self.copyToTmpRegister(Type.usize, .{ .immediate = 0 }); + const overflow_reg = try func.copyToTmpRegister(Type.usize, .{ .immediate = 0 }); - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = .slt, .ops = .rrr, .data = .{ .r_type = .{ @@ -2609,7 +2603,7 @@ fn airSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { } }, }); - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = .slt, .ops = .rrr, .data = .{ .r_type = .{ @@ -2619,7 +2613,7 @@ fn airSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { } }, }); - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = .xor, .ops = .rrr, .data = .{ .r_type = .{ @@ -2629,7 +2623,7 @@ fn airSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { } }, }); - const overflow_mcv = try self.binOp( + const overflow_mcv = try func.binOp( .cmp_neq, .{ .register = overflow_reg }, Type.usize, @@ -2637,7 +2631,7 @@ fn airSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { Type.usize, ); - try self.genSetMem( + try func.genSetMem( .{ .frame = offset.index }, offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(1, zcu))), Type.u1, @@ -2646,47 +2640,47 @@ fn airSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { break :result result_mcv; }, - else => |int_bits| return self.fail("TODO: airSubWithOverflow signed {}", .{int_bits}), + else => |int_bits| return func.fail("TODO: airSubWithOverflow signed {}", .{int_bits}), } }, } }; - return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none }); + return func.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none }); } -fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { - //const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)]; - const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; - const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; - const zcu = self.bin_file.comp.module.?; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { - const lhs = try self.resolveInst(extra.lhs); - const rhs = try self.resolveInst(extra.rhs); - const lhs_ty = self.typeOf(extra.lhs); - const rhs_ty = self.typeOf(extra.rhs); +fn airMulWithOverflow(func: *Func, 
inst: Air.Inst.Index) !void {
+    //const tag = func.air.instructions.items(.tag)[@intFromEnum(inst)];
+    const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+    const extra = func.air.extraData(Air.Bin, ty_pl.payload).data;
+    const zcu = func.bin_file.comp.module.?;
+    const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: {
+        const lhs = try func.resolveInst(extra.lhs);
+        const rhs = try func.resolveInst(extra.rhs);
+        const lhs_ty = func.typeOf(extra.lhs);
+        const rhs_ty = func.typeOf(extra.rhs);
 
         switch (lhs_ty.zigTypeTag(zcu)) {
-            else => |x| return self.fail("TODO: airMulWithOverflow {s}", .{@tagName(x)}),
+            else => |x| return func.fail("TODO: airMulWithOverflow {s}", .{@tagName(x)}),
             .Int => {
                 assert(lhs_ty.eql(rhs_ty, zcu));
                 const int_info = lhs_ty.intInfo(zcu);
                 switch (int_info.bits) {
                     1...32 => {
-                        if (self.hasFeature(.m)) {
-                            const dest = try self.binOp(.mul, lhs, lhs_ty, rhs, rhs_ty);
+                        if (func.hasFeature(.m)) {
+                            const dest = try func.binOp(.mul, lhs, lhs_ty, rhs, rhs_ty);
 
-                            const add_result_lock = self.register_manager.lockRegAssumeUnused(dest.register);
-                            defer self.register_manager.unlockReg(add_result_lock);
+                            const add_result_lock = func.register_manager.lockRegAssumeUnused(dest.register);
+                            defer func.register_manager.unlockReg(add_result_lock);
 
-                            const tuple_ty = self.typeOfIndex(inst);
+                            const tuple_ty = func.typeOfIndex(inst);
 
-                            const result_mcv = try self.allocRegOrMem(inst, true);
+                            const result_mcv = try func.allocRegOrMem(inst, true);
                             const result_off: i32 = @intCast(tuple_ty.structFieldOffset(0, zcu));
                             const overflow_off: i32 = @intCast(tuple_ty.structFieldOffset(1, zcu));
 
-                            try self.genCopy(
+                            try func.genCopy(
                                 lhs_ty,
                                 result_mcv.offset(result_off),
                                 dest,
@@ -2698,13 +2692,13 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                                 1...8 => {
                                     const max_val = std.math.pow(u16, 2, int_info.bits) - 1;
 
-                                    const add_reg, const add_lock = try self.promoteReg(lhs_ty, lhs, .{});
-                                    defer if (add_lock) |lock| self.register_manager.unlockReg(lock);
+                                    const add_reg, const add_lock = try func.promoteReg(lhs_ty, lhs, .{});
+                                    defer if (add_lock) |lock| func.register_manager.unlockReg(lock);
 
-                                    const overflow_reg, const overflow_lock = try self.allocReg(.int);
-                                    defer self.register_manager.unlockReg(overflow_lock);
+                                    const overflow_reg, const overflow_lock = try func.allocReg(.int);
+                                    defer func.register_manager.unlockReg(overflow_lock);
 
-                                    _ = try self.addInst(.{
+                                    _ = try func.addInst(.{
                                         .tag = .andi,
                                         .ops = .rri,
                                         .data = .{ .i_type = .{
@@ -2714,7 +2708,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                                         } },
                                     });
 
-                                    const overflow_mcv = try self.binOp(
+                                    const overflow_mcv = try func.binOp(
                                         .cmp_neq,
                                         .{ .register = overflow_reg },
                                         lhs_ty,
@@ -2722,7 +2716,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                                         lhs_ty,
                                     );
 
-                                    try self.genCopy(
+                                    try func.genCopy(
                                         lhs_ty,
                                         result_mcv.offset(overflow_off),
                                         overflow_mcv,
@@ -2731,63 +2725,63 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                                     break :result result_mcv;
                                 },
-                                else => return self.fail("TODO: airMulWithOverflow check for size {d}", .{int_info.bits}),
+                                else => return func.fail("TODO: airMulWithOverflow check for size {d}", .{int_info.bits}),
                             }
                         } else {
-                            return self.fail("TODO: airMulWithOverflow calculate carry for signed addition", .{});
+                            return func.fail("TODO: airMulWithOverflow calculate carry for signed multiplication", .{});
                         }
                     } else {
-                        return self.fail("TODO: airMulWithOverflow with < 8 bits or non-pow of 2", .{});
+                        return func.fail("TODO: airMulWithOverflow with < 8 bits or non-pow of 2", .{});
                     }
                 } else {
-                    return self.fail("TODO: emulate mul for targets without M feature", .{});
+                    return func.fail("TODO: emulate mul for targets without M feature", .{});
                 }
             },
-            else => return self.fail("TODO: airMulWithOverflow larger than 32-bit mul", .{}),
+            else => return func.fail("TODO: airMulWithOverflow larger than 32-bit mul", .{}),
         }
     },
     }
     };
 
-    return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none });
+    return func.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none });
 }
 
-fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
+fn airShlWithOverflow(func: *Func, inst: Air.Inst.Index) !void {
     _ = inst;
-    return self.fail("TODO implement airShlWithOverflow for {}", .{self.target.cpu.arch});
+    return func.fail("TODO implement airShlWithOverflow for {}", .{func.target.cpu.arch});
 }
 
-fn airRem(self: *Self, inst: Air.Inst.Index) !void {
-    const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
-    const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement rem for {}", .{self.target.cpu.arch});
-    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+fn airRem(func: *Func, inst: Air.Inst.Index) !void {
+    const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+    const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement rem for {}", .{func.target.cpu.arch});
+    return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
 }
 
-fn airMod(self: *Self, inst: Air.Inst.Index) !void {
-    const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
-    const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement zcu for {}", .{self.target.cpu.arch});
-    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+fn airMod(func: *Func, inst: Air.Inst.Index) !void {
+    const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+    const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement mod for {}", .{func.target.cpu.arch});
+    return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
 }
 
-fn airBitAnd(self: *Self, inst: Air.Inst.Index) !void {
-    const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
-    const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: {
-        const lhs = try self.resolveInst(bin_op.lhs);
-        const rhs = try self.resolveInst(bin_op.rhs);
+fn airBitAnd(func: *Func, inst: Air.Inst.Index) !void {
+    const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+    const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: {
+        const lhs = try func.resolveInst(bin_op.lhs);
+        const rhs = try func.resolveInst(bin_op.rhs);
 
-        const lhs_ty = self.typeOf(bin_op.lhs);
-        const rhs_ty = self.typeOf(bin_op.rhs);
+        const lhs_ty = func.typeOf(bin_op.lhs);
+        const rhs_ty = func.typeOf(bin_op.rhs);
 
-        const lhs_reg, const lhs_lock = try self.promoteReg(lhs_ty, lhs, .{});
-        defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock);
+        const lhs_reg, const lhs_lock = try func.promoteReg(lhs_ty, lhs, .{});
+        defer if (lhs_lock) |lock| func.register_manager.unlockReg(lock);
 
-        const rhs_reg, const rhs_lock = try self.promoteReg(rhs_ty, rhs, .{});
-        defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock);
+        const 
rhs_reg, const rhs_lock = try func.promoteReg(rhs_ty, rhs, .{}); + defer if (rhs_lock) |lock| func.register_manager.unlockReg(lock); - const dest_reg, const dest_lock = try self.allocReg(.int); - defer self.register_manager.unlockReg(dest_lock); + const dest_reg, const dest_lock = try func.allocReg(.int); + defer func.register_manager.unlockReg(dest_lock); - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = .@"and", .ops = .rrr, .data = .{ .r_type = .{ @@ -2799,28 +2793,28 @@ fn airBitAnd(self: *Self, inst: Air.Inst.Index) !void { break :result .{ .register = dest_reg }; }; - return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); + return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } -fn airBitOr(self: *Self, inst: Air.Inst.Index) !void { - const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { - const lhs = try self.resolveInst(bin_op.lhs); - const rhs = try self.resolveInst(bin_op.rhs); +fn airBitOr(func: *Func, inst: Air.Inst.Index) !void { + const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { + const lhs = try func.resolveInst(bin_op.lhs); + const rhs = try func.resolveInst(bin_op.rhs); - const lhs_ty = self.typeOf(bin_op.lhs); - const rhs_ty = self.typeOf(bin_op.rhs); + const lhs_ty = func.typeOf(bin_op.lhs); + const rhs_ty = func.typeOf(bin_op.rhs); - const lhs_reg, const lhs_lock = try self.promoteReg(lhs_ty, lhs, .{}); - defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock); + const lhs_reg, const lhs_lock = try func.promoteReg(lhs_ty, lhs, .{}); + defer if (lhs_lock) |lock| func.register_manager.unlockReg(lock); - const rhs_reg, const rhs_lock = try self.promoteReg(rhs_ty, rhs, .{}); - defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock); + const rhs_reg, const rhs_lock = try func.promoteReg(rhs_ty, rhs, .{}); + defer if (rhs_lock) |lock| func.register_manager.unlockReg(lock); - const dest_reg, const dest_lock = try self.allocReg(.int); - defer self.register_manager.unlockReg(dest_lock); + const dest_reg, const dest_lock = try func.allocReg(.int); + defer func.register_manager.unlockReg(dest_lock); - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = .@"or", .ops = .rrr, .data = .{ .r_type = .{ @@ -2832,72 +2826,72 @@ fn airBitOr(self: *Self, inst: Air.Inst.Index) !void { break :result .{ .register = dest_reg }; }; - return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); + return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } -fn airXor(self: *Self, inst: Air.Inst.Index) !void { - const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement xor for {}", .{self.target.cpu.arch}); - return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); +fn airXor(func: *Func, inst: Air.Inst.Index) !void { + const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement xor for {}", .{func.target.cpu.arch}); + return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } -fn airShl(self: *Self, inst: Air.Inst.Index) !void { - const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; 
- const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { - const lhs = try self.resolveInst(bin_op.lhs); - const rhs = try self.resolveInst(bin_op.rhs); - const lhs_ty = self.typeOf(bin_op.lhs); - const rhs_ty = self.typeOf(bin_op.rhs); +fn airShl(func: *Func, inst: Air.Inst.Index) !void { + const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { + const lhs = try func.resolveInst(bin_op.lhs); + const rhs = try func.resolveInst(bin_op.rhs); + const lhs_ty = func.typeOf(bin_op.lhs); + const rhs_ty = func.typeOf(bin_op.rhs); - break :result try self.binOp(.shl, lhs, lhs_ty, rhs, rhs_ty); + break :result try func.binOp(.shl, lhs, lhs_ty, rhs, rhs_ty); }; - return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); + return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } -fn airShlSat(self: *Self, inst: Air.Inst.Index) !void { - const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement shl_sat for {}", .{self.target.cpu.arch}); - return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); +fn airShlSat(func: *Func, inst: Air.Inst.Index) !void { + const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement shl_sat for {}", .{func.target.cpu.arch}); + return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } -fn airShr(self: *Self, inst: Air.Inst.Index) !void { - const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { - const lhs = try self.resolveInst(bin_op.lhs); - const rhs = try self.resolveInst(bin_op.rhs); - const lhs_ty = self.typeOf(bin_op.lhs); - const rhs_ty = self.typeOf(bin_op.rhs); +fn airShr(func: *Func, inst: Air.Inst.Index) !void { + const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { + const lhs = try func.resolveInst(bin_op.lhs); + const rhs = try func.resolveInst(bin_op.rhs); + const lhs_ty = func.typeOf(bin_op.lhs); + const rhs_ty = func.typeOf(bin_op.rhs); - break :result try self.binOp(.shr, lhs, lhs_ty, rhs, rhs_ty); + break :result try func.binOp(.shr, lhs, lhs_ty, rhs, rhs_ty); }; - return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); + return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } -fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement .optional_payload for {}", .{self.target.cpu.arch}); - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); +fn airOptionalPayload(func: *Func, inst: Air.Inst.Index) !void { + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement .optional_payload for {}", .{func.target.cpu.arch}); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airOptionalPayloadPtr(self: *Self, inst: 
Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement .optional_payload_ptr for {}", .{self.target.cpu.arch}); - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); +fn airOptionalPayloadPtr(func: *Func, inst: Air.Inst.Index) !void { + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement .optional_payload_ptr for {}", .{func.target.cpu.arch}); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement .optional_payload_ptr_set for {}", .{self.target.cpu.arch}); - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); +fn airOptionalPayloadPtrSet(func: *Func, inst: Air.Inst.Index) !void { + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement .optional_payload_ptr_set for {}", .{func.target.cpu.arch}); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const zcu = self.bin_file.comp.module.?; - const err_union_ty = self.typeOf(ty_op.operand); +fn airUnwrapErrErr(func: *Func, inst: Air.Inst.Index) !void { + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const zcu = func.bin_file.comp.module.?; + const err_union_ty = func.typeOf(ty_op.operand); const err_ty = err_union_ty.errorUnionSet(zcu); const payload_ty = err_union_ty.errorUnionPayload(zcu); - const operand = try self.resolveInst(ty_op.operand); + const operand = try func.resolveInst(ty_op.operand); const result: MCValue = result: { if (err_ty.errorSetIsEmpty(zcu)) { @@ -2912,13 +2906,13 @@ fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void { switch (operand) { .register => |reg| { - const eu_lock = self.register_manager.lockReg(reg); - defer if (eu_lock) |lock| self.register_manager.unlockReg(lock); + const eu_lock = func.register_manager.lockReg(reg); + defer if (eu_lock) |lock| func.register_manager.unlockReg(lock); - var result = try self.copyToNewRegister(inst, operand); + var result = try func.copyToNewRegister(inst, operand); if (err_off > 0) { - result = try self.binOp( + result = try func.binOp( .shr, result, err_union_ty, @@ -2932,27 +2926,27 @@ fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void { .index = frame_addr.index, .off = frame_addr.off + @as(i32, @intCast(err_off)), } }, - else => return self.fail("TODO implement unwrap_err_err for {}", .{operand}), + else => return func.fail("TODO implement unwrap_err_err for {}", .{operand}), } }; - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const operand_ty = self.typeOf(ty_op.operand); - const operand = try 
self.resolveInst(ty_op.operand); - const result = try self.genUnwrapErrUnionPayloadMir(operand_ty, operand); - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); +fn airUnwrapErrPayload(func: *Func, inst: Air.Inst.Index) !void { + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const operand_ty = func.typeOf(ty_op.operand); + const operand = try func.resolveInst(ty_op.operand); + const result = try func.genUnwrapErrUnionPayloadMir(operand_ty, operand); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } fn genUnwrapErrUnionPayloadMir( - self: *Self, + func: *Func, err_union_ty: Type, err_union: MCValue, ) !MCValue { - const zcu = self.bin_file.comp.module.?; + const zcu = func.bin_file.comp.module.?; const payload_ty = err_union_ty.errorUnionPayload(zcu); const result: MCValue = result: { @@ -2965,13 +2959,13 @@ fn genUnwrapErrUnionPayloadMir( .off = frame_addr.off + payload_off, } }, .register => |reg| { - const eu_lock = self.register_manager.lockReg(reg); - defer if (eu_lock) |lock| self.register_manager.unlockReg(lock); + const eu_lock = func.register_manager.lockReg(reg); + defer if (eu_lock) |lock| func.register_manager.unlockReg(lock); - var result: MCValue = .{ .register = try self.copyToTmpRegister(err_union_ty, err_union) }; + var result: MCValue = .{ .register = try func.copyToTmpRegister(err_union_ty, err_union) }; if (payload_off > 0) { - result = try self.binOp( + result = try func.binOp( .shr, result, err_union_ty, @@ -2982,7 +2976,7 @@ fn genUnwrapErrUnionPayloadMir( break :result result; }, - else => return self.fail("TODO implement genUnwrapErrUnionPayloadMir for {}", .{err_union}), + else => return func.fail("TODO implement genUnwrapErrUnionPayloadMir for {}", .{err_union}), } }; @@ -2990,116 +2984,116 @@ fn genUnwrapErrUnionPayloadMir( } // *(E!T) -> E -fn airUnwrapErrErrPtr(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement unwrap error union error ptr for {}", .{self.target.cpu.arch}); - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); +fn airUnwrapErrErrPtr(func: *Func, inst: Air.Inst.Index) !void { + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement unwrap error union error ptr for {}", .{func.target.cpu.arch}); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } // *(E!T) -> *T -fn airUnwrapErrPayloadPtr(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement unwrap error union payload ptr for {}", .{self.target.cpu.arch}); - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); +fn airUnwrapErrPayloadPtr(func: *Func, inst: Air.Inst.Index) !void { + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement unwrap error union payload ptr for {}", .{func.target.cpu.arch}); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = 
self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement .errunion_payload_ptr_set for {}", .{self.target.cpu.arch}); - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); +fn airErrUnionPayloadPtrSet(func: *Func, inst: Air.Inst.Index) !void { + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement .errunion_payload_ptr_set for {}", .{func.target.cpu.arch}); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airErrReturnTrace(self: *Self, inst: Air.Inst.Index) !void { - const result: MCValue = if (self.liveness.isUnused(inst)) +fn airErrReturnTrace(func: *Func, inst: Air.Inst.Index) !void { + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else - return self.fail("TODO implement airErrReturnTrace for {}", .{self.target.cpu.arch}); - return self.finishAir(inst, result, .{ .none, .none, .none }); + return func.fail("TODO implement airErrReturnTrace for {}", .{func.target.cpu.arch}); + return func.finishAir(inst, result, .{ .none, .none, .none }); } -fn airSetErrReturnTrace(self: *Self, inst: Air.Inst.Index) !void { +fn airSetErrReturnTrace(func: *Func, inst: Air.Inst.Index) !void { _ = inst; - return self.fail("TODO implement airSetErrReturnTrace for {}", .{self.target.cpu.arch}); + return func.fail("TODO implement airSetErrReturnTrace for {}", .{func.target.cpu.arch}); } -fn airSaveErrReturnTraceIndex(self: *Self, inst: Air.Inst.Index) !void { +fn airSaveErrReturnTraceIndex(func: *Func, inst: Air.Inst.Index) !void { _ = inst; - return self.fail("TODO implement airSaveErrReturnTraceIndex for {}", .{self.target.cpu.arch}); + return func.fail("TODO implement airSaveErrReturnTraceIndex for {}", .{func.target.cpu.arch}); } -fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { - const zcu = self.bin_file.comp.module.?; - const optional_ty = self.typeOfIndex(inst); +fn airWrapOptional(func: *Func, inst: Air.Inst.Index) !void { + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { + const zcu = func.bin_file.comp.module.?; + const optional_ty = func.typeOfIndex(inst); // Optional with a zero-bit payload type is just a boolean true if (optional_ty.abiSize(zcu) == 1) break :result MCValue{ .immediate = 1 }; - return self.fail("TODO implement wrap optional for {}", .{self.target.cpu.arch}); + return func.fail("TODO implement wrap optional for {}", .{func.target.cpu.arch}); }; - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } /// T to E!T -fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { - const zcu = self.bin_file.comp.module.?; - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; +fn airWrapErrUnionPayload(func: *Func, inst: Air.Inst.Index) !void { + const zcu = func.bin_file.comp.module.?; + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const eu_ty = ty_op.ty.toType(); const pl_ty = eu_ty.errorUnionPayload(zcu); const err_ty = eu_ty.errorUnionSet(zcu); - 
const operand = try self.resolveInst(ty_op.operand); + const operand = try func.resolveInst(ty_op.operand); const result: MCValue = result: { if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .{ .immediate = 0 }; - const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(eu_ty, zcu)); + const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(eu_ty, zcu)); const pl_off: i32 = @intCast(errUnionPayloadOffset(pl_ty, zcu)); const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, zcu)); - try self.genSetMem(.{ .frame = frame_index }, pl_off, pl_ty, operand); - try self.genSetMem(.{ .frame = frame_index }, err_off, err_ty, .{ .immediate = 0 }); + try func.genSetMem(.{ .frame = frame_index }, pl_off, pl_ty, operand); + try func.genSetMem(.{ .frame = frame_index }, err_off, err_ty, .{ .immediate = 0 }); break :result .{ .load_frame = .{ .index = frame_index } }; }; - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } /// E to E!T -fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { - const zcu = self.bin_file.comp.module.?; - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; +fn airWrapErrUnionErr(func: *Func, inst: Air.Inst.Index) !void { + const zcu = func.bin_file.comp.module.?; + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const eu_ty = ty_op.ty.toType(); const pl_ty = eu_ty.errorUnionPayload(zcu); const err_ty = eu_ty.errorUnionSet(zcu); const result: MCValue = result: { - if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result try self.resolveInst(ty_op.operand); + if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result try func.resolveInst(ty_op.operand); - const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(eu_ty, zcu)); + const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(eu_ty, zcu)); const pl_off: i32 = @intCast(errUnionPayloadOffset(pl_ty, zcu)); const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, zcu)); - try self.genSetMem(.{ .frame = frame_index }, pl_off, pl_ty, .undef); - const operand = try self.resolveInst(ty_op.operand); - try self.genSetMem(.{ .frame = frame_index }, err_off, err_ty, operand); + try func.genSetMem(.{ .frame = frame_index }, pl_off, pl_ty, .undef); + const operand = try func.resolveInst(ty_op.operand); + try func.genSetMem(.{ .frame = frame_index }, err_off, err_ty, operand); break :result .{ .load_frame = .{ .index = frame_index } }; }; - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airTry(self: *Self, inst: Air.Inst.Index) !void { - const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; - const extra = self.air.extraData(Air.Try, pl_op.payload); - const body: []const Air.Inst.Index = @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]); - const operand_ty = self.typeOf(pl_op.operand); - const result = try self.genTry(inst, pl_op.operand, body, operand_ty, false); - return self.finishAir(inst, result, .{ .none, .none, .none }); +fn airTry(func: *Func, inst: Air.Inst.Index) !void { + const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; + const extra = func.air.extraData(Air.Try, pl_op.payload); + const body: []const Air.Inst.Index = @ptrCast(func.air.extra[extra.end..][0..extra.data.body_len]); + const operand_ty = func.typeOf(pl_op.operand); + const result = try 
func.genTry(inst, pl_op.operand, body, operand_ty, false); + return func.finishAir(inst, result, .{ .none, .none, .none }); } fn genTry( - self: *Self, + func: *Func, inst: Air.Inst.Index, operand: Air.Inst.Ref, body: []const Air.Inst.Index, @@ -3108,180 +3102,180 @@ fn genTry( ) !MCValue { _ = operand_is_ptr; - const liveness_cond_br = self.liveness.getCondBr(inst); + const liveness_cond_br = func.liveness.getCondBr(inst); - const operand_mcv = try self.resolveInst(operand); - const is_err_mcv = try self.isErr(null, operand_ty, operand_mcv); + const operand_mcv = try func.resolveInst(operand); + const is_err_mcv = try func.isErr(null, operand_ty, operand_mcv); // A branch to the false section. Uses beq. 1 is the default "true" state. - const reloc = try self.condBr(Type.anyerror, is_err_mcv); + const reloc = try func.condBr(Type.anyerror, is_err_mcv); - if (self.liveness.operandDies(inst, 0)) { - if (operand.toIndex()) |operand_inst| try self.processDeath(operand_inst); + if (func.liveness.operandDies(inst, 0)) { + if (operand.toIndex()) |operand_inst| try func.processDeath(operand_inst); } - self.scope_generation += 1; - const state = try self.saveState(); + func.scope_generation += 1; + const state = try func.saveState(); - for (liveness_cond_br.else_deaths) |death| try self.processDeath(death); - try self.genBody(body); - try self.restoreState(state, &.{}, .{ + for (liveness_cond_br.else_deaths) |death| try func.processDeath(death); + try func.genBody(body); + try func.restoreState(state, &.{}, .{ .emit_instructions = false, .update_tracking = true, .resurrect = true, .close_scope = true, }); - self.performReloc(reloc); + func.performReloc(reloc); - for (liveness_cond_br.then_deaths) |death| try self.processDeath(death); + for (liveness_cond_br.then_deaths) |death| try func.processDeath(death); - const result = if (self.liveness.isUnused(inst)) + const result = if (func.liveness.isUnused(inst)) .unreach else - try self.genUnwrapErrUnionPayloadMir(operand_ty, operand_mcv); + try func.genUnwrapErrUnionPayloadMir(operand_ty, operand_mcv); return result; } -fn airSlicePtr(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; +fn airSlicePtr(func: *Func, inst: Air.Inst.Index) !void { + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result = result: { - const src_mcv = try self.resolveInst(ty_op.operand); - if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result src_mcv; + const src_mcv = try func.resolveInst(ty_op.operand); + if (func.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result src_mcv; - const dst_mcv = try self.allocRegOrMem(inst, true); - const dst_ty = self.typeOfIndex(inst); - try self.genCopy(dst_ty, dst_mcv, src_mcv); + const dst_mcv = try func.allocRegOrMem(inst, true); + const dst_ty = func.typeOfIndex(inst); + try func.genCopy(dst_ty, dst_mcv, src_mcv); break :result dst_mcv; }; - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { - const src_mcv = try self.resolveInst(ty_op.operand); +fn airSliceLen(func: *Func, inst: Air.Inst.Index) !void { + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const result: 
MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { + const src_mcv = try func.resolveInst(ty_op.operand); switch (src_mcv) { .load_frame => |frame_addr| { const len_mcv: MCValue = .{ .load_frame = .{ .index = frame_addr.index, .off = frame_addr.off + 8, } }; - if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result len_mcv; + if (func.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result len_mcv; - const dst_mcv = try self.allocRegOrMem(inst, true); - try self.genCopy(Type.usize, dst_mcv, len_mcv); + const dst_mcv = try func.allocRegOrMem(inst, true); + try func.genCopy(Type.usize, dst_mcv, len_mcv); break :result dst_mcv; }, .register_pair => |pair| { const len_mcv: MCValue = .{ .register = pair[1] }; - if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result len_mcv; + if (func.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result len_mcv; - const dst_mcv = try self.allocRegOrMem(inst, true); - try self.genCopy(Type.usize, dst_mcv, len_mcv); + const dst_mcv = try func.allocRegOrMem(inst, true); + try func.genCopy(Type.usize, dst_mcv, len_mcv); break :result dst_mcv; }, - else => return self.fail("TODO airSliceLen for {}", .{src_mcv}), + else => return func.fail("TODO airSliceLen for {}", .{src_mcv}), } }; - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement ptr_slice_len_ptr for {}", .{self.target.cpu.arch}); - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); +fn airPtrSliceLenPtr(func: *Func, inst: Air.Inst.Index) !void { + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement ptr_slice_len_ptr for {}", .{func.target.cpu.arch}); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement ptr_slice_ptr_ptr for {}", .{self.target.cpu.arch}); - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); +fn airPtrSlicePtrPtr(func: *Func, inst: Air.Inst.Index) !void { + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement ptr_slice_ptr_ptr for {}", .{func.target.cpu.arch}); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { - const zcu = self.bin_file.comp.module.?; +fn airSliceElemVal(func: *Func, inst: Air.Inst.Index) !void { + const zcu = func.bin_file.comp.module.?; const is_volatile = false; // TODO - const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; + const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - if (!is_volatile and self.liveness.isUnused(inst)) return self.finishAir( + if (!is_volatile and func.liveness.isUnused(inst)) return func.finishAir( inst, .unreach, .{ bin_op.lhs, bin_op.rhs, .none }, ); 
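+    // Lowering sketch: the slice's pointer field is materialized in a temporary
+    // register, offset by the element index via a `ptr_add`, and the element is
+    // then loaded through the computed address.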
const result: MCValue = result: { - const slice_mcv = try self.resolveInst(bin_op.lhs); - const index_mcv = try self.resolveInst(bin_op.rhs); + const slice_mcv = try func.resolveInst(bin_op.lhs); + const index_mcv = try func.resolveInst(bin_op.rhs); - const slice_ty = self.typeOf(bin_op.lhs); + const slice_ty = func.typeOf(bin_op.lhs); const slice_ptr_field_type = slice_ty.slicePtrFieldType(zcu); const index_lock: ?RegisterLock = if (index_mcv == .register) - self.register_manager.lockRegAssumeUnused(index_mcv.register) + func.register_manager.lockRegAssumeUnused(index_mcv.register) else null; - defer if (index_lock) |reg| self.register_manager.unlockReg(reg); + defer if (index_lock) |reg| func.register_manager.unlockReg(reg); const base_mcv: MCValue = switch (slice_mcv) { .load_frame, .load_symbol, - => .{ .register = try self.copyToTmpRegister(slice_ptr_field_type, slice_mcv) }, - else => return self.fail("TODO slice_elem_val when slice is {}", .{slice_mcv}), + => .{ .register = try func.copyToTmpRegister(slice_ptr_field_type, slice_mcv) }, + else => return func.fail("TODO slice_elem_val when slice is {}", .{slice_mcv}), }; - const dest = try self.allocRegOrMem(inst, true); - const addr = try self.binOp(.ptr_add, base_mcv, slice_ptr_field_type, index_mcv, Type.usize); - try self.load(dest, addr, slice_ptr_field_type); + const dest = try func.allocRegOrMem(inst, true); + const addr = try func.binOp(.ptr_add, base_mcv, slice_ptr_field_type, index_mcv, Type.usize); + try func.load(dest, addr, slice_ptr_field_type); break :result dest; }; - return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); + return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } -fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) !void { - const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; - const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement slice_elem_ptr for {}", .{self.target.cpu.arch}); - return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none }); +fn airSliceElemPtr(func: *Func, inst: Air.Inst.Index) !void { + const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; + const extra = func.air.extraData(Air.Bin, ty_pl.payload).data; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement slice_elem_ptr for {}", .{func.target.cpu.arch}); + return func.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none }); } -fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { - const zcu = self.bin_file.comp.module.?; - const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { - const array_ty = self.typeOf(bin_op.lhs); - const array_mcv = try self.resolveInst(bin_op.lhs); +fn airArrayElemVal(func: *Func, inst: Air.Inst.Index) !void { + const zcu = func.bin_file.comp.module.?; + const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { + const array_ty = func.typeOf(bin_op.lhs); + const array_mcv = try func.resolveInst(bin_op.lhs); - const index_mcv = try self.resolveInst(bin_op.rhs); - const index_ty = self.typeOf(bin_op.rhs); + const index_mcv = try func.resolveInst(bin_op.rhs); + const index_ty = func.typeOf(bin_op.rhs); const elem_ty = 
array_ty.childType(zcu); const elem_abi_size = elem_ty.abiSize(zcu); - const addr_reg, const addr_reg_lock = try self.allocReg(.int); - defer self.register_manager.unlockReg(addr_reg_lock); + const addr_reg, const addr_reg_lock = try func.allocReg(.int); + defer func.register_manager.unlockReg(addr_reg_lock); switch (array_mcv) { .register => { - const frame_index = try self.allocFrameIndex(FrameAlloc.initType(array_ty, zcu)); - try self.genSetMem(.{ .frame = frame_index }, 0, array_ty, array_mcv); - try self.genSetReg(Type.usize, addr_reg, .{ .lea_frame = .{ .index = frame_index } }); + const frame_index = try func.allocFrameIndex(FrameAlloc.initType(array_ty, zcu)); + try func.genSetMem(.{ .frame = frame_index }, 0, array_ty, array_mcv); + try func.genSetReg(Type.usize, addr_reg, .{ .lea_frame = .{ .index = frame_index } }); }, .load_frame => |frame_addr| { - try self.genSetReg(Type.usize, addr_reg, .{ .lea_frame = frame_addr }); + try func.genSetReg(Type.usize, addr_reg, .{ .lea_frame = frame_addr }); }, - else => try self.genSetReg(Type.usize, addr_reg, array_mcv.address()), + else => try func.genSetReg(Type.usize, addr_reg, array_mcv.address()), } - const offset_reg = try self.elemOffset(index_ty, index_mcv, elem_abi_size); - const offset_lock = self.register_manager.lockRegAssumeUnused(offset_reg); - defer self.register_manager.unlockReg(offset_lock); + const offset_reg = try func.elemOffset(index_ty, index_mcv, elem_abi_size); + const offset_lock = func.register_manager.lockRegAssumeUnused(offset_reg); + defer func.register_manager.unlockReg(offset_lock); - const dst_mcv = try self.allocRegOrMem(inst, false); - _ = try self.addInst(.{ + const dst_mcv = try func.allocRegOrMem(inst, false); + _ = try func.addInst(.{ .tag = .add, .ops = .rrr, .data = .{ .r_type = .{ @@ -3290,138 +3284,138 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { .rs2 = addr_reg, } }, }); - try self.genCopy(elem_ty, dst_mcv, .{ .indirect = .{ .reg = addr_reg } }); + try func.genCopy(elem_ty, dst_mcv, .{ .indirect = .{ .reg = addr_reg } }); break :result dst_mcv; }; - return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); + return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } -fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void { +fn airPtrElemVal(func: *Func, inst: Air.Inst.Index) !void { const is_volatile = false; // TODO - const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (!is_volatile and self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement ptr_elem_val for {}", .{self.target.cpu.arch}); - return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); + const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; + const result: MCValue = if (!is_volatile and func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement ptr_elem_val for {}", .{func.target.cpu.arch}); + return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } -fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void { - const zcu = self.bin_file.comp.module.?; - const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; - const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; +fn airPtrElemPtr(func: *Func, inst: Air.Inst.Index) !void { + const zcu = func.bin_file.comp.module.?; + const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; + const extra = func.air.extraData(Air.Bin, ty_pl.payload).data; 
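+    // Lowering sketch: `elemOffset` converts the index into a byte offset using
+    // the element's ABI size, and `ptr_add` applies that offset to the base
+    // pointer; pointers carrying a vector index are reused or copied as-is.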
const result = result: { - const elem_ptr_ty = self.typeOfIndex(inst); - const base_ptr_ty = self.typeOf(extra.lhs); + const elem_ptr_ty = func.typeOfIndex(inst); + const base_ptr_ty = func.typeOf(extra.lhs); - const base_ptr_mcv = try self.resolveInst(extra.lhs); + const base_ptr_mcv = try func.resolveInst(extra.lhs); const base_ptr_lock: ?RegisterLock = switch (base_ptr_mcv) { - .register => |reg| self.register_manager.lockRegAssumeUnused(reg), + .register => |reg| func.register_manager.lockRegAssumeUnused(reg), else => null, }; - defer if (base_ptr_lock) |lock| self.register_manager.unlockReg(lock); + defer if (base_ptr_lock) |lock| func.register_manager.unlockReg(lock); if (elem_ptr_ty.ptrInfo(zcu).flags.vector_index != .none) { - break :result if (self.reuseOperand(inst, extra.lhs, 0, base_ptr_mcv)) + break :result if (func.reuseOperand(inst, extra.lhs, 0, base_ptr_mcv)) base_ptr_mcv else - try self.copyToNewRegister(inst, base_ptr_mcv); + try func.copyToNewRegister(inst, base_ptr_mcv); } const elem_ty = base_ptr_ty.elemType2(zcu); const elem_abi_size = elem_ty.abiSize(zcu); - const index_ty = self.typeOf(extra.rhs); - const index_mcv = try self.resolveInst(extra.rhs); + const index_ty = func.typeOf(extra.rhs); + const index_mcv = try func.resolveInst(extra.rhs); const index_lock: ?RegisterLock = switch (index_mcv) { - .register => |reg| self.register_manager.lockRegAssumeUnused(reg), + .register => |reg| func.register_manager.lockRegAssumeUnused(reg), else => null, }; - defer if (index_lock) |lock| self.register_manager.unlockReg(lock); + defer if (index_lock) |lock| func.register_manager.unlockReg(lock); - const offset_reg = try self.elemOffset(index_ty, index_mcv, elem_abi_size); - const offset_reg_lock = self.register_manager.lockRegAssumeUnused(offset_reg); - defer self.register_manager.unlockReg(offset_reg_lock); + const offset_reg = try func.elemOffset(index_ty, index_mcv, elem_abi_size); + const offset_reg_lock = func.register_manager.lockRegAssumeUnused(offset_reg); + defer func.register_manager.unlockReg(offset_reg_lock); - break :result try self.binOp(.ptr_add, base_ptr_mcv, base_ptr_ty, .{ .register = offset_reg }, base_ptr_ty); + break :result try func.binOp(.ptr_add, base_ptr_mcv, base_ptr_ty, .{ .register = offset_reg }, base_ptr_ty); }; - return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none }); + return func.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none }); } -fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void { - const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; +fn airSetUnionTag(func: *Func, inst: Air.Inst.Index) !void { + const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; _ = bin_op; - return self.fail("TODO implement airSetUnionTag for {}", .{self.target.cpu.arch}); - // return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); + return func.fail("TODO implement airSetUnionTag for {}", .{func.target.cpu.arch}); + // return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } -fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement airGetUnionTag for {}", .{self.target.cpu.arch}); - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); +fn airGetUnionTag(func: *Func, inst: Air.Inst.Index) !void { + const ty_op = 
func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement airGetUnionTag for {}", .{func.target.cpu.arch}); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airClz(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement airClz for {}", .{self.target.cpu.arch}); - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); +fn airClz(func: *Func, inst: Air.Inst.Index) !void { + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement airClz for {}", .{func.target.cpu.arch}); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airCtz(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; +fn airCtz(func: *Func, inst: Air.Inst.Index) !void { + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; _ = ty_op; - return self.fail("TODO: finish ctz", .{}); + return func.fail("TODO: finish ctz", .{}); } -fn airPopcount(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement airPopcount for {}", .{self.target.cpu.arch}); - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); +fn airPopcount(func: *Func, inst: Air.Inst.Index) !void { + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement airPopcount for {}", .{func.target.cpu.arch}); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airAbs(self: *Self, inst: Air.Inst.Index) !void { - const zcu = self.bin_file.comp.module.?; - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { - const ty = self.typeOf(ty_op.operand); +fn airAbs(func: *Func, inst: Air.Inst.Index) !void { + const zcu = func.bin_file.comp.module.?; + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { + const ty = func.typeOf(ty_op.operand); const scalar_ty = ty.scalarType(zcu); - const operand = try self.resolveInst(ty_op.operand); + const operand = try func.resolveInst(ty_op.operand); _ = operand; switch (scalar_ty.zigTypeTag(zcu)) { .Int => if (ty.zigTypeTag(zcu) == .Vector) { - return self.fail("TODO implement airAbs for {}", .{ty.fmt(zcu)}); + return func.fail("TODO implement airAbs for {}", .{ty.fmt(zcu)}); } else { - return self.fail("TODO: implement airAbs for Int", .{}); + return func.fail("TODO: implement airAbs for Int", .{}); }, - else => return self.fail("TODO: implement airAbs {}", .{scalar_ty.fmt(zcu)}), + else => return func.fail("TODO: implement airAbs {}", .{scalar_ty.fmt(zcu)}), } break :result .{.unreach}; }; - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn 
airByteSwap(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { - const zcu = self.bin_file.comp.module.?; - const ty = self.typeOf(ty_op.operand); - const operand = try self.resolveInst(ty_op.operand); +fn airByteSwap(func: *Func, inst: Air.Inst.Index) !void { + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { + const zcu = func.bin_file.comp.module.?; + const ty = func.typeOf(ty_op.operand); + const operand = try func.resolveInst(ty_op.operand); const int_bits = ty.intInfo(zcu).bits; // bytes are no-op - if (int_bits == 8 and self.reuseOperand(inst, ty_op.operand, 0, operand)) { - return self.finishAir(inst, operand, .{ ty_op.operand, .none, .none }); + if (int_bits == 8 and func.reuseOperand(inst, ty_op.operand, 0, operand)) { + return func.finishAir(inst, operand, .{ ty_op.operand, .none, .none }); } - const dest_mcv = try self.copyToNewRegister(inst, operand); + const dest_mcv = try func.copyToNewRegister(inst, operand); const dest_reg = dest_mcv.register; switch (int_bits) { 16 => { - const temp_reg, const temp_lock = try self.allocReg(.int); - defer self.register_manager.unlockReg(temp_lock); + const temp_reg, const temp_lock = try func.allocReg(.int); + defer func.register_manager.unlockReg(temp_lock); - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = .srli, .ops = .rri, .data = .{ .i_type = .{ @@ -3431,7 +3425,7 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void { } }, }); - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = .slli, .ops = .rri, .data = .{ .i_type = .{ @@ -3440,7 +3434,7 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void { .rs1 = dest_reg, } }, }); - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = .@"or", .ops = .rri, .data = .{ .r_type = .{ @@ -3450,49 +3444,49 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void { } }, }); }, - else => return self.fail("TODO: {d} bits for airByteSwap", .{int_bits}), + else => return func.fail("TODO: {d} bits for airByteSwap", .{int_bits}), } break :result dest_mcv; }; - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement airBitReverse for {}", .{self.target.cpu.arch}); - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); +fn airBitReverse(func: *Func, inst: Air.Inst.Index) !void { + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement airBitReverse for {}", .{func.target.cpu.arch}); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airUnaryMath(self: *Self, inst: Air.Inst.Index) !void { - const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)]; - const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const result: MCValue = if (self.liveness.isUnused(inst)) +fn airUnaryMath(func: *Func, inst: Air.Inst.Index) !void { + const tag = func.air.instructions.items(.tag)[@intFromEnum(inst)]; 
+    const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
+    const result: MCValue = if (func.liveness.isUnused(inst))
         .unreach
     else
-        return self.fail("TODO implementairUnaryMath {s} for {}", .{ @tagName(tag), self.target.cpu.arch });
-    return self.finishAir(inst, result, .{ un_op, .none, .none });
+        return func.fail("TODO implement airUnaryMath {s} for {}", .{ @tagName(tag), func.target.cpu.arch });
+    return func.finishAir(inst, result, .{ un_op, .none, .none });
 }
 
 fn reuseOperand(
-    self: *Self,
+    func: *Func,
     inst: Air.Inst.Index,
     operand: Air.Inst.Ref,
     op_index: Liveness.OperandInt,
     mcv: MCValue,
 ) bool {
-    return self.reuseOperandAdvanced(inst, operand, op_index, mcv, inst);
+    return func.reuseOperandAdvanced(inst, operand, op_index, mcv, inst);
 }
 
 fn reuseOperandAdvanced(
-    self: *Self,
+    func: *Func,
     inst: Air.Inst.Index,
     operand: Air.Inst.Ref,
     op_index: Liveness.OperandInt,
     mcv: MCValue,
     maybe_tracked_inst: ?Air.Inst.Index,
 ) bool {
-    if (!self.liveness.operandDies(inst, op_index))
+    if (!func.liveness.operandDies(inst, op_index))
         return false;
 
     switch (mcv) {
@@ -3502,59 +3496,59 @@ fn reuseOperandAdvanced(
             // If it's in the registers table, need to associate the register(s) with the
             // new instruction.
             if (maybe_tracked_inst) |tracked_inst| {
-                if (!self.register_manager.isRegFree(reg)) {
+                if (!func.register_manager.isRegFree(reg)) {
                     if (RegisterManager.indexOfRegIntoTracked(reg)) |index| {
-                        self.register_manager.registers[index] = tracked_inst;
+                        func.register_manager.registers[index] = tracked_inst;
                     }
                 }
-            } else self.register_manager.freeReg(reg);
+            } else func.register_manager.freeReg(reg);
         },
         .load_frame => |frame_addr| if (frame_addr.index.isNamed()) return false,
         else => return false,
     }
 
     // Prevent the operand deaths processing code from deallocating it.
-    self.liveness.clearOperandDeath(inst, op_index);
+    func.liveness.clearOperandDeath(inst, op_index);
 
     const op_inst = operand.toIndex().?;
-    self.getResolvedInstValue(op_inst).reuse(self, maybe_tracked_inst, op_inst);
+    func.getResolvedInstValue(op_inst).reuse(func, maybe_tracked_inst, op_inst);
 
     return true;
 }
 
-fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
-    const zcu = self.bin_file.comp.module.?;
-    const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
-    const elem_ty = self.typeOfIndex(inst);
+fn airLoad(func: *Func, inst: Air.Inst.Index) !void {
+    const zcu = func.bin_file.comp.module.?;
+    const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+    const elem_ty = func.typeOfIndex(inst);
 
     const result: MCValue = result: {
         if (!elem_ty.hasRuntimeBits(zcu))
             break :result .none;
 
-        const ptr = try self.resolveInst(ty_op.operand);
-        const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(zcu);
-        if (self.liveness.isUnused(inst) and !is_volatile)
+        const ptr = try func.resolveInst(ty_op.operand);
+        const is_volatile = func.typeOf(ty_op.operand).isVolatilePtr(zcu);
+        if (func.liveness.isUnused(inst) and !is_volatile)
             break :result .unreach;
 
         const elem_size = elem_ty.abiSize(zcu);
 
         const dst_mcv: MCValue = blk: {
             // Pointer is 8 bytes, and if the element is more than that, we cannot reuse it.
-            if (elem_size <= 8 and self.reuseOperand(inst, ty_op.operand, 0, ptr)) {
+            if (elem_size <= 8 and func.reuseOperand(inst, ty_op.operand, 0, ptr)) {
                 // The MCValue that holds the pointer can be re-used as the value.
break :blk ptr; } else { - break :blk try self.allocRegOrMem(inst, true); + break :blk try func.allocRegOrMem(inst, true); } }; - try self.load(dst_mcv, ptr, self.typeOf(ty_op.operand)); + try func.load(dst_mcv, ptr, func.typeOf(ty_op.operand)); break :result dst_mcv; }; - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn load(self: *Self, dst_mcv: MCValue, ptr_mcv: MCValue, ptr_ty: Type) InnerError!void { - const zcu = self.bin_file.comp.module.?; +fn load(func: *Func, dst_mcv: MCValue, ptr_mcv: MCValue, ptr_ty: Type) InnerError!void { + const zcu = func.bin_file.comp.module.?; const dst_ty = ptr_ty.childType(zcu); log.debug("loading {}:{} into {}", .{ ptr_mcv, ptr_ty.fmt(zcu), dst_mcv }); @@ -3573,43 +3567,43 @@ fn load(self: *Self, dst_mcv: MCValue, ptr_mcv: MCValue, ptr_ty: Type) InnerErro .register_offset, .lea_frame, .lea_symbol, - => try self.genCopy(dst_ty, dst_mcv, ptr_mcv.deref()), + => try func.genCopy(dst_ty, dst_mcv, ptr_mcv.deref()), .memory, .indirect, .load_symbol, .load_frame, => { - const addr_reg = try self.copyToTmpRegister(ptr_ty, ptr_mcv); - const addr_lock = self.register_manager.lockRegAssumeUnused(addr_reg); - defer self.register_manager.unlockReg(addr_lock); + const addr_reg = try func.copyToTmpRegister(ptr_ty, ptr_mcv); + const addr_lock = func.register_manager.lockRegAssumeUnused(addr_reg); + defer func.register_manager.unlockReg(addr_lock); - try self.genCopy(dst_ty, dst_mcv, .{ .indirect = .{ .reg = addr_reg } }); + try func.genCopy(dst_ty, dst_mcv, .{ .indirect = .{ .reg = addr_reg } }); }, - .air_ref => |ptr_ref| try self.load(dst_mcv, try self.resolveInst(ptr_ref), ptr_ty), + .air_ref => |ptr_ref| try func.load(dst_mcv, try func.resolveInst(ptr_ref), ptr_ty), } } -fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void { +fn airStore(func: *Func, inst: Air.Inst.Index, safety: bool) !void { if (safety) { // TODO if the value is undef, write 0xaa bytes to dest } else { // TODO if the value is undef, don't lower this instruction } - const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const ptr = try self.resolveInst(bin_op.lhs); - const value = try self.resolveInst(bin_op.rhs); - const ptr_ty = self.typeOf(bin_op.lhs); - const value_ty = self.typeOf(bin_op.rhs); + const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; + const ptr = try func.resolveInst(bin_op.lhs); + const value = try func.resolveInst(bin_op.rhs); + const ptr_ty = func.typeOf(bin_op.lhs); + const value_ty = func.typeOf(bin_op.rhs); - try self.store(ptr, value, ptr_ty, value_ty); + try func.store(ptr, value, ptr_ty, value_ty); - return self.finishAir(inst, .none, .{ bin_op.lhs, bin_op.rhs, .none }); + return func.finishAir(inst, .none, .{ bin_op.lhs, bin_op.rhs, .none }); } /// Loads `value` into the "payload" of `pointer`. 
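+/// That is, copies `src_mcv` into the memory `ptr_mcv` points at, going
+/// through a temporary register when the pointer itself lives in memory.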
-fn store(self: *Self, ptr_mcv: MCValue, src_mcv: MCValue, ptr_ty: Type, src_ty: Type) !void { - const zcu = self.bin_file.comp.module.?; +fn store(func: *Func, ptr_mcv: MCValue, src_mcv: MCValue, ptr_ty: Type, src_ty: Type) !void { + const zcu = func.bin_file.comp.module.?; log.debug("storing {}:{} in {}:{}", .{ src_mcv, src_ty.fmt(zcu), ptr_mcv, ptr_ty.fmt(zcu) }); @@ -3626,40 +3620,40 @@ fn store(self: *Self, ptr_mcv: MCValue, src_mcv: MCValue, ptr_ty: Type, src_ty: .register_offset, .lea_symbol, .lea_frame, - => try self.genCopy(src_ty, ptr_mcv.deref(), src_mcv), + => try func.genCopy(src_ty, ptr_mcv.deref(), src_mcv), .memory, .indirect, .load_symbol, .load_frame, => { - const addr_reg = try self.copyToTmpRegister(ptr_ty, ptr_mcv); - const addr_lock = self.register_manager.lockRegAssumeUnused(addr_reg); - defer self.register_manager.unlockReg(addr_lock); + const addr_reg = try func.copyToTmpRegister(ptr_ty, ptr_mcv); + const addr_lock = func.register_manager.lockRegAssumeUnused(addr_reg); + defer func.register_manager.unlockReg(addr_lock); - try self.genCopy(src_ty, .{ .indirect = .{ .reg = addr_reg } }, src_mcv); + try func.genCopy(src_ty, .{ .indirect = .{ .reg = addr_reg } }, src_mcv); }, - .air_ref => |ptr_ref| try self.store(try self.resolveInst(ptr_ref), src_mcv, ptr_ty, src_ty), + .air_ref => |ptr_ref| try func.store(try func.resolveInst(ptr_ref), src_mcv, ptr_ty, src_ty), } } -fn airStructFieldPtr(self: *Self, inst: Air.Inst.Index) !void { - const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; - const extra = self.air.extraData(Air.StructField, ty_pl.payload).data; - const result = try self.structFieldPtr(inst, extra.struct_operand, extra.field_index); - return self.finishAir(inst, result, .{ extra.struct_operand, .none, .none }); +fn airStructFieldPtr(func: *Func, inst: Air.Inst.Index) !void { + const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; + const extra = func.air.extraData(Air.StructField, ty_pl.payload).data; + const result = try func.structFieldPtr(inst, extra.struct_operand, extra.field_index); + return func.finishAir(inst, result, .{ extra.struct_operand, .none, .none }); } -fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void { - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result = try self.structFieldPtr(inst, ty_op.operand, index); - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); +fn airStructFieldPtrIndex(func: *Func, inst: Air.Inst.Index, index: u8) !void { + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const result = try func.structFieldPtr(inst, ty_op.operand, index); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue { - const zcu = self.bin_file.comp.module.?; - const ptr_field_ty = self.typeOfIndex(inst); - const ptr_container_ty = self.typeOf(operand); +fn structFieldPtr(func: *Func, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue { + const zcu = func.bin_file.comp.module.?; + const ptr_field_ty = func.typeOfIndex(inst); + const ptr_container_ty = func.typeOf(operand); const ptr_container_ty_info = ptr_container_ty.ptrInfo(zcu); const container_ty = ptr_container_ty.childType(zcu); @@ -3672,27 +3666,27 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde else @intCast(container_ty.structFieldOffset(index, zcu)); - const 
src_mcv = try self.resolveInst(operand); + const src_mcv = try func.resolveInst(operand); const dst_mcv = if (switch (src_mcv) { .immediate, .lea_frame => true, - .register, .register_offset => self.reuseOperand(inst, operand, 0, src_mcv), + .register, .register_offset => func.reuseOperand(inst, operand, 0, src_mcv), else => false, - }) src_mcv else try self.copyToNewRegister(inst, src_mcv); + }) src_mcv else try func.copyToNewRegister(inst, src_mcv); return dst_mcv.offset(field_offset); } -fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; +fn airStructFieldVal(func: *Func, inst: Air.Inst.Index) !void { + const mod = func.bin_file.comp.module.?; - const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; - const extra = self.air.extraData(Air.StructField, ty_pl.payload).data; + const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; + const extra = func.air.extraData(Air.StructField, ty_pl.payload).data; const operand = extra.struct_operand; const index = extra.field_index; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { - const zcu = self.bin_file.comp.module.?; - const src_mcv = try self.resolveInst(operand); - const struct_ty = self.typeOf(operand); + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { + const zcu = func.bin_file.comp.module.?; + const src_mcv = try func.resolveInst(operand); + const struct_ty = func.typeOf(operand); const field_ty = struct_ty.structFieldType(index, zcu); if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .none; @@ -3707,20 +3701,20 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { switch (src_mcv) { .dead, .unreach => unreachable, .register => |src_reg| { - const src_reg_lock = self.register_manager.lockRegAssumeUnused(src_reg); - defer self.register_manager.unlockReg(src_reg_lock); + const src_reg_lock = func.register_manager.lockRegAssumeUnused(src_reg); + defer func.register_manager.unlockReg(src_reg_lock); const dst_reg = if (field_off == 0) - (try self.copyToNewRegister(inst, src_mcv)).register + (try func.copyToNewRegister(inst, src_mcv)).register else - try self.copyToTmpRegister(Type.usize, .{ .register = src_reg }); + try func.copyToTmpRegister(Type.usize, .{ .register = src_reg }); const dst_mcv: MCValue = .{ .register = dst_reg }; - const dst_lock = self.register_manager.lockReg(dst_reg); - defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); + const dst_lock = func.register_manager.lockReg(dst_reg); + defer if (dst_lock) |lock| func.register_manager.unlockReg(lock); if (field_off > 0) { - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = .srli, .ops = .rri, .data = .{ .i_type = .{ @@ -3731,7 +3725,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { }); } - break :result if (field_off == 0) dst_mcv else try self.copyToNewRegister(inst, dst_mcv); + break :result if (field_off == 0) dst_mcv else try func.copyToNewRegister(inst, dst_mcv); }, .load_frame => { const field_abi_size: u32 = @intCast(field_ty.abiSize(mod)); @@ -3746,57 +3740,57 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { @intCast(field_bit_size), ); - const dst_reg, const dst_lock = try self.allocReg(.int); + const dst_reg, const dst_lock = try func.allocReg(.int); const dst_mcv = MCValue{ .register = dst_reg }; - defer self.register_manager.unlockReg(dst_lock); + defer func.register_manager.unlockReg(dst_lock); - try self.genCopy(int_ty, 
dst_mcv, off_mcv); - break :result try self.copyToNewRegister(inst, dst_mcv); + try func.genCopy(int_ty, dst_mcv, off_mcv); + break :result try func.copyToNewRegister(inst, dst_mcv); } const container_abi_size: u32 = @intCast(struct_ty.abiSize(mod)); const dst_mcv = if (field_byte_off + field_abi_size <= container_abi_size and - self.reuseOperand(inst, operand, 0, src_mcv)) + func.reuseOperand(inst, operand, 0, src_mcv)) off_mcv else dst: { - const dst_mcv = try self.allocRegOrMem(inst, true); - try self.genCopy(field_ty, dst_mcv, off_mcv); + const dst_mcv = try func.allocRegOrMem(inst, true); + try func.genCopy(field_ty, dst_mcv, off_mcv); break :dst dst_mcv; }; if (field_abi_size * 8 > field_bit_size and dst_mcv.isMemory()) { - const tmp_reg, const tmp_lock = try self.allocReg(.int); - defer self.register_manager.unlockReg(tmp_lock); + const tmp_reg, const tmp_lock = try func.allocReg(.int); + defer func.register_manager.unlockReg(tmp_lock); const hi_mcv = dst_mcv.address().offset(@intCast(field_bit_size / 64 * 8)).deref(); - try self.genSetReg(Type.usize, tmp_reg, hi_mcv); - try self.genCopy(Type.usize, hi_mcv, .{ .register = tmp_reg }); + try func.genSetReg(Type.usize, tmp_reg, hi_mcv); + try func.genCopy(Type.usize, hi_mcv, .{ .register = tmp_reg }); } break :result dst_mcv; } - return self.fail("TODO: airStructFieldVal load_frame field_off non multiple of 8", .{}); + return func.fail("TODO: airStructFieldVal load_frame field_off non multiple of 8", .{}); }, - else => return self.fail("TODO: airStructField {s}", .{@tagName(src_mcv)}), + else => return func.fail("TODO: airStructField {s}", .{@tagName(src_mcv)}), } }; - return self.finishAir(inst, result, .{ extra.struct_operand, .none, .none }); + return func.finishAir(inst, result, .{ extra.struct_operand, .none, .none }); } -fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void { +fn airFieldParentPtr(func: *Func, inst: Air.Inst.Index) !void { _ = inst; - return self.fail("TODO implement codegen airFieldParentPtr", .{}); + return func.fail("TODO implement codegen airFieldParentPtr", .{}); } -fn genArgDbgInfo(self: Self, inst: Air.Inst.Index, mcv: MCValue) !void { - const zcu = self.bin_file.comp.module.?; - const arg = self.air.instructions.items(.data)[@intFromEnum(inst)].arg; +fn genArgDbgInfo(func: Func, inst: Air.Inst.Index, mcv: MCValue) !void { + const zcu = func.bin_file.comp.module.?; + const arg = func.air.instructions.items(.data)[@intFromEnum(inst)].arg; const ty = arg.ty.toType(); - const owner_decl = zcu.funcOwnerDeclIndex(self.func_index); - const name = zcu.getParamName(self.func_index, arg.src_index); + const owner_decl = zcu.funcOwnerDeclIndex(func.func_index); + const name = zcu.getParamName(func.func_index, arg.src_index); - switch (self.debug_output) { + switch (func.debug_output) { .dwarf => |dw| switch (mcv) { .register => |reg| try dw.genArgDbgInfo(name, ty, owner_decl, .{ .register = reg.dwarfLocOp(), @@ -3809,100 +3803,100 @@ fn genArgDbgInfo(self: Self, inst: Air.Inst.Index, mcv: MCValue) !void { } } -fn airArg(self: *Self, inst: Air.Inst.Index) !void { - var arg_index = self.arg_index; +fn airArg(func: *Func, inst: Air.Inst.Index) !void { + var arg_index = func.arg_index; // we skip over args that have no bits - while (self.args[arg_index] == .none) arg_index += 1; - self.arg_index = arg_index + 1; + while (func.args[arg_index] == .none) arg_index += 1; + func.arg_index = arg_index + 1; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { - const src_mcv = 
self.args[arg_index]; - const arg_ty = self.typeOfIndex(inst); + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { + const src_mcv = func.args[arg_index]; + const arg_ty = func.typeOfIndex(inst); - const dst_mcv = try self.allocRegOrMem(inst, false); + const dst_mcv = try func.allocRegOrMem(inst, false); log.debug("airArg {} -> {}", .{ src_mcv, dst_mcv }); - try self.genCopy(arg_ty, dst_mcv, src_mcv); + try func.genCopy(arg_ty, dst_mcv, src_mcv); - try self.genArgDbgInfo(inst, src_mcv); + try func.genArgDbgInfo(inst, src_mcv); break :result dst_mcv; }; - return self.finishAir(inst, result, .{ .none, .none, .none }); + return func.finishAir(inst, result, .{ .none, .none, .none }); } -fn airTrap(self: *Self) !void { - _ = try self.addInst(.{ +fn airTrap(func: *Func) !void { + _ = try func.addInst(.{ .tag = .unimp, .ops = .none, .data = undefined, }); - return self.finishAirBookkeeping(); + return func.finishAirBookkeeping(); } -fn airBreakpoint(self: *Self) !void { - _ = try self.addInst(.{ +fn airBreakpoint(func: *Func) !void { + _ = try func.addInst(.{ .tag = .ebreak, .ops = .none, .data = undefined, }); - return self.finishAirBookkeeping(); + return func.finishAirBookkeeping(); } -fn airRetAddr(self: *Self, inst: Air.Inst.Index) !void { - const dst_mcv = try self.allocRegOrMem(inst, true); - try self.genCopy(Type.usize, dst_mcv, .{ .load_frame = .{ .index = .ret_addr } }); - return self.finishAir(inst, dst_mcv, .{ .none, .none, .none }); +fn airRetAddr(func: *Func, inst: Air.Inst.Index) !void { + const dst_mcv = try func.allocRegOrMem(inst, true); + try func.genCopy(Type.usize, dst_mcv, .{ .load_frame = .{ .index = .ret_addr } }); + return func.finishAir(inst, dst_mcv, .{ .none, .none, .none }); } -fn airFrameAddress(self: *Self, inst: Air.Inst.Index) !void { - const dst_mcv = try self.allocRegOrMem(inst, true); - try self.genCopy(Type.usize, dst_mcv, .{ .lea_frame = .{ .index = .base_ptr } }); - return self.finishAir(inst, dst_mcv, .{ .none, .none, .none }); +fn airFrameAddress(func: *Func, inst: Air.Inst.Index) !void { + const dst_mcv = try func.allocRegOrMem(inst, true); + try func.genCopy(Type.usize, dst_mcv, .{ .lea_frame = .{ .index = .base_ptr } }); + return func.finishAir(inst, dst_mcv, .{ .none, .none, .none }); } -fn airFence(self: *Self) !void { - return self.fail("TODO implement fence() for {}", .{self.target.cpu.arch}); - //return self.finishAirBookkeeping(); +fn airFence(func: *Func) !void { + return func.fail("TODO implement fence() for {}", .{func.target.cpu.arch}); + //return func.finishAirBookkeeping(); } -fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) !void { - if (modifier == .always_tail) return self.fail("TODO implement tail calls for riscv64", .{}); - const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; +fn airCall(func: *Func, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) !void { + if (modifier == .always_tail) return func.fail("TODO implement tail calls for riscv64", .{}); + const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const callee = pl_op.operand; - const extra = self.air.extraData(Air.Call, pl_op.payload); - const arg_refs: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]); + const extra = func.air.extraData(Air.Call, pl_op.payload); + const arg_refs: []const Air.Inst.Ref = @ptrCast(func.air.extra[extra.end..][0..extra.data.args_len]); const expected_num_args = 8; const ExpectedContents = extern 
struct { vals: [expected_num_args][@sizeOf(MCValue)]u8 align(@alignOf(MCValue)), }; var stack align(@max(@alignOf(ExpectedContents), @alignOf(std.heap.StackFallbackAllocator(0)))) = - std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa); + std.heap.stackFallback(@sizeOf(ExpectedContents), func.gpa); const allocator = stack.get(); const arg_tys = try allocator.alloc(Type, arg_refs.len); defer allocator.free(arg_tys); - for (arg_tys, arg_refs) |*arg_ty, arg_ref| arg_ty.* = self.typeOf(arg_ref); + for (arg_tys, arg_refs) |*arg_ty, arg_ref| arg_ty.* = func.typeOf(arg_ref); const arg_vals = try allocator.alloc(MCValue, arg_refs.len); defer allocator.free(arg_vals); for (arg_vals, arg_refs) |*arg_val, arg_ref| arg_val.* = .{ .air_ref = arg_ref }; - const call_ret = try self.genCall(.{ .air = callee }, arg_tys, arg_vals); + const call_ret = try func.genCall(.{ .air = callee }, arg_tys, arg_vals); - var bt = self.liveness.iterateBigTomb(inst); - try self.feed(&bt, pl_op.operand); - for (arg_refs) |arg_ref| try self.feed(&bt, arg_ref); + var bt = func.liveness.iterateBigTomb(inst); + try func.feed(&bt, pl_op.operand); + for (arg_refs) |arg_ref| try func.feed(&bt, arg_ref); - const result = if (self.liveness.isUnused(inst)) .unreach else call_ret; - return self.finishAirResult(inst, result); + const result = if (func.liveness.isUnused(inst)) .unreach else call_ret; + return func.finishAirResult(inst, result); } fn genCall( - self: *Self, + func: *Func, info: union(enum) { air: Air.Inst.Ref, lib: struct { @@ -3915,11 +3909,11 @@ fn genCall( arg_tys: []const Type, args: []const MCValue, ) !MCValue { - const zcu = self.bin_file.comp.module.?; + const zcu = func.bin_file.comp.module.?; const fn_ty = switch (info) { .air => |callee| fn_info: { - const callee_ty = self.typeOf(callee); + const callee_ty = func.typeOf(callee); break :fn_info switch (callee_ty.zigTypeTag(zcu)) { .Fn => callee_ty, .Pointer => callee_ty.childType(zcu), @@ -3935,14 +3929,14 @@ fn genCall( const fn_info = zcu.typeToFunc(fn_ty).?; - const allocator = self.gpa; + const allocator = func.gpa; const var_args = try allocator.alloc(Type, args.len - fn_info.param_types.len); defer allocator.free(var_args); for (var_args, arg_tys[fn_info.param_types.len..]) |*var_arg, arg_ty| var_arg.* = arg_ty; - var call_info = try self.resolveCallingConventionValues(fn_info, var_args); - defer call_info.deinit(self); + var call_info = try func.resolveCallingConventionValues(fn_info, var_args); + defer call_info.deinit(func); // We need a properly aligned and sized call frame to be able to call this function. 
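+    // The call frame is shared by every call site in the function, so it only
+    // needs to grow to the largest size/alignment any single call requires.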
{ @@ -3950,7 +3944,7 @@ fn genCall( .size = call_info.stack_byte_count, .alignment = call_info.stack_align, }); - const frame_allocs_slice = self.frame_allocs.slice(); + const frame_allocs_slice = func.frame_allocs.slice(); const stack_frame_size = &frame_allocs_slice.items(.abi_size)[@intFromEnum(FrameIndex.call_frame)]; stack_frame_size.* = @max(stack_frame_size.*, needed_call_frame.abi_size); @@ -3962,34 +3956,34 @@ fn genCall( var reg_locks = std.ArrayList(?RegisterLock).init(allocator); defer reg_locks.deinit(); try reg_locks.ensureTotalCapacity(8); - defer for (reg_locks.items) |reg_lock| if (reg_lock) |lock| self.register_manager.unlockReg(lock); + defer for (reg_locks.items) |reg_lock| if (reg_lock) |lock| func.register_manager.unlockReg(lock); const frame_indices = try allocator.alloc(FrameIndex, args.len); defer allocator.free(frame_indices); switch (call_info.return_value.long) { .none, .unreach => {}, - .indirect => |reg_off| try self.register_manager.getReg(reg_off.reg, null), + .indirect => |reg_off| try func.register_manager.getReg(reg_off.reg, null), else => unreachable, } for (call_info.args, args, arg_tys, frame_indices) |dst_arg, src_arg, arg_ty, *frame_index| { switch (dst_arg) { .none => {}, .register => |reg| { - try self.register_manager.getReg(reg, null); - try reg_locks.append(self.register_manager.lockReg(reg)); + try func.register_manager.getReg(reg, null); + try reg_locks.append(func.register_manager.lockReg(reg)); }, .register_pair => |regs| { - for (regs) |reg| try self.register_manager.getReg(reg, null); - try reg_locks.appendSlice(&self.register_manager.lockRegs(2, regs)); + for (regs) |reg| try func.register_manager.getReg(reg, null); + try reg_locks.appendSlice(&func.register_manager.lockRegs(2, regs)); }, .indirect => |reg_off| { - frame_index.* = try self.allocFrameIndex(FrameAlloc.initType(arg_ty, zcu)); - try self.genSetMem(.{ .frame = frame_index.* }, 0, arg_ty, src_arg); - try self.register_manager.getReg(reg_off.reg, null); - try reg_locks.append(self.register_manager.lockReg(reg_off.reg)); + frame_index.* = try func.allocFrameIndex(FrameAlloc.initType(arg_ty, zcu)); + try func.genSetMem(.{ .frame = frame_index.* }, 0, arg_ty, src_arg); + try func.register_manager.getReg(reg_off.reg, null); + try reg_locks.append(func.register_manager.lockReg(reg_off.reg)); }, - else => return self.fail("TODO: genCall set arg {s}", .{@tagName(dst_arg)}), + else => return func.fail("TODO: genCall set arg {s}", .{@tagName(dst_arg)}), } } @@ -3997,12 +3991,12 @@ fn genCall( .none, .unreach => {}, .indirect => |reg_off| { const ret_ty = Type.fromInterned(fn_info.return_type); - const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(ret_ty, zcu)); - try self.genSetReg(Type.usize, reg_off.reg, .{ + const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(ret_ty, zcu)); + try func.genSetReg(Type.usize, reg_off.reg, .{ .lea_frame = .{ .index = frame_index, .off = -reg_off.off }, }); call_info.return_value.short = .{ .load_frame = .{ .index = frame_index } }; - try reg_locks.append(self.register_manager.lockReg(reg_off.reg)); + try reg_locks.append(func.register_manager.lockReg(reg_off.reg)); }, else => unreachable, } @@ -4010,16 +4004,16 @@ fn genCall( for (call_info.args, arg_tys, args, frame_indices) |dst_arg, arg_ty, src_arg, frame_index| { switch (dst_arg) { .none, .load_frame => {}, - .register_pair => try self.genCopy(arg_ty, dst_arg, src_arg), - .register => |dst_reg| try self.genSetReg( + .register_pair => try func.genCopy(arg_ty, dst_arg, 
src_arg), + .register => |dst_reg| try func.genSetReg( arg_ty, dst_reg, src_arg, ), - .indirect => |reg_off| try self.genSetReg(Type.usize, reg_off.reg, .{ + .indirect => |reg_off| try func.genSetReg(Type.usize, reg_off.reg, .{ .lea_frame = .{ .index = frame_index, .off = -reg_off.off }, }), - else => return self.fail("TODO: genCall actual set {s}", .{@tagName(dst_arg)}), + else => return func.fail("TODO: genCall actual set {s}", .{@tagName(dst_arg)}), } } @@ -4027,7 +4021,7 @@ fn genCall( // on linking. switch (info) { .air => |callee| { - if (try self.air.value(callee, zcu)) |func_value| { + if (try func.air.value(callee, zcu)) |func_value| { const func_key = zcu.intern_pool.indexToKey(func_value.ip_index); switch (switch (func_key) { else => func_key, @@ -4036,19 +4030,19 @@ fn genCall( else => func_key, } else func_key, }) { - .func => |func| { - if (self.bin_file.cast(link.File.Elf)) |elf_file| { - const sym_index = try elf_file.zigObjectPtr().?.getOrCreateMetadataForDecl(elf_file, func.owner_decl); + .func => |func_val| { + if (func.bin_file.cast(link.File.Elf)) |elf_file| { + const sym_index = try elf_file.zigObjectPtr().?.getOrCreateMetadataForDecl(elf_file, func_val.owner_decl); const sym = elf_file.symbol(sym_index); - if (self.mod.pic) { - return self.fail("TODO: genCall pic", .{}); + if (func.mod.pic) { + return func.fail("TODO: genCall pic", .{}); } else { _ = try sym.getOrCreateZigGotEntry(sym_index, elf_file); const got_addr = sym.zigGotAddress(elf_file); - try self.genSetReg(Type.usize, .ra, .{ .memory = @intCast(got_addr) }); + try func.genSetReg(Type.usize, .ra, .{ .memory = @intCast(got_addr) }); - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = .jalr, .ops = .rri, .data = .{ .i_type = .{ @@ -4064,10 +4058,10 @@ fn genCall( const owner_decl = zcu.declPtr(extern_func.decl); const lib_name = extern_func.lib_name.toSlice(&zcu.intern_pool); const decl_name = owner_decl.name.toSlice(&zcu.intern_pool); - const atom_index = try self.symbolIndex(); + const atom_index = try func.symbolIndex(); - if (self.bin_file.cast(link.File.Elf)) |elf_file| { - _ = try self.addInst(.{ + if (func.bin_file.cast(link.File.Elf)) |elf_file| { + _ = try func.addInst(.{ .tag = .pseudo, .ops = .pseudo_extern_fn_reloc, .data = .{ .reloc = .{ @@ -4077,15 +4071,15 @@ fn genCall( }); } else unreachable; // not a valid riscv64 format }, - else => return self.fail("TODO implement calling bitcasted functions", .{}), + else => return func.fail("TODO implement calling bitcasted functions", .{}), } } else { - assert(self.typeOf(callee).zigTypeTag(zcu) == .Pointer); - const addr_reg, const addr_lock = try self.allocReg(.int); - defer self.register_manager.unlockReg(addr_lock); - try self.genSetReg(Type.usize, addr_reg, .{ .air_ref = callee }); + assert(func.typeOf(callee).zigTypeTag(zcu) == .Pointer); + const addr_reg, const addr_lock = try func.allocReg(.int); + defer func.register_manager.unlockReg(addr_lock); + try func.genSetReg(Type.usize, addr_reg, .{ .air_ref = callee }); - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = .jalr, .ops = .rri, .data = .{ .i_type = .{ @@ -4096,15 +4090,15 @@ fn genCall( }); } }, - .lib => return self.fail("TODO: lib func calls", .{}), + .lib => return func.fail("TODO: lib func calls", .{}), } return call_info.return_value.short; } -fn airRet(self: *Self, inst: Air.Inst.Index, safety: bool) !void { - const zcu = self.bin_file.comp.module.?; - const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; +fn airRet(func: *Func, inst: 
Air.Inst.Index, safety: bool) !void { + const zcu = func.bin_file.comp.module.?; + const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; if (safety) { // safe @@ -4112,19 +4106,19 @@ fn airRet(self: *Self, inst: Air.Inst.Index, safety: bool) !void { // not safe } - const ret_ty = self.fn_type.fnReturnType(zcu); - switch (self.ret_mcv.short) { + const ret_ty = func.fn_type.fnReturnType(zcu); + switch (func.ret_mcv.short) { .none => {}, .register, .register_pair, - => try self.genCopy(ret_ty, self.ret_mcv.short, .{ .air_ref = un_op }), + => try func.genCopy(ret_ty, func.ret_mcv.short, .{ .air_ref = un_op }), .indirect => |reg_off| { - try self.register_manager.getReg(reg_off.reg, null); - const lock = self.register_manager.lockRegAssumeUnused(reg_off.reg); - defer self.register_manager.unlockReg(lock); + try func.register_manager.getReg(reg_off.reg, null); + const lock = func.register_manager.lockRegAssumeUnused(reg_off.reg); + defer func.register_manager.unlockReg(lock); - try self.genSetReg(Type.usize, reg_off.reg, self.ret_mcv.long); - try self.genSetMem( + try func.genSetReg(Type.usize, reg_off.reg, func.ret_mcv.long); + try func.genSetMem( .{ .reg = reg_off.reg }, reg_off.off, ret_ty, @@ -4134,52 +4128,52 @@ fn airRet(self: *Self, inst: Air.Inst.Index, safety: bool) !void { else => unreachable, } - self.ret_mcv.liveOut(self, inst); - try self.finishAir(inst, .unreach, .{ un_op, .none, .none }); + func.ret_mcv.liveOut(func, inst); + try func.finishAir(inst, .unreach, .{ un_op, .none, .none }); // Just add space for an instruction, reloced this later - const index = try self.addInst(.{ + const index = try func.addInst(.{ .tag = .pseudo, .ops = .pseudo_j, .data = .{ .inst = undefined }, }); - try self.exitlude_jump_relocs.append(self.gpa, index); + try func.exitlude_jump_relocs.append(func.gpa, index); } -fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { - const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const ptr = try self.resolveInst(un_op); +fn airRetLoad(func: *Func, inst: Air.Inst.Index) !void { + const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; + const ptr = try func.resolveInst(un_op); - const ptr_ty = self.typeOf(un_op); - switch (self.ret_mcv.short) { + const ptr_ty = func.typeOf(un_op); + switch (func.ret_mcv.short) { .none => {}, - .register, .register_pair => try self.load(self.ret_mcv.short, ptr, ptr_ty), - .indirect => |reg_off| try self.genSetReg(ptr_ty, reg_off.reg, ptr), + .register, .register_pair => try func.load(func.ret_mcv.short, ptr, ptr_ty), + .indirect => |reg_off| try func.genSetReg(ptr_ty, reg_off.reg, ptr), else => unreachable, } - self.ret_mcv.liveOut(self, inst); - try self.finishAir(inst, .unreach, .{ un_op, .none, .none }); + func.ret_mcv.liveOut(func, inst); + try func.finishAir(inst, .unreach, .{ un_op, .none, .none }); // Just add space for an instruction, reloced this later - const index = try self.addInst(.{ + const index = try func.addInst(.{ .tag = .pseudo, .ops = .pseudo_j, .data = .{ .inst = undefined }, }); - try self.exitlude_jump_relocs.append(self.gpa, index); + try func.exitlude_jump_relocs.append(func.gpa, index); } -fn airCmp(self: *Self, inst: Air.Inst.Index) !void { - const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)]; - const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const zcu = self.bin_file.comp.module.?; +fn airCmp(func: *Func, inst: Air.Inst.Index) !void { + const tag = 
func.air.instructions.items(.tag)[@intFromEnum(inst)]; + const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; + const zcu = func.bin_file.comp.module.?; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { - const lhs = try self.resolveInst(bin_op.lhs); - const rhs = try self.resolveInst(bin_op.rhs); - const lhs_ty = self.typeOf(bin_op.lhs); + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { + const lhs = try func.resolveInst(bin_op.lhs); + const rhs = try func.resolveInst(bin_op.rhs); + const lhs_ty = func.typeOf(bin_op.lhs); switch (lhs_ty.zigTypeTag(zcu)) { .Int, @@ -4202,7 +4196,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index) !void { } else if (lhs_ty.isPtrLikeOptional(zcu)) { break :blk Type.usize; } else { - return self.fail("TODO riscv cmp non-pointer optionals", .{}); + return func.fail("TODO riscv cmp non-pointer optionals", .{}); } }, else => unreachable, @@ -4210,43 +4204,43 @@ fn airCmp(self: *Self, inst: Air.Inst.Index) !void { const int_info = int_ty.intInfo(zcu); if (int_info.bits <= 64) { - break :result try self.binOp(tag, lhs, int_ty, rhs, int_ty); + break :result try func.binOp(tag, lhs, int_ty, rhs, int_ty); } else { - return self.fail("TODO riscv cmp for ints > 64 bits", .{}); + return func.fail("TODO riscv cmp for ints > 64 bits", .{}); } }, .Float => { - const float_bits = lhs_ty.floatBits(self.target.*); - const float_reg_size: u32 = if (self.hasFeature(.d)) 64 else 32; + const float_bits = lhs_ty.floatBits(func.target.*); + const float_reg_size: u32 = if (func.hasFeature(.d)) 64 else 32; if (float_bits > float_reg_size) { - return self.fail("TODO: airCmp float > 64/32 bits", .{}); + return func.fail("TODO: airCmp float > 64/32 bits", .{}); } - break :result try self.binOpFloat(tag, lhs, lhs_ty, rhs, lhs_ty); + break :result try func.binOpFloat(tag, lhs, lhs_ty, rhs, lhs_ty); }, else => unreachable, } }; - return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); + return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } -fn airCmpVector(self: *Self, inst: Air.Inst.Index) !void { +fn airCmpVector(func: *Func, inst: Air.Inst.Index) !void { _ = inst; - return self.fail("TODO implement airCmpVector for {}", .{self.target.cpu.arch}); + return func.fail("TODO implement airCmpVector for {}", .{func.target.cpu.arch}); } -fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void { - const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const operand = try self.resolveInst(un_op); +fn airCmpLtErrorsLen(func: *Func, inst: Air.Inst.Index) !void { + const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; + const operand = try func.resolveInst(un_op); _ = operand; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement airCmpLtErrorsLen for {}", .{self.target.cpu.arch}); - return self.finishAir(inst, result, .{ un_op, .none, .none }); + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement airCmpLtErrorsLen for {}", .{func.target.cpu.arch}); + return func.finishAir(inst, result, .{ un_op, .none, .none }); } -fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void { - const dbg_stmt = self.air.instructions.items(.data)[@intFromEnum(inst)].dbg_stmt; +fn airDbgStmt(func: *Func, inst: Air.Inst.Index) !void { + const dbg_stmt = func.air.instructions.items(.data)[@intFromEnum(inst)].dbg_stmt; - _ = try 
self.addInst(.{ + _ = try func.addInst(.{ .tag = .pseudo, .ops = .pseudo_dbg_line_column, .data = .{ .pseudo_dbg_line_column = .{ @@ -4255,44 +4249,44 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void { } }, }); - return self.finishAirBookkeeping(); + return func.finishAirBookkeeping(); } -fn airDbgInlineBlock(self: *Self, inst: Air.Inst.Index) !void { - const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; - const extra = self.air.extraData(Air.DbgInlineBlock, ty_pl.payload); - try self.lowerBlock(inst, @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len])); +fn airDbgInlineBlock(func: *Func, inst: Air.Inst.Index) !void { + const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; + const extra = func.air.extraData(Air.DbgInlineBlock, ty_pl.payload); + try func.lowerBlock(inst, @ptrCast(func.air.extra[extra.end..][0..extra.data.body_len])); } -fn airDbgVar(self: *Self, inst: Air.Inst.Index) !void { - const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; +fn airDbgVar(func: *Func, inst: Air.Inst.Index) !void { + const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const operand = pl_op.operand; - const ty = self.typeOf(operand); - const mcv = try self.resolveInst(operand); + const ty = func.typeOf(operand); + const mcv = try func.resolveInst(operand); - const name = self.air.nullTerminatedString(pl_op.payload); + const name = func.air.nullTerminatedString(pl_op.payload); - const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)]; - try self.genVarDbgInfo(tag, ty, mcv, name); + const tag = func.air.instructions.items(.tag)[@intFromEnum(inst)]; + try func.genVarDbgInfo(tag, ty, mcv, name); - return self.finishAir(inst, .unreach, .{ operand, .none, .none }); + return func.finishAir(inst, .unreach, .{ operand, .none, .none }); } fn genVarDbgInfo( - self: Self, + func: Func, tag: Air.Inst.Tag, ty: Type, mcv: MCValue, name: [:0]const u8, ) !void { - const zcu = self.bin_file.comp.module.?; + const zcu = func.bin_file.comp.module.?; const is_ptr = switch (tag) { .dbg_var_ptr => true, .dbg_var_val => false, else => unreachable, }; - switch (self.debug_output) { + switch (func.debug_output) { .dwarf => |dw| { const loc: link.File.Dwarf.DeclState.DbgInfoLoc = switch (mcv) { .register => |reg| .{ .register = reg.dwarfLocOp() }, @@ -4309,47 +4303,47 @@ fn genVarDbgInfo( break :blk .nop; }, }; - try dw.genVarDbgInfo(name, ty, zcu.funcOwnerDeclIndex(self.func_index), is_ptr, loc); + try dw.genVarDbgInfo(name, ty, zcu.funcOwnerDeclIndex(func.func_index), is_ptr, loc); }, .plan9 => {}, .none => {}, } } -fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { - const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; - const cond = try self.resolveInst(pl_op.operand); - const cond_ty = self.typeOf(pl_op.operand); - const extra = self.air.extraData(Air.CondBr, pl_op.payload); - const then_body: []const Air.Inst.Index = @ptrCast(self.air.extra[extra.end..][0..extra.data.then_body_len]); - const else_body: []const Air.Inst.Index = @ptrCast(self.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]); - const liveness_cond_br = self.liveness.getCondBr(inst); +fn airCondBr(func: *Func, inst: Air.Inst.Index) !void { + const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; + const cond = try func.resolveInst(pl_op.operand); + const cond_ty = func.typeOf(pl_op.operand); + const extra = func.air.extraData(Air.CondBr, pl_op.payload); + const then_body: 
[]const Air.Inst.Index = @ptrCast(func.air.extra[extra.end..][0..extra.data.then_body_len]); + const else_body: []const Air.Inst.Index = @ptrCast(func.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]); + const liveness_cond_br = func.liveness.getCondBr(inst); // If the condition dies here in this condbr instruction, process // that death now instead of later as this has an effect on // whether it needs to be spilled in the branches - if (self.liveness.operandDies(inst, 0)) { - if (pl_op.operand.toIndex()) |op_inst| try self.processDeath(op_inst); + if (func.liveness.operandDies(inst, 0)) { + if (pl_op.operand.toIndex()) |op_inst| try func.processDeath(op_inst); } - self.scope_generation += 1; - const state = try self.saveState(); - const reloc = try self.condBr(cond_ty, cond); + func.scope_generation += 1; + const state = try func.saveState(); + const reloc = try func.condBr(cond_ty, cond); - for (liveness_cond_br.then_deaths) |death| try self.processDeath(death); - try self.genBody(then_body); - try self.restoreState(state, &.{}, .{ + for (liveness_cond_br.then_deaths) |death| try func.processDeath(death); + try func.genBody(then_body); + try func.restoreState(state, &.{}, .{ .emit_instructions = false, .update_tracking = true, .resurrect = true, .close_scope = true, }); - self.performReloc(reloc); + func.performReloc(reloc); - for (liveness_cond_br.else_deaths) |death| try self.processDeath(death); - try self.genBody(else_body); - try self.restoreState(state, &.{}, .{ + for (liveness_cond_br.else_deaths) |death| try func.processDeath(death); + try func.genBody(else_body); + try func.restoreState(state, &.{}, .{ .emit_instructions = false, .update_tracking = true, .resurrect = true, @@ -4357,13 +4351,13 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { }); // We already took care of pl_op.operand earlier, so there's nothing left to do. 
- self.finishAirBookkeeping(); + func.finishAirBookkeeping(); } -fn condBr(self: *Self, cond_ty: Type, condition: MCValue) !Mir.Inst.Index { - const cond_reg = try self.copyToTmpRegister(cond_ty, condition); +fn condBr(func: *Func, cond_ty: Type, condition: MCValue) !Mir.Inst.Index { + const cond_reg = try func.copyToTmpRegister(cond_ty, condition); - return try self.addInst(.{ + return try func.addInst(.{ .tag = .beq, .ops = .rr_inst, .data = .{ @@ -4376,111 +4370,111 @@ fn condBr(self: *Self, cond_ty: Type, condition: MCValue) !Mir.Inst.Index { }); } -fn airIsNull(self: *Self, inst: Air.Inst.Index) !void { - const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { - const operand = try self.resolveInst(un_op); - break :result try self.isNull(operand); +fn airIsNull(func: *Func, inst: Air.Inst.Index) !void { + const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { + const operand = try func.resolveInst(un_op); + break :result try func.isNull(operand); }; - return self.finishAir(inst, result, .{ un_op, .none, .none }); + return func.finishAir(inst, result, .{ un_op, .none, .none }); } -fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void { - const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { - const operand_ptr = try self.resolveInst(un_op); +fn airIsNullPtr(func: *Func, inst: Air.Inst.Index) !void { + const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { + const operand_ptr = try func.resolveInst(un_op); const operand: MCValue = blk: { - if (self.reuseOperand(inst, un_op, 0, operand_ptr)) { + if (func.reuseOperand(inst, un_op, 0, operand_ptr)) { // The MCValue that holds the pointer can be re-used as the value. break :blk operand_ptr; } else { - break :blk try self.allocRegOrMem(inst, true); + break :blk try func.allocRegOrMem(inst, true); } }; - try self.load(operand, operand_ptr, self.typeOf(un_op)); - break :result try self.isNull(operand); + try func.load(operand, operand_ptr, func.typeOf(un_op)); + break :result try func.isNull(operand); }; - return self.finishAir(inst, result, .{ un_op, .none, .none }); + return func.finishAir(inst, result, .{ un_op, .none, .none }); } -fn isNull(self: *Self, operand: MCValue) !MCValue { +fn isNull(func: *Func, operand: MCValue) !MCValue { _ = operand; // Here you can specialize this instruction if it makes sense to, otherwise the default // will call isNonNull and invert the result. 
- return self.fail("TODO call isNonNull and invert the result", .{}); + return func.fail("TODO call isNonNull and invert the result", .{}); } -fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void { - const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { - const operand = try self.resolveInst(un_op); - break :result try self.isNonNull(operand); +fn airIsNonNull(func: *Func, inst: Air.Inst.Index) !void { + const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { + const operand = try func.resolveInst(un_op); + break :result try func.isNonNull(operand); }; - return self.finishAir(inst, result, .{ un_op, .none, .none }); + return func.finishAir(inst, result, .{ un_op, .none, .none }); } -fn isNonNull(self: *Self, operand: MCValue) !MCValue { +fn isNonNull(func: *Func, operand: MCValue) !MCValue { _ = operand; // Here you can specialize this instruction if it makes sense to, otherwise the default // will call isNull and invert the result. - return self.fail("TODO call isNull and invert the result", .{}); + return func.fail("TODO call isNull and invert the result", .{}); } -fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void { - const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { - const operand_ptr = try self.resolveInst(un_op); +fn airIsNonNullPtr(func: *Func, inst: Air.Inst.Index) !void { + const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { + const operand_ptr = try func.resolveInst(un_op); const operand: MCValue = blk: { - if (self.reuseOperand(inst, un_op, 0, operand_ptr)) { + if (func.reuseOperand(inst, un_op, 0, operand_ptr)) { // The MCValue that holds the pointer can be re-used as the value. 
break :blk operand_ptr; } else { - break :blk try self.allocRegOrMem(inst, true); + break :blk try func.allocRegOrMem(inst, true); } }; - try self.load(operand, operand_ptr, self.typeOf(un_op)); - break :result try self.isNonNull(operand); + try func.load(operand, operand_ptr, func.typeOf(un_op)); + break :result try func.isNonNull(operand); }; - return self.finishAir(inst, result, .{ un_op, .none, .none }); + return func.finishAir(inst, result, .{ un_op, .none, .none }); } -fn airIsErr(self: *Self, inst: Air.Inst.Index) !void { - const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { - const operand = try self.resolveInst(un_op); - const operand_ty = self.typeOf(un_op); - break :result try self.isErr(inst, operand_ty, operand); +fn airIsErr(func: *Func, inst: Air.Inst.Index) !void { + const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { + const operand = try func.resolveInst(un_op); + const operand_ty = func.typeOf(un_op); + break :result try func.isErr(inst, operand_ty, operand); }; - return self.finishAir(inst, result, .{ un_op, .none, .none }); + return func.finishAir(inst, result, .{ un_op, .none, .none }); } -fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void { - const zcu = self.bin_file.comp.module.?; - const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { - const operand_ptr = try self.resolveInst(un_op); +fn airIsErrPtr(func: *Func, inst: Air.Inst.Index) !void { + const zcu = func.bin_file.comp.module.?; + const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { + const operand_ptr = try func.resolveInst(un_op); const operand: MCValue = blk: { - if (self.reuseOperand(inst, un_op, 0, operand_ptr)) { + if (func.reuseOperand(inst, un_op, 0, operand_ptr)) { // The MCValue that holds the pointer can be re-used as the value. break :blk operand_ptr; } else { - break :blk try self.allocRegOrMem(inst, true); + break :blk try func.allocRegOrMem(inst, true); } }; - try self.load(operand, operand_ptr, self.typeOf(un_op)); - const operand_ptr_ty = self.typeOf(un_op); + try func.load(operand, operand_ptr, func.typeOf(un_op)); + const operand_ptr_ty = func.typeOf(un_op); const operand_ty = operand_ptr_ty.childType(zcu); - break :result try self.isErr(inst, operand_ty, operand); + break :result try func.isErr(inst, operand_ty, operand); }; - return self.finishAir(inst, result, .{ un_op, .none, .none }); + return func.finishAir(inst, result, .{ un_op, .none, .none }); } /// Generates a compare instruction which will indicate if `eu_mcv` is an error. /// /// Result is in the return register. 
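+/// (If the error set is statically empty, no compare is emitted and the
+/// result is the comptime-known `.immediate = 0`, i.e. "not an error".)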
-fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, eu_ty: Type, eu_mcv: MCValue) !MCValue { - const zcu = self.bin_file.comp.module.?; +fn isErr(func: *Func, maybe_inst: ?Air.Inst.Index, eu_ty: Type, eu_mcv: MCValue) !MCValue { + const zcu = func.bin_file.comp.module.?; const err_ty = eu_ty.errorUnionSet(zcu); if (err_ty.errorSetIsEmpty(zcu)) return MCValue{ .immediate = 0 }; // always false @@ -4490,17 +4484,17 @@ fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, eu_ty: Type, eu_mcv: MCValue) switch (eu_mcv) { .register => |reg| { - const eu_lock = self.register_manager.lockReg(reg); - defer if (eu_lock) |lock| self.register_manager.unlockReg(lock); + const eu_lock = func.register_manager.lockReg(reg); + defer if (eu_lock) |lock| func.register_manager.unlockReg(lock); - const return_reg = try self.copyToTmpRegister(eu_ty, eu_mcv); - const return_lock = self.register_manager.lockRegAssumeUnused(return_reg); - defer self.register_manager.unlockReg(return_lock); + const return_reg = try func.copyToTmpRegister(eu_ty, eu_mcv); + const return_lock = func.register_manager.lockRegAssumeUnused(return_reg); + defer func.register_manager.unlockReg(return_lock); var return_mcv: MCValue = .{ .register = return_reg }; if (err_off > 0) { - return_mcv = try self.binOp( + return_mcv = try func.binOp( .shr, return_mcv, eu_ty, @@ -4509,7 +4503,7 @@ fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, eu_ty: Type, eu_mcv: MCValue) ); } - return try self.binOp( + return try func.binOp( .cmp_neq, return_mcv, Type.u16, @@ -4518,7 +4512,7 @@ fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, eu_ty: Type, eu_mcv: MCValue) ); }, .load_frame => |frame_addr| { - return self.binOp( + return func.binOp( .cmp_neq, .{ .load_frame = .{ .index = frame_addr.index, @@ -4529,25 +4523,25 @@ fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, eu_ty: Type, eu_mcv: MCValue) Type.anyerror, ); }, - else => return self.fail("TODO implement isErr for {}", .{eu_mcv}), + else => return func.fail("TODO implement isErr for {}", .{eu_mcv}), } } -fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void { - const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { - const operand = try self.resolveInst(un_op); - const ty = self.typeOf(un_op); - break :result try self.isNonErr(inst, ty, operand); +fn airIsNonErr(func: *Func, inst: Air.Inst.Index) !void { + const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { + const operand = try func.resolveInst(un_op); + const ty = func.typeOf(un_op); + break :result try func.isNonErr(inst, ty, operand); }; - return self.finishAir(inst, result, .{ un_op, .none, .none }); + return func.finishAir(inst, result, .{ un_op, .none, .none }); } -fn isNonErr(self: *Self, inst: Air.Inst.Index, eu_ty: Type, eu_mcv: MCValue) !MCValue { - const is_err_res = try self.isErr(inst, eu_ty, eu_mcv); +fn isNonErr(func: *Func, inst: Air.Inst.Index, eu_ty: Type, eu_mcv: MCValue) !MCValue { + const is_err_res = try func.isErr(inst, eu_ty, eu_mcv); switch (is_err_res) { .register => |reg| { - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = .pseudo, .ops = .pseudo_not, .data = .{ @@ -4568,53 +4562,53 @@ fn isNonErr(self: *Self, inst: Air.Inst.Index, eu_ty: Type, eu_mcv: MCValue) !MC } } -fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void { - const zcu = self.bin_file.comp.module.?; - const un_op = 
self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { - const operand_ptr = try self.resolveInst(un_op); +fn airIsNonErrPtr(func: *Func, inst: Air.Inst.Index) !void { + const zcu = func.bin_file.comp.module.?; + const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { + const operand_ptr = try func.resolveInst(un_op); const operand: MCValue = blk: { - if (self.reuseOperand(inst, un_op, 0, operand_ptr)) { + if (func.reuseOperand(inst, un_op, 0, operand_ptr)) { // The MCValue that holds the pointer can be re-used as the value. break :blk operand_ptr; } else { - break :blk try self.allocRegOrMem(inst, true); + break :blk try func.allocRegOrMem(inst, true); } }; - const operand_ptr_ty = self.typeOf(un_op); + const operand_ptr_ty = func.typeOf(un_op); const operand_ty = operand_ptr_ty.childType(zcu); - try self.load(operand, operand_ptr, self.typeOf(un_op)); - break :result try self.isNonErr(inst, operand_ty, operand); + try func.load(operand, operand_ptr, func.typeOf(un_op)); + break :result try func.isNonErr(inst, operand_ty, operand); }; - return self.finishAir(inst, result, .{ un_op, .none, .none }); + return func.finishAir(inst, result, .{ un_op, .none, .none }); } -fn airLoop(self: *Self, inst: Air.Inst.Index) !void { +fn airLoop(func: *Func, inst: Air.Inst.Index) !void { // A loop is a setup to be able to jump back to the beginning. - const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; - const loop = self.air.extraData(Air.Block, ty_pl.payload); - const body: []const Air.Inst.Index = @ptrCast(self.air.extra[loop.end..][0..loop.data.body_len]); + const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; + const loop = func.air.extraData(Air.Block, ty_pl.payload); + const body: []const Air.Inst.Index = @ptrCast(func.air.extra[loop.end..][0..loop.data.body_len]); - self.scope_generation += 1; - const state = try self.saveState(); + func.scope_generation += 1; + const state = try func.saveState(); - const jmp_target: Mir.Inst.Index = @intCast(self.mir_instructions.len); - try self.genBody(body); - try self.restoreState(state, &.{}, .{ + const jmp_target: Mir.Inst.Index = @intCast(func.mir_instructions.len); + try func.genBody(body); + try func.restoreState(state, &.{}, .{ .emit_instructions = true, .update_tracking = false, .resurrect = false, .close_scope = true, }); - _ = try self.jump(jmp_target); + _ = try func.jump(jmp_target); - self.finishAirBookkeeping(); + func.finishAirBookkeeping(); } -/// Send control flow to the `index` of `self.code`. -fn jump(self: *Self, index: Mir.Inst.Index) !Mir.Inst.Index { - return self.addInst(.{ +/// Send control flow to the `index` of `func.code`. 
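+/// The target index is already known to the caller (e.g. a loop back-edge),
+/// so no relocation is needed, unlike forward jumps patched via `performReloc`.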
+fn jump(func: *Func, index: Mir.Inst.Index) !Mir.Inst.Index { + return func.addInst(.{ .tag = .pseudo, .ops = .pseudo_j, .data = .{ @@ -4623,79 +4617,79 @@ fn jump(self: *Self, index: Mir.Inst.Index) !Mir.Inst.Index { }); } -fn airBlock(self: *Self, inst: Air.Inst.Index) !void { - const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; - const extra = self.air.extraData(Air.Block, ty_pl.payload); - try self.lowerBlock(inst, @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len])); +fn airBlock(func: *Func, inst: Air.Inst.Index) !void { + const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; + const extra = func.air.extraData(Air.Block, ty_pl.payload); + try func.lowerBlock(inst, @ptrCast(func.air.extra[extra.end..][0..extra.data.body_len])); } -fn lowerBlock(self: *Self, inst: Air.Inst.Index, body: []const Air.Inst.Index) !void { +fn lowerBlock(func: *Func, inst: Air.Inst.Index, body: []const Air.Inst.Index) !void { // A block is a setup to be able to jump to the end. - const inst_tracking_i = self.inst_tracking.count(); - self.inst_tracking.putAssumeCapacityNoClobber(inst, InstTracking.init(.unreach)); + const inst_tracking_i = func.inst_tracking.count(); + func.inst_tracking.putAssumeCapacityNoClobber(inst, InstTracking.init(.unreach)); - self.scope_generation += 1; - try self.blocks.putNoClobber(self.gpa, inst, .{ .state = self.initRetroactiveState() }); - const liveness = self.liveness.getBlock(inst); + func.scope_generation += 1; + try func.blocks.putNoClobber(func.gpa, inst, .{ .state = func.initRetroactiveState() }); + const liveness = func.liveness.getBlock(inst); // TODO emit debug info lexical block - try self.genBody(body); + try func.genBody(body); - var block_data = self.blocks.fetchRemove(inst).?; - defer block_data.value.deinit(self.gpa); + var block_data = func.blocks.fetchRemove(inst).?; + defer block_data.value.deinit(func.gpa); if (block_data.value.relocs.items.len > 0) { - try self.restoreState(block_data.value.state, liveness.deaths, .{ + try func.restoreState(block_data.value.state, liveness.deaths, .{ .emit_instructions = false, .update_tracking = true, .resurrect = true, .close_scope = true, }); - for (block_data.value.relocs.items) |reloc| self.performReloc(reloc); + for (block_data.value.relocs.items) |reloc| func.performReloc(reloc); } - if (std.debug.runtime_safety) assert(self.inst_tracking.getIndex(inst).? == inst_tracking_i); - const tracking = &self.inst_tracking.values()[inst_tracking_i]; - if (self.liveness.isUnused(inst)) try tracking.die(self, inst); - self.getValueIfFree(tracking.short, inst); - self.finishAirBookkeeping(); + if (std.debug.runtime_safety) assert(func.inst_tracking.getIndex(inst).? 
== inst_tracking_i); + const tracking = &func.inst_tracking.values()[inst_tracking_i]; + if (func.liveness.isUnused(inst)) try tracking.die(func, inst); + func.getValueIfFree(tracking.short, inst); + func.finishAirBookkeeping(); } -fn airSwitchBr(self: *Self, inst: Air.Inst.Index) !void { - const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; - const condition = try self.resolveInst(pl_op.operand); - const condition_ty = self.typeOf(pl_op.operand); - const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload); +fn airSwitchBr(func: *Func, inst: Air.Inst.Index) !void { + const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; + const condition = try func.resolveInst(pl_op.operand); + const condition_ty = func.typeOf(pl_op.operand); + const switch_br = func.air.extraData(Air.SwitchBr, pl_op.payload); var extra_index: usize = switch_br.end; var case_i: u32 = 0; - const liveness = try self.liveness.getSwitchBr(self.gpa, inst, switch_br.data.cases_len + 1); - defer self.gpa.free(liveness.deaths); + const liveness = try func.liveness.getSwitchBr(func.gpa, inst, switch_br.data.cases_len + 1); + defer func.gpa.free(liveness.deaths); // If the condition dies here in this switch instruction, process // that death now instead of later as this has an effect on // whether it needs to be spilled in the branches - if (self.liveness.operandDies(inst, 0)) { - if (pl_op.operand.toIndex()) |op_inst| try self.processDeath(op_inst); + if (func.liveness.operandDies(inst, 0)) { + if (pl_op.operand.toIndex()) |op_inst| try func.processDeath(op_inst); } - self.scope_generation += 1; - const state = try self.saveState(); + func.scope_generation += 1; + const state = try func.saveState(); while (case_i < switch_br.data.cases_len) : (case_i += 1) { - const case = self.air.extraData(Air.SwitchBr.Case, extra_index); + const case = func.air.extraData(Air.SwitchBr.Case, extra_index); const items: []const Air.Inst.Ref = - @ptrCast(self.air.extra[case.end..][0..case.data.items_len]); + @ptrCast(func.air.extra[case.end..][0..case.data.items_len]); const case_body: []const Air.Inst.Index = - @ptrCast(self.air.extra[case.end + items.len ..][0..case.data.body_len]); + @ptrCast(func.air.extra[case.end + items.len ..][0..case.data.body_len]); extra_index = case.end + items.len + case_body.len; - var relocs = try self.gpa.alloc(Mir.Inst.Index, items.len); - defer self.gpa.free(relocs); + var relocs = try func.gpa.alloc(Mir.Inst.Index, items.len); + defer func.gpa.free(relocs); for (items, relocs, 0..) 
|item, *reloc, i| { // switch branches must be comptime-known, so this is stored in an immediate - const item_mcv = try self.resolveInst(item); + const item_mcv = try func.resolveInst(item); - const cmp_mcv: MCValue = try self.binOp( + const cmp_mcv: MCValue = try func.binOp( .cmp_neq, condition, condition_ty, @@ -4703,10 +4697,10 @@ fn airSwitchBr(self: *Self, inst: Air.Inst.Index) !void { condition_ty, ); - const cmp_reg = try self.copyToTmpRegister(Type.bool, cmp_mcv); + const cmp_reg = try func.copyToTmpRegister(Type.bool, cmp_mcv); if (!(i < relocs.len - 1)) { - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = .pseudo, .ops = .pseudo_not, .data = .{ .rr = .{ @@ -4716,32 +4710,32 @@ fn airSwitchBr(self: *Self, inst: Air.Inst.Index) !void { }); } - reloc.* = try self.condBr(condition_ty, .{ .register = cmp_reg }); + reloc.* = try func.condBr(condition_ty, .{ .register = cmp_reg }); } - for (liveness.deaths[case_i]) |operand| try self.processDeath(operand); + for (liveness.deaths[case_i]) |operand| try func.processDeath(operand); - for (relocs[0 .. relocs.len - 1]) |reloc| self.performReloc(reloc); - try self.genBody(case_body); - try self.restoreState(state, &.{}, .{ + for (relocs[0 .. relocs.len - 1]) |reloc| func.performReloc(reloc); + try func.genBody(case_body); + try func.restoreState(state, &.{}, .{ .emit_instructions = false, .update_tracking = true, .resurrect = true, .close_scope = true, }); - self.performReloc(relocs[relocs.len - 1]); + func.performReloc(relocs[relocs.len - 1]); } if (switch_br.data.else_body_len > 0) { const else_body: []const Air.Inst.Index = - @ptrCast(self.air.extra[extra_index..][0..switch_br.data.else_body_len]); + @ptrCast(func.air.extra[extra_index..][0..switch_br.data.else_body_len]); const else_deaths = liveness.deaths.len - 1; - for (liveness.deaths[else_deaths]) |operand| try self.processDeath(operand); + for (liveness.deaths[else_deaths]) |operand| try func.processDeath(operand); - try self.genBody(else_body); - try self.restoreState(state, &.{}, .{ + try func.genBody(else_body); + try func.restoreState(state, &.{}, .{ .emit_instructions = false, .update_tracking = true, .resurrect = true, @@ -4750,71 +4744,71 @@ fn airSwitchBr(self: *Self, inst: Air.Inst.Index) !void { } // We already took care of pl_op.operand earlier, so there's nothing left to do - self.finishAirBookkeeping(); + func.finishAirBookkeeping(); } -fn performReloc(self: *Self, inst: Mir.Inst.Index) void { - const tag = self.mir_instructions.items(.tag)[inst]; - const ops = self.mir_instructions.items(.ops)[inst]; - const target: Mir.Inst.Index = @intCast(self.mir_instructions.len); +fn performReloc(func: *Func, inst: Mir.Inst.Index) void { + const tag = func.mir_instructions.items(.tag)[inst]; + const ops = func.mir_instructions.items(.ops)[inst]; + const target: Mir.Inst.Index = @intCast(func.mir_instructions.len); switch (tag) { .bne, .beq, - => self.mir_instructions.items(.data)[inst].b_type.inst = target, - .jal => self.mir_instructions.items(.data)[inst].j_type.inst = target, + => func.mir_instructions.items(.data)[inst].b_type.inst = target, + .jal => func.mir_instructions.items(.data)[inst].j_type.inst = target, .pseudo => switch (ops) { - .pseudo_j => self.mir_instructions.items(.data)[inst].inst = target, + .pseudo_j => func.mir_instructions.items(.data)[inst].inst = target, else => std.debug.panic("TODO: performReloc {s}", .{@tagName(ops)}), }, else => std.debug.panic("TODO: performReloc {s}", .{@tagName(tag)}), } } -fn airBr(self: *Self, inst: Air.Inst.Index) 
!void { - const mod = self.bin_file.comp.module.?; - const br = self.air.instructions.items(.data)[@intFromEnum(inst)].br; +fn airBr(func: *Func, inst: Air.Inst.Index) !void { + const mod = func.bin_file.comp.module.?; + const br = func.air.instructions.items(.data)[@intFromEnum(inst)].br; - const block_ty = self.typeOfIndex(br.block_inst); + const block_ty = func.typeOfIndex(br.block_inst); const block_unused = - !block_ty.hasRuntimeBitsIgnoreComptime(mod) or self.liveness.isUnused(br.block_inst); - const block_tracking = self.inst_tracking.getPtr(br.block_inst).?; - const block_data = self.blocks.getPtr(br.block_inst).?; + !block_ty.hasRuntimeBitsIgnoreComptime(mod) or func.liveness.isUnused(br.block_inst); + const block_tracking = func.inst_tracking.getPtr(br.block_inst).?; + const block_data = func.blocks.getPtr(br.block_inst).?; const first_br = block_data.relocs.items.len == 0; const block_result = result: { if (block_unused) break :result .none; - if (!first_br) try self.getValue(block_tracking.short, null); - const src_mcv = try self.resolveInst(br.operand); + if (!first_br) try func.getValue(block_tracking.short, null); + const src_mcv = try func.resolveInst(br.operand); - if (self.reuseOperandAdvanced(inst, br.operand, 0, src_mcv, br.block_inst)) { + if (func.reuseOperandAdvanced(inst, br.operand, 0, src_mcv, br.block_inst)) { if (first_br) break :result src_mcv; - try self.getValue(block_tracking.short, br.block_inst); + try func.getValue(block_tracking.short, br.block_inst); // .long = .none to avoid merging operand and block result stack frames. const current_tracking: InstTracking = .{ .long = .none, .short = src_mcv }; - try current_tracking.materializeUnsafe(self, br.block_inst, block_tracking.*); - for (current_tracking.getRegs()) |src_reg| self.register_manager.freeReg(src_reg); + try current_tracking.materializeUnsafe(func, br.block_inst, block_tracking.*); + for (current_tracking.getRegs()) |src_reg| func.register_manager.freeReg(src_reg); break :result block_tracking.short; } - const dst_mcv = if (first_br) try self.allocRegOrMem(br.block_inst, true) else dst: { - try self.getValue(block_tracking.short, br.block_inst); + const dst_mcv = if (first_br) try func.allocRegOrMem(br.block_inst, true) else dst: { + try func.getValue(block_tracking.short, br.block_inst); break :dst block_tracking.short; }; - try self.genCopy(block_ty, dst_mcv, try self.resolveInst(br.operand)); + try func.genCopy(block_ty, dst_mcv, try func.resolveInst(br.operand)); break :result dst_mcv; }; // Process operand death so that it is properly accounted for in the State below. - if (self.liveness.operandDies(inst, 0)) { - if (br.operand.toIndex()) |op_inst| try self.processDeath(op_inst); + if (func.liveness.operandDies(inst, 0)) { + if (br.operand.toIndex()) |op_inst| try func.processDeath(op_inst); } if (first_br) { block_tracking.* = InstTracking.init(block_result); - try self.saveRetroactiveState(&block_data.state); - } else try self.restoreState(block_data.state, &.{}, .{ + try func.saveRetroactiveState(&block_data.state); + } else try func.restoreState(block_data.state, &.{}, .{ .emit_instructions = true, .update_tracking = false, .resurrect = false, @@ -4823,35 +4817,35 @@ fn airBr(self: *Self, inst: Air.Inst.Index) !void { // Emit a jump with a relocation. It will be patched up after the block ends. 
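+    // (Mechanically, via `performReloc` above: when `lowerBlock` sees pending
+    // relocs on the block, each recorded `pseudo_j` gets its `data.inst`
+    // pointed at the MIR index just past the block's body.)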
// Leave the jump offset undefined - const jmp_reloc = try self.jump(undefined); - try block_data.relocs.append(self.gpa, jmp_reloc); + const jmp_reloc = try func.jump(undefined); + try block_data.relocs.append(func.gpa, jmp_reloc); // Stop tracking block result without forgetting tracking info - try self.freeValue(block_tracking.short); + try func.freeValue(block_tracking.short); - self.finishAirBookkeeping(); + func.finishAirBookkeeping(); } -fn airBoolOp(self: *Self, inst: Air.Inst.Index) !void { - const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const tag: Air.Inst.Tag = self.air.instructions.items(.tag)[@intFromEnum(inst)]; +fn airBoolOp(func: *Func, inst: Air.Inst.Index) !void { + const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; + const tag: Air.Inst.Tag = func.air.instructions.items(.tag)[@intFromEnum(inst)]; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { - const lhs = try self.resolveInst(bin_op.lhs); - const rhs = try self.resolveInst(bin_op.rhs); + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { + const lhs = try func.resolveInst(bin_op.lhs); + const rhs = try func.resolveInst(bin_op.rhs); const lhs_ty = Type.bool; const rhs_ty = Type.bool; - const lhs_reg, const lhs_lock = try self.promoteReg(lhs_ty, lhs, .{}); - defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock); + const lhs_reg, const lhs_lock = try func.promoteReg(lhs_ty, lhs, .{}); + defer if (lhs_lock) |lock| func.register_manager.unlockReg(lock); - const rhs_reg, const rhs_lock = try self.promoteReg(rhs_ty, rhs, .{}); - defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock); + const rhs_reg, const rhs_lock = try func.promoteReg(rhs_ty, rhs, .{}); + defer if (rhs_lock) |lock| func.register_manager.unlockReg(lock); - const result_reg, const result_lock = try self.allocReg(.int); - defer self.register_manager.unlockReg(result_lock); + const result_reg, const result_lock = try func.allocReg(.int); + defer func.register_manager.unlockReg(result_lock); - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = if (tag == .bool_or) .@"or" else .@"and", .ops = .rrr, .data = .{ .r_type = .{ @@ -4862,8 +4856,8 @@ fn airBoolOp(self: *Self, inst: Air.Inst.Index) !void { }); // safety truncate - if (self.wantSafety()) { - _ = try self.addInst(.{ + if (func.wantSafety()) { + _ = try func.addInst(.{ .tag = .andi, .ops = .rri, .data = .{ .i_type = .{ @@ -4876,35 +4870,35 @@ fn airBoolOp(self: *Self, inst: Air.Inst.Index) !void { break :result .{ .register = result_reg }; }; - return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); + return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } -fn airAsm(self: *Self, inst: Air.Inst.Index) !void { - const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; - const extra = self.air.extraData(Air.Asm, ty_pl.payload); +fn airAsm(func: *Func, inst: Air.Inst.Index) !void { + const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; + const extra = func.air.extraData(Air.Asm, ty_pl.payload); const is_volatile = @as(u1, @truncate(extra.data.flags >> 31)) != 0; const clobbers_len: u31 = @truncate(extra.data.flags); var extra_i: usize = extra.end; const outputs: []const Air.Inst.Ref = - @ptrCast(self.air.extra[extra_i..][0..extra.data.outputs_len]); + @ptrCast(func.air.extra[extra_i..][0..extra.data.outputs_len]); extra_i += outputs.len; - const inputs: []const Air.Inst.Ref = 
@ptrCast(self.air.extra[extra_i..][0..extra.data.inputs_len]); + const inputs: []const Air.Inst.Ref = @ptrCast(func.air.extra[extra_i..][0..extra.data.inputs_len]); extra_i += inputs.len; log.debug("airAsm input: {any}", .{inputs}); - const dead = !is_volatile and self.liveness.isUnused(inst); + const dead = !is_volatile and func.liveness.isUnused(inst); const result: MCValue = if (dead) .unreach else result: { if (outputs.len > 1) { - return self.fail("TODO implement codegen for asm with more than 1 output", .{}); + return func.fail("TODO implement codegen for asm with more than 1 output", .{}); } const output_constraint: ?[]const u8 = for (outputs) |output| { if (output != .none) { - return self.fail("TODO implement codegen for non-expr asm", .{}); + return func.fail("TODO implement codegen for non-expr asm", .{}); } - const extra_bytes = std.mem.sliceAsBytes(self.air.extra[extra_i..]); - const constraint = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra[extra_i..]), 0); + const extra_bytes = std.mem.sliceAsBytes(func.air.extra[extra_i..]); + const constraint = std.mem.sliceTo(std.mem.sliceAsBytes(func.air.extra[extra_i..]), 0); const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0); // This equation accounts for the fact that even if we have exactly 4 bytes // for the string, we still use the next u32 for the null terminator. @@ -4914,7 +4908,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { } else null; for (inputs) |input| { - const input_bytes = std.mem.sliceAsBytes(self.air.extra[extra_i..]); + const input_bytes = std.mem.sliceAsBytes(func.air.extra[extra_i..]); const constraint = std.mem.sliceTo(input_bytes, 0); const name = std.mem.sliceTo(input_bytes[constraint.len + 1 ..], 0); // This equation accounts for the fact that even if we have exactly 4 bytes @@ -4922,21 +4916,21 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { extra_i += (constraint.len + name.len + (2 + 3)) / 4; if (constraint.len < 3 or constraint[0] != '{' or constraint[constraint.len - 1] != '}') { - return self.fail("unrecognized asm input constraint: '{s}'", .{constraint}); + return func.fail("unrecognized asm input constraint: '{s}'", .{constraint}); } const reg_name = constraint[1 .. constraint.len - 1]; const reg = parseRegName(reg_name) orelse - return self.fail("unrecognized register: '{s}'", .{reg_name}); + return func.fail("unrecognized register: '{s}'", .{reg_name}); - const arg_mcv = try self.resolveInst(input); - try self.register_manager.getReg(reg, null); - try self.genSetReg(self.typeOf(input), reg, arg_mcv); + const arg_mcv = try func.resolveInst(input); + try func.register_manager.getReg(reg, null); + try func.genSetReg(func.typeOf(input), reg, arg_mcv); } { var clobber_i: u32 = 0; while (clobber_i < clobbers_len) : (clobber_i += 1) { - const clobber = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra[extra_i..]), 0); + const clobber = std.mem.sliceTo(std.mem.sliceAsBytes(func.air.extra[extra_i..]), 0); // This equation accounts for the fact that even if we have exactly 4 bytes // for the string, we still use the next u32 for the null terminator. 
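+                // Worked example: a 4-byte clobber string takes 4 / 4 + 1 = 2
+                // u32s (one for the text, one holding only the NUL), while a
+                // 3-byte string fits text plus NUL in one u32 (3 / 4 + 1 = 1).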
extra_i += clobber.len / 4 + 1; @@ -4944,31 +4938,31 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { if (std.mem.eql(u8, clobber, "") or std.mem.eql(u8, clobber, "memory")) { // nothing really to do } else { - try self.register_manager.getReg(parseRegName(clobber) orelse - return self.fail("invalid clobber: '{s}'", .{clobber}), null); + try func.register_manager.getReg(parseRegName(clobber) orelse + return func.fail("invalid clobber: '{s}'", .{clobber}), null); } } } - const asm_source = std.mem.sliceAsBytes(self.air.extra[extra_i..])[0..extra.data.source_len]; + const asm_source = std.mem.sliceAsBytes(func.air.extra[extra_i..])[0..extra.data.source_len]; if (std.meta.stringToEnum(Mir.Inst.Tag, asm_source)) |tag| { - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = tag, .ops = .none, .data = undefined, }); } else { - return self.fail("TODO: asm_source {s}", .{asm_source}); + return func.fail("TODO: asm_source {s}", .{asm_source}); } if (output_constraint) |output| { if (output.len < 4 or output[0] != '=' or output[1] != '{' or output[output.len - 1] != '}') { - return self.fail("unrecognized asm output constraint: '{s}'", .{output}); + return func.fail("unrecognized asm output constraint: '{s}'", .{output}); } const reg_name = output[2 .. output.len - 1]; const reg = parseRegName(reg_name) orelse - return self.fail("unrecognized register: '{s}'", .{reg_name}); + return func.fail("unrecognized register: '{s}'", .{reg_name}); break :result .{ .register = reg }; } else { break :result .{ .none = {} }; @@ -4987,17 +4981,17 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { } if (buf_index + inputs.len > buf.len) break :simple; @memcpy(buf[buf_index..][0..inputs.len], inputs); - return self.finishAir(inst, result, buf); + return func.finishAir(inst, result, buf); } - var bt = self.liveness.iterateBigTomb(inst); - for (outputs) |output| if (output != .none) try self.feed(&bt, output); - for (inputs) |input| try self.feed(&bt, input); - return self.finishAirResult(inst, result); + var bt = func.liveness.iterateBigTomb(inst); + for (outputs) |output| if (output != .none) try func.feed(&bt, output); + for (inputs) |input| try func.feed(&bt, input); + return func.finishAirResult(inst, result); } /// Sets the value of `dst_mcv` to the value of `src_mcv`. 
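+/// (Overview of the dispatch below: `.register`/`.register_offset` go through
+/// `genSetReg`, `.indirect` and `.load_frame` go through `genSetMem`, and a
+/// `.register_pair` destination is split with `splitType`, each half loaded
+/// via `genSetReg` from the source at an increasing part offset.)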
-fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) !void { - const zcu = self.bin_file.comp.module.?; +fn genCopy(func: *Func, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) !void { + const zcu = func.bin_file.comp.module.?; // There isn't anything to store if (dst_mcv == .none) return; @@ -5008,8 +5002,8 @@ fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) !void { } switch (dst_mcv) { - .register => |reg| return self.genSetReg(ty, reg, src_mcv), - .register_offset => |dst_reg_off| try self.genSetReg(ty, dst_reg_off.reg, switch (src_mcv) { + .register => |reg| return func.genSetReg(ty, reg, src_mcv), + .register_offset => |dst_reg_off| try func.genSetReg(ty, dst_reg_off.reg, switch (src_mcv) { .none, .unreach, .dead, @@ -5020,49 +5014,49 @@ fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) !void { .register_offset, => src_mcv.offset(-dst_reg_off.off), else => .{ .register_offset = .{ - .reg = try self.copyToTmpRegister(ty, src_mcv), + .reg = try func.copyToTmpRegister(ty, src_mcv), .off = -dst_reg_off.off, } }, }), - .indirect => |reg_off| try self.genSetMem( + .indirect => |reg_off| try func.genSetMem( .{ .reg = reg_off.reg }, reg_off.off, ty, src_mcv, ), - .load_frame => |frame_addr| try self.genSetMem( + .load_frame => |frame_addr| try func.genSetMem( .{ .frame = frame_addr.index }, frame_addr.off, ty, src_mcv, ), - .memory => return self.fail("TODO: genCopy memory", .{}), + .memory => return func.fail("TODO: genCopy memory", .{}), .register_pair => |dst_regs| { const src_info: ?struct { addr_reg: Register, addr_lock: ?RegisterLock } = switch (src_mcv) { .register_pair, .memory, .indirect, .load_frame => null, .load_symbol => src: { - const src_addr_reg, const src_addr_lock = try self.promoteReg(Type.usize, src_mcv.address(), .{}); - errdefer self.register_manager.unlockReg(src_addr_lock); + const src_addr_reg, const src_addr_lock = try func.promoteReg(Type.usize, src_mcv.address(), .{}); + errdefer func.register_manager.unlockReg(src_addr_lock); break :src .{ .addr_reg = src_addr_reg, .addr_lock = src_addr_lock }; }, - .air_ref => |src_ref| return self.genCopy( + .air_ref => |src_ref| return func.genCopy( ty, dst_mcv, - try self.resolveInst(src_ref), + try func.resolveInst(src_ref), ), else => unreachable, }; defer if (src_info) |info| { if (info.addr_lock) |lock| { - self.register_manager.unlockReg(lock); + func.register_manager.unlockReg(lock); } }; var part_disp: i32 = 0; - for (dst_regs, try self.splitType(ty), 0..) |dst_reg, dst_ty, part_i| { - try self.genSetReg(dst_ty, dst_reg, switch (src_mcv) { + for (dst_regs, try func.splitType(ty), 0..) 
|dst_reg, dst_ty, part_i| { + try func.genSetReg(dst_ty, dst_reg, switch (src_mcv) { .register_pair => |src_regs| .{ .register = src_regs[part_i] }, .memory, .indirect, .load_frame => src_mcv.address().offset(part_disp).deref(), .load_symbol => .{ .indirect = .{ @@ -5074,31 +5068,31 @@ fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) !void { part_disp += @intCast(dst_ty.abiSize(zcu)); } }, - else => return self.fail("TODO: genCopy to {s} from {s}", .{ @tagName(dst_mcv), @tagName(src_mcv) }), + else => return func.fail("TODO: genCopy to {s} from {s}", .{ @tagName(dst_mcv), @tagName(src_mcv) }), } } fn genInlineMemcpy( - self: *Self, + func: *Func, dst_ptr: MCValue, src_ptr: MCValue, len: MCValue, ) !void { - const regs = try self.register_manager.allocRegs(4, .{null} ** 4, abi.Registers.Integer.temporary); - const locks = self.register_manager.lockRegsAssumeUnused(4, regs); - defer for (locks) |lock| self.register_manager.unlockReg(lock); + const regs = try func.register_manager.allocRegs(4, .{null} ** 4, abi.Registers.Integer.temporary); + const locks = func.register_manager.lockRegsAssumeUnused(4, regs); + defer for (locks) |lock| func.register_manager.unlockReg(lock); const count = regs[0]; const tmp = regs[1]; const src = regs[2]; const dst = regs[3]; - try self.genSetReg(Type.usize, count, len); - try self.genSetReg(Type.usize, src, src_ptr); - try self.genSetReg(Type.usize, dst, dst_ptr); + try func.genSetReg(Type.usize, count, len); + try func.genSetReg(Type.usize, src, src_ptr); + try func.genSetReg(Type.usize, dst, dst_ptr); // lb tmp, 0(src) - const first_inst = try self.addInst(.{ + const first_inst = try func.addInst(.{ .tag = .lb, .ops = .rri, .data = .{ @@ -5111,7 +5105,7 @@ fn genInlineMemcpy( }); // sb tmp, 0(dst) - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = .sb, .ops = .rri, .data = .{ @@ -5124,7 +5118,7 @@ fn genInlineMemcpy( }); // dec count by 1 - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = .addi, .ops = .rri, .data = .{ @@ -5137,12 +5131,12 @@ fn genInlineMemcpy( }); // branch if count is 0 - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = .beq, .ops = .rr_inst, .data = .{ .b_type = .{ - .inst = @intCast(self.mir_instructions.len + 4), // points after the last inst + .inst = @intCast(func.mir_instructions.len + 4), // points after the last inst .rs1 = count, .rs2 = .zero, }, @@ -5150,7 +5144,7 @@ fn genInlineMemcpy( }); // increment the pointers - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = .addi, .ops = .rri, .data = .{ @@ -5162,7 +5156,7 @@ fn genInlineMemcpy( }, }); - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = .addi, .ops = .rri, .data = .{ @@ -5175,7 +5169,7 @@ fn genInlineMemcpy( }); // jump back to start of loop - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = .pseudo, .ops = .pseudo_j, .data = .{ .inst = first_inst }, @@ -5183,25 +5177,25 @@ fn genInlineMemcpy( } fn genInlineMemset( - self: *Self, + func: *Func, dst_ptr: MCValue, src_value: MCValue, len: MCValue, ) !void { - const regs = try self.register_manager.allocRegs(3, .{null} ** 3, abi.Registers.Integer.temporary); - const locks = self.register_manager.lockRegsAssumeUnused(3, regs); - defer for (locks) |lock| self.register_manager.unlockReg(lock); + const regs = try func.register_manager.allocRegs(3, .{null} ** 3, abi.Registers.Integer.temporary); + const locks = func.register_manager.lockRegsAssumeUnused(3, regs); + defer for (locks) |lock| func.register_manager.unlockReg(lock); const count = regs[0]; const src 
= regs[1];
     const dst = regs[2];
 
-    try self.genSetReg(Type.usize, count, len);
-    try self.genSetReg(Type.usize, src, src_value);
-    try self.genSetReg(Type.usize, dst, dst_ptr);
+    try func.genSetReg(Type.usize, count, len);
+    try func.genSetReg(Type.usize, src, src_value);
+    try func.genSetReg(Type.usize, dst, dst_ptr);
 
     // sb src, 0(dst)
-    const first_inst = try self.addInst(.{
+    const first_inst = try func.addInst(.{
         .tag = .sb,
         .ops = .rri,
         .data = .{
@@ -5214,7 +5208,7 @@ fn genInlineMemset(
     });
 
     // dec count by 1
-    _ = try self.addInst(.{
+    _ = try func.addInst(.{
         .tag = .addi,
         .ops = .rri,
         .data = .{
@@ -5227,12 +5221,12 @@ fn genInlineMemset(
     });
 
     // branch if count is 0
-    _ = try self.addInst(.{
+    _ = try func.addInst(.{
         .tag = .beq,
         .ops = .rr_inst,
         .data = .{
             .b_type = .{
-                .inst = @intCast(self.mir_instructions.len + 4), // points after the last inst
+                .inst = @intCast(func.mir_instructions.len + 3), // points after the last inst
                 .rs1 = count,
                 .rs2 = .zero,
             },
@@ -5240,7 +5234,7 @@ fn genInlineMemset(
     });
 
     // increment the pointers
-    _ = try self.addInst(.{
+    _ = try func.addInst(.{
         .tag = .addi,
         .ops = .rri,
         .data = .{
@@ -5253,7 +5247,7 @@ fn genInlineMemset(
     });
 
     // jump back to start of loop
-    _ = try self.addInst(.{
+    _ = try func.addInst(.{
         .tag = .pseudo,
         .ops = .pseudo_j,
         .data = .{
@@ -5263,8 +5257,8 @@ fn genInlineMemset(
 }
 
 /// Sets the value of `src_mcv` into `reg`. Assumes you have a lock on it.
-fn genSetReg(self: *Self, ty: Type, reg: Register, src_mcv: MCValue) InnerError!void {
-    const zcu = self.bin_file.comp.module.?;
+fn genSetReg(func: *Func, ty: Type, reg: Register, src_mcv: MCValue) InnerError!void {
+    const zcu = func.bin_file.comp.module.?;
     const abi_size: u32 = @intCast(ty.abiSize(zcu));
 
     if (abi_size > 8) return std.debug.panic("tried to set reg with size {}", .{abi_size});
@@ -5275,17 +5269,17 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_mcv: MCValue) InnerError!
         .dead => unreachable,
         .unreach, .none => return, // Nothing to do.
         .undef => {
-            if (!self.wantSafety())
+            if (!func.wantSafety())
                 return; // The already existing value will do just fine.
             // Write the debug undefined value.
-            return self.genSetReg(ty, reg, .{ .immediate = 0xaaaaaaaaaaaaaaaa });
+            return func.genSetReg(ty, reg, .{ .immediate = 0xaaaaaaaaaaaaaaaa });
         },
         .immediate => |unsigned_x| {
             assert(dst_reg_class == .int);
 
             const x: i64 = @bitCast(unsigned_x);
             if (math.minInt(i12) <= x and x <= math.maxInt(i12)) {
-                _ = try self.addInst(.{
+                _ = try func.addInst(.{
                     .tag = .addi,
                     .ops = .rri,
                     .data = .{ .i_type = .{
@@ -5299,7 +5293,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_mcv: MCValue) InnerError!
                 const carry: i32 = if (lo12 < 0) 1 else 0;
                 const hi20: i20 = @truncate((x >> 12) +% carry);
 
-                _ = try self.addInst(.{
+                _ = try func.addInst(.{
                     .tag = .lui,
                     .ops = .ri,
                     .data = .{ .u_type = .{
                         .rd = reg,
                         .imm20 = Immediate.s(hi20),
                     } },
                 });
-                _ = try self.addInst(.{
+                _ = try func.addInst(.{
                     .tag = .addi,
                     .ops = .rri,
                     .data = .{ .i_type = .{
@@ -5320,17 +5314,17 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_mcv: MCValue) InnerError!
                 // TODO: use a more advanced myriad seq to do this without a reg. 
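+                // What follows is the straightforward fallback (judging by
+                // the surrounding code): materialize the two 32-bit halves
+                // separately, shift the high half left by 32, and add them;
+                // hi32 borrows one when lo32 is negative because the final
+                // add sees the low half sign-extended, the same trick as the
+                // lo12 carry above. LLVM's RISCVMatInt, linked below, has the
+                // shorter sequences this TODO refers to: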
// see: https://github.com/llvm/llvm-project/blob/081a66ffacfe85a37ff775addafcf3371e967328/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp#L224 - const temp, const temp_lock = try self.allocReg(.int); - defer self.register_manager.unlockReg(temp_lock); + const temp, const temp_lock = try func.allocReg(.int); + defer func.register_manager.unlockReg(temp_lock); const lo32: i32 = @truncate(x); const carry: i32 = if (lo32 < 0) 1 else 0; const hi32: i32 = @truncate((x >> 32) +% carry); - try self.genSetReg(Type.i32, temp, .{ .immediate = @bitCast(@as(i64, lo32)) }); - try self.genSetReg(Type.i32, reg, .{ .immediate = @bitCast(@as(i64, hi32)) }); + try func.genSetReg(Type.i32, temp, .{ .immediate = @bitCast(@as(i64, lo32)) }); + try func.genSetReg(Type.i32, reg, .{ .immediate = @bitCast(@as(i64, hi32)) }); - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = .slli, .ops = .rri, .data = .{ .i_type = .{ @@ -5340,7 +5334,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_mcv: MCValue) InnerError! } }, }); - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = .add, .ops = .rrr, .data = .{ .r_type = .{ @@ -5360,11 +5354,11 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_mcv: MCValue) InnerError! if (src_reg_class == .float and dst_reg_class == .int) { // to move from float -> int, we use FMV.X.W - return self.fail("TODO: genSetReg float -> int", .{}); + return func.fail("TODO: genSetReg float -> int", .{}); } // mv reg, src_reg - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = .pseudo, .ops = .pseudo_mv, .data = .{ .rr = .{ @@ -5373,9 +5367,9 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_mcv: MCValue) InnerError! } }, }); }, - .register_pair => return self.fail("genSetReg should we allow reg -> reg_pair?", .{}), + .register_pair => return func.fail("genSetReg should we allow reg -> reg_pair?", .{}), .load_frame => |frame| { - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = .pseudo, .ops = .pseudo_load_rm, .data = .{ .rm = .{ @@ -5384,7 +5378,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_mcv: MCValue) InnerError! .base = .{ .frame = frame.index }, .mod = .{ .rm = .{ - .size = self.memSize(ty), + .size = func.memSize(ty), .disp = frame.off, }, }, @@ -5393,9 +5387,9 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_mcv: MCValue) InnerError! }); }, .memory => |addr| { - try self.genSetReg(ty, reg, .{ .immediate = addr }); + try func.genSetReg(ty, reg, .{ .immediate = addr }); - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = .ld, .ops = .rri, .data = .{ .i_type = .{ @@ -5406,7 +5400,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_mcv: MCValue) InnerError! }); }, .lea_frame, .register_offset => { - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = .pseudo, .ops = .pseudo_lea_rm, .data = .{ .rm = .{ @@ -5416,7 +5410,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_mcv: MCValue) InnerError! .base = .{ .reg = reg_off.reg }, .mod = .{ .rm = .{ - .size = self.memSize(ty), + .size = func.memSize(ty), .disp = reg_off.off, }, }, @@ -5425,7 +5419,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_mcv: MCValue) InnerError! .base = .{ .frame = frame.index }, .mod = .{ .rm = .{ - .size = self.memSize(ty), + .size = func.memSize(ty), .disp = frame.off, }, }, @@ -5444,7 +5438,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_mcv: MCValue) InnerError! 
else .lb, 2 => if (float_class) - return self.fail("TODO: genSetReg indirect 16-bit float", .{}) + return func.fail("TODO: genSetReg indirect 16-bit float", .{}) else .lh, 4 => if (float_class) .flw else .lw, @@ -5452,7 +5446,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_mcv: MCValue) InnerError! else => return std.debug.panic("TODO: genSetReg for size {d}", .{abi_size}), }; - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = load_tag, .ops = .rri, .data = .{ .i_type = .{ @@ -5465,12 +5459,12 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_mcv: MCValue) InnerError! .lea_symbol => |sym_off| { assert(sym_off.off == 0); - const atom_index = try self.symbolIndex(); + const atom_index = try func.symbolIndex(); - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = .pseudo, .ops = .pseudo_load_symbol, - .data = .{ .payload = try self.addExtra(Mir.LoadSymbolPayload{ + .data = .{ .payload = try func.addExtra(Mir.LoadSymbolPayload{ .register = reg.encodeId(), .atom_index = atom_index, .sym_index = sym_off.sym, @@ -5478,25 +5472,25 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_mcv: MCValue) InnerError! }); }, .load_symbol => { - const addr_reg, const addr_lock = try self.allocReg(.int); - defer self.register_manager.unlockReg(addr_lock); + const addr_reg, const addr_lock = try func.allocReg(.int); + defer func.register_manager.unlockReg(addr_lock); - try self.genSetReg(ty, addr_reg, src_mcv.address()); - try self.genSetReg(ty, reg, .{ .indirect = .{ .reg = addr_reg } }); + try func.genSetReg(ty, addr_reg, src_mcv.address()); + try func.genSetReg(ty, reg, .{ .indirect = .{ .reg = addr_reg } }); }, - .air_ref => |ref| try self.genSetReg(ty, reg, try self.resolveInst(ref)), - else => return self.fail("TODO: genSetReg {s}", .{@tagName(src_mcv)}), + .air_ref => |ref| try func.genSetReg(ty, reg, try func.resolveInst(ref)), + else => return func.fail("TODO: genSetReg {s}", .{@tagName(src_mcv)}), } } fn genSetMem( - self: *Self, + func: *Func, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCValue, ) InnerError!void { - const mod = self.bin_file.comp.module.?; + const mod = func.bin_file.comp.module.?; const abi_size: u32 = @intCast(ty.abiSize(mod)); const dst_ptr_mcv: MCValue = switch (base) { .reg => |base_reg| .{ .register_offset = .{ .reg = base_reg, .off = disp } }, @@ -5509,7 +5503,7 @@ fn genSetMem( .dead, .reserved_frame, => unreachable, - .undef => try self.genInlineMemset( + .undef => try func.genInlineMemset( dst_ptr_mcv, src_mcv, .{ .immediate = abi_size }, @@ -5525,13 +5519,13 @@ fn genSetMem( 0 => {}, 1, 2, 4, 8 => { // no matter what type, it should use an integer register - const src_reg = try self.copyToTmpRegister(Type.usize, src_mcv); - const src_lock = self.register_manager.lockRegAssumeUnused(src_reg); - defer self.register_manager.unlockReg(src_lock); + const src_reg = try func.copyToTmpRegister(Type.usize, src_mcv); + const src_lock = func.register_manager.lockRegAssumeUnused(src_reg); + defer func.register_manager.unlockReg(src_lock); - try self.genSetMem(base, disp, ty, .{ .register = src_reg }); + try func.genSetMem(base, disp, ty, .{ .register = src_reg }); }, - else => try self.genInlineMemcpy( + else => try func.genInlineMemcpy( dst_ptr_mcv, src_mcv.address(), .{ .immediate = abi_size }, @@ -5541,8 +5535,8 @@ fn genSetMem( const mem_size = switch (base) { .frame => |base_fi| mem_size: { assert(disp >= 0); - const frame_abi_size = self.frame_allocs.items(.abi_size)[@intFromEnum(base_fi)]; - const frame_spill_pad = 
self.frame_allocs.items(.spill_pad)[@intFromEnum(base_fi)]; + const frame_abi_size = func.frame_allocs.items(.abi_size)[@intFromEnum(base_fi)]; + const frame_spill_pad = func.frame_allocs.items(.spill_pad)[@intFromEnum(base_fi)]; assert(frame_abi_size - frame_spill_pad - disp >= abi_size); break :mem_size if (frame_abi_size - frame_spill_pad - disp == abi_size) frame_abi_size @@ -5554,12 +5548,12 @@ fn genSetMem( const src_size = math.ceilPowerOfTwoAssert(u32, abi_size); const src_align = Alignment.fromNonzeroByteUnits(math.ceilPowerOfTwoAssert(u32, src_size)); if (src_size > mem_size) { - const frame_index = try self.allocFrameIndex(FrameAlloc.init(.{ + const frame_index = try func.allocFrameIndex(FrameAlloc.init(.{ .size = src_size, .alignment = src_align, })); const frame_mcv: MCValue = .{ .load_frame = .{ .index = frame_index } }; - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = .pseudo, .ops = .pseudo_store_rm, .data = .{ .rm = .{ @@ -5572,9 +5566,9 @@ fn genSetMem( }, } }, }); - try self.genSetMem(base, disp, ty, frame_mcv); - try self.freeValue(frame_mcv); - } else _ = try self.addInst(.{ + try func.genSetMem(base, disp, ty, frame_mcv); + try func.freeValue(frame_mcv); + } else _ = try func.addInst(.{ .tag = .pseudo, .ops = .pseudo_store_rm, .data = .{ .rm = .{ @@ -5582,7 +5576,7 @@ fn genSetMem( .m = .{ .base = base, .mod = .{ .rm = .{ - .size = self.memSize(ty), + .size = func.memSize(ty), .disp = disp, } }, }, @@ -5591,54 +5585,54 @@ fn genSetMem( }, .register_pair => |src_regs| { var part_disp: i32 = disp; - for (try self.splitType(ty), src_regs) |src_ty, src_reg| { - try self.genSetMem(base, part_disp, src_ty, .{ .register = src_reg }); + for (try func.splitType(ty), src_regs) |src_ty, src_reg| { + try func.genSetMem(base, part_disp, src_ty, .{ .register = src_reg }); part_disp += @intCast(src_ty.abiSize(mod)); } }, .immediate => { // TODO: remove this lock in favor of a copyToTmpRegister when we load 64 bit immediates with // a register allocation. 
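+            // (Until then, the immediate is promoted into a temporary
+            // register below and the store re-enters `genSetMem` through the
+            // `.register` path above.)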
- const reg, const reg_lock = try self.promoteReg(ty, src_mcv, .{}); - defer if (reg_lock) |lock| self.register_manager.unlockReg(lock); + const reg, const reg_lock = try func.promoteReg(ty, src_mcv, .{}); + defer if (reg_lock) |lock| func.register_manager.unlockReg(lock); - return self.genSetMem(base, disp, ty, .{ .register = reg }); + return func.genSetMem(base, disp, ty, .{ .register = reg }); }, - .air_ref => |src_ref| try self.genSetMem(base, disp, ty, try self.resolveInst(src_ref)), + .air_ref => |src_ref| try func.genSetMem(base, disp, ty, try func.resolveInst(src_ref)), } } -fn airIntFromPtr(self: *Self, inst: Air.Inst.Index) !void { - const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; +fn airIntFromPtr(func: *Func, inst: Air.Inst.Index) !void { + const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const result = result: { - const src_mcv = try self.resolveInst(un_op); - if (self.reuseOperand(inst, un_op, 0, src_mcv)) break :result src_mcv; + const src_mcv = try func.resolveInst(un_op); + if (func.reuseOperand(inst, un_op, 0, src_mcv)) break :result src_mcv; - const dst_mcv = try self.allocRegOrMem(inst, true); - const dst_ty = self.typeOfIndex(inst); - try self.genCopy(dst_ty, dst_mcv, src_mcv); + const dst_mcv = try func.allocRegOrMem(inst, true); + const dst_ty = func.typeOfIndex(inst); + try func.genCopy(dst_ty, dst_mcv, src_mcv); break :result dst_mcv; }; - return self.finishAir(inst, result, .{ un_op, .none, .none }); + return func.finishAir(inst, result, .{ un_op, .none, .none }); } -fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { - const zcu = self.bin_file.comp.module.?; +fn airBitCast(func: *Func, inst: Air.Inst.Index) !void { + const zcu = func.bin_file.comp.module.?; - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result = if (self.liveness.isUnused(inst)) .unreach else result: { - const src_mcv = try self.resolveInst(ty_op.operand); + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const result = if (func.liveness.isUnused(inst)) .unreach else result: { + const src_mcv = try func.resolveInst(ty_op.operand); - const dst_ty = self.typeOfIndex(inst); - const src_ty = self.typeOf(ty_op.operand); + const dst_ty = func.typeOfIndex(inst); + const src_ty = func.typeOf(ty_op.operand); - const src_lock = if (src_mcv.getReg()) |reg| self.register_manager.lockReg(reg) else null; - defer if (src_lock) |lock| self.register_manager.unlockReg(lock); + const src_lock = if (src_mcv.getReg()) |reg| func.register_manager.lockReg(reg) else null; + defer if (src_lock) |lock| func.register_manager.unlockReg(lock); const dst_mcv = if (dst_ty.abiSize(zcu) <= src_ty.abiSize(zcu) and - self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) src_mcv else dst: { - const dst_mcv = try self.allocRegOrMem(inst, true); - try self.genCopy(switch (math.order(dst_ty.abiSize(zcu), src_ty.abiSize(zcu))) { + func.reuseOperand(inst, ty_op.operand, 0, src_mcv)) src_mcv else dst: { + const dst_mcv = try func.allocRegOrMem(inst, true); + try func.genCopy(switch (math.order(dst_ty.abiSize(zcu), src_ty.abiSize(zcu))) { .lt => dst_ty, .eq => if (!dst_mcv.isMemory() or src_mcv.isMemory()) dst_ty else src_ty, .gt => src_ty, @@ -5653,83 +5647,83 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { const bit_size = dst_ty.bitSize(zcu); if (abi_size * 8 <= bit_size) break :result dst_mcv; - return self.fail("TODO: airBitCast {} to {}", .{ src_ty.fmt(zcu), dst_ty.fmt(zcu) }); + return 
func.fail("TODO: airBitCast {} to {}", .{ src_ty.fmt(zcu), dst_ty.fmt(zcu) }); }; - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement airArrayToSlice for {}", .{ - self.target.cpu.arch, +fn airArrayToSlice(func: *Func, inst: Air.Inst.Index) !void { + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement airArrayToSlice for {}", .{ + func.target.cpu.arch, }); - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airFloatFromInt(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement airFloatFromInt for {}", .{ - self.target.cpu.arch, +fn airFloatFromInt(func: *Func, inst: Air.Inst.Index) !void { + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement airFloatFromInt for {}", .{ + func.target.cpu.arch, }); - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airIntFromFloat(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement airIntFromFloat for {}", .{ - self.target.cpu.arch, +fn airIntFromFloat(func: *Func, inst: Air.Inst.Index) !void { + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement airIntFromFloat for {}", .{ + func.target.cpu.arch, }); - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airCmpxchg(self: *Self, inst: Air.Inst.Index) !void { - const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; - const extra = self.air.extraData(Air.Block, ty_pl.payload); +fn airCmpxchg(func: *Func, inst: Air.Inst.Index) !void { + const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; + const extra = func.air.extraData(Air.Block, ty_pl.payload); _ = extra; - return self.fail("TODO implement airCmpxchg for {}", .{ - self.target.cpu.arch, + return func.fail("TODO implement airCmpxchg for {}", .{ + func.target.cpu.arch, }); - // return self.finishAir(inst, result, .{ extra.ptr, extra.expected_value, extra.new_value }); + // return func.finishAir(inst, result, .{ extra.ptr, extra.expected_value, extra.new_value }); } -fn airAtomicRmw(self: *Self, inst: Air.Inst.Index) !void { +fn airAtomicRmw(func: *Func, inst: Air.Inst.Index) !void { _ = inst; - return self.fail("TODO implement airCmpxchg for {}", .{self.target.cpu.arch}); + return func.fail("TODO implement airCmpxchg for {}", .{func.target.cpu.arch}); } -fn 
airAtomicLoad(self: *Self, inst: Air.Inst.Index) !void { +fn airAtomicLoad(func: *Func, inst: Air.Inst.Index) !void { _ = inst; - return self.fail("TODO implement airAtomicLoad for {}", .{self.target.cpu.arch}); + return func.fail("TODO implement airAtomicLoad for {}", .{func.target.cpu.arch}); } -fn airAtomicStore(self: *Self, inst: Air.Inst.Index, order: std.builtin.AtomicOrder) !void { +fn airAtomicStore(func: *Func, inst: Air.Inst.Index, order: std.builtin.AtomicOrder) !void { _ = inst; _ = order; - return self.fail("TODO implement airAtomicStore for {}", .{self.target.cpu.arch}); + return func.fail("TODO implement airAtomicStore for {}", .{func.target.cpu.arch}); } -fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void { - const zcu = self.bin_file.comp.module.?; - const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; +fn airMemset(func: *Func, inst: Air.Inst.Index, safety: bool) !void { + const zcu = func.bin_file.comp.module.?; + const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; result: { - if (!safety and (try self.resolveInst(bin_op.rhs)) == .undef) break :result; + if (!safety and (try func.resolveInst(bin_op.rhs)) == .undef) break :result; - const dst_ptr = try self.resolveInst(bin_op.lhs); - const dst_ptr_ty = self.typeOf(bin_op.lhs); + const dst_ptr = try func.resolveInst(bin_op.lhs); + const dst_ptr_ty = func.typeOf(bin_op.lhs); const dst_ptr_lock: ?RegisterLock = switch (dst_ptr) { - .register => |reg| self.register_manager.lockRegAssumeUnused(reg), + .register => |reg| func.register_manager.lockRegAssumeUnused(reg), else => null, }; - defer if (dst_ptr_lock) |lock| self.register_manager.unlockReg(lock); + defer if (dst_ptr_lock) |lock| func.register_manager.unlockReg(lock); - const src_val = try self.resolveInst(bin_op.rhs); - const elem_ty = self.typeOf(bin_op.rhs); + const src_val = try func.resolveInst(bin_op.rhs); + const elem_ty = func.typeOf(bin_op.rhs); const src_val_lock: ?RegisterLock = switch (src_val) { - .register => |reg| self.register_manager.lockRegAssumeUnused(reg), + .register => |reg| func.register_manager.lockRegAssumeUnused(reg), else => null, }; - defer if (src_val_lock) |lock| self.register_manager.unlockReg(lock); + defer if (src_val_lock) |lock| func.register_manager.unlockReg(lock); const elem_abi_size: u31 = @intCast(elem_ty.abiSize(zcu)); @@ -5747,12 +5741,12 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void { .C, .Many => unreachable, }; const len_lock: ?RegisterLock = switch (len) { - .register => |reg| self.register_manager.lockRegAssumeUnused(reg), + .register => |reg| func.register_manager.lockRegAssumeUnused(reg), else => null, }; - defer if (len_lock) |lock| self.register_manager.unlockReg(lock); + defer if (len_lock) |lock| func.register_manager.unlockReg(lock); - try self.genInlineMemset(ptr, src_val, len); + try func.genInlineMemset(ptr, src_val, len); break :result; } @@ -5760,87 +5754,87 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void { // Length zero requires a runtime check - so we handle arrays specially // here to elide it. 
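+        // Sketch of the `.One` strategy below: store the first element, then
+        // inline-memcpy elem_abi_size * (len - 1) bytes from the array base
+        // to the second element. Since `genInlineMemcpy` copies forward one
+        // byte at a time and the source trails the destination by exactly one
+        // element, the stored element is replicated across the whole array.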
switch (dst_ptr_ty.ptrSize(zcu)) { - .Slice => return self.fail("TODO: airMemset Slices", .{}), + .Slice => return func.fail("TODO: airMemset Slices", .{}), .One => { const elem_ptr_ty = try zcu.singleMutPtrType(elem_ty); const len = dst_ptr_ty.childType(zcu).arrayLen(zcu); assert(len != 0); // prevented by Sema - try self.store(dst_ptr, src_val, elem_ptr_ty, elem_ty); + try func.store(dst_ptr, src_val, elem_ptr_ty, elem_ty); - const second_elem_ptr_reg, const second_elem_ptr_lock = try self.allocReg(.int); - defer self.register_manager.unlockReg(second_elem_ptr_lock); + const second_elem_ptr_reg, const second_elem_ptr_lock = try func.allocReg(.int); + defer func.register_manager.unlockReg(second_elem_ptr_lock); const second_elem_ptr_mcv: MCValue = .{ .register = second_elem_ptr_reg }; - try self.genSetReg(Type.usize, second_elem_ptr_reg, .{ .register_offset = .{ - .reg = try self.copyToTmpRegister(Type.usize, dst_ptr), + try func.genSetReg(Type.usize, second_elem_ptr_reg, .{ .register_offset = .{ + .reg = try func.copyToTmpRegister(Type.usize, dst_ptr), .off = elem_abi_size, } }); const bytes_to_copy: MCValue = .{ .immediate = elem_abi_size * (len - 1) }; - try self.genInlineMemcpy(second_elem_ptr_mcv, dst_ptr, bytes_to_copy); + try func.genInlineMemcpy(second_elem_ptr_mcv, dst_ptr, bytes_to_copy); }, .C, .Many => unreachable, } } - return self.finishAir(inst, .unreach, .{ bin_op.lhs, bin_op.rhs, .none }); + return func.finishAir(inst, .unreach, .{ bin_op.lhs, bin_op.rhs, .none }); } -fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void { +fn airMemcpy(func: *Func, inst: Air.Inst.Index) !void { _ = inst; - return self.fail("TODO implement airMemcpy for {}", .{self.target.cpu.arch}); + return func.fail("TODO implement airMemcpy for {}", .{func.target.cpu.arch}); } -fn airTagName(self: *Self, inst: Air.Inst.Index) !void { - const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const operand = try self.resolveInst(un_op); - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else { +fn airTagName(func: *Func, inst: Air.Inst.Index) !void { + const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; + const operand = try func.resolveInst(un_op); + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else { _ = operand; - return self.fail("TODO implement airTagName for riscv64", .{}); + return func.fail("TODO implement airTagName for riscv64", .{}); }; - return self.finishAir(inst, result, .{ un_op, .none, .none }); + return func.finishAir(inst, result, .{ un_op, .none, .none }); } -fn airErrorName(self: *Self, inst: Air.Inst.Index) !void { - const zcu = self.bin_file.comp.module.?; - const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; +fn airErrorName(func: *Func, inst: Air.Inst.Index) !void { + const zcu = func.bin_file.comp.module.?; + const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const err_ty = self.typeOf(un_op); - const err_mcv = try self.resolveInst(un_op); + const err_ty = func.typeOf(un_op); + const err_mcv = try func.resolveInst(un_op); - const err_reg = try self.copyToTmpRegister(err_ty, err_mcv); - const err_lock = self.register_manager.lockRegAssumeUnused(err_reg); - defer self.register_manager.unlockReg(err_lock); + const err_reg = try func.copyToTmpRegister(err_ty, err_mcv); + const err_lock = func.register_manager.lockRegAssumeUnused(err_reg); + defer func.register_manager.unlockReg(err_lock); - const addr_reg, const addr_lock = try self.allocReg(.int); 
-    defer self.register_manager.unlockReg(addr_lock);
+    const addr_reg, const addr_lock = try func.allocReg(.int);
+    defer func.register_manager.unlockReg(addr_lock);
 
     // this is now the base address of the error name table
     const lazy_sym = link.File.LazySymbol.initDecl(.const_data, null, zcu);
-    if (self.bin_file.cast(link.File.Elf)) |elf_file| {
+    if (func.bin_file.cast(link.File.Elf)) |elf_file| {
         const sym_index = elf_file.zigObjectPtr().?.getOrCreateMetadataForLazySymbol(elf_file, lazy_sym) catch |err|
-            return self.fail("{s} creating lazy symbol", .{@errorName(err)});
+            return func.fail("{s} creating lazy symbol", .{@errorName(err)});
         const sym = elf_file.symbol(sym_index);
-        try self.genSetReg(Type.usize, addr_reg, .{ .load_symbol = .{ .sym = sym.esym_index } });
+        try func.genSetReg(Type.usize, addr_reg, .{ .load_symbol = .{ .sym = sym.esym_index } });
     } else {
-        return self.fail("TODO: riscv non-elf", .{});
+        return func.fail("TODO: riscv non-elf", .{});
     }
 
-    const start_reg, const start_lock = try self.allocReg(.int);
-    defer self.register_manager.unlockReg(start_lock);
+    const start_reg, const start_lock = try func.allocReg(.int);
+    defer func.register_manager.unlockReg(start_lock);
 
-    const end_reg, const end_lock = try self.allocReg(.int);
-    defer self.register_manager.unlockReg(end_lock);
+    const end_reg, const end_lock = try func.allocReg(.int);
+    defer func.register_manager.unlockReg(end_lock);
 
-    // const tmp_reg, const tmp_lock = try self.allocReg(.int);
-    // defer self.register_manager.unlockReg(tmp_lock);
+    // const tmp_reg, const tmp_lock = try func.allocReg(.int);
+    // defer func.register_manager.unlockReg(tmp_lock);
 
     // we move the base address forward by the following formula: base + (errno * 8)
-    // shifting left by 4 is the same as multiplying by 8
-    _ = try self.addInst(.{
+    // shifting left by 3 is the same as multiplying by 8
+    _ = try func.addInst(.{
         .tag = .slli,
         .ops = .rri,
         .data = .{ .i_type = .{
@@ -5850,7 +5844,7 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void {
         } },
     });
 
-    _ = try self.addInst(.{
+    _ = try func.addInst(.{
         .tag = .add,
         .ops = .rrr,
         .data = .{ .r_type = .{
@@ -5860,7 +5854,7 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void {
         } },
     });
 
-    _ = try self.addInst(.{
+    _ = try func.addInst(.{
         .tag = .pseudo,
         .ops = .pseudo_load_rm,
         .data = .{
@@ -5874,7 +5868,7 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void {
         },
     });
 
-    _ = try self.addInst(.{
+    _ = try func.addInst(.{
         .tag = .pseudo,
         .ops = .pseudo_load_rm,
         .data = .{
@@ -5888,64 +5882,64 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void {
         },
     });
 
-    const dst_mcv = try self.allocRegOrMem(inst, false);
+    const dst_mcv = try func.allocRegOrMem(inst, false);
     const frame = dst_mcv.load_frame;
 
-    try self.genSetMem(
+    try func.genSetMem(
         .{ .frame = frame.index },
         frame.off,
         Type.usize,
         .{ .register = start_reg },
     );
 
-    try self.genSetMem(
+    try func.genSetMem(
         .{ .frame = frame.index },
         frame.off + 8,
         Type.usize,
         .{ .register = end_reg },
     );
 
-    return self.finishAir(inst, dst_mcv, .{ un_op, .none, .none });
+    return func.finishAir(inst, dst_mcv, .{ un_op, .none, .none });
 }
 
-fn airSplat(self: *Self, inst: Air.Inst.Index) !void {
-    const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
-    const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement airSplat for riscv64", .{});
-    return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+fn airSplat(func: *Func, inst: Air.Inst.Index) !void {
+    const ty_op = 
func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement airSplat for riscv64", .{}); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airSelect(self: *Self, inst: Air.Inst.Index) !void { - const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; - const extra = self.air.extraData(Air.Bin, pl_op.payload).data; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement airSelect for riscv64", .{}); - return self.finishAir(inst, result, .{ pl_op.operand, extra.lhs, extra.rhs }); +fn airSelect(func: *Func, inst: Air.Inst.Index) !void { + const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; + const extra = func.air.extraData(Air.Bin, pl_op.payload).data; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement airSelect for riscv64", .{}); + return func.finishAir(inst, result, .{ pl_op.operand, extra.lhs, extra.rhs }); } -fn airShuffle(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement airShuffle for riscv64", .{}); - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); +fn airShuffle(func: *Func, inst: Air.Inst.Index) !void { + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement airShuffle for riscv64", .{}); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airReduce(self: *Self, inst: Air.Inst.Index) !void { - const reduce = self.air.instructions.items(.data)[@intFromEnum(inst)].reduce; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement airReduce for riscv64", .{}); - return self.finishAir(inst, result, .{ reduce.operand, .none, .none }); +fn airReduce(func: *Func, inst: Air.Inst.Index) !void { + const reduce = func.air.instructions.items(.data)[@intFromEnum(inst)].reduce; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement airReduce for riscv64", .{}); + return func.finishAir(inst, result, .{ reduce.operand, .none, .none }); } -fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { - const zcu = self.bin_file.comp.module.?; - const result_ty = self.typeOfIndex(inst); +fn airAggregateInit(func: *Func, inst: Air.Inst.Index) !void { + const zcu = func.bin_file.comp.module.?; + const result_ty = func.typeOfIndex(inst); const len: usize = @intCast(result_ty.arrayLen(zcu)); - const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; - const elements: []const Air.Inst.Ref = @ptrCast(self.air.extra[ty_pl.payload..][0..len]); + const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; + const elements: []const Air.Inst.Ref = @ptrCast(func.air.extra[ty_pl.payload..][0..len]); const result: MCValue = result: { switch (result_ty.zigTypeTag(zcu)) { .Struct => { - const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(result_ty, zcu)); + const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(result_ty, zcu)); if (result_ty.containerLayout(zcu) == .@"packed") { const struct_obj = 
zcu.typeToStruct(result_ty).?; - try self.genInlineMemset( + try func.genInlineMemset( .{ .lea_frame = .{ .index = frame_index } }, .{ .immediate = 0 }, .{ .immediate = result_ty.abiSize(zcu) }, @@ -5958,7 +5952,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { const elem_ty = result_ty.structFieldType(elem_i, zcu); const elem_bit_size: u32 = @intCast(elem_ty.bitSize(zcu)); if (elem_bit_size > 64) { - return self.fail( + return func.fail( "TODO airAggregateInit implement packed structs with large fields", .{}, ); @@ -5969,109 +5963,109 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { const elem_off = zcu.structPackedFieldBitOffset(struct_obj, elem_i); const elem_byte_off: i32 = @intCast(elem_off / elem_abi_bits * elem_abi_size); const elem_bit_off = elem_off % elem_abi_bits; - const elem_mcv = try self.resolveInst(elem); + const elem_mcv = try func.resolveInst(elem); _ = elem_byte_off; _ = elem_bit_off; const elem_lock = switch (elem_mcv) { - .register => |reg| self.register_manager.lockReg(reg), + .register => |reg| func.register_manager.lockReg(reg), .immediate => |imm| lock: { if (imm == 0) continue; break :lock null; }, else => null, }; - defer if (elem_lock) |lock| self.register_manager.unlockReg(lock); + defer if (elem_lock) |lock| func.register_manager.unlockReg(lock); - return self.fail("TODO: airAggregateInit packed structs", .{}); + return func.fail("TODO: airAggregateInit packed structs", .{}); } } else for (elements, 0..) |elem, elem_i| { if ((try result_ty.structFieldValueComptime(zcu, elem_i)) != null) continue; const elem_ty = result_ty.structFieldType(elem_i, zcu); const elem_off: i32 = @intCast(result_ty.structFieldOffset(elem_i, zcu)); - const elem_mcv = try self.resolveInst(elem); - try self.genSetMem(.{ .frame = frame_index }, elem_off, elem_ty, elem_mcv); + const elem_mcv = try func.resolveInst(elem); + try func.genSetMem(.{ .frame = frame_index }, elem_off, elem_ty, elem_mcv); } break :result .{ .load_frame = .{ .index = frame_index } }; }, .Array => { const elem_ty = result_ty.childType(zcu); - const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(result_ty, zcu)); + const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(result_ty, zcu)); const elem_size: u32 = @intCast(elem_ty.abiSize(zcu)); for (elements, 0..) 
|elem, elem_i| { - const elem_mcv = try self.resolveInst(elem); + const elem_mcv = try func.resolveInst(elem); const elem_off: i32 = @intCast(elem_size * elem_i); - try self.genSetMem( + try func.genSetMem( .{ .frame = frame_index }, elem_off, elem_ty, elem_mcv, ); } - if (result_ty.sentinel(zcu)) |sentinel| try self.genSetMem( + if (result_ty.sentinel(zcu)) |sentinel| try func.genSetMem( .{ .frame = frame_index }, @intCast(elem_size * elements.len), elem_ty, - try self.genTypedValue(sentinel), + try func.genTypedValue(sentinel), ); break :result .{ .load_frame = .{ .index = frame_index } }; }, - else => return self.fail("TODO: airAggregate {}", .{result_ty.fmt(zcu)}), + else => return func.fail("TODO: airAggregateInit {}", .{result_ty.fmt(zcu)}), } }; if (elements.len <= Liveness.bpi - 1) { var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1); @memcpy(buf[0..elements.len], elements); - return self.finishAir(inst, result, buf); + return func.finishAir(inst, result, buf); } - var bt = self.liveness.iterateBigTomb(inst); - for (elements) |elem| try self.feed(&bt, elem); - return self.finishAirResult(inst, result); + var bt = func.liveness.iterateBigTomb(inst); + for (elements) |elem| try func.feed(&bt, elem); + return func.finishAirResult(inst, result); } -fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void { - const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; - const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data; +fn airUnionInit(func: *Func, inst: Air.Inst.Index) !void { + const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; + const extra = func.air.extraData(Air.UnionInit, ty_pl.payload).data; _ = extra; - return self.fail("TODO implement airUnionInit for riscv64", .{}); - // return self.finishAir(inst, result, .{ extra.ptr, extra.expected_value, extra.new_value }); + return func.fail("TODO implement airUnionInit for riscv64", .{}); + // return func.finishAir(inst, result, .{ extra.ptr, extra.expected_value, extra.new_value }); } -fn airPrefetch(self: *Self, inst: Air.Inst.Index) !void { - const prefetch = self.air.instructions.items(.data)[@intFromEnum(inst)].prefetch; +fn airPrefetch(func: *Func, inst: Air.Inst.Index) !void { + const prefetch = func.air.instructions.items(.data)[@intFromEnum(inst)].prefetch; // TODO: RISC-V does have prefetch instruction variants. 
// see here: https://raw.githubusercontent.com/riscv/riscv-CMOs/master/specifications/cmobase-v1.0.1.pdf - return self.finishAir(inst, .unreach, .{ prefetch.ptr, .none, .none }); + return func.finishAir(inst, .unreach, .{ prefetch.ptr, .none, .none }); } -fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { - const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; - const extra = self.air.extraData(Air.Bin, pl_op.payload).data; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else { - return self.fail("TODO implement airMulAdd for riscv64", .{}); +fn airMulAdd(func: *Func, inst: Air.Inst.Index) !void { + const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; + const extra = func.air.extraData(Air.Bin, pl_op.payload).data; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else { + return func.fail("TODO implement airMulAdd for riscv64", .{}); }; - return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, pl_op.operand }); + return func.finishAir(inst, result, .{ extra.lhs, extra.rhs, pl_op.operand }); } -fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { - const zcu = self.bin_file.comp.module.?; +fn resolveInst(func: *Func, ref: Air.Inst.Ref) InnerError!MCValue { + const zcu = func.bin_file.comp.module.?; // If the type has no codegen bits, no need to store it. - const inst_ty = self.typeOf(ref); + const inst_ty = func.typeOf(ref); if (!inst_ty.hasRuntimeBits(zcu)) return .none; const mcv = if (ref.toIndex()) |inst| mcv: { - break :mcv self.inst_tracking.getPtr(inst).?.short; + break :mcv func.inst_tracking.getPtr(inst).?.short; } else mcv: { const ip_index = ref.toInterned().?; - const gop = try self.const_tracking.getOrPut(self.gpa, ip_index); + const gop = try func.const_tracking.getOrPut(func.gpa, ip_index); if (!gop.found_existing) gop.value_ptr.* = InstTracking.init( - try self.genTypedValue(Value.fromInterned(ip_index)), + try func.genTypedValue(Value.fromInterned(ip_index)), ); break :mcv gop.value_ptr.short; }; @@ -6079,21 +6073,21 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { return mcv; } -fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) *InstTracking { - const tracking = self.inst_tracking.getPtr(inst).?; +fn getResolvedInstValue(func: *Func, inst: Air.Inst.Index) *InstTracking { + const tracking = func.inst_tracking.getPtr(inst).?; return switch (tracking.short) { .none, .unreach, .dead => unreachable, else => tracking, }; } -fn genTypedValue(self: *Self, val: Value) InnerError!MCValue { - const zcu = self.bin_file.comp.module.?; +fn genTypedValue(func: *Func, val: Value) InnerError!MCValue { + const zcu = func.bin_file.comp.module.?; const result = try codegen.genTypedValue( - self.bin_file, - self.src_loc, + func.bin_file, + func.src_loc, val, - zcu.funcOwnerDeclIndex(self.func_index), + zcu.funcOwnerDeclIndex(func.func_index), ); const mcv: MCValue = switch (result) { .mcv => |mcv| switch (mcv) { @@ -6103,11 +6097,11 @@ fn genTypedValue(self: *Self, val: Value) InnerError!MCValue { .immediate => |imm| .{ .immediate = imm }, .memory => |addr| .{ .memory = addr }, .load_got, .load_direct, .load_tlv => { - return self.fail("TODO: genTypedValue {s}", .{@tagName(mcv)}); + return func.fail("TODO: genTypedValue {s}", .{@tagName(mcv)}); }, }, .fail => |msg| { - self.err_msg = msg; + func.err_msg = msg; return error.CodegenFail; }, }; @@ -6120,39 +6114,39 @@ const CallMCValues = struct { stack_byte_count: u31, stack_align: Alignment, - fn 
deinit(self: *CallMCValues, func: *Self) void { - func.gpa.free(self.args); - self.* = undefined; + fn deinit(call: *CallMCValues, func: *Func) void { + func.gpa.free(call.args); + call.* = undefined; } }; /// Caller must call `CallMCValues.deinit`. fn resolveCallingConventionValues( - self: *Self, + func: *Func, fn_info: InternPool.Key.FuncType, var_args: []const Type, ) !CallMCValues { - const zcu = self.bin_file.comp.module.?; + const zcu = func.bin_file.comp.module.?; const ip = &zcu.intern_pool; - const param_types = try self.gpa.alloc(Type, fn_info.param_types.len + var_args.len); - defer self.gpa.free(param_types); + const param_types = try func.gpa.alloc(Type, fn_info.param_types.len + var_args.len); + defer func.gpa.free(param_types); for (param_types[0..fn_info.param_types.len], fn_info.param_types.get(ip)) |*dest, src| { dest.* = Type.fromInterned(src); } for (param_types[fn_info.param_types.len..], var_args) |*param_ty, arg_ty| - param_ty.* = self.promoteVarArg(arg_ty); + param_ty.* = func.promoteVarArg(arg_ty); const cc = fn_info.cc; var result: CallMCValues = .{ - .args = try self.gpa.alloc(MCValue, param_types.len), + .args = try func.gpa.alloc(MCValue, param_types.len), // These undefined values must be populated before returning from this function. .return_value = undefined, .stack_byte_count = 0, .stack_align = undefined, }; - errdefer self.gpa.free(result.args); + errdefer func.gpa.free(result.args); const ret_ty = Type.fromInterned(fn_info.return_type); @@ -6164,7 +6158,7 @@ fn resolveCallingConventionValues( }, .C, .Unspecified => { if (result.args.len > 8) { - return self.fail("RISC-V calling convention does not support more than 8 arguments", .{}); + return func.fail("RISC-V calling convention does not support more than 8 arguments", .{}); } var ret_int_reg_i: u32 = 0; @@ -6211,11 +6205,11 @@ fn resolveCallingConventionValues( }; ret_tracking_i += 1; }, - else => return self.fail("TODO: C calling convention return class {}", .{class}), + else => return func.fail("TODO: C calling convention return class {}", .{class}), }; result.return_value = switch (ret_tracking_i) { - else => return self.fail("ty {} took {} tracking return indices", .{ ret_ty.fmt(zcu), ret_tracking_i }), + else => return func.fail("ty {} took {} tracking return indices", .{ ret_ty.fmt(zcu), ret_tracking_i }), 1 => ret_tracking[0], 2 => InstTracking.init(.{ .register_pair = .{ ret_tracking[0].short.register, ret_tracking[1].short.register, @@ -6267,28 +6261,28 @@ fn resolveCallingConventionValues( arg_mcv[arg_mcv_i] = .{ .indirect = .{ .reg = param_int_reg } }; arg_mcv_i += 1; }, - else => return self.fail("TODO: C calling convention arg class {}", .{class}), + else => return func.fail("TODO: C calling convention arg class {}", .{class}), } else { arg.* = switch (arg_mcv_i) { - else => return self.fail("ty {} took {} tracking arg indices", .{ ty.fmt(zcu), arg_mcv_i }), + else => return func.fail("ty {} took {} tracking arg indices", .{ ty.fmt(zcu), arg_mcv_i }), 1 => arg_mcv[0], 2 => .{ .register_pair = .{ arg_mcv[0].register, arg_mcv[1].register } }, }; continue; } - return self.fail("TODO: pass args by stack", .{}); + return func.fail("TODO: pass args by stack", .{}); } }, - else => return self.fail("TODO implement function parameters for {} on riscv64", .{cc}), + else => return func.fail("TODO implement function parameters for {} on riscv64", .{cc}), } result.stack_byte_count = @intCast(result.stack_align.forward(result.stack_byte_count)); return result; } -fn wantSafety(self: *Self) bool { - 
return switch (self.mod.optimize_mode) { +fn wantSafety(func: *Func) bool { + return switch (func.mod.optimize_mode) { .Debug => true, .ReleaseSafe => true, .ReleaseFast => false, @@ -6296,39 +6290,36 @@ fn wantSafety(self: *Self) bool { }; } -fn fail(self: *Self, comptime format: []const u8, args: anytype) InnerError { +fn fail(func: *Func, comptime format: []const u8, args: anytype) InnerError { @setCold(true); - assert(self.err_msg == null); - self.err_msg = try ErrorMsg.create(self.gpa, self.src_loc, format, args); + assert(func.err_msg == null); + func.err_msg = try ErrorMsg.create(func.gpa, func.src_loc, format, args); return error.CodegenFail; } -fn failSymbol(self: *Self, comptime format: []const u8, args: anytype) InnerError { +fn failSymbol(func: *Func, comptime format: []const u8, args: anytype) InnerError { @setCold(true); - assert(self.err_msg == null); - self.err_msg = try ErrorMsg.create(self.gpa, self.src_loc, format, args); + assert(func.err_msg == null); + func.err_msg = try ErrorMsg.create(func.gpa, func.src_loc, format, args); return error.CodegenFail; } fn parseRegName(name: []const u8) ?Register { - if (@hasDecl(Register, "parseRegName")) { - return Register.parseRegName(name); - } return std.meta.stringToEnum(Register, name); } -fn typeOf(self: *Self, inst: Air.Inst.Ref) Type { - const zcu = self.bin_file.comp.module.?; - return self.air.typeOf(inst, &zcu.intern_pool); +fn typeOf(func: *Func, inst: Air.Inst.Ref) Type { + const zcu = func.bin_file.comp.module.?; + return func.air.typeOf(inst, &zcu.intern_pool); } -fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type { - const zcu = self.bin_file.comp.module.?; - return self.air.typeOfIndex(inst, &zcu.intern_pool); +fn typeOfIndex(func: *Func, inst: Air.Inst.Index) Type { + const zcu = func.bin_file.comp.module.?; + return func.air.typeOfIndex(inst, &zcu.intern_pool); } -fn hasFeature(self: *Self, feature: Target.riscv.Feature) bool { - return Target.riscv.featureSetHas(self.target.cpu.features, feature); +fn hasFeature(func: *Func, feature: Target.riscv.Feature) bool { + return Target.riscv.featureSetHas(func.target.cpu.features, feature); } pub fn errUnionPayloadOffset(payload_ty: Type, zcu: *Module) u64 { @@ -6353,8 +6344,8 @@ pub fn errUnionErrorOffset(payload_ty: Type, zcu: *Module) u64 { } } -fn promoteInt(self: *Self, ty: Type) Type { - const mod = self.bin_file.comp.module.?; +fn promoteInt(func: *Func, ty: Type) Type { + const mod = func.bin_file.comp.module.?; const int_info: InternPool.Key.IntType = switch (ty.toIntern()) { .bool_type => .{ .signedness = .unsigned, .bits = 1 }, else => if (ty.isAbiInt(mod)) ty.intInfo(mod) else return ty, @@ -6372,12 +6363,12 @@ fn promoteInt(self: *Self, ty: Type) Type { return ty; } -fn promoteVarArg(self: *Self, ty: Type) Type { - if (!ty.isRuntimeFloat()) return self.promoteInt(ty); - switch (ty.floatBits(self.target.*)) { +fn promoteVarArg(func: *Func, ty: Type) Type { + if (!ty.isRuntimeFloat()) return func.promoteInt(ty); + switch (ty.floatBits(func.target.*)) { 32, 64 => return Type.f64, else => |float_bits| { - assert(float_bits == self.target.c_type_bit_size(.longdouble)); + assert(float_bits == func.target.c_type_bit_size(.longdouble)); return Type.c_longdouble; }, } From d69c48370a0381c7dce463c68b2097dd8fa67eb7 Mon Sep 17 00:00:00 2001 From: David Rubin Date: Sat, 25 May 2024 14:15:21 -0700 Subject: [PATCH 17/24] riscv: integer + float `@abs` --- src/arch/riscv64/CodeGen.zig | 71 +++++++++++++++++++++++++++++++++-- src/arch/riscv64/Encoding.zig | 8 ++++ 
src/arch/riscv64/Lower.zig | 20 ++++++++++ src/arch/riscv64/Mir.zig | 13 +++++++ test/behavior/abs.zig | 8 ++-- test/behavior/cast.zig | 1 - test/behavior/floatop.zig | 1 - test/behavior/math.zig | 1 - 8 files changed, 112 insertions(+), 11 deletions(-) diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index f252c9e6b7..cca02cb1a0 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -3377,18 +3377,83 @@ fn airAbs(func: *Func, inst: Air.Inst.Index) !void { const ty = func.typeOf(ty_op.operand); const scalar_ty = ty.scalarType(zcu); const operand = try func.resolveInst(ty_op.operand); - _ = operand; switch (scalar_ty.zigTypeTag(zcu)) { .Int => if (ty.zigTypeTag(zcu) == .Vector) { return func.fail("TODO implement airAbs for {}", .{ty.fmt(zcu)}); } else { - return func.fail("TODO: implement airAbs for Int", .{}); + const return_mcv = try func.copyToNewRegister(inst, operand); + const operand_reg = return_mcv.register; + + const temp_reg, const temp_lock = try func.allocReg(.int); + defer func.register_manager.unlockReg(temp_lock); + + _ = try func.addInst(.{ + .tag = .srai, + .ops = .rri, + .data = .{ .i_type = .{ + .rd = temp_reg, + .rs1 = operand_reg, + .imm12 = Immediate.s(63), + } }, + }); + + _ = try func.addInst(.{ + .tag = .xor, + .ops = .rrr, + .data = .{ .r_type = .{ + .rd = operand_reg, + .rs1 = operand_reg, + .rs2 = temp_reg, + } }, + }); + + _ = try func.addInst(.{ + .tag = .sub, + .ops = .rrr, + .data = .{ .r_type = .{ + .rd = operand_reg, + .rs1 = operand_reg, + .rs2 = temp_reg, + } }, + }); + + break :result return_mcv; + }, + .Float => { + const float_bits = scalar_ty.floatBits(zcu.getTarget()); + switch (float_bits) { + 16 => return func.fail("TODO: airAbs 16-bit float", .{}), + 32 => {}, + 64 => {}, + 80 => return func.fail("TODO: airAbs 80-bit float", .{}), + 128 => return func.fail("TODO: airAbs 128-bit float", .{}), + else => unreachable, + } + + const return_mcv = try func.copyToNewRegister(inst, operand); + const operand_reg = return_mcv.register; + + assert(operand_reg.class() == .float); + + _ = try func.addInst(.{ + .tag = .pseudo, + .ops = .pseudo_fabs, + .data = .{ + .fabs = .{ + .rd = operand_reg, + .rs = operand_reg, + .bits = float_bits, + }, + }, + }); + + break :result return_mcv; }, else => return func.fail("TODO: implement airAbs {}", .{scalar_ty.fmt(zcu)}), } - break :result .{.unreach}; + break :result .unreach; }; return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } diff --git a/src/arch/riscv64/Encoding.zig b/src/arch/riscv64/Encoding.zig index a944cac634..6f412935e4 100644 --- a/src/arch/riscv64/Encoding.zig +++ b/src/arch/riscv64/Encoding.zig @@ -130,6 +130,7 @@ pub const Mnemonic = enum { fles, fsgnjns, + fsgnjxs, // D extension (64-bit float) faddd, @@ -150,6 +151,7 @@ pub const Mnemonic = enum { fled, fsgnjnd, + fsgnjxd, pub fn encoding(mnem: Mnemonic) Enc { return switch (mnem) { @@ -218,6 +220,9 @@ pub const Mnemonic = enum { .fsgnjns => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00100, .fmt = .S, .rm = 0b000 } } }, .fsgnjnd => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00100, .fmt = .D, .rm = 0b000 } } }, + .fsgnjxs => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00100, .fmt = .S, .rm = 0b010 } } }, + .fsgnjxd => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00100, .fmt = .D, .rm = 0b010 } } }, + + // LOAD @@ -392,6 +397,9 @@ pub const InstEnc = enum { .fsgnjns, .fsgnjnd, + + .fsgnjxs, + .fsgnjxd, => .R, .ecall, diff --git 
a/src/arch/riscv64/Lower.zig b/src/arch/riscv64/Lower.zig index 99270c8a7e..560f8349df 100644 --- a/src/arch/riscv64/Lower.zig +++ b/src/arch/riscv64/Lower.zig @@ -208,6 +208,26 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { }); }, + .pseudo_fabs => { + const fabs = inst.data.fabs; + assert(fabs.rs.class() == .float and fabs.rd.class() == .float); + + const mnem: Encoding.Mnemonic = switch (fabs.bits) { + 16 => return lower.fail("TODO: airAbs Float 16", .{}), + 32 => .fsgnjxs, + 64 => .fsgnjxd, + 80 => return lower.fail("TODO: airAbs Float 80", .{}), + 128 => return lower.fail("TODO: airAbs Float 128", .{}), + else => unreachable, + }; + + try lower.emit(mnem, &.{ + .{ .reg = fabs.rs }, + .{ .reg = fabs.rd }, + .{ .reg = fabs.rd }, + }); + }, + .pseudo_compare => { const compare = inst.data.compare; const op = compare.op; diff --git a/src/arch/riscv64/Mir.zig b/src/arch/riscv64/Mir.zig index dd79a8dd83..76822c3968 100644 --- a/src/arch/riscv64/Mir.zig +++ b/src/arch/riscv64/Mir.zig @@ -76,6 +76,8 @@ pub const Inst = struct { fmuls, fdivs, + fabss, + fmins, fmaxs, @@ -94,6 +96,8 @@ pub const Inst = struct { fmuld, fdivd, + fabsd, + fmind, fmaxd, @@ -194,6 +198,12 @@ pub const Inst = struct { rs: Register, }, + fabs: struct { + rd: Register, + rs: Register, + bits: u16, + }, + compare: struct { rd: Register, rs1: Register, @@ -273,6 +283,9 @@ pub const Inst = struct { /// Jumps. Uses `inst` payload. pseudo_j, + /// Floating point absolute value. + pseudo_fabs, + /// Dead inst, ignored by the emitter. pseudo_dead, diff --git a/test/behavior/abs.zig b/test/behavior/abs.zig index 21f02b2a3d..88d01de5c6 100644 --- a/test/behavior/abs.zig +++ b/test/behavior/abs.zig @@ -6,7 +6,6 @@ test "@abs integers" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try comptime testAbsIntegers(); try testAbsIntegers(); @@ -93,18 +92,17 @@ test "@abs floats" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try comptime testAbsFloats(f16); - try testAbsFloats(f16); + if (builtin.zig_backend != .stage2_riscv64) try testAbsFloats(f16); try comptime testAbsFloats(f32); try testAbsFloats(f32); try comptime testAbsFloats(f64); try testAbsFloats(f64); try comptime testAbsFloats(f80); - if (builtin.zig_backend != .stage2_wasm and builtin.zig_backend != .stage2_spirv64) try testAbsFloats(f80); + if (builtin.zig_backend != .stage2_wasm and builtin.zig_backend != .stage2_spirv64 and builtin.zig_backend != .stage2_riscv64) try testAbsFloats(f80); try comptime testAbsFloats(f128); - if (builtin.zig_backend != .stage2_wasm and builtin.zig_backend != .stage2_spirv64) try testAbsFloats(f128); + if (builtin.zig_backend != .stage2_wasm and builtin.zig_backend != .stage2_spirv64 and builtin.zig_backend != .stage2_riscv64) try testAbsFloats(f128); } fn testAbsFloats(comptime T: type) !void { diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig index 1c5fbe09cd..2a141b3eda 100644 --- a/test/behavior/cast.zig +++ b/test/behavior/cast.zig @@ 
-2608,7 +2608,6 @@ test "@as does not corrupt values with incompatible representations" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const x: f32 = @as(f16, blk: { if (false) { diff --git a/test/behavior/floatop.zig b/test/behavior/floatop.zig index 670a7a01ec..65d889776a 100644 --- a/test/behavior/floatop.zig +++ b/test/behavior/floatop.zig @@ -999,7 +999,6 @@ test "@abs f32/f64" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testFabs(f32); try comptime testFabs(f32); diff --git a/test/behavior/math.zig b/test/behavior/math.zig index ea03d3f89d..509bfbb16a 100644 --- a/test/behavior/math.zig +++ b/test/behavior/math.zig @@ -1814,7 +1814,6 @@ test "absFloat" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testAbsFloat(); try comptime testAbsFloat(); From a270c6f8c82bc81eecf37cc06a326e46ed44fb8b Mon Sep 17 00:00:00 2001 From: David Rubin Date: Sat, 25 May 2024 22:52:26 -0700 Subject: [PATCH 18/24] riscv: implement optional logic --- src/arch/riscv64/CodeGen.zig | 278 ++++++++++++++---- src/arch/riscv64/abi.zig | 1 + src/arch/riscv64/bits.zig | 2 +- test/behavior/align.zig | 1 - test/behavior/basic.zig | 2 - test/behavior/cast.zig | 15 - test/behavior/error.zig | 1 - test/behavior/eval.zig | 1 - test/behavior/fn.zig | 3 - test/behavior/generics.zig | 1 - test/behavior/if.zig | 1 - test/behavior/null.zig | 3 - test/behavior/optional.zig | 5 - test/behavior/pointers.zig | 2 - test/behavior/ptrfromint.zig | 1 - test/behavior/sizeof_and_typeof.zig | 1 - test/behavior/slice.zig | 1 - test/behavior/struct.zig | 3 - .../struct_contains_null_ptr_itself.zig | 1 - test/behavior/this.zig | 1 - test/behavior/type.zig | 1 - test/behavior/union.zig | 1 - test/behavior/void.zig | 1 - 23 files changed, 222 insertions(+), 105 deletions(-) diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index cca02cb1a0..8636654e63 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -1545,6 +1545,58 @@ fn splitType(func: *Func, ty: Type) ![2]Type { return func.fail("TODO implement splitType for {}", .{ty.fmt(zcu)}); } +/// Truncates the value in the register in place. +/// Clobbers any remaining bits. 
+fn truncateRegister(func: *Func, ty: Type, reg: Register) !void { + const mod = func.bin_file.comp.module.?; + const int_info = if (ty.isAbiInt(mod)) ty.intInfo(mod) else std.builtin.Type.Int{ + .signedness = .unsigned, + .bits = @intCast(ty.bitSize(mod)), + }; + const shift = math.cast(u6, 64 - int_info.bits % 64) orelse return; + switch (int_info.signedness) { + .signed => { + _ = try func.addInst(.{ + .tag = .slli, + .ops = .rri, + .data = .{ + .i_type = .{ + .rd = reg, + .rs1 = reg, + .imm12 = Immediate.s(shift), + }, + }, + }); + _ = try func.addInst(.{ + .tag = .srai, + .ops = .rri, + .data = .{ + .i_type = .{ + .rd = reg, + .rs1 = reg, + .imm12 = Immediate.s(shift), + }, + }, + }); + }, + .unsigned => { + const mask = ~@as(u64, 0) >> shift; + const tmp_reg = try func.copyToTmpRegister(Type.usize, .{ .immediate = mask }); + _ = try func.addInst(.{ + .tag = .@"and", + .ops = .rrr, + .data = .{ + .r_type = .{ + .rd = reg, + .rs1 = reg, + .rs2 = tmp_reg, + }, + }, + }); + }, + } +} + fn symbolIndex(func: *Func) !u32 { const zcu = func.bin_file.comp.module.?; const decl_index = zcu.funcOwnerDeclIndex(func.func_index); @@ -2868,8 +2920,25 @@ fn airShr(func: *Func, inst: Air.Inst.Index) !void { } fn airOptionalPayload(func: *Func, inst: Air.Inst.Index) !void { + const zcu = func.bin_file.comp.module.?; const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement .optional_payload for {}", .{func.target.cpu.arch}); + const result: MCValue = result: { + const pl_ty = func.typeOfIndex(inst); + if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .none; + + const opt_mcv = try func.resolveInst(ty_op.operand); + if (func.reuseOperand(inst, ty_op.operand, 0, opt_mcv)) { + switch (opt_mcv) { + .register => |pl_reg| try func.truncateRegister(pl_ty, pl_reg), + else => {}, + } + break :result opt_mcv; + } + + const pl_mcv = try func.allocRegOrMem(inst, true); + try func.genCopy(pl_ty, pl_mcv, opt_mcv); + break :result pl_mcv; + }; return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } @@ -3022,16 +3091,40 @@ fn airSaveErrReturnTraceIndex(func: *Func, inst: Air.Inst.Index) !void { } fn airWrapOptional(func: *Func, inst: Air.Inst.Index) !void { + const zcu = func.bin_file.comp.module.?; const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { - const zcu = func.bin_file.comp.module.?; - const optional_ty = func.typeOfIndex(inst); + const result: MCValue = result: { + const pl_ty = func.typeOf(ty_op.operand); + if (!pl_ty.hasRuntimeBits(zcu)) break :result .{ .immediate = 1 }; - // Optional with a zero-bit payload type is just a boolean true - if (optional_ty.abiSize(zcu) == 1) - break :result MCValue{ .immediate = 1 }; + const opt_ty = func.typeOfIndex(inst); + const pl_mcv = try func.resolveInst(ty_op.operand); + const same_repr = opt_ty.optionalReprIsPayload(zcu); + if (same_repr and func.reuseOperand(inst, ty_op.operand, 0, pl_mcv)) break :result pl_mcv; - return func.fail("TODO implement wrap optional for {}", .{func.target.cpu.arch}); + const pl_lock: ?RegisterLock = switch (pl_mcv) { + .register => |reg| func.register_manager.lockRegAssumeUnused(reg), + else => null, + }; + defer if (pl_lock) |lock| func.register_manager.unlockReg(lock); + + const opt_mcv = try func.allocRegOrMem(inst, true); + try func.genCopy(pl_ty, opt_mcv, pl_mcv); + + if 
(!same_repr) { + const pl_abi_size: i32 = @intCast(pl_ty.abiSize(zcu)); + switch (opt_mcv) { + .load_frame => |frame_addr| try func.genSetMem( + .{ .frame = frame_addr.index }, + frame_addr.off + pl_abi_size, + Type.u8, + .{ .immediate = 1 }, + ), + .register => return func.fail("TODO: airWrapOptional opt_mcv register", .{}), + else => unreachable, + } + } + break :result opt_mcv; }; return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } @@ -4435,72 +4528,141 @@ fn condBr(func: *Func, cond_ty: Type, condition: MCValue) !Mir.Inst.Index { }); } +fn isNull(func: *Func, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MCValue { + const zcu = func.bin_file.comp.module.?; + const pl_ty = opt_ty.optionalChild(zcu); + + const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(zcu)) + .{ .off = 0, .ty = if (pl_ty.isSlice(zcu)) pl_ty.slicePtrFieldType(zcu) else pl_ty } + else + .{ .off = @intCast(pl_ty.abiSize(zcu)), .ty = Type.bool }; + + const return_mcv = try func.allocRegOrMem(inst, true); + assert(return_mcv == .register); // should not be larger than 8 bytes + const return_reg = return_mcv.register; + + switch (opt_mcv) { + .none, + .unreach, + .dead, + .undef, + .immediate, + .register_pair, + .register_offset, + .lea_frame, + .lea_symbol, + .reserved_frame, + .air_ref, + => return func.fail("TODO: isNull for {}", .{opt_mcv}), + + .register => |opt_reg| { + if (some_info.off == 0) { + _ = try func.addInst(.{ + .tag = .pseudo, + .ops = .pseudo_compare, + .data = .{ + .compare = .{ + .op = .eq, + .rd = return_reg, + .rs1 = opt_reg, + .rs2 = try func.copyToTmpRegister( + some_info.ty, + .{ .immediate = 0 }, + ), + .size = .byte, + }, + }, + }); + return return_mcv; + } + assert(some_info.ty.ip_index == .bool_type); + const opt_abi_size: u32 = @intCast(opt_ty.abiSize(zcu)); + _ = opt_abi_size; + return func.fail("TODO: isNull some_info.off != 0 register", .{}); + }, + + .load_frame => { + const opt_reg = try func.copyToTmpRegister( + some_info.ty, + opt_mcv.address().offset(some_info.off).deref(), + ); + const opt_reg_lock = func.register_manager.lockRegAssumeUnused(opt_reg); + defer func.register_manager.unlockReg(opt_reg_lock); + + _ = try func.addInst(.{ + .tag = .pseudo, + .ops = .pseudo_compare, + .data = .{ + .compare = .{ + .op = .eq, + .rd = return_reg, + .rs1 = opt_reg, + .rs2 = try func.copyToTmpRegister( + some_info.ty, + .{ .immediate = 0 }, + ), + .size = .byte, + }, + }, + }); + return return_mcv; + }, + + else => return func.fail("TODO: isNull {}", .{opt_mcv}), + } +} + fn airIsNull(func: *Func, inst: Air.Inst.Index) !void { const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { - const operand = try func.resolveInst(un_op); - break :result try func.isNull(operand); - }; + const operand = try func.resolveInst(un_op); + const ty = func.typeOf(un_op); + const result = try func.isNull(inst, ty, operand); return func.finishAir(inst, result, .{ un_op, .none, .none }); } fn airIsNullPtr(func: *Func, inst: Air.Inst.Index) !void { const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { - const operand_ptr = try func.resolveInst(un_op); - const operand: MCValue = blk: { - if (func.reuseOperand(inst, un_op, 0, operand_ptr)) { - // The MCValue that holds the pointer can be re-used as the value. 
- break :blk operand_ptr; - } else { - break :blk try func.allocRegOrMem(inst, true); - } - }; - try func.load(operand, operand_ptr, func.typeOf(un_op)); - break :result try func.isNull(operand); - }; - return func.finishAir(inst, result, .{ un_op, .none, .none }); -} + const operand = try func.resolveInst(un_op); + _ = operand; // autofix + const ty = func.typeOf(un_op); + _ = ty; // autofix -fn isNull(func: *Func, operand: MCValue) !MCValue { - _ = operand; - // Here you can specialize this instruction if it makes sense to, otherwise the default - // will call isNonNull and invert the result. - return func.fail("TODO call isNonNull and invert the result", .{}); + if (true) return func.fail("TODO: airIsNullPtr", .{}); + + return func.finishAir(inst, .unreach, .{ un_op, .none, .none }); } fn airIsNonNull(func: *Func, inst: Air.Inst.Index) !void { const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { - const operand = try func.resolveInst(un_op); - break :result try func.isNonNull(operand); - }; - return func.finishAir(inst, result, .{ un_op, .none, .none }); -} + const operand = try func.resolveInst(un_op); + const ty = func.typeOf(un_op); + const result = try func.isNull(inst, ty, operand); + assert(result == .register); -fn isNonNull(func: *Func, operand: MCValue) !MCValue { - _ = operand; - // Here you can specialize this instruction if it makes sense to, otherwise the default - // will call isNull and invert the result. - return func.fail("TODO call isNull and invert the result", .{}); + _ = try func.addInst(.{ + .tag = .pseudo, + .ops = .pseudo_not, + .data = .{ + .rr = .{ + .rd = result.register, + .rs = result.register, + }, + }, + }); + + return func.finishAir(inst, result, .{ un_op, .none, .none }); } fn airIsNonNullPtr(func: *Func, inst: Air.Inst.Index) !void { const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { - const operand_ptr = try func.resolveInst(un_op); - const operand: MCValue = blk: { - if (func.reuseOperand(inst, un_op, 0, operand_ptr)) { - // The MCValue that holds the pointer can be re-used as the value. 
- break :blk operand_ptr; - } else { - break :blk try func.allocRegOrMem(inst, true); - } - }; - try func.load(operand, operand_ptr, func.typeOf(un_op)); - break :result try func.isNonNull(operand); - }; - return func.finishAir(inst, result, .{ un_op, .none, .none }); + const operand = try func.resolveInst(un_op); + _ = operand; // autofix + const ty = func.typeOf(un_op); + _ = ty; // autofix + + if (true) return func.fail("TODO: airIsNonNullPtr", .{}); + + return func.finishAir(inst, .unreach, .{ un_op, .none, .none }); } fn airIsErr(func: *Func, inst: Air.Inst.Index) !void { @@ -5110,7 +5272,7 @@ fn genCopy(func: *Func, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) !void { dst_mcv, try func.resolveInst(src_ref), ), - else => unreachable, + else => return func.fail("genCopy register_pair src: {}", .{src_mcv}), }; defer if (src_info) |info| { diff --git a/src/arch/riscv64/abi.zig b/src/arch/riscv64/abi.zig index b8254b68a5..a5b54f0a1b 100644 --- a/src/arch/riscv64/abi.zig +++ b/src/arch/riscv64/abi.zig @@ -125,6 +125,7 @@ pub fn classifySystem(ty: Type, zcu: *Module) [8]SystemClass { return result; } result[0] = .integer; + if (ty.optionalChild(zcu).abiSize(zcu) == 0) return result; result[1] = .integer; return result; }, diff --git a/src/arch/riscv64/bits.zig b/src/arch/riscv64/bits.zig index 034fe49450..a18f445816 100644 --- a/src/arch/riscv64/bits.zig +++ b/src/arch/riscv64/bits.zig @@ -230,7 +230,7 @@ pub const Register = enum(u8) { return @as(u8, reg.id()); } - pub fn bitSize(reg: Register, zcu: Module) u32 { + pub fn bitSize(reg: Register, zcu: *const Module) u32 { const features = zcu.getTarget().cpu.features; return switch (@intFromEnum(reg)) { diff --git a/test/behavior/align.zig b/test/behavior/align.zig index 674d375438..bbb786af78 100644 --- a/test/behavior/align.zig +++ b/test/behavior/align.zig @@ -603,7 +603,6 @@ test "comptime alloc alignment" { } test "@alignCast null" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO diff --git a/test/behavior/basic.zig b/test/behavior/basic.zig index 7c0c6cc4ab..0262b59bde 100644 --- a/test/behavior/basic.zig +++ b/test/behavior/basic.zig @@ -483,7 +483,6 @@ fn testStructInFn() !void { test "fn call returning scalar optional in equality expression" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect(getNull() == null); } @@ -494,7 +493,6 @@ fn getNull() ?*i32 { test "global variable assignment with optional unwrapping with var initialized to undefined" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { var data: i32 = 1234; diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig index 2a141b3eda..53616a82ce 100644 --- a/test/behavior/cast.zig +++ b/test/behavior/cast.zig @@ -186,7 +186,6 @@ fn expectIntFromFloat(comptime F: type, f: F, comptime I: type, i: I) !void { test "implicitly cast indirect pointer to maybe-indirect pointer" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = 
struct { const Self = @This(); @@ -247,7 +246,6 @@ test "coerce undefined to optional" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect(MakeType(void).getNull() == null); try expect(MakeType(void).getNonNull() != null); @@ -1184,7 +1182,6 @@ test "implicit ptr to *anyopaque" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var a: u32 = 1; const ptr: *align(@alignOf(u32)) anyopaque = &a; @@ -1198,7 +1195,6 @@ test "implicit ptr to *anyopaque" { test "return null from fn () anyerror!?&T" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const a = returnNullFromOptionalTypeErrorRef(); const b = returnNullLitFromOptionalTypeErrorRef(); @@ -1289,7 +1285,6 @@ test "implicit cast from *T to ?*anyopaque" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var a: u8 = 1; incrementVoidPtrValue(&a); @@ -1361,7 +1356,6 @@ test "assignment to optional pointer result loc" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var foo: struct { ptr: ?*anyopaque } = .{ .ptr = &global_struct }; _ = &foo; @@ -1437,7 +1431,6 @@ test "peer type resolution: unreachable, null, slice" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest(num: usize, word: []const u8) !void { @@ -1478,7 +1471,6 @@ test "cast compatible optional types" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var a: ?[:0]const u8 = null; _ = &a; @@ -1591,7 +1583,6 @@ test "bitcast packed struct with u0" { test "optional pointer coerced to optional allowzero pointer" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var p: ?*u32 = undefined; var q: ?*allowzero u32 = undefined; @@ -1608,8 +1599,6 @@ test "optional slice coerced to allowzero many pointer" { } test "optional slice passed as parameter coerced to allowzero many pointer" { - if (builtin.zig_backend == .stage2_riscv64) return 
error.SkipZigTest; - const ns = struct { const Color = struct { r: u8, @@ -1832,7 +1821,6 @@ test "peer type resolution: error union and optional of same type" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const E = error{Foo}; var a: E!*u8 = error.Foo; @@ -1878,7 +1866,6 @@ test "peer type resolution: three-way resolution combines error set and optional if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const E = error{Foo}; var a: E = error.Foo; @@ -2104,7 +2091,6 @@ test "peer type resolution: tuple pointer and optional slice" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // Miscompilation on Intel's OpenCL CPU runtime. if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // flaky @@ -2389,7 +2375,6 @@ test "cast builtins can wrap result in error union and optional" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const MyEnum = enum(u32) { _ }; diff --git a/test/behavior/error.zig b/test/behavior/error.zig index 4e7fe949f1..2863c5db6c 100644 --- a/test/behavior/error.zig +++ b/test/behavior/error.zig @@ -124,7 +124,6 @@ test "debug info for optional error set" { test "implicit cast to optional to error union to return result loc" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn entry() !void { diff --git a/test/behavior/eval.zig b/test/behavior/eval.zig index 7b01c8d25f..492b204842 100644 --- a/test/behavior/eval.zig +++ b/test/behavior/eval.zig @@ -1548,7 +1548,6 @@ test "non-optional and optional array elements concatenated" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const array = [1]u8{'A'} ++ [1]?u8{null}; var index: usize = 0; diff --git a/test/behavior/fn.zig b/test/behavior/fn.zig index 1c7adc38e8..73ef9bdbfe 100644 --- a/test/behavior/fn.zig +++ b/test/behavior/fn.zig @@ -104,7 +104,6 @@ test "inline function call that calls optional function pointer, return pointer if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = 
struct { field: u32, @@ -259,7 +258,6 @@ test "implicit cast fn call result to optional in field result" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn entry() !void { @@ -473,7 +471,6 @@ test "method call with optional and error union first param" { test "method call with optional pointer first param" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { x: i32 = 1234, diff --git a/test/behavior/generics.zig b/test/behavior/generics.zig index 6cbb2f0786..46c400750c 100644 --- a/test/behavior/generics.zig +++ b/test/behavior/generics.zig @@ -444,7 +444,6 @@ test "generic function passed as comptime argument" { test "return type of generic function is function pointer" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn b(comptime T: type) ?*const fn () error{}!T { diff --git a/test/behavior/if.zig b/test/behavior/if.zig index 8cb923dd43..ef0862bb70 100644 --- a/test/behavior/if.zig +++ b/test/behavior/if.zig @@ -139,7 +139,6 @@ test "if-else expression with runtime condition result location is inferred opti if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const A = struct { b: u64, c: u64 }; var d: bool = true; diff --git a/test/behavior/null.zig b/test/behavior/null.zig index 323f47c896..ebc390c36a 100644 --- a/test/behavior/null.zig +++ b/test/behavior/null.zig @@ -85,7 +85,6 @@ fn testTestNullRuntime(x: ?i32) !void { test "optional void" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try optionalVoidImpl(); try comptime optionalVoidImpl(); @@ -109,7 +108,6 @@ const Empty = struct {}; test "optional struct{}" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; _ = try optionalEmptyStructImpl(); _ = try comptime optionalEmptyStructImpl(); @@ -135,7 +133,6 @@ test "null with default unwrap" { test "optional pointer to 0 bit type null value at runtime" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const EmptyStruct = struct {}; var x: ?*EmptyStruct = null; diff --git a/test/behavior/optional.zig b/test/behavior/optional.zig index ad90d5fd0a..9282184c3e 100644 --- a/test/behavior/optional.zig +++ b/test/behavior/optional.zig @@ -29,7 +29,6 @@ pub const EmptyStruct = struct {}; test "optional pointer to size zero struct" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return 
error.SkipZigTest; var e = EmptyStruct{}; const o: ?*EmptyStruct = &e; @@ -60,7 +59,6 @@ fn testNullPtrsEql() !void { test "optional with zero-bit type" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; const S = struct { @@ -241,7 +239,6 @@ test "compare optionals with modified payloads" { test "unwrap function call with optional pointer return value" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn entry() !void { @@ -373,7 +370,6 @@ test "0-bit child type coerced to optional return ptr result location" { test "0-bit child type coerced to optional" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -638,7 +634,6 @@ test "result location initialization of optional with OPV payload" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; const S = struct { diff --git a/test/behavior/pointers.zig b/test/behavior/pointers.zig index 891ae311dd..cbd3033e7d 100644 --- a/test/behavior/pointers.zig +++ b/test/behavior/pointers.zig @@ -174,7 +174,6 @@ test "implicit cast error unions with non-optional to optional pointer" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -222,7 +221,6 @@ test "assign null directly to C pointer and test null equality" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var x: [*c]i32 = null; _ = &x; diff --git a/test/behavior/ptrfromint.zig b/test/behavior/ptrfromint.zig index 0ff54c9416..89706be891 100644 --- a/test/behavior/ptrfromint.zig +++ b/test/behavior/ptrfromint.zig @@ -34,7 +34,6 @@ test "@ptrFromInt creates null pointer" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const ptr = @as(?*u32, @ptrFromInt(0)); try expectEqual(@as(?*u32, null), ptr); diff --git a/test/behavior/sizeof_and_typeof.zig b/test/behavior/sizeof_and_typeof.zig index 11d74c43d9..5d78acb241 100644 --- a/test/behavior/sizeof_and_typeof.zig +++ 
b/test/behavior/sizeof_and_typeof.zig @@ -328,7 +328,6 @@ test "peer type resolution with @TypeOf doesn't trigger dependency loop check" { if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const T = struct { next: @TypeOf(null, @as(*const @This(), undefined)), diff --git a/test/behavior/slice.zig b/test/behavior/slice.zig index 606f6db9a3..2375977a4e 100644 --- a/test/behavior/slice.zig +++ b/test/behavior/slice.zig @@ -246,7 +246,6 @@ fn sliceFromLenToLen(a_slice: []u8, start: usize, end: usize) []u8 { test "C pointer" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var buf: [*c]const u8 = "kjdhfkjdhfdkjhfkfjhdfkjdhfkdjhfdkjhf"; var len: u32 = 10; diff --git a/test/behavior/struct.zig b/test/behavior/struct.zig index 44911378db..e27adafa79 100644 --- a/test/behavior/struct.zig +++ b/test/behavior/struct.zig @@ -1873,8 +1873,6 @@ test "initializer takes a pointer to a variable inside its struct" { } test "circular dependency through pointer field of a struct" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - const S = struct { const StructInner = extern struct { outer: StructOuter = std.mem.zeroes(StructOuter), @@ -2151,7 +2149,6 @@ test "initiate global variable with runtime value" { test "struct containing optional pointer to array of @This()" { if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { x: ?*const [1]@This(), diff --git a/test/behavior/struct_contains_null_ptr_itself.zig b/test/behavior/struct_contains_null_ptr_itself.zig index d3dacc50cd..d0cb3ef443 100644 --- a/test/behavior/struct_contains_null_ptr_itself.zig +++ b/test/behavior/struct_contains_null_ptr_itself.zig @@ -5,7 +5,6 @@ const builtin = @import("builtin"); test "struct contains null pointer which contains original struct" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var x: ?*NodeLineComment = null; _ = &x; diff --git a/test/behavior/this.zig b/test/behavior/this.zig index fadb21023e..3f8fe13316 100644 --- a/test/behavior/this.zig +++ b/test/behavior/this.zig @@ -50,7 +50,6 @@ test "this used as optional function parameter" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var global: State = undefined; global.enter = prev; diff --git a/test/behavior/type.zig b/test/behavior/type.zig index c00d3de417..1a36f576f1 100644 --- a/test/behavior/type.zig +++ b/test/behavior/type.zig @@ -260,7 +260,6 @@ test "Type.Struct" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == 
.stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const A = @Type(@typeInfo(struct { x: u8, y: u32 })); const infoA = @typeInfo(A).Struct; diff --git a/test/behavior/union.zig b/test/behavior/union.zig index 73bfe4a7cb..c720d5c908 100644 --- a/test/behavior/union.zig +++ b/test/behavior/union.zig @@ -2145,7 +2145,6 @@ test "pass register-sized field as non-register-sized union" { test "circular dependency through pointer field of a union" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const UnionInner = extern struct { diff --git a/test/behavior/void.zig b/test/behavior/void.zig index 5c4215b870..26d7a4e4c7 100644 --- a/test/behavior/void.zig +++ b/test/behavior/void.zig @@ -37,7 +37,6 @@ test "void optional" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var x: ?void = {}; _ = &x; From a9ef0169432a6709ca59588050be703ab1a10357 Mon Sep 17 00:00:00 2001 From: David Rubin Date: Sun, 26 May 2024 14:01:32 -0700 Subject: [PATCH 19/24] riscv: implement `airArrayToSlice` --- src/arch/riscv64/CodeGen.zig | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 8636654e63..bd26ab652d 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -5880,10 +5880,25 @@ fn airBitCast(func: *Func, inst: Air.Inst.Index) !void { } fn airArrayToSlice(func: *Func, inst: Air.Inst.Index) !void { + const zcu = func.bin_file.comp.module.?; const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement airArrayToSlice for {}", .{ - func.target.cpu.arch, - }); + + const slice_ty = func.typeOfIndex(inst); + const ptr_ty = func.typeOf(ty_op.operand); + const ptr = try func.resolveInst(ty_op.operand); + const array_ty = ptr_ty.childType(zcu); + const array_len = array_ty.arrayLen(zcu); + + const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(slice_ty, zcu)); + try func.genSetMem(.{ .frame = frame_index }, 0, ptr_ty, ptr); + try func.genSetMem( + .{ .frame = frame_index }, + @intCast(ptr_ty.abiSize(zcu)), + Type.usize, + .{ .immediate = array_len }, + ); + + const result = MCValue{ .load_frame = .{ .index = frame_index } }; return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } From 4fd8900337f363c87b916e3a96004f8f35845549 Mon Sep 17 00:00:00 2001 From: David Rubin Date: Mon, 27 May 2024 01:43:37 -0700 Subject: [PATCH 20/24] riscv: rewrite "binOp" Reorganize how the binOp and genBinOp functions work. I've spent quite a while here reading through the spec, and many more tests are now enabled because this rewrite fixes several critical issues in the old design. There are some regressions that would take a long time to track down individually, so I am setting them aside for now in the hope that later work resolves them. Once we're closer to 100% of tests passing, I will start diving into them one-by-one.
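To illustrate the kind of correctness issue a rewrite like this has to get right, here is a minimal, self-contained sketch (not code from this patch; the names are illustrative) of why a sub-64-bit integer result computed in a full-width register needs explicit truncation afterwards, using the same slli/srai and mask technique as `truncateRegister` from earlier in this series:

const std = @import("std");

// An iN/uN value occupies a 64-bit register; after a full-width
// operation the bits above N are stale and must be fixed up.
fn truncate(comptime bits: u7, signed: bool, reg: u64) u64 {
    const shift: u6 = @intCast(64 - bits);
    if (signed) {
        // slli + srai: move the value to the top of the register,
        // then arithmetic-shift back down to sign-extend it.
        const v: i64 = @bitCast(reg << shift);
        return @bitCast(v >> shift);
    }
    // Unsigned: mask off everything above the type's width.
    return reg & (~@as(u64, 0) >> shift);
}

test "an i8 add wraps only after truncation" {
    const raw: u64 = 0x7f + 0x01; // 127 + 1 computed in a 64-bit register
    // The i8 result wrapped to -128, so the register must hold its
    // 64-bit sign extension rather than the raw 0x80.
    try std.testing.expectEqual(@as(u64, 0xffff_ffff_ffff_ff80), truncate(8, true, raw));
}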
--- src/arch/riscv64/CodeGen.zig | 1906 +++++++++-------- src/arch/riscv64/Encoding.zig | 221 +- src/arch/riscv64/Lower.zig | 40 +- src/arch/riscv64/Mir.zig | 29 +- src/arch/riscv64/bits.zig | 40 +- src/arch/riscv64/encoder.zig | 25 + test/behavior/align.zig | 1 - test/behavior/array.zig | 12 +- test/behavior/basic.zig | 3 - test/behavior/bitcast.zig | 3 + test/behavior/byval_arg_var.zig | 1 - test/behavior/cast.zig | 25 +- test/behavior/enum.zig | 1 + test/behavior/error.zig | 1 - test/behavior/eval.zig | 5 +- test/behavior/for.zig | 5 - test/behavior/generics.zig | 2 - test/behavior/globals.zig | 1 - test/behavior/if.zig | 1 - test/behavior/inline_switch.zig | 1 - test/behavior/math.zig | 6 - test/behavior/nan.zig | 1 - test/behavior/optional.zig | 2 - test/behavior/packed-struct.zig | 4 +- test/behavior/pointers.zig | 3 - test/behavior/ptrcast.zig | 2 - test/behavior/reflection.zig | 1 + test/behavior/sizeof_and_typeof.zig | 1 - test/behavior/slice.zig | 12 - test/behavior/struct.zig | 4 + .../struct_contains_slice_of_itself.zig | 2 - test/behavior/switch.zig | 4 - test/behavior/this.zig | 1 + test/behavior/undefined.zig | 1 - test/behavior/union.zig | 3 + test/behavior/vector.zig | 1 - test/behavior/while.zig | 1 - test/behavior/widening.zig | 2 - 38 files changed, 1258 insertions(+), 1116 deletions(-) diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index bd26ab652d..115d5697a5 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -22,6 +22,8 @@ const DW = std.dwarf; const leb128 = std.leb; const log = std.log.scoped(.riscv_codegen); const tracking_log = std.log.scoped(.tracking); +const verbose_tracking_log = std.log.scoped(.verbose_tracking); +const wip_mir_log = std.log.scoped(.wip_mir); const build_options = @import("build_options"); const codegen = @import("../../codegen.zig"); const Alignment = InternPool.Alignment; @@ -32,6 +34,8 @@ const DebugInfoOutput = codegen.DebugInfoOutput; const bits = @import("bits.zig"); const abi = @import("abi.zig"); +const Lower = @import("Lower.zig"); + const Register = bits.Register; const Immediate = bits.Immediate; const Memory = bits.Memory; @@ -158,6 +162,14 @@ const MCValue = union(enum) { }; } + fn isRegister(mcv: MCValue) bool { + return switch (mcv) { + .register => true, + .register_offset => |reg_off| return reg_off.off == 0, + else => false, + }; + } + fn isMutable(mcv: MCValue) bool { return switch (mcv) { .none => unreachable, @@ -289,6 +301,7 @@ const Branch = struct { const InstTrackingMap = std.AutoArrayHashMapUnmanaged(Air.Inst.Index, InstTracking); const ConstTrackingMap = std.AutoArrayHashMapUnmanaged(InternPool.Index, InstTracking); + const InstTracking = struct { long: MCValue, short: MCValue, @@ -317,33 +330,37 @@ const InstTracking = struct { }, .short = result }; } - fn getReg(func: InstTracking) ?Register { - return func.short.getReg(); + fn getReg(inst_tracking: InstTracking) ?Register { + return inst_tracking.short.getReg(); } - fn getRegs(func: *const InstTracking) []const Register { - return func.short.getRegs(); + fn getRegs(inst_tracking: *const InstTracking) []const Register { + return inst_tracking.short.getRegs(); } - fn spill(func: *InstTracking, function: *Func, inst: Air.Inst.Index) !void { - if (std.meta.eql(func.long, func.short)) return; // Already spilled + fn spill(inst_tracking: *InstTracking, function: *Func, inst: Air.Inst.Index) !void { + if (std.meta.eql(inst_tracking.long, inst_tracking.short)) return; // Already spilled // Allocate or reuse frame 
index - switch (func.long) { - .none => func.long = try function.allocRegOrMem(inst, false), + switch (inst_tracking.long) { + .none => inst_tracking.long = try function.allocRegOrMem( + function.typeOfIndex(inst), + inst, + false, + ), .load_frame => {}, - .reserved_frame => |index| func.long = .{ .load_frame = .{ .index = index } }, + .reserved_frame => |index| inst_tracking.long = .{ .load_frame = .{ .index = index } }, else => unreachable, } - tracking_log.debug("spill %{d} from {} to {}", .{ inst, func.short, func.long }); - try function.genCopy(function.typeOfIndex(inst), func.long, func.short); + tracking_log.debug("spill %{d} from {} to {}", .{ inst, inst_tracking.short, inst_tracking.long }); + try function.genCopy(function.typeOfIndex(inst), inst_tracking.long, inst_tracking.short); } - fn reuseFrame(func: *InstTracking) void { - switch (func.long) { - .reserved_frame => |index| func.long = .{ .load_frame = .{ .index = index } }, + fn reuseFrame(inst_tracking: *InstTracking) void { + switch (inst_tracking.long) { + .reserved_frame => |index| inst_tracking.long = .{ .load_frame = .{ .index = index } }, else => {}, } - func.short = switch (func.long) { + inst_tracking.short = switch (inst_tracking.long) { .none, .unreach, .undef, @@ -353,7 +370,7 @@ const InstTracking = struct { .lea_frame, .load_symbol, .lea_symbol, - => func.long, + => inst_tracking.long, .dead, .register, .register_pair, @@ -365,14 +382,14 @@ const InstTracking = struct { }; } - fn trackSpill(func: *InstTracking, function: *Func, inst: Air.Inst.Index) !void { - try function.freeValue(func.short); - func.reuseFrame(); - tracking_log.debug("%{d} => {} (spilled)", .{ inst, func.* }); + fn trackSpill(inst_tracking: *InstTracking, function: *Func, inst: Air.Inst.Index) !void { + try function.freeValue(inst_tracking.short); + inst_tracking.reuseFrame(); + tracking_log.debug("%{d} => {} (spilled)", .{ inst, inst_tracking.* }); } - fn verifyMaterialize(func: InstTracking, target: InstTracking) void { - switch (func.long) { + fn verifyMaterialize(inst_tracking: InstTracking, target: InstTracking) void { + switch (inst_tracking.long) { .none, .unreach, .undef, @@ -381,7 +398,7 @@ const InstTracking = struct { .lea_frame, .load_symbol, .lea_symbol, - => assert(std.meta.eql(func.long, target.long)), + => assert(std.meta.eql(inst_tracking.long, target.long)), .load_frame, .reserved_frame, => switch (target.long) { @@ -402,73 +419,73 @@ const InstTracking = struct { } fn materialize( - func: *InstTracking, + inst_tracking: *InstTracking, function: *Func, inst: Air.Inst.Index, target: InstTracking, ) !void { - func.verifyMaterialize(target); - try func.materializeUnsafe(function, inst, target); + inst_tracking.verifyMaterialize(target); + try inst_tracking.materializeUnsafe(function, inst, target); } fn materializeUnsafe( - func: InstTracking, + inst_tracking: InstTracking, function: *Func, inst: Air.Inst.Index, target: InstTracking, ) !void { const ty = function.typeOfIndex(inst); - if ((func.long == .none or func.long == .reserved_frame) and target.long == .load_frame) - try function.genCopy(ty, target.long, func.short); - try function.genCopy(ty, target.short, func.short); + if ((inst_tracking.long == .none or inst_tracking.long == .reserved_frame) and target.long == .load_frame) + try function.genCopy(ty, target.long, inst_tracking.short); + try function.genCopy(ty, target.short, inst_tracking.short); } - fn trackMaterialize(func: *InstTracking, inst: Air.Inst.Index, target: InstTracking) void { - 
func.verifyMaterialize(target); + fn trackMaterialize(inst_tracking: *InstTracking, inst: Air.Inst.Index, target: InstTracking) void { + inst_tracking.verifyMaterialize(target); // Don't clobber reserved frame indices - func.long = if (target.long == .none) switch (func.long) { + inst_tracking.long = if (target.long == .none) switch (inst_tracking.long) { .load_frame => |addr| .{ .reserved_frame = addr.index }, - .reserved_frame => func.long, + .reserved_frame => inst_tracking.long, else => target.long, } else target.long; - func.short = target.short; - tracking_log.debug("%{d} => {} (materialize)", .{ inst, func.* }); + inst_tracking.short = target.short; + tracking_log.debug("%{d} => {} (materialize)", .{ inst, inst_tracking.* }); } - fn resurrect(func: *InstTracking, inst: Air.Inst.Index, scope_generation: u32) void { - switch (func.short) { + fn resurrect(inst_tracking: *InstTracking, inst: Air.Inst.Index, scope_generation: u32) void { + switch (inst_tracking.short) { .dead => |die_generation| if (die_generation >= scope_generation) { - func.reuseFrame(); - tracking_log.debug("%{d} => {} (resurrect)", .{ inst, func.* }); + inst_tracking.reuseFrame(); + tracking_log.debug("%{d} => {} (resurrect)", .{ inst, inst_tracking.* }); }, else => {}, } } - fn die(func: *InstTracking, function: *Func, inst: Air.Inst.Index) !void { - if (func.short == .dead) return; - try function.freeValue(func.short); - func.short = .{ .dead = function.scope_generation }; - tracking_log.debug("%{d} => {} (death)", .{ inst, func.* }); + fn die(inst_tracking: *InstTracking, function: *Func, inst: Air.Inst.Index) !void { + if (inst_tracking.short == .dead) return; + try function.freeValue(inst_tracking.short); + inst_tracking.short = .{ .dead = function.scope_generation }; + tracking_log.debug("%{d} => {} (death)", .{ inst, inst_tracking.* }); } fn reuse( - func: *InstTracking, + inst_tracking: *InstTracking, function: *Func, new_inst: ?Air.Inst.Index, old_inst: Air.Inst.Index, ) void { - func.short = .{ .dead = function.scope_generation }; + inst_tracking.short = .{ .dead = function.scope_generation }; if (new_inst) |inst| - tracking_log.debug("%{d} => {} (reuse %{d})", .{ inst, func.*, old_inst }) + tracking_log.debug("%{d} => {} (reuse %{d})", .{ inst, inst_tracking.*, old_inst }) else - tracking_log.debug("tmp => {} (reuse %{d})", .{ func.*, old_inst }); + tracking_log.debug("tmp => {} (reuse %{d})", .{ inst_tracking.*, old_inst }); } - fn liveOut(func: *InstTracking, function: *Func, inst: Air.Inst.Index) void { - for (func.getRegs()) |reg| { + fn liveOut(inst_tracking: *InstTracking, function: *Func, inst: Air.Inst.Index) void { + for (inst_tracking.getRegs()) |reg| { if (function.register_manager.isRegFree(reg)) { - tracking_log.debug("%{d} => {} (live-out)", .{ inst, func.* }); + tracking_log.debug("%{d} => {} (live-out)", .{ inst, inst_tracking.* }); continue; } @@ -495,18 +512,18 @@ const InstTracking = struct { // Perform side-effects of freeValue manually. 
function.register_manager.freeReg(reg); - tracking_log.debug("%{d} => {} (live-out %{d})", .{ inst, func.*, tracked_inst }); + tracking_log.debug("%{d} => {} (live-out %{d})", .{ inst, inst_tracking.*, tracked_inst }); } } pub fn format( - func: InstTracking, + inst_tracking: InstTracking, comptime _: []const u8, _: std.fmt.FormatOptions, writer: anytype, ) @TypeOf(writer).Error!void { - if (!std.meta.eql(func.long, func.short)) try writer.print("|{}| ", .{func.long}); - try writer.print("{}", .{func.short}); + if (!std.meta.eql(inst_tracking.long, inst_tracking.short)) try writer.print("|{}| ", .{inst_tracking.long}); + try writer.print("{}", .{inst_tracking.short}); } }; @@ -741,6 +758,8 @@ pub fn generate( function.mir_extra.deinit(gpa); } + wip_mir_log.debug("{}:", .{function.fmtDecl(func.owner_decl)}); + try function.frame_allocs.resize(gpa, FrameIndex.named_count); function.frame_allocs.set( @intFromEnum(FrameIndex.stack_frame), @@ -846,13 +865,133 @@ pub fn generate( } } +const FormatWipMirData = struct { + func: *Func, + inst: Mir.Inst.Index, +}; +fn formatWipMir( + data: FormatWipMirData, + comptime _: []const u8, + _: std.fmt.FormatOptions, + writer: anytype, +) @TypeOf(writer).Error!void { + const comp = data.func.bin_file.comp; + const mod = comp.root_mod; + var lower = Lower{ + .bin_file = data.func.bin_file, + .allocator = data.func.gpa, + .mir = .{ + .instructions = data.func.mir_instructions.slice(), + .extra = data.func.mir_extra.items, + .frame_locs = data.func.frame_locs.slice(), + }, + .cc = .Unspecified, + .src_loc = data.func.src_loc, + .output_mode = comp.config.output_mode, + .link_mode = comp.config.link_mode, + .pic = mod.pic, + }; + var first = true; + for ((lower.lowerMir(data.inst) catch |err| switch (err) { + error.LowerFail => { + defer { + lower.err_msg.?.deinit(data.func.gpa); + lower.err_msg = null; + } + try writer.writeAll(lower.err_msg.?.msg); + return; + }, + error.OutOfMemory, error.InvalidInstruction => |e| { + try writer.writeAll(switch (e) { + error.OutOfMemory => "Out of memory", + error.InvalidInstruction => "CodeGen failed to find a viable instruction.", + }); + return; + }, + else => |e| return e, + }).insts) |lowered_inst| { + if (!first) try writer.writeAll("\ndebug(wip_mir): "); + try writer.print(" | {}", .{lowered_inst}); + first = false; + } +} +fn fmtWipMir(func: *Func, inst: Mir.Inst.Index) std.fmt.Formatter(formatWipMir) { + return .{ .data = .{ .func = func, .inst = inst } }; +} + +const FormatDeclData = struct { + mod: *Module, + decl_index: InternPool.DeclIndex, +}; +fn formatDecl( + data: FormatDeclData, + comptime _: []const u8, + _: std.fmt.FormatOptions, + writer: anytype, +) @TypeOf(writer).Error!void { + try data.mod.declPtr(data.decl_index).renderFullyQualifiedName(data.mod, writer); +} +fn fmtDecl(func: *Func, decl_index: InternPool.DeclIndex) std.fmt.Formatter(formatDecl) { + return .{ .data = .{ + .mod = func.bin_file.comp.module.?, + .decl_index = decl_index, + } }; +} + +const FormatAirData = struct { + func: *Func, + inst: Air.Inst.Index, +}; +fn formatAir( + data: FormatAirData, + comptime _: []const u8, + _: std.fmt.FormatOptions, + writer: anytype, +) @TypeOf(writer).Error!void { + @import("../../print_air.zig").dumpInst( + data.inst, + data.func.bin_file.comp.module.?, + data.func.air, + data.func.liveness, + ); +} +fn fmtAir(func: *Func, inst: Air.Inst.Index) std.fmt.Formatter(formatAir) { + return .{ .data = .{ .func = func, .inst = inst } }; +} + +const FormatTrackingData = struct { + func: *Func, +}; +fn 
formatTracking( + data: FormatTrackingData, + comptime _: []const u8, + _: std.fmt.FormatOptions, + writer: anytype, +) @TypeOf(writer).Error!void { + var it = data.func.inst_tracking.iterator(); + while (it.next()) |entry| try writer.print("\n%{d} = {}", .{ entry.key_ptr.*, entry.value_ptr.* }); +} +fn fmtTracking(func: *Func) std.fmt.Formatter(formatTracking) { + return .{ .data = .{ .func = func } }; +} + fn addInst(func: *Func, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index { const gpa = func.gpa; - try func.mir_instructions.ensureUnusedCapacity(gpa, 1); - const result_index: Mir.Inst.Index = @intCast(func.mir_instructions.len); func.mir_instructions.appendAssumeCapacity(inst); + if (inst.tag != .pseudo or switch (inst.ops) { + else => true, + .pseudo_dbg_prologue_end, + .pseudo_dbg_line_column, + .pseudo_dbg_epilogue_begin, + .pseudo_store_rm, + .pseudo_load_rm, + .pseudo_lea_rm, + .pseudo_mv, + .pseudo_dead, + => false, + }) wip_mir_log.debug("{}", .{func.fmtWipMir(result_index)}) else wip_mir_log.debug(" | uses-mem", .{}); return result_index; } @@ -979,7 +1118,7 @@ fn gen(func: *Func) !void { .r = .ra, .m = .{ .base = .{ .frame = .ret_addr }, - .mod = .{ .rm = .{ .size = .dword } }, + .mod = .{ .size = .dword, .unsigned = false }, }, } }, }); @@ -990,7 +1129,7 @@ fn gen(func: *Func) !void { .r = .ra, .m = .{ .base = .{ .frame = .ret_addr }, - .mod = .{ .rm = .{ .size = .dword } }, + .mod = .{ .size = .dword, .unsigned = false }, }, } }, }); @@ -1001,7 +1140,7 @@ fn gen(func: *Func) !void { .r = .s0, .m = .{ .base = .{ .frame = .base_ptr }, - .mod = .{ .rm = .{ .size = .dword } }, + .mod = .{ .size = .dword, .unsigned = false }, }, } }, }); @@ -1012,7 +1151,7 @@ fn gen(func: *Func) !void { .r = .s0, .m = .{ .base = .{ .frame = .base_ptr }, - .mod = .{ .rm = .{ .size = .dword } }, + .mod = .{ .size = .dword, .unsigned = false }, }, } }, }); @@ -1072,36 +1211,47 @@ fn genBody(func: *Func, body: []const Air.Inst.Index) InnerError!void { for (body) |inst| { if (func.liveness.isUnused(inst) and !func.air.mustLower(inst, ip)) continue; + wip_mir_log.debug("{}", .{func.fmtAir(inst)}); + verbose_tracking_log.debug("{}", .{func.fmtTracking()}); const old_air_bookkeeping = func.air_bookkeeping; try func.inst_tracking.ensureUnusedCapacity(func.gpa, 1); - switch (air_tags[@intFromEnum(inst)]) { + const tag: Air.Inst.Tag = air_tags[@intFromEnum(inst)]; + switch (tag) { // zig fmt: off - .ptr_add => try func.airPtrArithmetic(inst, .ptr_add), - .ptr_sub => try func.airPtrArithmetic(inst, .ptr_sub), + .add, + .add_wrap, + .sub, + .sub_wrap, - .add => try func.airBinOp(inst, .add), - .sub => try func.airBinOp(inst, .sub), + .mul, + .mul_wrap, + .div_trunc, - .add_safe, - .sub_safe, - .mul_safe, - => return func.fail("TODO implement safety_checked_instructions", .{}), + .shl, .shl_exact, + .shr, .shr_exact, - .add_wrap => try func.airAddWrap(inst), - .add_sat => try func.airAddSat(inst), - .sub_wrap => try func.airSubWrap(inst), - .sub_sat => try func.airSubSat(inst), - .mul => try func.airMul(inst), - .mul_wrap => try func.airMulWrap(inst), - .mul_sat => try func.airMulSat(inst), - .rem => try func.airRem(inst), - .mod => try func.airMod(inst), - .shl, .shl_exact => try func.airShl(inst), - .shl_sat => try func.airShlSat(inst), - .min => try func.airMinMax(inst, .min), - .max => try func.airMinMax(inst, .max), - .slice => try func.airSlice(inst), + .bool_and, + .bool_or, + .bit_and, + .bit_or, + + .xor, + + .min, + .max, + => try func.airBinOp(inst, tag), + + + .ptr_add, + .ptr_sub => try 
func.airPtrArithmetic(inst, tag), + + .rem, + .mod, + .div_float, + .div_floor, + .div_exact, + => return func.fail("TODO: {s}", .{@tagName(tag)}), .sqrt, .sin, @@ -1124,24 +1274,33 @@ fn genBody(func: *Func, body: []const Air.Inst.Index) InnerError!void { .mul_with_overflow => try func.airMulWithOverflow(inst), .shl_with_overflow => try func.airShlWithOverflow(inst), - .div_float, .div_trunc, .div_floor, .div_exact => try func.airDiv(inst), - .cmp_lt => try func.airCmp(inst), - .cmp_lte => try func.airCmp(inst), - .cmp_eq => try func.airCmp(inst), - .cmp_gte => try func.airCmp(inst), - .cmp_gt => try func.airCmp(inst), - .cmp_neq => try func.airCmp(inst), + .add_sat => try func.airAddSat(inst), + .sub_sat => try func.airSubSat(inst), + .mul_sat => try func.airMulSat(inst), + .shl_sat => try func.airShlSat(inst), + + .add_safe, + .sub_safe, + .mul_safe, + => return func.fail("TODO implement safety_checked_instructions", .{}), + + .cmp_lt, + .cmp_lte, + .cmp_eq, + .cmp_gte, + .cmp_gt, + .cmp_neq, + => try func.airCmp(inst, tag), .cmp_vector => try func.airCmpVector(inst), .cmp_lt_errors_len => try func.airCmpLtErrorsLen(inst), - .bool_and => try func.airBoolOp(inst), - .bool_or => try func.airBoolOp(inst), - .bit_and => try func.airBitAnd(inst), - .bit_or => try func.airBitOr(inst), - .xor => try func.airXor(inst), - .shr, .shr_exact => try func.airShr(inst), + .slice => try func.airSlice(inst), + .array_to_slice => try func.airArrayToSlice(inst), + + .slice_ptr => try func.airSlicePtr(inst), + .slice_len => try func.airSliceLen(inst), .alloc => try func.airAlloc(inst), .ret_ptr => try func.airRetPtr(inst), @@ -1181,7 +1340,6 @@ fn genBody(func: *Func, body: []const Air.Inst.Index) InnerError!void { .store_safe => try func.airStore(inst, true), .struct_field_ptr=> try func.airStructFieldPtr(inst), .struct_field_val=> try func.airStructFieldVal(inst), - .array_to_slice => try func.airArrayToSlice(inst), .float_from_int => try func.airFloatFromInt(inst), .int_from_float => try func.airIntFromFloat(inst), .cmpxchg_strong => try func.airCmpxchg(inst), @@ -1229,7 +1387,6 @@ fn genBody(func: *Func, body: []const Air.Inst.Index) InnerError!void { .atomic_store_monotonic => try func.airAtomicStore(inst, .monotonic), .atomic_store_release => try func.airAtomicStore(inst, .release), .atomic_store_seq_cst => try func.airAtomicStore(inst, .seq_cst), - .struct_field_ptr_index_0 => try func.airStructFieldPtrIndex(inst, 0), .struct_field_ptr_index_1 => try func.airStructFieldPtrIndex(inst, 1), .struct_field_ptr_index_2 => try func.airStructFieldPtrIndex(inst, 2), @@ -1238,15 +1395,15 @@ fn genBody(func: *Func, body: []const Air.Inst.Index) InnerError!void { .field_parent_ptr => try func.airFieldParentPtr(inst), .switch_br => try func.airSwitchBr(inst), - .slice_ptr => try func.airSlicePtr(inst), - .slice_len => try func.airSliceLen(inst), .ptr_slice_len_ptr => try func.airPtrSliceLenPtr(inst), .ptr_slice_ptr_ptr => try func.airPtrSlicePtrPtr(inst), .array_elem_val => try func.airArrayElemVal(inst), + .slice_elem_val => try func.airSliceElemVal(inst), .slice_elem_ptr => try func.airSliceElemPtr(inst), + .ptr_elem_val => try func.airPtrElemVal(inst), .ptr_elem_ptr => try func.airPtrElemPtr(inst), @@ -1330,6 +1487,7 @@ fn genBody(func: *Func, body: []const Air.Inst.Index) InnerError!void { } } } + verbose_tracking_log.debug("{}", .{func.fmtTracking()}); } fn getValue(func: *Func, value: MCValue, inst: ?Air.Inst.Index) !void { @@ -1563,7 +1721,7 @@ fn truncateRegister(func: *Func, ty: Type, reg: Register) 
!void { .i_type = .{ .rd = reg, .rs1 = reg, - .imm12 = Immediate.s(shift), + .imm12 = Immediate.u(shift), }, }, }); @@ -1574,25 +1732,49 @@ fn truncateRegister(func: *Func, ty: Type, reg: Register) !void { .i_type = .{ .rd = reg, .rs1 = reg, - .imm12 = Immediate.s(shift), + .imm12 = Immediate.u(shift), }, }, }); }, .unsigned => { const mask = ~@as(u64, 0) >> shift; - const tmp_reg = try func.copyToTmpRegister(Type.usize, .{ .immediate = mask }); - _ = try func.addInst(.{ - .tag = .@"and", - .ops = .rrr, - .data = .{ - .r_type = .{ - .rd = reg, - .rs1 = reg, - .rs2 = tmp_reg, + if (mask < 256) { + _ = try func.addInst(.{ + .tag = .andi, + .ops = .rri, + .data = .{ + .i_type = .{ + .rd = reg, + .rs1 = reg, + .imm12 = Immediate.u(@intCast(mask)), + }, }, - }, - }); + }); + } else { + _ = try func.addInst(.{ + .tag = .slli, + .ops = .rri, + .data = .{ + .i_type = .{ + .rd = reg, + .rs1 = reg, + .imm12 = Immediate.u(shift), + }, + }, + }); + _ = try func.addInst(.{ + .tag = .srli, + .ops = .rri, + .data = .{ + .i_type = .{ + .rd = reg, + .rs1 = reg, + .imm12 = Immediate.u(shift), + }, + }, + }); + } }, } } @@ -1673,9 +1855,8 @@ fn regTempClassForType(func: *Func, ty: Type) RegisterManager.RegisterBitSet { }; } -fn allocRegOrMem(func: *Func, inst: Air.Inst.Index, reg_ok: bool) !MCValue { +fn allocRegOrMem(func: *Func, elem_ty: Type, inst: ?Air.Inst.Index, reg_ok: bool) !MCValue { const zcu = func.bin_file.comp.module.?; - const elem_ty = func.typeOfIndex(inst); const abi_size = math.cast(u32, elem_ty.abiSize(zcu)) orelse { return func.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(zcu)}); @@ -1714,66 +1895,15 @@ fn allocReg(func: *Func, reg_class: abi.RegisterClass) !struct { Register, Regis return .{ reg, lock }; } -const PromoteOptions = struct { - /// zeroes out the register before loading in the operand - /// - /// if the operand is already a register, it will truncate with 0 - zero: bool = false, -}; - /// Similar to `allocReg` but will copy the MCValue into the Register unless `operand` is already /// a register, in which case it will return a possible lock to that register. 
-fn promoteReg(func: *Func, ty: Type, operand: MCValue, options: PromoteOptions) !struct { Register, ?RegisterLock } { - const zcu = func.bin_file.comp.module.?; - const bit_size = ty.bitSize(zcu); - +fn promoteReg(func: *Func, ty: Type, operand: MCValue) !struct { Register, ?RegisterLock } { if (operand == .register) { const op_reg = operand.register; - if (options.zero and op_reg.class() == .int) { - // we make sure to emit the truncate manually because binOp will call this function - // and it could cause an infinite loop - - _ = try func.addInst(.{ - .tag = .slli, - .ops = .rri, - .data = .{ - .i_type = .{ - .imm12 = Immediate.u(64 - bit_size), - .rd = op_reg, - .rs1 = op_reg, - }, - }, - }); - - _ = try func.addInst(.{ - .tag = .srli, - .ops = .rri, - .data = .{ - .i_type = .{ - .imm12 = Immediate.u(64 - bit_size), - .rd = op_reg, - .rs1 = op_reg, - }, - }, - }); - } - return .{ op_reg, func.register_manager.lockReg(operand.register) }; } const reg, const lock = try func.allocReg(func.typeRegClass(ty)); - - if (options.zero and reg.class() == .int) { - _ = try func.addInst(.{ - .tag = .pseudo, - .ops = .pseudo_mv, - .data = .{ .rr = .{ - .rd = reg, - .rs = .zero, - } }, - }); - } - try func.genSetReg(ty, reg, operand); return .{ reg, lock }; } @@ -1793,14 +1923,19 @@ fn elemOffset(func: *Func, index_ty: Type, index: MCValue, elem_size: u64) !Regi const lock = func.register_manager.lockRegAssumeUnused(reg); defer func.register_manager.unlockReg(lock); - const result = try func.binOp( + const result_reg, const result_lock = try func.allocReg(.int); + defer func.register_manager.unlockReg(result_lock); + + try func.genBinOp( .mul, .{ .register = reg }, index_ty, .{ .immediate = elem_size }, index_ty, + result_reg, ); - break :blk result.register; + + break :blk result_reg; }, } }; @@ -1892,7 +2027,7 @@ fn airIntCast(func: *Func, inst: Air.Inst.Index) !void { math.divCeil(u16, dst_int_info.bits, 64) catch unreachable == math.divCeil(u32, src_storage_bits, 64) catch unreachable and func.reuseOperand(inst, ty_op.operand, 0, src_mcv)) src_mcv else dst: { - const dst_mcv = try func.allocRegOrMem(inst, true); + const dst_mcv = try func.allocRegOrMem(dst_ty, inst, true); try func.genCopy(min_ty, dst_mcv, src_mcv); break :dst dst_mcv; }; @@ -1904,7 +2039,7 @@ fn airIntCast(func: *Func, inst: Air.Inst.Index) !void { break :result null; // TODO break :result dst_mcv; - } orelse return func.fail("TODO implement airIntCast from {} to {}", .{ + } orelse return func.fail("TODO: implement airIntCast from {} to {}", .{ src_ty.fmt(zcu), dst_ty.fmt(zcu), }); @@ -1948,7 +2083,7 @@ fn airNot(func: *Func, inst: Air.Inst.Index) !void { if (func.reuseOperand(inst, ty_op.operand, 0, operand) and operand == .register) operand.register else - (try func.allocRegOrMem(inst, true)).register; + (try func.allocRegOrMem(func.typeOfIndex(inst), inst, true)).register; _ = try func.addInst(.{ .tag = .pseudo, @@ -1970,106 +2105,6 @@ fn airNot(func: *Func, inst: Air.Inst.Index) !void { return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airMinMax( - func: *Func, - inst: Air.Inst.Index, - comptime tag: enum { - max, - min, - }, -) !void { - const zcu = func.bin_file.comp.module.?; - const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - - const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { - const lhs = try func.resolveInst(bin_op.lhs); - const rhs = try func.resolveInst(bin_op.rhs); - const lhs_ty = func.typeOf(bin_op.lhs); - const rhs_ty = 
func.typeOf(bin_op.rhs); - - const int_info = lhs_ty.intInfo(zcu); - - if (int_info.bits > 64) return func.fail("TODO: > 64 bit @min", .{}); - - const lhs_reg, const lhs_lock = blk: { - if (lhs == .register) break :blk .{ lhs.register, func.register_manager.lockReg(lhs.register) }; - - const lhs_reg, const lhs_lock = try func.allocReg(.int); - try func.genSetReg(lhs_ty, lhs_reg, lhs); - break :blk .{ lhs_reg, lhs_lock }; - }; - defer if (lhs_lock) |lock| func.register_manager.unlockReg(lock); - - const rhs_reg, const rhs_lock = blk: { - if (rhs == .register) break :blk .{ rhs.register, func.register_manager.lockReg(rhs.register) }; - - const rhs_reg, const rhs_lock = try func.allocReg(.int); - try func.genSetReg(rhs_ty, rhs_reg, rhs); - break :blk .{ rhs_reg, rhs_lock }; - }; - defer if (rhs_lock) |lock| func.register_manager.unlockReg(lock); - - const mask_reg, const mask_lock = try func.allocReg(.int); - defer func.register_manager.unlockReg(mask_lock); - - const result_reg, const result_lock = try func.allocReg(.int); - defer func.register_manager.unlockReg(result_lock); - - _ = try func.addInst(.{ - .tag = if (int_info.signedness == .unsigned) .sltu else .slt, - .ops = .rrr, - .data = .{ .r_type = .{ - .rd = mask_reg, - .rs1 = lhs_reg, - .rs2 = rhs_reg, - } }, - }); - - _ = try func.addInst(.{ - .tag = .sub, - .ops = .rrr, - .data = .{ .r_type = .{ - .rd = mask_reg, - .rs1 = .zero, - .rs2 = mask_reg, - } }, - }); - - _ = try func.addInst(.{ - .tag = .xor, - .ops = .rrr, - .data = .{ .r_type = .{ - .rd = result_reg, - .rs1 = lhs_reg, - .rs2 = rhs_reg, - } }, - }); - - _ = try func.addInst(.{ - .tag = .@"and", - .ops = .rrr, - .data = .{ .r_type = .{ - .rd = mask_reg, - .rs1 = result_reg, - .rs2 = mask_reg, - } }, - }); - - _ = try func.addInst(.{ - .tag = .xor, - .ops = .rrr, - .data = .{ .r_type = .{ - .rd = result_reg, - .rs1 = if (tag == .min) rhs_reg else lhs_reg, - .rs2 = mask_reg, - } }, - }); - - break :result .{ .register = result_reg }; - }; - return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); -} - fn airSlice(func: *Func, inst: Air.Inst.Index) !void { const zcu = func.bin_file.comp.module.?; const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; @@ -2094,417 +2129,487 @@ fn airSlice(func: *Func, inst: Air.Inst.Index) !void { } fn airBinOp(func: *Func, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { + const zcu = func.bin_file.comp.module.?; const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const lhs = try func.resolveInst(bin_op.lhs); - const rhs = try func.resolveInst(bin_op.rhs); - const lhs_ty = func.typeOf(bin_op.lhs); - const rhs_ty = func.typeOf(bin_op.rhs); + const dst_mcv = try func.binOp(inst, tag, bin_op.lhs, bin_op.rhs); - const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { - break :result try func.binOp(tag, lhs, lhs_ty, rhs, rhs_ty); - }; - return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); + const dst_ty = func.typeOfIndex(inst); + if (dst_ty.isAbiInt(zcu)) { + const abi_size: u32 = @intCast(dst_ty.abiSize(zcu)); + const bit_size: u32 = @intCast(dst_ty.bitSize(zcu)); + if (abi_size * 8 > bit_size) { + const dst_lock = switch (dst_mcv) { + .register => |dst_reg| func.register_manager.lockRegAssumeUnused(dst_reg), + else => null, + }; + defer if (dst_lock) |lock| func.register_manager.unlockReg(lock); + + if (dst_mcv.isRegister()) { + try func.truncateRegister(dst_ty, dst_mcv.getReg().?); + } else { + const tmp_reg, const tmp_lock = try 
func.allocReg(.int); + defer func.register_manager.unlockReg(tmp_lock); + + const hi_ty = try zcu.intType(.unsigned, @intCast((dst_ty.bitSize(zcu) - 1) % 64 + 1)); + const hi_mcv = dst_mcv.address().offset(@intCast(bit_size / 64 * 8)).deref(); + try func.genSetReg(hi_ty, tmp_reg, hi_mcv); + try func.truncateRegister(dst_ty, tmp_reg); + try func.genCopy(hi_ty, hi_mcv, .{ .register = tmp_reg }); + } + } + } + + return func.finishAir(inst, dst_mcv, .{ bin_op.lhs, bin_op.rhs, .none }); } fn binOp( func: *Func, - tag: Air.Inst.Tag, - lhs: MCValue, - lhs_ty: Type, - rhs: MCValue, - rhs_ty: Type, -) InnerError!MCValue { + maybe_inst: ?Air.Inst.Index, + air_tag: Air.Inst.Tag, + lhs_air: Air.Inst.Ref, + rhs_air: Air.Inst.Ref, +) !MCValue { + _ = maybe_inst; const zcu = func.bin_file.comp.module.?; + const lhs_ty = func.typeOf(lhs_air); + const rhs_ty = func.typeOf(rhs_air); - switch (tag) { - // Arithmetic operations on integers and floats - .add, - .sub, - .mul, - .div_float, + if (lhs_ty.isRuntimeFloat()) libcall: { + const float_bits = lhs_ty.floatBits(func.target.*); + const type_needs_libcall = switch (float_bits) { + 16 => true, + 32, 64 => false, + 80, 128 => true, + else => unreachable, + }; + switch (air_tag) { + .rem, .mod => {}, + else => if (!type_needs_libcall) break :libcall, + } + return func.fail("TODO: binOp libcall runtime-float ops", .{}); + } + + if (lhs_ty.bitSize(zcu) > 64) return func.fail("TODO: binOp > 64 bits", .{}); + + const lhs_mcv = try func.resolveInst(lhs_air); + const rhs_mcv = try func.resolveInst(rhs_air); + + const class_for_dst_ty: abi.RegisterClass = switch (air_tag) { + // comparisons always return an int register, no matter the input type .cmp_eq, .cmp_neq, .cmp_lt, .cmp_lte, .cmp_gt, .cmp_gte, + => .int, + + else => func.typeRegClass(lhs_ty), + }; + + const dst_reg, const dst_lock = try func.allocReg(class_for_dst_ty); + defer func.register_manager.unlockReg(dst_lock); + + try func.genBinOp( + air_tag, + lhs_mcv, + lhs_ty, + rhs_mcv, + rhs_ty, + dst_reg, + ); + + return .{ .register = dst_reg }; +} + +/// Does the same thing as binOp, but is meant to be used internally by the backend. +/// +/// The `dst_reg` argument is meant to be caller-locked. Asserts that the binOp result can +/// fit into the register. +/// +/// Assumes that the `dst_reg` class is correct. 
+fn genBinOp( + func: *Func, + tag: Air.Inst.Tag, + lhs_mcv: MCValue, + lhs_ty: Type, + rhs_mcv: MCValue, + rhs_ty: Type, + dst_reg: Register, +) !void { + const zcu = func.bin_file.comp.module.?; + const bit_size = lhs_ty.bitSize(zcu); + assert(bit_size <= 64); + + const is_unsigned = lhs_ty.isUnsignedInt(zcu); + + const lhs_reg, const maybe_lhs_lock = try func.promoteReg(lhs_ty, lhs_mcv); + const rhs_reg, const maybe_rhs_lock = try func.promoteReg(rhs_ty, rhs_mcv); + + defer if (maybe_lhs_lock) |lock| func.register_manager.unlockReg(lock); + defer if (maybe_rhs_lock) |lock| func.register_manager.unlockReg(lock); + + switch (tag) { + .add, + .add_wrap, + .sub, + .sub_wrap, + .mul, + .mul_wrap, => { - assert(lhs_ty.eql(rhs_ty, zcu)); + if (!math.isPowerOfTwo(bit_size)) + return func.fail( + "TODO: genBinOp {s} non-pow 2, found {}", + .{ @tagName(tag), bit_size }, + ); + switch (lhs_ty.zigTypeTag(zcu)) { + .Int => { + const mir_tag: Mir.Inst.Tag = switch (tag) { + .add, .add_wrap => switch (bit_size) { + 8, 16, 64 => .add, + 32 => .addw, + else => unreachable, + }, + .sub, .sub_wrap => switch (bit_size) { + 8, 16, 32 => .subw, + 64 => .sub, + else => unreachable, + }, + .mul, .mul_wrap => switch (bit_size) { + 8, 16, 64 => .mul, + 32 => .mulw, + else => unreachable, + }, + else => unreachable, + }; + + _ = try func.addInst(.{ + .tag = mir_tag, + .ops = .rrr, + .data = .{ + .r_type = .{ + .rd = dst_reg, + .rs1 = lhs_reg, + .rs2 = rhs_reg, + }, + }, + }); + + // truncate when the instruction is larger than the bit size. + switch (bit_size) { + 8, 16 => try func.truncateRegister(lhs_ty, dst_reg), + 32 => {}, // addw/subw affects the first 32-bits + 64 => {}, // add/sub affects the entire register + else => unreachable, + } + }, .Float => { - const float_bits = lhs_ty.floatBits(zcu.getTarget()); - const float_reg_bits: u32 = if (func.hasFeature(.d)) 64 else 32; - if (float_bits <= float_reg_bits) { - return func.binOpFloat(tag, lhs, lhs_ty, rhs, rhs_ty); - } else { - return func.fail("TODO: binary operations for floats with bits > {d}", .{float_reg_bits}); - } + const mir_tag: Mir.Inst.Tag = switch (tag) { + .add => switch (bit_size) { + 32 => .fadds, + 64 => .faddd, + else => unreachable, + }, + .sub => switch (bit_size) { + 32 => .fsubs, + 64 => .fsubd, + else => unreachable, + }, + .mul => switch (bit_size) { + 32 => .fmuls, + 64 => .fmuld, + else => unreachable, + }, + else => unreachable, + }; + + _ = try func.addInst(.{ + .tag = mir_tag, + .ops = .rrr, + .data = .{ + .r_type = .{ + .rd = dst_reg, + .rs1 = lhs_reg, + .rs2 = rhs_reg, + }, + }, + }); }, - .Vector => return func.fail("TODO binary operations on vectors", .{}), - .Int, .Enum, .ErrorSet => { - const int_info = lhs_ty.intInfo(zcu); - if (int_info.bits <= 64) { - return func.binOpRegister(tag, lhs, lhs_ty, rhs, rhs_ty); - } else { - return func.fail("TODO binary operations on int with bits > 64", .{}); - } - }, - else => |x| return func.fail("TOOD: binOp {s}", .{@tagName(x)}), + else => unreachable, } }, .ptr_add, .ptr_sub, => { - switch (lhs_ty.zigTypeTag(zcu)) { - .Pointer => { - const ptr_ty = lhs_ty; - const elem_ty = switch (ptr_ty.ptrSize(zcu)) { - .One => ptr_ty.childType(zcu).childType(zcu), // ptr to array, so get array element type - else => ptr_ty.childType(zcu), - }; - const elem_size = elem_ty.abiSize(zcu); + const tmp_reg = try func.copyToTmpRegister(rhs_ty, .{ .register = rhs_reg }); + const tmp_mcv = MCValue{ .register = tmp_reg }; + const tmp_lock = func.register_manager.lockRegAssumeUnused(tmp_reg); + defer 
func.register_manager.unlockReg(tmp_lock); - if (elem_size == 1) { - const base_tag: Air.Inst.Tag = switch (tag) { - .ptr_add => .add, - .ptr_sub => .sub, - else => unreachable, - }; + // RISC-V has no immediate mul, so we copy the size to a temporary register + const elem_size = lhs_ty.elemType2(zcu).abiSize(zcu); + const elem_size_reg = try func.copyToTmpRegister(Type.usize, .{ .immediate = elem_size }); - return try func.binOpRegister(base_tag, lhs, lhs_ty, rhs, rhs_ty); - } else { - const offset = try func.binOp( - .mul, - rhs, - Type.usize, - .{ .immediate = elem_size }, - Type.usize, - ); + try func.genBinOp( + .mul, + tmp_mcv, + rhs_ty, + .{ .register = elem_size_reg }, + Type.usize, + tmp_reg, + ); - const addr = try func.binOp( - tag, - lhs, - Type.manyptr_u8, - offset, - Type.usize, - ); - return addr; - } + try func.genBinOp( + switch (tag) { + .ptr_add => .add, + .ptr_sub => .sub, + else => unreachable, }, - else => unreachable, + lhs_mcv, + Type.usize, // we know it's a pointer, so it'll be usize. + tmp_mcv, + Type.usize, + dst_reg, + ); + }, + + .bit_and, + .bit_or, + .bool_and, + .bool_or, + => { + _ = try func.addInst(.{ + .tag = switch (tag) { + .bit_and, .bool_and => .@"and", + .bit_or, .bool_or => .@"or", + else => unreachable, + }, + .ops = .rrr, + .data = .{ + .r_type = .{ + .rd = dst_reg, + .rs1 = lhs_reg, + .rs2 = rhs_reg, + }, + }, + }); + + switch (tag) { + .bool_and, + .bool_or, + => try func.truncateRegister(Type.bool, dst_reg), + else => {}, } }, - // These instructions have unsymteric bit sizes on RHS and LHS. - .shr, - .shl, + .div_trunc, => { - switch (lhs_ty.zigTypeTag(zcu)) { - .Float => return func.fail("TODO binary operations on floats", .{}), - .Vector => return func.fail("TODO binary operations on vectors", .{}), - .Int => { - const int_info = lhs_ty.intInfo(zcu); - if (int_info.bits <= 64) { - return func.binOpRegister(tag, lhs, lhs_ty, rhs, rhs_ty); - } else { - return func.fail("TODO binary operations on int with bits > 64", .{}); - } + if (!math.isPowerOfTwo(bit_size)) + return func.fail( + "TODO: genBinOp {s} non-pow 2, found {}", + .{ @tagName(tag), bit_size }, + ); + + const mir_tag: Mir.Inst.Tag = switch (tag) { + .div_trunc => switch (bit_size) { + 8, 16, 32 => if (is_unsigned) .divuw else .divw, + 64 => if (is_unsigned) .divu else .div, + else => unreachable, }, else => unreachable, - } - }, - else => return func.fail("TODO binOp {}", .{tag}), - } -} + }; -fn binOpRegister( - func: *Func, - tag: Air.Inst.Tag, - lhs: MCValue, - lhs_ty: Type, - rhs: MCValue, - rhs_ty: Type, -) !MCValue { - const lhs_reg, const lhs_lock = try func.promoteReg(lhs_ty, lhs, .{ .zero = true }); - defer if (lhs_lock) |lock| func.register_manager.unlockReg(lock); - - const rhs_reg, const rhs_lock = try func.promoteReg(rhs_ty, rhs, .{ .zero = true }); - defer if (rhs_lock) |lock| func.register_manager.unlockReg(lock); - - const dest_reg, const dest_lock = try func.allocReg(.int); - defer func.register_manager.unlockReg(dest_lock); - - const mir_tag: Mir.Inst.Tag = switch (tag) { - .add => .add, - .sub => .sub, - .mul => .mul, - - .shl => .sllw, - .shr => .srlw, - - .cmp_eq, - .cmp_neq, - .cmp_gt, - .cmp_gte, - .cmp_lt, - .cmp_lte, - => .pseudo, - - else => return func.fail("TODO: binOpRegister {s}", .{@tagName(tag)}), - }; - - switch (mir_tag) { - .add, - .sub, - .mul, - .sllw, - .srlw, - => { _ = try func.addInst(.{ .tag = mir_tag, .ops = .rrr, .data = .{ .r_type = .{ - .rd = dest_reg, + .rd = dst_reg, .rs1 = lhs_reg, .rs2 = rhs_reg, }, }, }); + + if (!is_unsigned) { + 
// truncate when the instruction is larger than the bit size. + switch (bit_size) { + 8, 16 => try func.truncateRegister(lhs_ty, dst_reg), + 32 => {}, // divw affects the first 32-bits + 64 => {}, // div affects the entire register + else => unreachable, + } + } }, - .pseudo => { - const pseudo_op = switch (tag) { - .cmp_eq, - .cmp_neq, - .cmp_gt, - .cmp_gte, - .cmp_lt, - .cmp_lte, - => .pseudo_compare, + .shr, + .shr_exact, + .shl, + .shl_exact, + => { + if (!math.isPowerOfTwo(bit_size)) + return func.fail( + "TODO: genBinOp {s} non-pow 2, found {}", + .{ @tagName(tag), bit_size }, + ); + + // it's important that the shift amount is exact + try func.truncateRegister(rhs_ty, rhs_reg); + + const mir_tag: Mir.Inst.Tag = switch (tag) { + .shl, .shl_exact => switch (bit_size) { + 8, 16, 64 => .sll, + 32 => .sllw, + else => unreachable, + }, + .shr, .shr_exact => switch (bit_size) { + 8, 16, 64 => .srl, + 32 => .srlw, + else => unreachable, + }, else => unreachable, }; - _ = try func.addInst(.{ - .tag = .pseudo, - .ops = pseudo_op, - .data = .{ - .compare = .{ - .rd = dest_reg, - .rs1 = lhs_reg, - .rs2 = rhs_reg, - .op = switch (tag) { - .cmp_eq => .eq, - .cmp_neq => .neq, - .cmp_gt => .gt, - .cmp_gte => .gte, - .cmp_lt => .lt, - .cmp_lte => .lte, - else => unreachable, - }, - .size = func.memSize(lhs_ty), - }, - }, - }); - }, - - else => unreachable, - } - - return MCValue{ .register = dest_reg }; -} - -fn binOpFloat( - func: *Func, - tag: Air.Inst.Tag, - lhs: MCValue, - lhs_ty: Type, - rhs: MCValue, - rhs_ty: Type, -) !MCValue { - const zcu = func.bin_file.comp.module.?; - const float_bits = lhs_ty.floatBits(zcu.getTarget()); - - const lhs_reg, const lhs_lock = try func.promoteReg(lhs_ty, lhs, .{}); - defer if (lhs_lock) |lock| func.register_manager.unlockReg(lock); - - const rhs_reg, const rhs_lock = try func.promoteReg(rhs_ty, rhs, .{}); - defer if (rhs_lock) |lock| func.register_manager.unlockReg(lock); - - const mir_tag: Mir.Inst.Tag = switch (tag) { - .add => if (float_bits == 32) .fadds else .faddd, - .sub => if (float_bits == 32) .fsubs else .fsubd, - .mul => if (float_bits == 32) .fmuls else .fmuld, - .div_float => if (float_bits == 32) .fdivs else .fdivd, - - .cmp_eq, - .cmp_neq, - .cmp_gt, - .cmp_gte, - .cmp_lt, - .cmp_lte, - => .pseudo, - - else => return func.fail("TODO: binOpFloat mir_tag {s}", .{@tagName(tag)}), - }; - - const return_class: abi.RegisterClass = switch (tag) { - .add, - .sub, - .mul, - .div_float, - => .float, - - .cmp_eq, - .cmp_neq, - .cmp_gt, - .cmp_gte, - .cmp_lt, - .cmp_lte, - => .int, - else => unreachable, - }; - - const dest_reg, const dest_lock = try func.allocReg(return_class); - defer func.register_manager.unlockReg(dest_lock); - - switch (tag) { - .add, - .sub, - .mul, - .div_float, - => { _ = try func.addInst(.{ .tag = mir_tag, .ops = .rrr, .data = .{ .r_type = .{ - .rd = dest_reg, + .rd = dst_reg, .rs1 = lhs_reg, .rs2 = rhs_reg, } }, }); + + switch (bit_size) { + 8, 16 => try func.truncateRegister(lhs_ty, dst_reg), + 32 => {}, + 64 => {}, + else => unreachable, + } }, + // TODO: move the isel logic out of lower and into here. 
.cmp_eq, .cmp_neq, - .cmp_gt, - .cmp_gte, .cmp_lt, .cmp_lte, + .cmp_gt, + .cmp_gte, => { _ = try func.addInst(.{ .tag = .pseudo, .ops = .pseudo_compare, .data = .{ .compare = .{ - .rd = dest_reg, - .rs1 = lhs_reg, - .rs2 = rhs_reg, .op = switch (tag) { .cmp_eq => .eq, .cmp_neq => .neq, - .cmp_gt => .gt, - .cmp_gte => .gte, .cmp_lt => .lt, .cmp_lte => .lte, + .cmp_gt => .gt, + .cmp_gte => .gte, else => unreachable, }, - .size = func.memSize(lhs_ty), + .rd = dst_reg, + .rs1 = lhs_reg, + .rs2 = rhs_reg, + .ty = lhs_ty, }, }, }); }, - else => unreachable, - } + // A branchless @min/@max sequence. + // + // Assume that a0 and a1 are the lhs and rhs respectively. + // Also assume that a2 is the destination register. + // + // Algorithm (for @min; @max swaps the operand of the final xor): + // slt s0, a0, a1 + // sub s0, zero, s0 + // xor a2, a0, a1 + // and s0, a2, s0 + // xor a2, a1, s0 + // + // "slt s0, a0, a1" will set s0 to 1 if a0 is less than a1, and 0 otherwise. + // + // "sub s0, zero, s0" will set all the bits of s0 to 1 if it was 1, otherwise it'll remain at 0. + // + // "xor a2, a0, a1" stores the bitwise XOR of a0 and a1 in a2, capturing exactly the bits in which they differ. + // + // "and s0, a2, s0" masks that XOR with s0. If a0 < a1, s0 is -1, which keeps all the bits of a2. If a0 >= a1, + // s0 is 0, nullifying the mask. + // + // "xor a2, a1, s0" folds the mask back in. If a0 was less than a1, s0 held a0 ^ a1, so the xor turns a1 back + // into a0, the minimum. If a0 was greater than or equal to a1, s0 was 0, leaving a1, already the minimum, + // unchanged. Using a0 in the final xor instead yields the maximum, which is why the last instruction below + // selects rhs for .min and lhs for .max. For example, with a0 = 3 and a1 = 5: s0 = -1, a2 = 3 ^ 5 = 6, and + // 5 ^ 6 = 3 recovers the minimum. .min, .max => { const int_info = lhs_ty.intInfo(zcu); - return MCValue{ .register = dest_reg }; + const mask_reg, const mask_lock = try func.allocReg(.int); + defer func.register_manager.unlockReg(mask_lock); + _ = try func.addInst(.{ + .tag = if (int_info.signedness == .unsigned) .sltu else .slt, + .ops = .rrr, + .data = .{ .r_type = .{ + .rd = mask_reg, + .rs1 = lhs_reg, + .rs2 = rhs_reg, + } }, + }); + _ = try func.addInst(.{ + .tag = .sub, + .ops = .rrr, + .data = .{ .r_type = .{ + .rd = mask_reg, + .rs1 = .zero, + .rs2 = mask_reg, + } }, + }); + _ = try func.addInst(.{ + .tag = .xor, + .ops = .rrr, + .data = .{ .r_type = .{ + .rd = dst_reg, + .rs1 = lhs_reg, + .rs2 = rhs_reg, + } }, + }); + _ = try func.addInst(.{ + .tag = .@"and", + .ops = .rrr, + .data = .{ .r_type = .{ + .rd = mask_reg, + .rs1 = dst_reg, + .rs2 = mask_reg, + } }, + }); + _ = try func.addInst(.{ + .tag = .xor, + .ops = .rrr, + .data = .{ .r_type = .{ + .rd = dst_reg, + .rs1 = if (tag == .min) rhs_reg else lhs_reg, + .rs2 = mask_reg, + } }, + }); + }, + else => return func.fail("TODO: genBinOp {}", .{tag}), + } } fn airPtrArithmetic(func: *Func, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data; - const lhs = try func.resolveInst(bin_op.lhs); - const rhs = try func.resolveInst(bin_op.rhs); - const lhs_ty = func.typeOf(bin_op.lhs); - const rhs_ty = func.typeOf(bin_op.rhs); - - const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { - break :result try func.binOp(tag, lhs, lhs_ty, rhs, rhs_ty); - }; - return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); -} - -fn airAddWrap(func: *Func, inst: Air.Inst.Index) !void { - const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue 
= if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement addwrap for {}", .{func.target.cpu.arch}); - return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); -} - -fn airAddSat(func: *Func, inst: Air.Inst.Index) !void { - const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement add_sat for {}", .{func.target.cpu.arch}); - return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); -} - -fn airSubWrap(func: *Func, inst: Air.Inst.Index) !void { - const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { - // RISCV arthemtic instructions already wrap, so this is simply a sub binOp with - // no overflow checks. - const lhs = try func.resolveInst(bin_op.lhs); - const rhs = try func.resolveInst(bin_op.rhs); - const lhs_ty = func.typeOf(bin_op.lhs); - const rhs_ty = func.typeOf(bin_op.rhs); - - break :result try func.binOp(.sub, lhs, lhs_ty, rhs, rhs_ty); - }; - return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); -} - -fn airSubSat(func: *Func, inst: Air.Inst.Index) !void { - const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement sub_sat for {}", .{func.target.cpu.arch}); - return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); -} - -fn airMul(func: *Func, inst: Air.Inst.Index) !void { - const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { - const lhs = try func.resolveInst(bin_op.lhs); - const rhs = try func.resolveInst(bin_op.rhs); - const lhs_ty = func.typeOf(bin_op.lhs); - const rhs_ty = func.typeOf(bin_op.rhs); - - break :result try func.binOp(.mul, lhs, lhs_ty, rhs, rhs_ty); - }; - return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); -} - -fn airDiv(func: *Func, inst: Air.Inst.Index) !void { - const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { - const lhs = try func.resolveInst(bin_op.lhs); - const rhs = try func.resolveInst(bin_op.rhs); - const lhs_ty = func.typeOf(bin_op.lhs); - const rhs_ty = func.typeOf(bin_op.rhs); - - break :result try func.binOp(.div_float, lhs, lhs_ty, rhs, rhs_ty); - }; - return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); -} - -fn airMulWrap(func: *Func, inst: Air.Inst.Index) !void { - const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement mulwrap for {}", .{func.target.cpu.arch}); - return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); -} - -fn airMulSat(func: *Func, inst: Air.Inst.Index) !void { - const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement mul_sat for {}", .{func.target.cpu.arch}); - return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); + const dst_mcv = try func.binOp(inst, tag, bin_op.lhs, bin_op.rhs); + return func.finishAir(inst, dst_mcv, .{ 
bin_op.lhs, bin_op.rhs, .none }); } fn airAddWithOverflow(func: *Func, inst: Air.Inst.Index) !void { @@ -2513,19 +2618,16 @@ fn airAddWithOverflow(func: *Func, inst: Air.Inst.Index) !void { const extra = func.air.extraData(Air.Bin, ty_pl.payload).data; const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { - const lhs = try func.resolveInst(extra.lhs); - const rhs = try func.resolveInst(extra.rhs); const lhs_ty = func.typeOf(extra.lhs); - const rhs_ty = func.typeOf(extra.rhs); const int_info = lhs_ty.intInfo(zcu); const tuple_ty = func.typeOfIndex(inst); - const result_mcv = try func.allocRegOrMem(inst, false); + const result_mcv = try func.allocRegOrMem(tuple_ty, inst, false); const offset = result_mcv.load_frame; if (int_info.bits >= 8 and math.isPowerOfTwo(int_info.bits)) { - const add_result = try func.binOp(.add, lhs, lhs_ty, rhs, rhs_ty); + const add_result = try func.binOp(null, .add, extra.lhs, extra.rhs); const add_result_reg = try func.copyToTmpRegister(lhs_ty, add_result); const add_result_reg_lock = func.register_manager.lockRegAssumeUnused(add_result_reg); defer func.register_manager.unlockReg(add_result_reg_lock); @@ -2542,7 +2644,7 @@ fn airAddWithOverflow(func: *Func, inst: Air.Inst.Index) !void { .i_type = .{ .rd = shift_reg, .rs1 = add_result_reg, - .imm12 = Immediate.s(shift_amount), + .imm12 = Immediate.u(shift_amount), }, }, }); @@ -2554,7 +2656,7 @@ fn airAddWithOverflow(func: *Func, inst: Air.Inst.Index) !void { .i_type = .{ .rd = shift_reg, .rs1 = shift_reg, - .imm12 = Immediate.s(shift_amount), + .imm12 = Immediate.u(shift_amount), }, }, }); @@ -2566,18 +2668,23 @@ fn airAddWithOverflow(func: *Func, inst: Air.Inst.Index) !void { add_result, ); - const overflow_mcv = try func.binOp( + const overflow_reg, const overflow_lock = try func.allocReg(.int); + defer func.register_manager.unlockReg(overflow_lock); + + try func.genBinOp( .cmp_neq, .{ .register = shift_reg }, lhs_ty, .{ .register = add_result_reg }, lhs_ty, + overflow_reg, ); + try func.genSetMem( .{ .frame = offset.index }, offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(1, zcu))), Type.u1, - overflow_mcv, + .{ .register = overflow_reg }, ); break :result result_mcv; @@ -2602,49 +2709,64 @@ fn airSubWithOverflow(func: *Func, inst: Air.Inst.Index) !void { const int_info = lhs_ty.intInfo(zcu); - if (!math.isPowerOfTwo(int_info.bits) or !(int_info.bits >= 8)) { + if (!math.isPowerOfTwo(int_info.bits) or int_info.bits < 8) { return func.fail("TODO: airSubWithOverflow non-power of 2 and less than 8 bits", .{}); } + if (int_info.bits > 64) { + return func.fail("TODO: airSubWithOverflow > 64 bits", .{}); + } + const tuple_ty = func.typeOfIndex(inst); - const result_mcv = try func.allocRegOrMem(inst, false); + const result_mcv = try func.allocRegOrMem(tuple_ty, inst, false); const offset = result_mcv.load_frame; - const lhs_reg, const lhs_lock = try func.promoteReg(lhs_ty, lhs, .{}); + const dest_mcv = try func.binOp(null, .sub, extra.lhs, extra.rhs); + assert(dest_mcv == .register); + const dest_reg = dest_mcv.register; + + try func.genSetMem( + .{ .frame = offset.index }, + offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(0, zcu))), + lhs_ty, + .{ .register = dest_reg }, + ); + + const lhs_reg, const lhs_lock = try func.promoteReg(lhs_ty, lhs); defer if (lhs_lock) |lock| func.register_manager.unlockReg(lock); - const rhs_reg, const rhs_lock = try func.promoteReg(rhs_ty, rhs, .{}); + const rhs_reg, const rhs_lock = try func.promoteReg(rhs_ty, rhs); defer if (rhs_lock) 
|lock| func.register_manager.unlockReg(lock); - const dest_reg, const dest_lock = try func.allocReg(.int); - defer func.register_manager.unlockReg(dest_lock); + const overflow_reg = try func.copyToTmpRegister(Type.usize, .{ .immediate = 0 }); + + const overflow_lock = func.register_manager.lockRegAssumeUnused(overflow_reg); + defer func.register_manager.unlockReg(overflow_lock); switch (int_info.signedness) { - .unsigned => return func.fail("TODO: airSubWithOverflow unsigned", .{}), + .unsigned => { + _ = try func.addInst(.{ + .tag = .sltu, + .ops = .rrr, + .data = .{ .r_type = .{ + .rd = overflow_reg, + .rs1 = lhs_reg, + .rs2 = rhs_reg, + } }, + }); + + try func.genSetMem( + .{ .frame = offset.index }, + offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(1, zcu))), + Type.u1, + .{ .register = overflow_reg }, + ); + + break :result result_mcv; + }, .signed => { switch (int_info.bits) { 64 => { - // result - _ = try func.addInst(.{ - .tag = .sub, - .ops = .rrr, - .data = .{ .r_type = .{ - .rd = dest_reg, - .rs1 = lhs_reg, - .rs2 = rhs_reg, - } }, - }); - - try func.genSetMem( - .{ .frame = offset.index }, - offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(0, zcu))), - lhs_ty, - .{ .register = dest_reg }, - ); - - // overflow check - const overflow_reg = try func.copyToTmpRegister(Type.usize, .{ .immediate = 0 }); - _ = try func.addInst(.{ .tag = .slt, .ops = .rrr, @@ -2675,19 +2797,20 @@ fn airSubWithOverflow(func: *Func, inst: Air.Inst.Index) !void { } }, }); - const overflow_mcv = try func.binOp( + try func.genBinOp( .cmp_neq, .{ .register = overflow_reg }, Type.usize, .{ .register = rhs_reg }, Type.usize, + overflow_reg, ); try func.genSetMem( .{ .frame = offset.index }, offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(1, zcu))), Type.u1, - overflow_mcv, + .{ .register = overflow_reg }, ); break :result result_mcv; @@ -2702,16 +2825,42 @@ fn airSubWithOverflow(func: *Func, inst: Air.Inst.Index) !void { } fn airMulWithOverflow(func: *Func, inst: Air.Inst.Index) !void { - //const tag = func.air.instructions.items(.tag)[@intFromEnum(inst)]; + const zcu = func.bin_file.comp.module.?; const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = func.air.extraData(Air.Bin, ty_pl.payload).data; - const zcu = func.bin_file.comp.module.?; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { const lhs = try func.resolveInst(extra.lhs); const rhs = try func.resolveInst(extra.rhs); const lhs_ty = func.typeOf(extra.lhs); const rhs_ty = func.typeOf(extra.rhs); + const tuple_ty = func.typeOfIndex(inst); + + // genSetReg needs to support register_offset src_mcv for this to be true. 
+ const result_mcv = try func.allocRegOrMem(tuple_ty, inst, false); + + const result_off: i32 = @intCast(tuple_ty.structFieldOffset(0, zcu)); + const overflow_off: i32 = @intCast(tuple_ty.structFieldOffset(1, zcu)); + + const dest_reg, const dest_lock = try func.allocReg(.int); + defer func.register_manager.unlockReg(dest_lock); + + try func.genBinOp( + .mul, + lhs, + lhs_ty, + rhs, + rhs_ty, + dest_reg, + ); + + try func.genCopy( + lhs_ty, + result_mcv.offset(result_off), + .{ .register = dest_reg }, + ); + switch (lhs_ty.zigTypeTag(zcu)) { else => |x| return func.fail("TODO: airMulWithOverflow {s}", .{@tagName(x)}), .Int => { @@ -2719,74 +2868,53 @@ fn airMulWithOverflow(func: *Func, inst: Air.Inst.Index) !void { const int_info = lhs_ty.intInfo(zcu); switch (int_info.bits) { 1...32 => { - if (func.hasFeature(.m)) { - const dest = try func.binOp(.mul, lhs, lhs_ty, rhs, rhs_ty); + if (int_info.bits >= 8 and math.isPowerOfTwo(int_info.bits)) { + if (int_info.signedness == .unsigned) { + switch (int_info.bits) { + 1...8 => { + const max_val = std.math.pow(u16, 2, int_info.bits) - 1; - const add_result_lock = func.register_manager.lockRegAssumeUnused(dest.register); - defer func.register_manager.unlockReg(add_result_lock); + const add_reg, const add_lock = try func.promoteReg(lhs_ty, lhs); + defer if (add_lock) |lock| func.register_manager.unlockReg(lock); - const tuple_ty = func.typeOfIndex(inst); + const overflow_reg, const overflow_lock = try func.allocReg(.int); + defer func.register_manager.unlockReg(overflow_lock); - const result_mcv = try func.allocRegOrMem(inst, true); + _ = try func.addInst(.{ + .tag = .andi, + .ops = .rri, + .data = .{ .i_type = .{ + .rd = overflow_reg, + .rs1 = add_reg, + .imm12 = Immediate.s(max_val), + } }, + }); - const result_off: i32 = @intCast(tuple_ty.structFieldOffset(0, zcu)); - const overflow_off: i32 = @intCast(tuple_ty.structFieldOffset(1, zcu)); + try func.genBinOp( + .cmp_neq, + .{ .register = overflow_reg }, + lhs_ty, + .{ .register = add_reg }, + lhs_ty, + overflow_reg, + ); - try func.genCopy( - lhs_ty, - result_mcv.offset(result_off), - dest, - ); + try func.genCopy( + lhs_ty, + result_mcv.offset(overflow_off), + .{ .register = overflow_reg }, + ); - if (int_info.bits >= 8 and math.isPowerOfTwo(int_info.bits)) { - if (int_info.signedness == .unsigned) { - switch (int_info.bits) { - 1...8 => { - const max_val = std.math.pow(u16, 2, int_info.bits) - 1; + break :result result_mcv; + }, - const add_reg, const add_lock = try func.promoteReg(lhs_ty, lhs, .{}); - defer if (add_lock) |lock| func.register_manager.unlockReg(lock); - - const overflow_reg, const overflow_lock = try func.allocReg(.int); - defer func.register_manager.unlockReg(overflow_lock); - - _ = try func.addInst(.{ - .tag = .andi, - .ops = .rri, - .data = .{ .i_type = .{ - .rd = overflow_reg, - .rs1 = add_reg, - .imm12 = Immediate.s(max_val), - } }, - }); - - const overflow_mcv = try func.binOp( - .cmp_neq, - .{ .register = overflow_reg }, - lhs_ty, - .{ .register = add_reg }, - lhs_ty, - ); - - try func.genCopy( - lhs_ty, - result_mcv.offset(overflow_off), - overflow_mcv, - ); - - break :result result_mcv; - }, - - else => return func.fail("TODO: airMulWithOverflow check for size {d}", .{int_info.bits}), - } - } else { - return func.fail("TODO: airMulWithOverflow calculate carry for signed addition", .{}); + else => return func.fail("TODO: airMulWithOverflow check for size {d}", .{int_info.bits}), } } else { - return func.fail("TODO: airMulWithOverflow with < 8 bits or non-pow of 2", 
.{}); + return func.fail("TODO: airMulWithOverflow calculate carry for signed addition", .{}); } } else { - return func.fail("TODO: emulate mul for targets without M feature", .{}); + return func.fail("TODO: airMulWithOverflow with < 8 bits or non-pow of 2", .{}); } }, else => return func.fail("TODO: airMulWithOverflow larger than 32-bit mul", .{}), @@ -2799,123 +2927,32 @@ fn airMulWithOverflow(func: *Func, inst: Air.Inst.Index) !void { } fn airShlWithOverflow(func: *Func, inst: Air.Inst.Index) !void { - _ = inst; - return func.fail("TODO implement airShlWithOverflow for {}", .{func.target.cpu.arch}); -} - -fn airRem(func: *Func, inst: Air.Inst.Index) !void { const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement rem for {}", .{func.target.cpu.arch}); + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement airShlWithOverflow", .{}); return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } -fn airMod(func: *Func, inst: Air.Inst.Index) !void { +fn airAddSat(func: *Func, inst: Air.Inst.Index) !void { const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement zcu for {}", .{func.target.cpu.arch}); + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement airAddSat", .{}); return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } -fn airBitAnd(func: *Func, inst: Air.Inst.Index) !void { +fn airSubSat(func: *Func, inst: Air.Inst.Index) !void { const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { - const lhs = try func.resolveInst(bin_op.lhs); - const rhs = try func.resolveInst(bin_op.rhs); - - const lhs_ty = func.typeOf(bin_op.lhs); - const rhs_ty = func.typeOf(bin_op.rhs); - - const lhs_reg, const lhs_lock = try func.promoteReg(lhs_ty, lhs, .{}); - defer if (lhs_lock) |lock| func.register_manager.unlockReg(lock); - - const rhs_reg, const rhs_lock = try func.promoteReg(rhs_ty, rhs, .{}); - defer if (rhs_lock) |lock| func.register_manager.unlockReg(lock); - - const dest_reg, const dest_lock = try func.allocReg(.int); - defer func.register_manager.unlockReg(dest_lock); - - _ = try func.addInst(.{ - .tag = .@"and", - .ops = .rrr, - .data = .{ .r_type = .{ - .rd = dest_reg, - .rs1 = lhs_reg, - .rs2 = rhs_reg, - } }, - }); - - break :result .{ .register = dest_reg }; - }; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement airSubSat", .{}); return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } -fn airBitOr(func: *Func, inst: Air.Inst.Index) !void { +fn airMulSat(func: *Func, inst: Air.Inst.Index) !void { const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { - const lhs = try func.resolveInst(bin_op.lhs); - const rhs = try func.resolveInst(bin_op.rhs); - - const lhs_ty = func.typeOf(bin_op.lhs); - const rhs_ty = func.typeOf(bin_op.rhs); - - const lhs_reg, const lhs_lock = try func.promoteReg(lhs_ty, lhs, .{}); - defer if (lhs_lock) |lock| func.register_manager.unlockReg(lock); - - const rhs_reg, const rhs_lock = try 
func.promoteReg(rhs_ty, rhs, .{}); - defer if (rhs_lock) |lock| func.register_manager.unlockReg(lock); - - const dest_reg, const dest_lock = try func.allocReg(.int); - defer func.register_manager.unlockReg(dest_lock); - - _ = try func.addInst(.{ - .tag = .@"or", - .ops = .rrr, - .data = .{ .r_type = .{ - .rd = dest_reg, - .rs1 = lhs_reg, - .rs2 = rhs_reg, - } }, - }); - - break :result .{ .register = dest_reg }; - }; - return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); -} - -fn airXor(func: *Func, inst: Air.Inst.Index) !void { - const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement xor for {}", .{func.target.cpu.arch}); - return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); -} - -fn airShl(func: *Func, inst: Air.Inst.Index) !void { - const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { - const lhs = try func.resolveInst(bin_op.lhs); - const rhs = try func.resolveInst(bin_op.rhs); - const lhs_ty = func.typeOf(bin_op.lhs); - const rhs_ty = func.typeOf(bin_op.rhs); - - break :result try func.binOp(.shl, lhs, lhs_ty, rhs, rhs_ty); - }; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement airMulSat", .{}); return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } fn airShlSat(func: *Func, inst: Air.Inst.Index) !void { const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement shl_sat for {}", .{func.target.cpu.arch}); - return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); -} - -fn airShr(func: *Func, inst: Air.Inst.Index) !void { - const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { - const lhs = try func.resolveInst(bin_op.lhs); - const rhs = try func.resolveInst(bin_op.rhs); - const lhs_ty = func.typeOf(bin_op.lhs); - const rhs_ty = func.typeOf(bin_op.rhs); - - break :result try func.binOp(.shr, lhs, lhs_ty, rhs, rhs_ty); - }; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement airShlSat", .{}); return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } @@ -2935,7 +2972,7 @@ fn airOptionalPayload(func: *Func, inst: Air.Inst.Index) !void { break :result opt_mcv; } - const pl_mcv = try func.allocRegOrMem(inst, true); + const pl_mcv = try func.allocRegOrMem(pl_ty, inst, true); try func.genCopy(pl_ty, pl_mcv, opt_mcv); break :result pl_mcv; }; @@ -2978,15 +3015,15 @@ fn airUnwrapErrErr(func: *Func, inst: Air.Inst.Index) !void { const eu_lock = func.register_manager.lockReg(reg); defer if (eu_lock) |lock| func.register_manager.unlockReg(lock); - var result = try func.copyToNewRegister(inst, operand); - + const result = try func.copyToNewRegister(inst, operand); if (err_off > 0) { - result = try func.binOp( + try func.genBinOp( .shr, result, err_union_ty, .{ .immediate = @as(u6, @intCast(err_off * 8)) }, Type.u8, + result.register, ); } break :result result; @@ -3031,19 +3068,18 @@ fn genUnwrapErrUnionPayloadMir( const eu_lock = func.register_manager.lockReg(reg); defer if (eu_lock) |lock| func.register_manager.unlockReg(lock); - 
var result: MCValue = .{ .register = try func.copyToTmpRegister(err_union_ty, err_union) }; - + const result_reg = try func.copyToTmpRegister(err_union_ty, err_union); if (payload_off > 0) { - result = try func.binOp( + try func.genBinOp( .shr, - result, + .{ .register = result_reg }, err_union_ty, .{ .immediate = @as(u6, @intCast(payload_off * 8)) }, Type.u8, + result_reg, ); } - - break :result result; + break :result .{ .register = result_reg }; }, else => return func.fail("TODO implement genUnwrapErrUnionPayloadMir for {}", .{err_union}), } @@ -3108,7 +3144,7 @@ fn airWrapOptional(func: *Func, inst: Air.Inst.Index) !void { }; defer if (pl_lock) |lock| func.register_manager.unlockReg(lock); - const opt_mcv = try func.allocRegOrMem(inst, true); + const opt_mcv = try func.allocRegOrMem(opt_ty, inst, true); try func.genCopy(pl_ty, opt_mcv, pl_mcv); if (!same_repr) { @@ -3237,7 +3273,7 @@ fn airSlicePtr(func: *Func, inst: Air.Inst.Index) !void { const src_mcv = try func.resolveInst(ty_op.operand); if (func.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result src_mcv; - const dst_mcv = try func.allocRegOrMem(inst, true); + const dst_mcv = try func.allocRegOrMem(func.typeOfIndex(inst), inst, true); const dst_ty = func.typeOfIndex(inst); try func.genCopy(dst_ty, dst_mcv, src_mcv); break :result dst_mcv; @@ -3249,6 +3285,8 @@ fn airSliceLen(func: *Func, inst: Air.Inst.Index) !void { const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { const src_mcv = try func.resolveInst(ty_op.operand); + const ty = func.typeOfIndex(inst); + switch (src_mcv) { .load_frame => |frame_addr| { const len_mcv: MCValue = .{ .load_frame = .{ @@ -3257,7 +3295,7 @@ fn airSliceLen(func: *Func, inst: Air.Inst.Index) !void { } }; if (func.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result len_mcv; - const dst_mcv = try func.allocRegOrMem(inst, true); + const dst_mcv = try func.allocRegOrMem(ty, inst, true); try func.genCopy(Type.usize, dst_mcv, len_mcv); break :result dst_mcv; }, @@ -3266,7 +3304,7 @@ fn airSliceLen(func: *Func, inst: Air.Inst.Index) !void { if (func.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result len_mcv; - const dst_mcv = try func.allocRegOrMem(inst, true); + const dst_mcv = try func.allocRegOrMem(ty, inst, true); try func.genCopy(Type.usize, dst_mcv, len_mcv); break :result dst_mcv; }, @@ -3289,41 +3327,19 @@ fn airPtrSlicePtrPtr(func: *Func, inst: Air.Inst.Index) !void { } fn airSliceElemVal(func: *Func, inst: Air.Inst.Index) !void { - const zcu = func.bin_file.comp.module.?; - const is_volatile = false; // TODO + const mod = func.bin_file.comp.module.?; const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - if (!is_volatile and func.liveness.isUnused(inst)) return func.finishAir( - inst, - .unreach, - .{ bin_op.lhs, bin_op.rhs, .none }, - ); const result: MCValue = result: { - const slice_mcv = try func.resolveInst(bin_op.lhs); - const index_mcv = try func.resolveInst(bin_op.rhs); + const elem_ty = func.typeOfIndex(inst); + if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none; const slice_ty = func.typeOf(bin_op.lhs); - - const slice_ptr_field_type = slice_ty.slicePtrFieldType(zcu); - - const index_lock: ?RegisterLock = if (index_mcv == .register) - func.register_manager.lockRegAssumeUnused(index_mcv.register) - else - null; - defer if (index_lock) |reg| func.register_manager.unlockReg(reg); - - const base_mcv: MCValue = switch 
(slice_mcv) { - .load_frame, - .load_symbol, - => .{ .register = try func.copyToTmpRegister(slice_ptr_field_type, slice_mcv) }, - else => return func.fail("TODO slice_elem_val when slice is {}", .{slice_mcv}), - }; - - const dest = try func.allocRegOrMem(inst, true); - const addr = try func.binOp(.ptr_add, base_mcv, slice_ptr_field_type, index_mcv, Type.usize); - try func.load(dest, addr, slice_ptr_field_type); - - break :result dest; + const slice_ptr_field_type = slice_ty.slicePtrFieldType(mod); + const elem_ptr = try func.genSliceElemPtr(bin_op.lhs, bin_op.rhs); + const dst_mcv = try func.allocRegOrMem(elem_ty, inst, false); + try func.load(dst_mcv, elem_ptr, slice_ptr_field_type); + break :result dst_mcv; }; return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } @@ -3331,14 +3347,58 @@ fn airSliceElemVal(func: *Func, inst: Air.Inst.Index) !void { fn airSliceElemPtr(func: *Func, inst: Air.Inst.Index) !void { const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = func.air.extraData(Air.Bin, ty_pl.payload).data; - const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement slice_elem_ptr for {}", .{func.target.cpu.arch}); - return func.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none }); + const dst_mcv = try func.genSliceElemPtr(extra.lhs, extra.rhs); + return func.finishAir(inst, dst_mcv, .{ extra.lhs, extra.rhs, .none }); +} + +fn genSliceElemPtr(func: *Func, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue { + const zcu = func.bin_file.comp.module.?; + const slice_ty = func.typeOf(lhs); + const slice_mcv = try func.resolveInst(lhs); + const slice_mcv_lock: ?RegisterLock = switch (slice_mcv) { + .register => |reg| func.register_manager.lockRegAssumeUnused(reg), + else => null, + }; + defer if (slice_mcv_lock) |lock| func.register_manager.unlockReg(lock); + + const elem_ty = slice_ty.childType(zcu); + const elem_size = elem_ty.abiSize(zcu); + + const index_ty = func.typeOf(rhs); + const index_mcv = try func.resolveInst(rhs); + const index_mcv_lock: ?RegisterLock = switch (index_mcv) { + .register => |reg| func.register_manager.lockRegAssumeUnused(reg), + else => null, + }; + defer if (index_mcv_lock) |lock| func.register_manager.unlockReg(lock); + + const offset_reg = try func.elemOffset(index_ty, index_mcv, elem_size); + const offset_reg_lock = func.register_manager.lockRegAssumeUnused(offset_reg); + defer func.register_manager.unlockReg(offset_reg_lock); + + const addr_reg, const addr_lock = try func.allocReg(.int); + defer func.register_manager.unlockReg(addr_lock); + try func.genSetReg(Type.usize, addr_reg, slice_mcv); + + _ = try func.addInst(.{ + .tag = .add, + .ops = .rrr, + .data = .{ .r_type = .{ + .rd = addr_reg, + .rs1 = addr_reg, + .rs2 = offset_reg, + } }, + }); + + return .{ .register = addr_reg }; } fn airArrayElemVal(func: *Func, inst: Air.Inst.Index) !void { const zcu = func.bin_file.comp.module.?; const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { + const result_ty = func.typeOfIndex(inst); + const array_ty = func.typeOf(bin_op.lhs); const array_mcv = try func.resolveInst(bin_op.lhs); @@ -3367,7 +3427,7 @@ fn airArrayElemVal(func: *Func, inst: Air.Inst.Index) !void { const offset_lock = func.register_manager.lockRegAssumeUnused(offset_reg); defer func.register_manager.unlockReg(offset_lock); - const dst_mcv = try func.allocRegOrMem(inst, false); + const 
dst_mcv = try func.allocRegOrMem(result_ty, inst, false); _ = try func.addInst(.{ .tag = .add, .ops = .rrr, @@ -3427,7 +3487,19 @@ fn airPtrElemPtr(func: *Func, inst: Air.Inst.Index) !void { const offset_reg_lock = func.register_manager.lockRegAssumeUnused(offset_reg); defer func.register_manager.unlockReg(offset_reg_lock); - break :result try func.binOp(.ptr_add, base_ptr_mcv, base_ptr_ty, .{ .register = offset_reg }, base_ptr_ty); + const result_reg, const result_lock = try func.allocReg(.int); + defer func.register_manager.unlockReg(result_lock); + + try func.genBinOp( + .ptr_add, + base_ptr_mcv, + base_ptr_ty, + .{ .register = offset_reg }, + Type.usize, + result_reg, + ); + + break :result MCValue{ .register = result_reg }; }; return func.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none }); } @@ -3487,7 +3559,7 @@ fn airAbs(func: *Func, inst: Air.Inst.Index) !void { .data = .{ .i_type = .{ .rd = temp_reg, .rs1 = operand_reg, - .imm12 = Immediate.s(63), + .imm12 = Immediate.u(63), } }, }); @@ -3695,7 +3767,7 @@ fn airLoad(func: *Func, inst: Air.Inst.Index) !void { // The MCValue that holds the pointer can be re-used as the value. break :blk ptr; } else { - break :blk try func.allocRegOrMem(inst, true); + break :blk try func.allocRegOrMem(elem_ty, inst, true); } }; @@ -3876,7 +3948,7 @@ fn airStructFieldVal(func: *Func, inst: Air.Inst.Index) !void { .tag = .srli, .ops = .rri, .data = .{ .i_type = .{ - .imm12 = Immediate.s(@intCast(field_off)), + .imm12 = Immediate.u(@intCast(field_off)), .rd = dst_reg, .rs1 = dst_reg, } }, @@ -3911,7 +3983,7 @@ fn airStructFieldVal(func: *Func, inst: Air.Inst.Index) !void { func.reuseOperand(inst, operand, 0, src_mcv)) off_mcv else dst: { - const dst_mcv = try func.allocRegOrMem(inst, true); + const dst_mcv = try func.allocRegOrMem(func.typeOfIndex(inst), inst, true); try func.genCopy(field_ty, dst_mcv, off_mcv); break :dst dst_mcv; }; @@ -3972,7 +4044,7 @@ fn airArg(func: *Func, inst: Air.Inst.Index) !void { const src_mcv = func.args[arg_index]; const arg_ty = func.typeOfIndex(inst); - const dst_mcv = try func.allocRegOrMem(inst, false); + const dst_mcv = try func.allocRegOrMem(arg_ty, inst, false); log.debug("airArg {} -> {}", .{ src_mcv, dst_mcv }); @@ -4004,13 +4076,13 @@ fn airBreakpoint(func: *Func) !void { } fn airRetAddr(func: *Func, inst: Air.Inst.Index) !void { - const dst_mcv = try func.allocRegOrMem(inst, true); + const dst_mcv = try func.allocRegOrMem(func.typeOfIndex(inst), inst, true); try func.genCopy(Type.usize, dst_mcv, .{ .load_frame = .{ .index = .ret_addr } }); return func.finishAir(inst, dst_mcv, .{ .none, .none, .none }); } fn airFrameAddress(func: *Func, inst: Air.Inst.Index) !void { - const dst_mcv = try func.allocRegOrMem(inst, true); + const dst_mcv = try func.allocRegOrMem(func.typeOfIndex(inst), inst, true); try func.genCopy(Type.usize, dst_mcv, .{ .lea_frame = .{ .index = .base_ptr } }); return func.finishAir(inst, dst_mcv, .{ .none, .none, .none }); } @@ -4196,10 +4268,7 @@ fn genCall( if (func.mod.pic) { return func.fail("TODO: genCall pic", .{}); } else { - _ = try sym.getOrCreateZigGotEntry(sym_index, elf_file); - const got_addr = sym.zigGotAddress(elf_file); - try func.genSetReg(Type.usize, .ra, .{ .memory = @intCast(got_addr) }); - + try func.genSetReg(Type.usize, .ra, .{ .load_symbol = .{ .sym = sym.esym_index } }); _ = try func.addInst(.{ .tag = .jalr, .ops = .rri, @@ -4323,14 +4392,11 @@ fn airRetLoad(func: *Func, inst: Air.Inst.Index) !void { try func.exitlude_jump_relocs.append(func.gpa, index); } -fn 
airCmp(func: *Func, inst: Air.Inst.Index) !void { - const tag = func.air.instructions.items(.tag)[@intFromEnum(inst)]; +fn airCmp(func: *Func, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const zcu = func.bin_file.comp.module.?; const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { - const lhs = try func.resolveInst(bin_op.lhs); - const rhs = try func.resolveInst(bin_op.rhs); const lhs_ty = func.typeOf(bin_op.lhs); switch (lhs_ty.zigTypeTag(zcu)) { @@ -4346,7 +4412,7 @@ fn airCmp(func: *Func, inst: Air.Inst.Index) !void { .Int => lhs_ty, .Bool => Type.u1, .Pointer => Type.usize, - .ErrorSet => Type.u16, + .ErrorSet => Type.anyerror, .Optional => blk: { const payload_ty = lhs_ty.optionalChild(zcu); if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) { @@ -4362,7 +4428,7 @@ fn airCmp(func: *Func, inst: Air.Inst.Index) !void { const int_info = int_ty.intInfo(zcu); if (int_info.bits <= 64) { - break :result try func.binOp(tag, lhs, int_ty, rhs, int_ty); + break :result try func.binOp(inst, tag, bin_op.lhs, bin_op.rhs); } else { return func.fail("TODO riscv cmp for ints > 64 bits", .{}); } @@ -4373,7 +4439,7 @@ fn airCmp(func: *Func, inst: Air.Inst.Index) !void { if (float_bits > float_reg_size) { return func.fail("TODO: airCmp float > 64/32 bits", .{}); } - break :result try func.binOpFloat(tag, lhs, lhs_ty, rhs, lhs_ty); + break :result try func.binOp(inst, tag, bin_op.lhs, bin_op.rhs); }, else => unreachable, } @@ -4537,7 +4603,7 @@ fn isNull(func: *Func, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC else .{ .off = @intCast(pl_ty.abiSize(zcu)), .ty = Type.bool }; - const return_mcv = try func.allocRegOrMem(inst, true); + const return_mcv = try func.allocRegOrMem(func.typeOfIndex(inst), inst, true); assert(return_mcv == .register); // should not be larger 8 bytes const return_reg = return_mcv.register; @@ -4569,7 +4635,7 @@ fn isNull(func: *Func, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC some_info.ty, .{ .immediate = 0 }, ), - .size = .byte, + .ty = Type.bool, }, }, }); @@ -4601,7 +4667,7 @@ fn isNull(func: *Func, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC some_info.ty, .{ .immediate = 0 }, ), - .size = .byte, + .ty = Type.bool, }, }, }); @@ -4623,9 +4689,9 @@ fn airIsNull(func: *Func, inst: Air.Inst.Index) !void { fn airIsNullPtr(func: *Func, inst: Air.Inst.Index) !void { const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const operand = try func.resolveInst(un_op); - _ = operand; // autofix + _ = operand; const ty = func.typeOf(un_op); - _ = ty; // autofix + _ = ty; if (true) return func.fail("TODO: airIsNullPtr", .{}); @@ -4656,9 +4722,9 @@ fn airIsNonNull(func: *Func, inst: Air.Inst.Index) !void { fn airIsNonNullPtr(func: *Func, inst: Air.Inst.Index) !void { const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const operand = try func.resolveInst(un_op); - _ = operand; // autofix + _ = operand; const ty = func.typeOf(un_op); - _ = ty; // autofix + _ = ty; if (true) return func.fail("TODO: airIsNonNullPtr", .{}); @@ -4685,7 +4751,7 @@ fn airIsErrPtr(func: *Func, inst: Air.Inst.Index) !void { // The MCValue that holds the pointer can be re-used as the value. 
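// Illustrative sketch, not part of the patch: the check `isErr` (just below)
// emits once an error union fits in a register. The error code sits at a byte
// offset computed by errUnionErrorOffset; a right shift brings it to bit 0
// and a compare against zero yields the flag, since error code 0 means
// "no error". `err_byte_off` stands in for that computed offset, and the
// 16-bit view assumes the default anyerror size.
const std = @import("std");

fn isErrBits(eu_bits: u64, err_byte_off: u3) bool {
    const shift = @as(u6, err_byte_off) * 8; // the emitted `shr`
    const code: u16 = @truncate(eu_bits >> shift); // anyerror-sized view
    return code != 0; // the emitted `cmp_neq ..., 0`
}

test "error code zero means success" {
    try std.testing.expect(!isErrBits(0x0000_1234, 2)); // code 0 at byte 2
    try std.testing.expect(isErrBits(0x0005_1234, 2)); // code 5 at byte 2
}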
break :blk operand_ptr; } else { - break :blk try func.allocRegOrMem(inst, true); + break :blk try func.allocRegOrMem(func.typeOfIndex(inst), inst, true); } }; try func.load(operand, operand_ptr, func.typeOf(un_op)); @@ -4701,45 +4767,44 @@ fn airIsErrPtr(func: *Func, inst: Air.Inst.Index) !void { /// /// Result is in the return register. fn isErr(func: *Func, maybe_inst: ?Air.Inst.Index, eu_ty: Type, eu_mcv: MCValue) !MCValue { + _ = maybe_inst; const zcu = func.bin_file.comp.module.?; const err_ty = eu_ty.errorUnionSet(zcu); if (err_ty.errorSetIsEmpty(zcu)) return MCValue{ .immediate = 0 }; // always false - - _ = maybe_inst; - const err_off: u31 = @intCast(errUnionErrorOffset(eu_ty.errorUnionPayload(zcu), zcu)); + const return_reg, const return_lock = try func.allocReg(.int); + defer func.register_manager.unlockReg(return_lock); + switch (eu_mcv) { .register => |reg| { const eu_lock = func.register_manager.lockReg(reg); defer if (eu_lock) |lock| func.register_manager.unlockReg(lock); - const return_reg = try func.copyToTmpRegister(eu_ty, eu_mcv); - const return_lock = func.register_manager.lockRegAssumeUnused(return_reg); - defer func.register_manager.unlockReg(return_lock); - - var return_mcv: MCValue = .{ .register = return_reg }; + try func.genCopy(eu_ty, .{ .register = return_reg }, eu_mcv); if (err_off > 0) { - return_mcv = try func.binOp( + try func.genBinOp( .shr, - return_mcv, + .{ .register = return_reg }, eu_ty, .{ .immediate = @as(u6, @intCast(err_off * 8)) }, Type.u8, + return_reg, ); } - return try func.binOp( + try func.genBinOp( .cmp_neq, - return_mcv, - Type.u16, + .{ .register = return_reg }, + Type.anyerror, .{ .immediate = 0 }, - Type.u16, + Type.u8, + return_reg, ); }, .load_frame => |frame_addr| { - return func.binOp( + try func.genBinOp( .cmp_neq, .{ .load_frame = .{ .index = frame_addr.index, @@ -4748,10 +4813,13 @@ fn isErr(func: *Func, maybe_inst: ?Air.Inst.Index, eu_ty: Type, eu_mcv: MCValue) Type.anyerror, .{ .immediate = 0 }, Type.anyerror, + return_reg, ); }, else => return func.fail("TODO implement isErr for {}", .{eu_mcv}), } + + return .{ .register = return_reg }; } fn airIsNonErr(func: *Func, inst: Air.Inst.Index) !void { @@ -4799,7 +4867,7 @@ fn airIsNonErrPtr(func: *Func, inst: Air.Inst.Index) !void { // The MCValue that holds the pointer can be re-used as the value. 
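// Illustrative sketch, not part of the patch: the shape of the dispatch
// chain `airSwitchBr` (below) emits. Each case item is compared against the
// condition with a `cmp_neq`; a "not equal" result branches over the case
// body to the next test, modelled here with `continue`. The relocs the
// backend records are those to-be-patched branch targets.
const std = @import("std");

fn switchDispatch(cond: u32, items: []const u32) ?usize {
    for (items, 0..) |item, i| {
        const neq = cond != item; // the emitted cmp_neq
        if (neq) continue; // condBr past this case
        return i; // fall into the case body
    }
    return null; // no item matched: the else branch
}

test "chain falls into the matching case" {
    try std.testing.expectEqual(@as(?usize, 1), switchDispatch(7, &.{ 3, 7, 9 }));
    try std.testing.expectEqual(@as(?usize, null), switchDispatch(4, &.{ 3, 7, 9 }));
}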
break :blk operand_ptr; } else { - break :blk try func.allocRegOrMem(inst, true); + break :blk try func.allocRegOrMem(func.typeOfIndex(inst), inst, true); } }; const operand_ptr_ty = func.typeOf(un_op); @@ -4916,16 +4984,18 @@ fn airSwitchBr(func: *Func, inst: Air.Inst.Index) !void { // switch branches must be comptime-known, so this is stored in an immediate const item_mcv = try func.resolveInst(item); - const cmp_mcv: MCValue = try func.binOp( + const cmp_reg, const cmp_lock = try func.allocReg(.int); + defer func.register_manager.unlockReg(cmp_lock); + + try func.genBinOp( .cmp_neq, condition, condition_ty, item_mcv, condition_ty, + cmp_reg, ); - const cmp_reg = try func.copyToTmpRegister(Type.bool, cmp_mcv); - if (!(i < relocs.len - 1)) { _ = try func.addInst(.{ .tag = .pseudo, @@ -5019,7 +5089,7 @@ fn airBr(func: *Func, inst: Air.Inst.Index) !void { break :result block_tracking.short; } - const dst_mcv = if (first_br) try func.allocRegOrMem(br.block_inst, true) else dst: { + const dst_mcv = if (first_br) try func.allocRegOrMem(block_ty, br.block_inst, true) else dst: { try func.getValue(block_tracking.short, br.block_inst); break :dst block_tracking.short; }; @@ -5063,10 +5133,10 @@ fn airBoolOp(func: *Func, inst: Air.Inst.Index) !void { const lhs_ty = Type.bool; const rhs_ty = Type.bool; - const lhs_reg, const lhs_lock = try func.promoteReg(lhs_ty, lhs, .{}); + const lhs_reg, const lhs_lock = try func.promoteReg(lhs_ty, lhs); defer if (lhs_lock) |lock| func.register_manager.unlockReg(lock); - const rhs_reg, const rhs_lock = try func.promoteReg(rhs_ty, rhs, .{}); + const rhs_reg, const rhs_lock = try func.promoteReg(rhs_ty, rhs); defer if (rhs_lock) |lock| func.register_manager.unlockReg(lock); const result_reg, const result_lock = try func.allocReg(.int); @@ -5262,7 +5332,7 @@ fn genCopy(func: *Func, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) !void { const src_info: ?struct { addr_reg: Register, addr_lock: ?RegisterLock } = switch (src_mcv) { .register_pair, .memory, .indirect, .load_frame => null, .load_symbol => src: { - const src_addr_reg, const src_addr_lock = try func.promoteReg(Type.usize, src_mcv.address(), .{}); + const src_addr_reg, const src_addr_lock = try func.promoteReg(Type.usize, src_mcv.address()); errdefer func.register_manager.unlockReg(src_addr_lock); break :src .{ .addr_reg = src_addr_reg, .addr_lock = src_addr_lock }; @@ -5557,7 +5627,7 @@ fn genSetReg(func: *Func, ty: Type, reg: Register, src_mcv: MCValue) InnerError! .data = .{ .i_type = .{ .rd = reg, .rs1 = reg, - .imm12 = Immediate.s(32), + .imm12 = Immediate.u(32), } }, }); @@ -5604,10 +5674,9 @@ fn genSetReg(func: *Func, ty: Type, reg: Register, src_mcv: MCValue) InnerError! .m = .{ .base = .{ .frame = frame.index }, .mod = .{ - .rm = .{ - .size = func.memSize(ty), - .disp = frame.off, - }, + .size = func.memSize(ty), + .unsigned = ty.isUnsignedInt(zcu), + .disp = frame.off, }, }, } }, @@ -5622,7 +5691,7 @@ fn genSetReg(func: *Func, ty: Type, reg: Register, src_mcv: MCValue) InnerError! .data = .{ .i_type = .{ .rd = reg, .rs1 = reg, - .imm12 = Immediate.s(0), + .imm12 = Immediate.u(0), } }, }); }, @@ -5636,19 +5705,17 @@ fn genSetReg(func: *Func, ty: Type, reg: Register, src_mcv: MCValue) InnerError! 
.register_offset => |reg_off| .{ .base = .{ .reg = reg_off.reg }, .mod = .{ - .rm = .{ - .size = func.memSize(ty), - .disp = reg_off.off, - }, + .size = func.memSize(ty), + .disp = reg_off.off, + .unsigned = false, }, }, .lea_frame => |frame| .{ .base = .{ .frame = frame.index }, .mod = .{ - .rm = .{ - .size = func.memSize(ty), - .disp = frame.off, - }, + .size = func.memSize(ty), + .disp = frame.off, + .unsigned = false, }, }, else => unreachable, @@ -5787,9 +5854,10 @@ fn genSetMem( .r = reg, .m = .{ .base = .{ .frame = frame_index }, - .mod = .{ .rm = .{ + .mod = .{ .size = Memory.Size.fromByteSize(src_size), - } }, + .unsigned = false, + }, }, } }, }); @@ -5802,10 +5870,11 @@ fn genSetMem( .r = reg, .m = .{ .base = base, - .mod = .{ .rm = .{ + .mod = .{ .size = func.memSize(ty), .disp = disp, - } }, + .unsigned = false, + }, }, } }, }); @@ -5820,7 +5889,7 @@ fn genSetMem( .immediate => { // TODO: remove this lock in favor of a copyToTmpRegister when we load 64 bit immediates with // a register allocation. - const reg, const reg_lock = try func.promoteReg(ty, src_mcv, .{}); + const reg, const reg_lock = try func.promoteReg(ty, src_mcv); defer if (reg_lock) |lock| func.register_manager.unlockReg(lock); return func.genSetMem(base, disp, ty, .{ .register = reg }); @@ -5833,9 +5902,10 @@ fn airIntFromPtr(func: *Func, inst: Air.Inst.Index) !void { const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const result = result: { const src_mcv = try func.resolveInst(un_op); + const src_ty = func.typeOfIndex(inst); if (func.reuseOperand(inst, un_op, 0, src_mcv)) break :result src_mcv; - const dst_mcv = try func.allocRegOrMem(inst, true); + const dst_mcv = try func.allocRegOrMem(src_ty, inst, true); const dst_ty = func.typeOfIndex(inst); try func.genCopy(dst_ty, dst_mcv, src_mcv); break :result dst_mcv; @@ -5858,7 +5928,7 @@ fn airBitCast(func: *Func, inst: Air.Inst.Index) !void { const dst_mcv = if (dst_ty.abiSize(zcu) <= src_ty.abiSize(zcu) and func.reuseOperand(inst, ty_op.operand, 0, src_mcv)) src_mcv else dst: { - const dst_mcv = try func.allocRegOrMem(inst, true); + const dst_mcv = try func.allocRegOrMem(dst_ty, inst, true); try func.genCopy(switch (math.order(dst_ty.abiSize(zcu), src_ty.abiSize(zcu))) { .lt => dst_ty, .eq => if (!dst_mcv.isMemory() or src_mcv.isMemory()) dst_ty else src_ty, @@ -6080,7 +6150,7 @@ fn airErrorName(func: *Func, inst: Air.Inst.Index) !void { .tag = .slli, .ops = .rri, .data = .{ .i_type = .{ - .imm12 = Immediate.s(4), + .imm12 = Immediate.u(4), .rd = err_reg, .rs1 = err_reg, } }, @@ -6104,7 +6174,7 @@ fn airErrorName(func: *Func, inst: Air.Inst.Index) !void { .r = start_reg, .m = .{ .base = .{ .reg = addr_reg }, - .mod = .{ .off = 0 }, + .mod = .{ .size = .dword, .unsigned = true }, }, }, }, @@ -6118,13 +6188,13 @@ fn airErrorName(func: *Func, inst: Air.Inst.Index) !void { .r = end_reg, .m = .{ .base = .{ .reg = addr_reg }, - .mod = .{ .off = 8 }, + .mod = .{ .size = .dword, .unsigned = true }, }, }, }, }); - const dst_mcv = try func.allocRegOrMem(inst, false); + const dst_mcv = try func.allocRegOrMem(func.typeOfIndex(inst), inst, false); const frame = dst_mcv.load_frame; try func.genSetMem( .{ .frame = frame.index }, diff --git a/src/arch/riscv64/Encoding.zig b/src/arch/riscv64/Encoding.zig index 6f412935e4..b280b8a483 100644 --- a/src/arch/riscv64/Encoding.zig +++ b/src/arch/riscv64/Encoding.zig @@ -4,6 +4,7 @@ data: Data, const OpCode = enum(u7) { OP = 0b0110011, OP_IMM = 0b0010011, + OP_IMM_32 = 0b0011011, OP_32 = 0b0111011, BRANCH = 
0b1100011, @@ -41,17 +42,22 @@ const Enc = struct { funct3: u3, funct7: u7, }, - /// funct3 + offset - fo: struct { - funct3: u3, - offset: u12 = 0, - }, /// funct5 + rm + fmt fmt: struct { funct5: u5, rm: u3, fmt: Fmt, }, + /// funct3 + f: struct { + funct3: u3, + }, + /// typ + funct3 + has_5 + sh: struct { + typ: u6, + funct3: u3, + has_5: bool, + }, /// U-type none, }, @@ -72,10 +78,14 @@ pub const Mnemonic = enum { sltiu, xori, andi, + slli, srli, srai, - sllw, + + slliw, + srliw, + sraiw, addi, jalr, @@ -98,19 +108,48 @@ pub const Mnemonic = enum { // R Type add, + addw, + sub, + subw, @"and", @"or", - sub, slt, - mul, sltu, xor, + sll, + srl, + sra, + + sllw, + srlw, + sraw, + // System ecall, ebreak, unimp, + // M extension + mul, + mulw, + + mulh, + mulhu, + mulhsu, + + div, + divu, + + divw, + divuw, + + rem, + remu, + + remw, + remuw, + // F extension (32-bit float) fadds, fsubs, @@ -170,19 +209,56 @@ pub const Mnemonic = enum { .slt => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b010, .funct7 = 0b0000000 } } }, .mul => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b000, .funct7 = 0b0000001 } } }, + .mulh => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b001, .funct7 = 0b0000001 } } }, + .mulhsu => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b010, .funct7 = 0b0000001 } } }, + .mulhu => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b011, .funct7 = 0b0000001 } } }, + + .div => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b100, .funct7 = 0b0000001 } } }, + .divu => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b101, .funct7 = 0b0000001 } } }, + + .rem => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b110, .funct7 = 0b0000001 } } }, + .remu => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b111, .funct7 = 0b0000001 } } }, + + .sll => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b001, .funct7 = 0b0000000 } } }, + .srl => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b101, .funct7 = 0b0000000 } } }, + .sra => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b101, .funct7 = 0b0100000 } } }, // OP_IMM - .addi => .{ .opcode = .OP_IMM, .data = .{ .fo = .{ .funct3 = 0b000 } } }, - .andi => .{ .opcode = .OP_IMM, .data = .{ .fo = .{ .funct3 = 0b111 } } }, - .xori => .{ .opcode = .OP_IMM, .data = .{ .fo = .{ .funct3 = 0b100 } } }, + .addi => .{ .opcode = .OP_IMM, .data = .{ .f = .{ .funct3 = 0b000 } } }, + .andi => .{ .opcode = .OP_IMM, .data = .{ .f = .{ .funct3 = 0b111 } } }, + .xori => .{ .opcode = .OP_IMM, .data = .{ .f = .{ .funct3 = 0b100 } } }, - .sltiu => .{ .opcode = .OP_IMM, .data = .{ .fo = .{ .funct3 = 0b011 } } }, + .sltiu => .{ .opcode = .OP_IMM, .data = .{ .f = .{ .funct3 = 0b011 } } }, - .slli => .{ .opcode = .OP_IMM, .data = .{ .fo = .{ .funct3 = 0b001 } } }, - .srli => .{ .opcode = .OP_IMM, .data = .{ .fo = .{ .funct3 = 0b101 } } }, - .srai => .{ .opcode = .OP_IMM, .data = .{ .fo = .{ .funct3 = 0b101, .offset = 1 << 10 } } }, + .slli => .{ .opcode = .OP_IMM, .data = .{ .sh = .{ .typ = 0b000000, .funct3 = 0b001, .has_5 = true } } }, + .srli => .{ .opcode = .OP_IMM, .data = .{ .sh = .{ .typ = 0b000000, .funct3 = 0b101, .has_5 = true } } }, + .srai => .{ .opcode = .OP_IMM, .data = .{ .sh = .{ .typ = 0b010000, .funct3 = 0b101, .has_5 = true } } }, + + + // OP_IMM_32 + + .slliw => .{ .opcode = .OP_IMM_32, .data = .{ .sh = .{ .typ = 0b000000, .funct3 = 0b001, .has_5 = false } } }, + .srliw => .{ .opcode = .OP_IMM_32, .data = .{ .sh = .{ .typ = 0b000000, .funct3 = 0b101, .has_5 = false } } }, + .sraiw => .{ .opcode = .OP_IMM_32, .data = .{ .sh 
= .{ .typ = 0b010000, .funct3 = 0b101, .has_5 = false } } }, + + + // OP_32 + + .addw => .{ .opcode = .OP_32, .data = .{ .ff = .{ .funct3 = 0b000, .funct7 = 0b0000000 } } }, + .subw => .{ .opcode = .OP_32, .data = .{ .ff = .{ .funct3 = 0b000, .funct7 = 0b0100000 } } }, + .mulw => .{ .opcode = .OP_32, .data = .{ .ff = .{ .funct3 = 0b000, .funct7 = 0b0000001 } } }, + + .divw => .{ .opcode = .OP_32, .data = .{ .ff = .{ .funct3 = 0b100, .funct7 = 0b0000001 } } }, + .divuw => .{ .opcode = .OP_32, .data = .{ .ff = .{ .funct3 = 0b101, .funct7 = 0b0000001 } } }, + + .remw => .{ .opcode = .OP_32, .data = .{ .ff = .{ .funct3 = 0b110, .funct7 = 0b0000001 } } }, + .remuw => .{ .opcode = .OP_32, .data = .{ .ff = .{ .funct3 = 0b111, .funct7 = 0b0000001 } } }, + + .sllw => .{ .opcode = .OP_32, .data = .{ .ff = .{ .funct3 = 0b001, .funct7 = 0b0000000 } } }, + .srlw => .{ .opcode = .OP_32, .data = .{ .ff = .{ .funct3 = 0b101, .funct7 = 0b0000000 } } }, + .sraw => .{ .opcode = .OP_32, .data = .{ .ff = .{ .funct3 = 0b101, .funct7 = 0b0100000 } } }, // OP_FP @@ -226,43 +302,38 @@ pub const Mnemonic = enum { // LOAD - .lb => .{ .opcode = .LOAD, .data = .{ .fo = .{ .funct3 = 0b000 } } }, - .lh => .{ .opcode = .LOAD, .data = .{ .fo = .{ .funct3 = 0b001 } } }, - .lw => .{ .opcode = .LOAD, .data = .{ .fo = .{ .funct3 = 0b010 } } }, - .ld => .{ .opcode = .LOAD, .data = .{ .fo = .{ .funct3 = 0b011 } } }, - .lbu => .{ .opcode = .LOAD, .data = .{ .fo = .{ .funct3 = 0b100 } } }, - .lhu => .{ .opcode = .LOAD, .data = .{ .fo = .{ .funct3 = 0b101 } } }, - .lwu => .{ .opcode = .LOAD, .data = .{ .fo = .{ .funct3 = 0b110 } } }, + .lb => .{ .opcode = .LOAD, .data = .{ .f = .{ .funct3 = 0b000 } } }, + .lh => .{ .opcode = .LOAD, .data = .{ .f = .{ .funct3 = 0b001 } } }, + .lw => .{ .opcode = .LOAD, .data = .{ .f = .{ .funct3 = 0b010 } } }, + .ld => .{ .opcode = .LOAD, .data = .{ .f = .{ .funct3 = 0b011 } } }, + .lbu => .{ .opcode = .LOAD, .data = .{ .f = .{ .funct3 = 0b100 } } }, + .lhu => .{ .opcode = .LOAD, .data = .{ .f = .{ .funct3 = 0b101 } } }, + .lwu => .{ .opcode = .LOAD, .data = .{ .f = .{ .funct3 = 0b110 } } }, // STORE - .sb => .{ .opcode = .STORE, .data = .{ .fo = .{ .funct3 = 0b000 } } }, - .sh => .{ .opcode = .STORE, .data = .{ .fo = .{ .funct3 = 0b001 } } }, - .sw => .{ .opcode = .STORE, .data = .{ .fo = .{ .funct3 = 0b010 } } }, - .sd => .{ .opcode = .STORE, .data = .{ .fo = .{ .funct3 = 0b011 } } }, + .sb => .{ .opcode = .STORE, .data = .{ .f = .{ .funct3 = 0b000 } } }, + .sh => .{ .opcode = .STORE, .data = .{ .f = .{ .funct3 = 0b001 } } }, + .sw => .{ .opcode = .STORE, .data = .{ .f = .{ .funct3 = 0b010 } } }, + .sd => .{ .opcode = .STORE, .data = .{ .f = .{ .funct3 = 0b011 } } }, // LOAD_FP - .flw => .{ .opcode = .LOAD_FP, .data = .{ .fo = .{ .funct3 = 0b010 } } }, - .fld => .{ .opcode = .LOAD_FP, .data = .{ .fo = .{ .funct3 = 0b011 } } }, + .flw => .{ .opcode = .LOAD_FP, .data = .{ .f = .{ .funct3 = 0b010 } } }, + .fld => .{ .opcode = .LOAD_FP, .data = .{ .f = .{ .funct3 = 0b011 } } }, // STORE_FP - .fsw => .{ .opcode = .STORE_FP, .data = .{ .fo = .{ .funct3 = 0b010 } } }, - .fsd => .{ .opcode = .STORE_FP, .data = .{ .fo = .{ .funct3 = 0b011 } } }, + .fsw => .{ .opcode = .STORE_FP, .data = .{ .f = .{ .funct3 = 0b010 } } }, + .fsd => .{ .opcode = .STORE_FP, .data = .{ .f = .{ .funct3 = 0b011 } } }, // JALR - .jalr => .{ .opcode = .JALR, .data = .{ .fo = .{ .funct3 = 0b000 } } }, - - - // OP_32 - - .sllw => .{ .opcode = .OP_32, .data = .{ .ff = .{ .funct3 = 0b001, .funct7 = 0b0000000 } } }, + .jalr => .{ 
.opcode = .JALR, .data = .{ .f = .{ .funct3 = 0b000 } } }, // LUI @@ -282,18 +353,18 @@ pub const Mnemonic = enum { // BRANCH - .beq => .{ .opcode = .BRANCH, .data = .{ .fo = .{ .funct3 = 0b000 } } }, + .beq => .{ .opcode = .BRANCH, .data = .{ .f = .{ .funct3 = 0b000 } } }, // SYSTEM - .ecall => .{ .opcode = .SYSTEM, .data = .{ .fo = .{ .funct3 = 0b000 } } }, - .ebreak => .{ .opcode = .SYSTEM, .data = .{ .fo = .{ .funct3 = 0b000 } } }, + .ecall => .{ .opcode = .SYSTEM, .data = .{ .f = .{ .funct3 = 0b000 } } }, + .ebreak => .{ .opcode = .SYSTEM, .data = .{ .f = .{ .funct3 = 0b000 } } }, // NONE - .unimp => .{ .opcode = .NONE, .data = .{ .fo = .{ .funct3 = 0b000 } } }, + .unimp => .{ .opcode = .NONE, .data = .{ .f = .{ .funct3 = 0b000 } } }, // zig fmt: on @@ -320,10 +391,15 @@ pub const InstEnc = enum { .sltiu, .xori, .andi, + .slli, .srli, .srai, + .slliw, + .srliw, + .sraiw, + .ld, .lw, .lwu, @@ -357,14 +433,41 @@ pub const InstEnc = enum { .slt, .sltu, + + .sll, + .srl, + .sra, + .sllw, - .mul, + .srlw, + .sraw, + + .div, + .divu, + .divw, + .divuw, + + .rem, + .remu, + .remw, + .remuw, + .xor, - .add, - .sub, .@"and", .@"or", + .add, + .addw, + + .sub, + .subw, + + .mul, + .mulw, + .mulh, + .mulhu, + .mulhsu, + .fadds, .faddd, @@ -516,12 +619,12 @@ pub const Data = union(InstEnc) { .imm0_11 = switch (mnem) { .ecall => 0x000, .ebreak => 0x001, - .unimp => 0, + .unimp => 0x000, else => unreachable, }, .opcode = @intFromEnum(enc.opcode), - .funct3 = enc.data.fo.funct3, + .funct3 = enc.data.f.funct3, }, }; }, @@ -567,20 +670,32 @@ pub const Data = union(InstEnc) { .imm5_11 = @truncate(umm >> 5), .opcode = @intFromEnum(enc.opcode), - .funct3 = enc.data.fo.funct3, + .funct3 = enc.data.f.funct3, }, }; }, .I => { assert(ops.len == 3); return .{ - .I = .{ - .rd = ops[0].reg.encodeId(), - .rs1 = ops[1].reg.encodeId(), - .imm0_11 = ops[2].imm.asBits(u12) + enc.data.fo.offset, + .I = switch (enc.data) { + .f => |f| .{ + .rd = ops[0].reg.encodeId(), + .rs1 = ops[1].reg.encodeId(), + .imm0_11 = ops[2].imm.asBits(u12), - .opcode = @intFromEnum(enc.opcode), - .funct3 = enc.data.fo.funct3, + .opcode = @intFromEnum(enc.opcode), + .funct3 = f.funct3, + }, + .sh => |sh| .{ + .rd = ops[0].reg.encodeId(), + .rs1 = ops[1].reg.encodeId(), + .imm0_11 = (@as(u12, sh.typ) << 6) | + if (sh.has_5) ops[2].imm.asBits(u6) else (@as(u6, 0) | ops[2].imm.asBits(u5)), + + .opcode = @intFromEnum(enc.opcode), + .funct3 = sh.funct3, + }, + else => unreachable, }, }; }, @@ -629,7 +744,7 @@ pub const Data = union(InstEnc) { .imm12 = @truncate(umm >> 12), .opcode = @intFromEnum(enc.opcode), - .funct3 = enc.data.fo.funct3, + .funct3 = enc.data.f.funct3, }, }; }, diff --git a/src/arch/riscv64/Lower.zig b/src/arch/riscv64/Lower.zig index 560f8349df..247cf64647 100644 --- a/src/arch/riscv64/Lower.zig +++ b/src/arch/riscv64/Lower.zig @@ -44,6 +44,8 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { insts: []const Instruction, relocs: []const Reloc, } { + const zcu = lower.bin_file.comp.module.?; + lower.result_insts = undefined; lower.result_relocs = undefined; errdefer lower.result_insts = undefined; @@ -75,13 +77,14 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { const dest_reg_class = dest_reg.class(); const float = dest_reg_class == .float; - const src_size = rm.m.mod.size(); + const src_size = rm.m.mod.size; + const unsigned = rm.m.mod.unsigned; const tag: Encoding.Mnemonic = if (!float) switch (src_size) { - .byte => .lb, - .hword => .lh, - .word => .lw, + .byte => if (unsigned) 
.lbu else .lb, + .hword => if (unsigned) .lhu else .lh, + .word => if (unsigned) .lwu else .lw, .dword => .ld, } else switch (src_size) { @@ -103,7 +106,7 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { const float = src_reg_class == .float; // TODO: do we actually need this? are all stores not usize? - const dest_size = rm.m.mod.size(); + const dest_size = rm.m.mod.size; const tag: Encoding.Mnemonic = if (!float) switch (dest_size) { @@ -181,10 +184,12 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { try lower.emit(.lui, &.{ .{ .reg = dst_reg }, - .{ .imm = lower.reloc(.{ .load_symbol_reloc = .{ - .atom_index = data.atom_index, - .sym_index = data.sym_index, - } }) }, + .{ .imm = lower.reloc(.{ + .load_symbol_reloc = .{ + .atom_index = data.atom_index, + .sym_index = data.sym_index, + }, + }) }, }); // the above reloc implies this one @@ -237,7 +242,14 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { const rs2 = compare.rs2; const class = rs1.class(); - const size = compare.size.bitSize(); + const ty = compare.ty; + const size = std.math.ceilPowerOfTwo(u64, ty.bitSize(zcu)) catch { + return lower.fail("pseudo_compare size {}", .{ty.bitSize(zcu)}); + }; + + const is_unsigned = ty.isUnsignedInt(zcu); + + const less_than: Encoding.Mnemonic = if (is_unsigned) .sltu else .slt; switch (class) { .int => switch (op) { @@ -268,14 +280,14 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { }); }, .gt => { - try lower.emit(.sltu, &.{ + try lower.emit(less_than, &.{ .{ .reg = rd }, .{ .reg = rs1 }, .{ .reg = rs2 }, }); }, .gte => { - try lower.emit(.sltu, &.{ + try lower.emit(less_than, &.{ .{ .reg = rd }, .{ .reg = rs1 }, .{ .reg = rs2 }, @@ -288,14 +300,14 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { }); }, .lt => { - try lower.emit(.slt, &.{ + try lower.emit(less_than, &.{ .{ .reg = rd }, .{ .reg = rs1 }, .{ .reg = rs2 }, }); }, .lte => { - try lower.emit(.slt, &.{ + try lower.emit(less_than, &.{ .{ .reg = rd }, .{ .reg = rs2 }, .{ .reg = rs1 }, diff --git a/src/arch/riscv64/Mir.zig b/src/arch/riscv64/Mir.zig index 76822c3968..0753b142b1 100644 --- a/src/arch/riscv64/Mir.zig +++ b/src/arch/riscv64/Mir.zig @@ -39,16 +39,28 @@ pub const Inst = struct { unimp, add, + addw, sub, + subw, sltu, slt, - srli, slli, + srli, srai, + + slliw, + srliw, + sraiw, + + sll, + srl, + sra, + sllw, srlw, + sraw, jal, @@ -69,6 +81,17 @@ pub const Inst = struct { // M extension mul, + mulw, + + div, + divu, + divw, + divuw, + + rem, + remu, + remw, + remuw, // F extension (32-bit float) fadds, @@ -216,7 +239,7 @@ pub const Inst = struct { lt, lte, }, - size: Memory.Size, + ty: Type, }, reloc: struct { @@ -408,6 +431,8 @@ pub const RegisterList = struct { const Mir = @This(); const std = @import("std"); const builtin = @import("builtin"); +const Type = @import("../../type.zig").Type; + const assert = std.debug.assert; const bits = @import("bits.zig"); diff --git a/src/arch/riscv64/bits.zig b/src/arch/riscv64/bits.zig index a18f445816..cb398ef620 100644 --- a/src/arch/riscv64/bits.zig +++ b/src/arch/riscv64/bits.zig @@ -19,19 +19,10 @@ pub const Memory = struct { reloc: Symbol, }; - pub const Mod = union(enum(u1)) { - rm: struct { - size: Size, - disp: i32 = 0, - }, - off: i32, - - pub fn size(mod: Mod) Size { - return switch (mod) { - .rm => |rm| rm.size, - .off => Size.dword, // assumed to be a register size - }; - } + pub const Mod = struct { + size: Size, + unsigned: bool, + disp: i32 = 0, }; pub 
const Size = enum(u4) { @@ -76,10 +67,7 @@ pub const Memory = struct { /// Asserts `mem` can be represented as a `FrameLoc`. pub fn toFrameLoc(mem: Memory, mir: Mir) Mir.FrameLoc { - const offset: i32 = switch (mem.mod) { - .off => |off| off, - .rm => |rm| rm.disp, - }; + const offset: i32 = mem.mod.disp; switch (mem.base) { .reg => |reg| { @@ -130,24 +118,6 @@ pub const Immediate = union(enum) { }; } - pub fn asUnsigned(imm: Immediate, bit_size: u64) u64 { - return switch (imm) { - .signed => |x| switch (bit_size) { - 1, 8 => @as(u8, @bitCast(@as(i8, @intCast(x)))), - 16 => @as(u16, @bitCast(@as(i16, @intCast(x)))), - 32, 64 => @as(u32, @bitCast(x)), - else => unreachable, - }, - .unsigned => |x| switch (bit_size) { - 1, 8 => @as(u8, @intCast(x)), - 16 => @as(u16, @intCast(x)), - 32 => @as(u32, @intCast(x)), - 64 => x, - else => unreachable, - }, - }; - } - pub fn asBits(imm: Immediate, comptime T: type) T { const int_info = @typeInfo(T).Int; if (int_info.signedness != .unsigned) @compileError("Immediate.asBits needs unsigned T"); diff --git a/src/arch/riscv64/encoder.zig b/src/arch/riscv64/encoder.zig index 5be753c426..54d1549ebe 100644 --- a/src/arch/riscv64/encoder.zig +++ b/src/arch/riscv64/encoder.zig @@ -32,6 +32,31 @@ pub const Instruction = struct { pub fn encode(inst: Instruction, writer: anytype) !void { try writer.writeInt(u32, inst.encoding.data.toU32(), .little); } + + pub fn format( + inst: Instruction, + comptime fmt: []const u8, + _: std.fmt.FormatOptions, + writer: anytype, + ) !void { + std.debug.assert(fmt.len == 0); + + const encoding = inst.encoding; + + try writer.print("{s} ", .{@tagName(encoding.mnemonic)}); + + var i: u32 = 0; + while (i < inst.ops.len and inst.ops[i] != .none) : (i += 1) { + if (i != inst.ops.len and i != 0) try writer.writeAll(", "); + + switch (@as(Instruction.Operand, inst.ops[i])) { + .none => unreachable, // it's sliced out above + .reg => |reg| try writer.writeAll(@tagName(reg)), + .imm => |imm| try writer.print("{d}", .{imm.asSigned(64)}), + .mem => unreachable, // there is no "mem" operand in the actual instructions + } + } + } }; const std = @import("std"); diff --git a/test/behavior/align.zig b/test/behavior/align.zig index bbb786af78..1ede6ad433 100644 --- a/test/behavior/align.zig +++ b/test/behavior/align.zig @@ -249,7 +249,6 @@ fn testBytesAlign(b: u8) !void { } test "@alignCast slices" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO diff --git a/test/behavior/array.zig b/test/behavior/array.zig index 9b3c66f1ba..f5fa95c770 100644 --- a/test/behavior/array.zig +++ b/test/behavior/array.zig @@ -50,7 +50,6 @@ fn getArrayLen(a: []const u32) usize { test "array concat with undefined" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -89,7 +88,6 @@ test "array concat with tuple" { test "array init with concat" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const a = 'a'; var i: [4]u8 = [2]u8{ a, 'b' } ++ [2]u8{ 'c', 'd' }; @@ -99,7 +97,6 @@ test "array init with concat" 
{ test "array init with mult" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const a = 'a'; var i: [8]u8 = [2]u8{ a, 'b' } ** 4; @@ -244,7 +241,6 @@ fn plusOne(x: u32) u32 { test "single-item pointer to array indexing and slicing" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testSingleItemPtrArrayIndexSlice(); try comptime testSingleItemPtrArrayIndexSlice(); @@ -270,7 +266,6 @@ fn doSomeMangling(array: *[4]u8) void { test "implicit cast zero sized array ptr to slice" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; { var b = "".*; @@ -309,7 +304,6 @@ const Str = struct { a: []Sub }; test "set global var array via slice embedded in struct" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var s = Str{ .a = s_array[0..] }; @@ -376,7 +370,6 @@ test "comptime evaluating function that takes array by value" { test "runtime initialize array elem and then implicit cast to slice" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var two: i32 = 2; _ = &two; @@ -387,7 +380,6 @@ test "runtime initialize array elem and then implicit cast to slice" { test "array literal as argument to function" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn entry(two: i32) !void { @@ -416,7 +408,6 @@ test "double nested array to const slice cast in array literal" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn entry(two: i32) !void { @@ -522,7 +513,6 @@ test "type deduction for array subscript expression" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -542,6 +532,7 @@ test "sentinel element count towards the ABI size calculation" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -956,7 +947,6 @@ test "array initialized with string literal" { test "array initialized with array with sentinel" { 
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         a: u32,
diff --git a/test/behavior/basic.zig b/test/behavior/basic.zig
index 0262b59bde..fb61247b11 100644
--- a/test/behavior/basic.zig
+++ b/test/behavior/basic.zig
@@ -709,7 +709,6 @@ test "result location is optional inside error union" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const x = maybe(true) catch unreachable;
     try expect(x.? == 42);
@@ -1091,8 +1090,6 @@ test "orelse coercion as function argument" {
 }
 
 test "runtime-known globals initialized with undefined" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     const S = struct {
         var array: [10]u32 = [_]u32{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
         var vp: [*]u32 = undefined;
diff --git a/test/behavior/bitcast.zig b/test/behavior/bitcast.zig
index 779fbfc795..6d513a4ac7 100644
--- a/test/behavior/bitcast.zig
+++ b/test/behavior/bitcast.zig
@@ -165,6 +165,7 @@ test "@bitCast packed structs at runtime and comptime" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const Full = packed struct {
         number: u16,
@@ -191,6 +192,7 @@ test "@bitCast packed structs at runtime and comptime" {
 test "@bitCast extern structs at runtime and comptime" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const Full = extern struct {
         number: u16,
@@ -225,6 +227,7 @@ test "bitcast packed struct to integer and back" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const LevelUpMove = packed struct {
         move_id: u9,
diff --git a/test/behavior/byval_arg_var.zig b/test/behavior/byval_arg_var.zig
index 6b48769500..3a82ca86ad 100644
--- a/test/behavior/byval_arg_var.zig
+++ b/test/behavior/byval_arg_var.zig
@@ -5,7 +5,6 @@ var result: []const u8 = "wrong";
 
 test "pass string literal byvalue to a generic var param" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
 
     start();
diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig
index 53616a82ce..acad6b8f41 100644
--- a/test/behavior/cast.zig
+++ b/test/behavior/cast.zig
@@ -266,7 +266,6 @@ fn MakeType(comptime T: type) type {
 test "implicit cast from *[N]T to [*c]T" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var x: [4]u16 = [4]u16{ 0, 1, 2, 3 };
     var y: [*c]u16 = &x;
@@ -343,7 +342,6 @@ test "array coercion to undefined at runtime" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     @setRuntimeSafety(true);
 
@@ -409,7 +407,6 @@ test "peer type unsigned int to signed" {
 test "expected [*c]const u8, found [*:0]const u8" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var a: [*:0]const u8 = "hello";
     _ = &a;
@@ -496,7 +493,6 @@ test "peer type resolution: [0]u8, []const u8, and anyerror![]u8" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() anyerror!void {
@@ -577,7 +573,6 @@ fn testPeerErrorAndArray2(x: u8) anyerror![]const u8 {
 test "single-item pointer of array to slice to unknown length pointer" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try testCastPtrOfArrayToSliceAndPtr();
     try comptime testCastPtrOfArrayToSliceAndPtr();
@@ -673,6 +668,7 @@ test "@floatCast cast down" {
 test "peer type resolution: unreachable, error set, unreachable" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const Error = error{
         FileDescriptorAlreadyPresentInSet,
@@ -834,7 +830,6 @@ test "peer cast *[0]T to E![]const T" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var buffer: [5]u8 = "abcde".*;
     const buf: anyerror![]const u8 = buffer[0..];
@@ -850,7 +845,6 @@ test "peer cast *[0]T to []const T" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var buffer: [5]u8 = "abcde".*;
     const buf: []const u8 = buffer[0..];
@@ -895,7 +889,6 @@ test "peer resolution of string literals" {
 test "peer cast [:x]T to []T" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -912,7 +905,6 @@ test "peer cast [:x]T to []T" {
 test "peer cast [N:x]T to [N]T" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -929,7 +921,6 @@ test "peer cast [N:x]T to [N]T" {
 test "peer cast *[N:x]T to *[N]T" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -987,7 +978,6 @@ test "peer cast [:x]T to [*:x]T" {
 test "peer type resolution implicit cast to return type" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -1008,7 +998,6 @@ test "peer type resolution implicit cast to return type" {
 test "peer type resolution implicit cast to variable type" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -1052,7 +1041,6 @@ test "cast between C pointer with different but compatible types" {
 test "peer type resolve string lit with sentinel-terminated mutable slice" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var array: [4:0]u8 = undefined;
     array[4] = 0; // TODO remove this when #4372 is solved
@@ -1119,7 +1107,6 @@ test "implicit cast from [*]T to ?*anyopaque" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var a = [_]u8{ 3, 2, 1 };
     var runtime_zero: usize = 0;
@@ -1319,7 +1306,6 @@ test "*const [N]null u8 to ?[]const u8" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -1363,7 +1349,6 @@ test "assignment to optional pointer result loc" {
 }
 
 test "cast between *[N]void and []void" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
 
     var a: [4]void = undefined;
@@ -1618,8 +1603,6 @@ test "optional slice passed as parameter coerced to allowzero many pointer" {
 }
 
 test "single item pointer to pointer to array to slice" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     var x: i32 = 1234;
     try expect(@as([]const i32, @as(*[1]i32, &x))[0] == 1234);
     const z1 = @as([]const i32, @as(*[1]i32, &x));
@@ -1662,8 +1645,6 @@ test "@volatileCast without a result location" {
 }
 
 test "coercion from single-item pointer to @as to slice" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     var x: u32 = 1;
 
     // Why the following line gets a compile error?
@@ -1728,7 +1709,6 @@ test "peer type resolution: same array type with sentinel" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var a: [2:0]u32 = .{ 0, 1 };
     var b: [2:0]u32 = .{ 2, 3 };
@@ -1751,7 +1731,6 @@ test "peer type resolution: array with sentinel and array without sentinel" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var a: [2:0]u32 = .{ 0, 1 };
     var b: [2]u32 = .{ 2, 3 };
@@ -1956,7 +1935,6 @@ test "peer type resolution: array and tuple" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var arr: [3]i32 = .{ 1, 2, 3 };
     _ = &arr;
@@ -2281,7 +2259,6 @@ test "peer type resolution: arrays of compatible types" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var e0: u8 = 3;
     var e1: u8 = 2;
diff --git a/test/behavior/enum.zig b/test/behavior/enum.zig
index 42138c1c15..d1a58c9278 100644
--- a/test/behavior/enum.zig
+++ b/test/behavior/enum.zig
@@ -618,6 +618,7 @@ test "enum with specified tag values" {
 test "non-exhaustive enum" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         const E = enum(u8) { a, b, _ };
diff --git a/test/behavior/error.zig b/test/behavior/error.zig
index 2863c5db6c..314a16ebb3 100644
--- a/test/behavior/error.zig
+++ b/test/behavior/error.zig
@@ -856,7 +856,6 @@ test "alignment of wrapping an error union payload" {
 
 test "compare error union and error set" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var a: anyerror = error.Foo;
     var b: anyerror!u32 = error.Bar;
diff --git a/test/behavior/eval.zig b/test/behavior/eval.zig
index 492b204842..b5dcda280b 100644
--- a/test/behavior/eval.zig
+++ b/test/behavior/eval.zig
@@ -395,6 +395,7 @@ test "return 0 from function that has u0 return type" {
 test "statically initialized struct" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     st_init_str_foo.x += 1;
     try expect(st_init_str_foo.x == 14);
@@ -740,7 +741,6 @@ test "array concatenation of function calls" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var a = oneItem(3) ++ oneItem(4);
     try expect(std.mem.eql(i32, &a, &[_]i32{ 3, 4 }));
@@ -750,7 +750,6 @@ test "array multiplication of function calls" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var a = oneItem(3) ** scalar(2);
     try expect(std.mem.eql(i32, &a, &[_]i32{ 3, 3 }));
@@ -768,7 +767,6 @@ test "array concatenation peer resolves element types - value" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var a = [2]u3{ 1, 7 };
     var b = [3]u8{ 200, 225, 255 };
@@ -786,6 +784,7 @@ test "array concatenation peer resolves element types - pointer" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var a = [2]u3{ 1, 7 };
     var b = [3]u8{ 200, 225, 255 };
diff --git a/test/behavior/for.zig b/test/behavior/for.zig
index 66c89d1b8c..4f873bbbe4 100644
--- a/test/behavior/for.zig
+++ b/test/behavior/for.zig
@@ -200,7 +200,6 @@ test "for on slice with allowzero ptr" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest(slice: []const u8) !void {
@@ -216,7 +215,6 @@ test "for on slice with allowzero ptr" {
 test "else continue outer for" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var i: usize = 6;
     var buf: [5]u8 = undefined;
@@ -313,7 +311,6 @@ test "slice and two counters, one is offset and one is runtime" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const slice: []const u8 = "blah";
     var start: usize = 0;
@@ -343,7 +340,6 @@ test "two slices, one captured by-ref" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var buf: [10]u8 = undefined;
     const slice1: []const u8 = "blah";
@@ -400,7 +396,6 @@ test "inline for with slice as the comptime-known" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const comptime_slice = "hello";
     var runtime_i: usize = 3;
diff --git a/test/behavior/generics.zig b/test/behavior/generics.zig
index 46c400750c..6bd627dfe3 100644
--- a/test/behavior/generics.zig
+++ b/test/behavior/generics.zig
@@ -158,7 +158,6 @@ test "generic fn with implicit cast" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try expect(getFirstByte(u8, &[_]u8{13}) == 13);
     try expect(getFirstByte(u16, &[_]u16{
@@ -320,7 +319,6 @@ test "generic function instantiation non-duplicates" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
     if (builtin.os.tag == .wasi) return error.SkipZigTest;
 
     const S = struct {
diff --git a/test/behavior/globals.zig b/test/behavior/globals.zig
index 17b8c4b823..f7a23b725f 100644
--- a/test/behavior/globals.zig
+++ b/test/behavior/globals.zig
@@ -29,7 +29,6 @@ test "slices pointing at the same address as global array." {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         const a = [_]u8{ 1, 2, 3 };
diff --git a/test/behavior/if.zig b/test/behavior/if.zig
index ef0862bb70..a82d9a5c61 100644
--- a/test/behavior/if.zig
+++ b/test/behavior/if.zig
@@ -45,7 +45,6 @@ var global_with_err: anyerror!u32 = error.SomeError;
 
 test "unwrap mutable global var" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     if (global_with_val) |v| {
         try expect(v == 0);
diff --git a/test/behavior/inline_switch.zig b/test/behavior/inline_switch.zig
index 1c1654f3b0..d0621ad198 100644
--- a/test/behavior/inline_switch.zig
+++ b/test/behavior/inline_switch.zig
@@ -76,7 +76,6 @@ test "inline switch unions" {
 test "inline else bool" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var a = true;
     _ = &a;
diff --git a/test/behavior/math.zig b/test/behavior/math.zig
index 509bfbb16a..fefcf4b0e8 100644
--- a/test/behavior/math.zig
+++ b/test/behavior/math.zig
@@ -592,8 +592,6 @@ fn testSignedWrappingEval(x: i32) !void {
 }
 
 test "signed negation wrapping" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     try testSignedNegationWrappingEval(minInt(i16));
     try comptime testSignedNegationWrappingEval(minInt(i16));
 }
@@ -664,8 +662,6 @@ test "bit shift a u1" {
 }
 
 test "truncating shift right" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     try testShrTrunc(maxInt(u16));
     try comptime testShrTrunc(maxInt(u16));
 }
@@ -1455,8 +1451,6 @@ fn testShlExact(x: u8) !void {
 }
 
 test "exact shift right" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     try testShrExact(0b10110100);
     try comptime testShrExact(0b10110100);
 }
diff --git a/test/behavior/nan.zig b/test/behavior/nan.zig
index e177afa9d0..fc5ce4d0f9 100644
--- a/test/behavior/nan.zig
+++ b/test/behavior/nan.zig
@@ -26,7 +26,6 @@ test "nan memory equality" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     // signaled
     try testing.expect(mem.eql(u8, mem.asBytes(&snan_u16), mem.asBytes(&snan_f16)));
diff --git a/test/behavior/optional.zig b/test/behavior/optional.zig
index 9282184c3e..f9c71d3bea 100644
--- a/test/behavior/optional.zig
+++ b/test/behavior/optional.zig
@@ -488,7 +488,6 @@ const NoReturn = struct {
 
 test "optional of noreturn used with if" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     NoReturn.a = 64;
     if (NoReturn.loop()) |_| {
@@ -500,7 +499,6 @@ test "optional of noreturn used with if" {
 
 test "optional of noreturn used with orelse" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     NoReturn.a = 64;
     const val = NoReturn.testOrelse();
diff --git a/test/behavior/packed-struct.zig b/test/behavior/packed-struct.zig
index be69bf8213..88e5457627 100644
--- a/test/behavior/packed-struct.zig
+++ b/test/behavior/packed-struct.zig
@@ -258,6 +258,7 @@ test "nested packed struct unaligned" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
     if (native_endian != .little) return error.SkipZigTest; // Byte aligned packed struct field pointers have not been implemented yet
 
     const S1 = packed struct {
@@ -330,6 +331,7 @@ test "byte-aligned field pointer offsets" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         const A = packed struct {
@@ -491,7 +493,6 @@ test "@intFromPtr on a packed struct field" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
     if (native_endian != .little) return error.SkipZigTest;
 
     const S = struct {
@@ -515,7 +516,6 @@ test "@intFromPtr on a packed struct field unaligned and nested" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
     if (native_endian != .little) return error.SkipZigTest; // Byte aligned packed struct field pointers have not been implemented yet
 
     const S1 = packed struct {
diff --git a/test/behavior/pointers.zig b/test/behavior/pointers.zig
index cbd3033e7d..c574f487b3 100644
--- a/test/behavior/pointers.zig
+++ b/test/behavior/pointers.zig
@@ -201,7 +201,6 @@ test "allowzero pointer and slice" {
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var ptr: [*]allowzero i32 = @ptrFromInt(0);
     const opt_ptr: ?[*]allowzero i32 = ptr;
@@ -440,7 +439,6 @@ test "indexing array with sentinel returns correct type" {
 test "element pointer to slice" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -488,7 +486,6 @@ test "element pointer arithmetic to slice" {
 
 test "array slicing to slice" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
diff --git a/test/behavior/ptrcast.zig b/test/behavior/ptrcast.zig
index b5a628e197..11afc9474a 100644
--- a/test/behavior/ptrcast.zig
+++ b/test/behavior/ptrcast.zig
@@ -232,7 +232,6 @@ test "implicit optional pointer to optional anyopaque pointer" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var buf: [4]u8 = "aoeu".*;
     const x: ?[*]u8 = &buf;
@@ -245,7 +244,6 @@ test "@ptrCast slice to slice" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn foo(slice: []u32) []i32 {
diff --git a/test/behavior/reflection.zig b/test/behavior/reflection.zig
index aea84bc45a..f07b5a512e 100644
--- a/test/behavior/reflection.zig
+++ b/test/behavior/reflection.zig
@@ -28,6 +28,7 @@ fn dummy(a: bool, b: i32, c: f32) i32 {
 test "reflection: @field" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var f = Foo{
         .one = 42,
diff --git a/test/behavior/sizeof_and_typeof.zig b/test/behavior/sizeof_and_typeof.zig
index 5d78acb241..b6206df491 100644
--- a/test/behavior/sizeof_and_typeof.zig
+++ b/test/behavior/sizeof_and_typeof.zig
@@ -81,7 +81,6 @@ const P = packed struct {
 test "@offsetOf" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     // Packed structs have fixed memory layout
     try expect(@offsetOf(P, "a") == 0);
diff --git a/test/behavior/slice.zig b/test/behavior/slice.zig
index 2375977a4e..e1576ca302 100644
--- a/test/behavior/slice.zig
+++ b/test/behavior/slice.zig
@@ -67,7 +67,6 @@ test "comptime slice of undefined pointer of length 0" {
test "implicitly cast array of size 0 to slice" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var msg = [_]u8{}; try assertLenIsZero(&msg); @@ -233,7 +232,6 @@ test "runtime safety lets us slice from len..len" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var an_array = [_]u8{ 1, 2, 3 }; try expect(mem.eql(u8, sliceFromLenToLen(an_array[0..], 3, 3), "")); @@ -289,7 +287,6 @@ fn sliceSum(comptime q: []const u8) i32 { test "slice type with custom alignment" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const LazilyResolvedType = struct { anything: i32, @@ -347,7 +344,6 @@ test "empty array to slice" { test "@ptrCast slice to pointer" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -402,7 +398,6 @@ test "slice syntax resulting in pointer-to-array" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; const S = struct { @@ -622,7 +617,6 @@ test "slice syntax resulting in pointer-to-array" { test "slice pointer-to-array null terminated" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; comptime { var array = [5:0]u8{ 1, 2, 3, 4, 5 }; @@ -641,7 +635,6 @@ test "slice pointer-to-array null terminated" { test "slice pointer-to-array zero length" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; comptime { { @@ -676,7 +669,6 @@ test "type coercion of pointer to anon struct literal to pointer to slice" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const U = union { @@ -786,7 +778,6 @@ test "slicing array with sentinel as end index" { test "slicing slice with sentinel as end index" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn do() !void { @@ -857,7 +848,6 @@ test "global slice field access" { } test "slice of void" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == 
.stage2_spirv64) return error.SkipZigTest; var n: usize = 10; @@ -981,7 +971,6 @@ test "get address of element of zero-sized slice" { if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; const S = struct { @@ -996,7 +985,6 @@ test "sentinel-terminated 0-length slices" { if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const u32s: [4]u32 = [_]u32{ 0, 1, 2, 3 }; diff --git a/test/behavior/struct.zig b/test/behavior/struct.zig index e27adafa79..520c3ff409 100644 --- a/test/behavior/struct.zig +++ b/test/behavior/struct.zig @@ -68,6 +68,7 @@ const SmallStruct = struct { test "lower unnamed constants" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var foo = SmallStruct{ .a = 1, .b = 255 }; try expect(foo.first() == 1); @@ -874,6 +875,7 @@ test "packed struct field passed to generic function" { test "anonymous struct literal syntax" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const Point = struct { @@ -1103,6 +1105,7 @@ test "packed struct with undefined initializers" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const P = packed struct { @@ -1365,6 +1368,7 @@ test "store to comptime field" { test "struct field init value is size of the struct" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const namespace = struct { const S = extern struct { diff --git a/test/behavior/struct_contains_slice_of_itself.zig b/test/behavior/struct_contains_slice_of_itself.zig index 6f6d829567..adb1c31047 100644 --- a/test/behavior/struct_contains_slice_of_itself.zig +++ b/test/behavior/struct_contains_slice_of_itself.zig @@ -13,7 +13,6 @@ const NodeAligned = struct { test "struct contains slice of itself" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var other_nodes = [_]Node{ Node{ @@ -54,7 +53,6 @@ test "struct contains slice of itself" { test "struct contains aligned slice of itself" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var other_nodes = [_]NodeAligned{ NodeAligned{ diff --git a/test/behavior/switch.zig 
b/test/behavior/switch.zig index 6d82392958..8c5fcda8c2 100644 --- a/test/behavior/switch.zig +++ b/test/behavior/switch.zig @@ -118,7 +118,6 @@ fn trueIfBoolFalseOtherwise(comptime T: type) bool { test "switching on booleans" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testSwitchOnBools(); try comptime testSwitchOnBools(); @@ -277,7 +276,6 @@ fn testSwitchEnumPtrCapture() !void { test "switch handles all cases of number" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testSwitchHandleAllCases(); try comptime testSwitchHandleAllCases(); @@ -647,7 +645,6 @@ test "switch prong pointer capture alignment" { test "switch on pointer type" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const X = struct { @@ -912,7 +909,6 @@ test "peer type resolution on switch captures ignores unused payload bits" { test "switch prong captures range" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn a(b: []u3, c: u3) void { diff --git a/test/behavior/this.zig b/test/behavior/this.zig index 3f8fe13316..3638168a4f 100644 --- a/test/behavior/this.zig +++ b/test/behavior/this.zig @@ -27,6 +27,7 @@ test "this refer to module call private fn" { test "this refer to container" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var pt: Point(i32) = undefined; pt.x = 12; diff --git a/test/behavior/undefined.zig b/test/behavior/undefined.zig index bc613585d3..8d31c95ea5 100644 --- a/test/behavior/undefined.zig +++ b/test/behavior/undefined.zig @@ -91,7 +91,6 @@ test "reslice of undefined global var slice" { if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var stack_buf: [100]u8 = [_]u8{0} ** 100; buf = &stack_buf; diff --git a/test/behavior/union.zig b/test/behavior/union.zig index c720d5c908..d2009f57df 100644 --- a/test/behavior/union.zig +++ b/test/behavior/union.zig @@ -2044,6 +2044,7 @@ test "extern union initialized via reintepreted struct field initializer" { test "packed union initialized via reintepreted struct field initializer" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const bytes = [_]u8{ 0xaa, 0xbb, 0xcc, 0xdd }; @@ -2064,6 +2065,7 @@ test "packed union initialized via reintepreted struct field initializer" { test "store of comptime reinterpreted memory to extern union" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const bytes = [_]u8{ 0xaa, 0xbb, 0xcc, 0xdd }; @@ -2086,6 +2088,7 @@ test 
"store of comptime reinterpreted memory to extern union" { test "store of comptime reinterpreted memory to packed union" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const bytes = [_]u8{ 0xaa, 0xbb, 0xcc, 0xdd }; diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig index e32f5af9a3..5a4da799c2 100644 --- a/test/behavior/vector.zig +++ b/test/behavior/vector.zig @@ -166,7 +166,6 @@ test "array to vector" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { diff --git a/test/behavior/while.zig b/test/behavior/while.zig index 32bae6aeb3..71641ea265 100644 --- a/test/behavior/while.zig +++ b/test/behavior/while.zig @@ -379,7 +379,6 @@ test "while loop with comptime true condition needs no else block to return valu test "int returned from switch in while" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var x: u32 = 3; const val: usize = while (true) switch (x) { diff --git a/test/behavior/widening.zig b/test/behavior/widening.zig index 16f97550b5..5033d76313 100644 --- a/test/behavior/widening.zig +++ b/test/behavior/widening.zig @@ -32,7 +32,6 @@ test "implicit unsigned integer to signed integer" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var a: u8 = 250; var b: i16 = a; @@ -80,7 +79,6 @@ test "cast small unsigned to larger signed" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect(castSmallUnsignedToLargerSigned1(200) == @as(i16, 200)); try expect(castSmallUnsignedToLargerSigned2(9999) == @as(i64, 9999)); From 3967e00047cc9a2f6f2284ada37f47cfb2f2688f Mon Sep 17 00:00:00 2001 From: David Rubin Date: Sun, 26 May 2024 02:54:50 -0700 Subject: [PATCH 21/24] riscv: disable failing test --- test/behavior/cast.zig | 1 + 1 file changed, 1 insertion(+) diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig index acad6b8f41..9af0f35f8d 100644 --- a/test/behavior/cast.zig +++ b/test/behavior/cast.zig @@ -1845,6 +1845,7 @@ test "peer type resolution: three-way resolution combines error set and optional if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO const E = error{Foo}; var a: E = error.Foo; From 35303084761d450f0250f2ade23a326a254ce98d Mon Sep 17 00:00:00 2001 From: David Rubin Date: Sat, 8 Jun 2024 23:47:12 -0700 Subject: [PATCH 22/24] test: refactor 
`mainSimple` added some comments to make it easier for future contributors. --- lib/compiler/test_runner.zig | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/lib/compiler/test_runner.zig b/lib/compiler/test_runner.zig index 27c5761f9f..f4f9fcc695 100644 --- a/lib/compiler/test_runner.zig +++ b/lib/compiler/test_runner.zig @@ -219,21 +219,30 @@ pub fn log( /// Simpler main(), exercising fewer language features, so that /// work-in-progress backends can handle it. pub fn mainSimple() anyerror!void { - const enable_print = true; - const print_all = true; - const print_summary = false; + // is the backend capable of printing to stderr? + const enable_print = switch (builtin.zig_backend) { + .stage2_riscv64 => true, + else => false, + }; + // is the backend capable of using std.fmt.format to print a summary at the end? + const print_summary = switch (builtin.zig_backend) { + else => false, + }; var passed: u64 = 0; var skipped: u64 = 0; var failed: u64 = 0; - const stderr = if (enable_print) std.io.getStdErr() else {}; + + // we don't want to bring in File and Writer if the backend doesn't support it + const stderr = if (comptime enable_print) std.io.getStdErr() else {}; + for (builtin.test_functions) |test_fn| { - if (enable_print and print_all) { + if (enable_print) { stderr.writeAll(test_fn.name) catch {}; stderr.writeAll("... ") catch {}; } test_fn.func() catch |err| { - if (enable_print and !print_all) { + if (enable_print) { stderr.writeAll(test_fn.name) catch {}; stderr.writeAll("... ") catch {}; } @@ -247,10 +256,10 @@ pub fn mainSimple() anyerror!void { skipped += 1; continue; }; - if (enable_print and print_all) stderr.writeAll("PASS\n") catch {}; + if (enable_print) stderr.writeAll("PASS\n") catch {}; passed += 1; } - if (print_summary) { + if (enable_print and print_summary) { stderr.writer().print("{} passed, {} skipped, {} failed\n", .{ passed, skipped, failed }) catch {}; } if (failed != 0) std.process.exit(1); From 4ce9bfff1985e92e93849d53ece922699fdd3ae4 Mon Sep 17 00:00:00 2001 From: David Rubin Date: Sat, 8 Jun 2024 23:49:26 -0700 Subject: [PATCH 23/24] ZigObject: include all RISCs in `minFunctionAlignment` --- src/target.zig | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/src/target.zig b/src/target.zig index 8f6473ba8e..dccc91382b 100644 --- a/src/target.zig +++ b/src/target.zig @@ -433,7 +433,17 @@ pub fn defaultFunctionAlignment(target: std.Target) Alignment { pub fn minFunctionAlignment(target: std.Target) Alignment { return switch (target.cpu.arch) { - .riscv64 => .@"2", + .arm, + .armeb, + .aarch64, + .aarch64_32, + .aarch64_be, + .riscv32, + .riscv64, + .sparc, + .sparcel, + .sparc64, + => .@"2", else => .@"1", }; } From e60c5811473df6c092aa772b63f2cf5d27b7ff46 Mon Sep 17 00:00:00 2001 From: David Rubin Date: Thu, 13 Jun 2024 04:42:26 -0700 Subject: [PATCH 24/24] test: disable-enable riscv tests --- test/behavior/cast.zig | 2 +- test/behavior/error.zig | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig index 9af0f35f8d..6cc881b64d 100644 --- a/test/behavior/cast.zig +++ b/test/behavior/cast.zig @@ -1845,7 +1845,7 @@ test "peer type resolution: three-way resolution combines error set and optional if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if 
(builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const E = error{Foo}; var a: E = error.Foo; diff --git a/test/behavior/error.zig b/test/behavior/error.zig index 314a16ebb3..e0f0b224c1 100644 --- a/test/behavior/error.zig +++ b/test/behavior/error.zig @@ -1080,6 +1080,7 @@ test "result location initialization of error union with OPV payload" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { x: u0,