diff --git a/lib/compiler/test_runner.zig b/lib/compiler/test_runner.zig index dc82545e54..f4f9fcc695 100644 --- a/lib/compiler/test_runner.zig +++ b/lib/compiler/test_runner.zig @@ -12,10 +12,8 @@ var cmdline_buffer: [4096]u8 = undefined; var fba = std.heap.FixedBufferAllocator.init(&cmdline_buffer); pub fn main() void { - if (builtin.zig_backend == .stage2_riscv64) return mainExtraSimple() catch @panic("test failure"); - - if (builtin.zig_backend == .stage2_aarch64) { - return mainSimple() catch @panic("test failure"); + if (builtin.zig_backend == .stage2_riscv64) { + return mainSimple() catch @panic("test failure\n"); } const args = std.process.argsAlloc(fba.allocator()) catch @@ -221,20 +219,30 @@ pub fn log( /// Simpler main(), exercising fewer language features, so that /// work-in-progress backends can handle it. pub fn mainSimple() anyerror!void { - const enable_print = false; - const print_all = false; + // is the backend capable of printing to stderr? + const enable_print = switch (builtin.zig_backend) { + .stage2_riscv64 => true, + else => false, + }; + // is the backend capable of using std.fmt.format to print a summary at the end? + const print_summary = switch (builtin.zig_backend) { + else => false, + }; var passed: u64 = 0; var skipped: u64 = 0; var failed: u64 = 0; - const stderr = if (enable_print) std.io.getStdErr() else {}; + + // we don't want to bring in File and Writer if the backend doesn't support it + const stderr = if (comptime enable_print) std.io.getStdErr() else {}; + for (builtin.test_functions) |test_fn| { - if (enable_print and print_all) { + if (enable_print) { stderr.writeAll(test_fn.name) catch {}; stderr.writeAll("... ") catch {}; } test_fn.func() catch |err| { - if (enable_print and !print_all) { + if (enable_print) { stderr.writeAll(test_fn.name) catch {}; stderr.writeAll("... 
") catch {}; } @@ -248,27 +256,11 @@ pub fn mainSimple() anyerror!void { skipped += 1; continue; }; - if (enable_print and print_all) stderr.writeAll("PASS\n") catch {}; + if (enable_print) stderr.writeAll("PASS\n") catch {}; passed += 1; } - if (enable_print) { + if (enable_print and print_summary) { stderr.writer().print("{} passed, {} skipped, {} failed\n", .{ passed, skipped, failed }) catch {}; - if (failed != 0) std.process.exit(1); } -} - -pub fn mainExtraSimple() !void { - var fail_count: u8 = 0; - - for (builtin.test_functions) |test_fn| { - test_fn.func() catch |err| { - if (err != error.SkipZigTest) { - fail_count += 1; - continue; - } - continue; - }; - } - - if (fail_count != 0) std.process.exit(1); + if (failed != 0) std.process.exit(1); } diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 762251bc44..115d5697a5 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -22,6 +22,8 @@ const DW = std.dwarf; const leb128 = std.leb; const log = std.log.scoped(.riscv_codegen); const tracking_log = std.log.scoped(.tracking); +const verbose_tracking_log = std.log.scoped(.verbose_tracking); +const wip_mir_log = std.log.scoped(.wip_mir); const build_options = @import("build_options"); const codegen = @import("../../codegen.zig"); const Alignment = InternPool.Alignment; @@ -32,29 +34,17 @@ const DebugInfoOutput = codegen.DebugInfoOutput; const bits = @import("bits.zig"); const abi = @import("abi.zig"); +const Lower = @import("Lower.zig"); + const Register = bits.Register; const Immediate = bits.Immediate; const Memory = bits.Memory; const FrameIndex = bits.FrameIndex; const RegisterManager = abi.RegisterManager; const RegisterLock = RegisterManager.RegisterLock; -const callee_preserved_regs = abi.callee_preserved_regs; -/// General Purpose -const gp = abi.RegisterClass.gp; -/// Function Args -const fa = abi.RegisterClass.fa; -/// Function Returns -const fr = abi.RegisterClass.fr; -/// Temporary Use -const tp = abi.RegisterClass.tp; const InnerError = CodeGenError || error{OutOfRegisters}; -const RegisterView = enum(u1) { - caller, - callee, -}; - gpa: Allocator, air: Air, mod: *Package.Module, @@ -172,6 +162,14 @@ const MCValue = union(enum) { }; } + fn isRegister(mcv: MCValue) bool { + return switch (mcv) { + .register => true, + .register_offset => |reg_off| return reg_off.off == 0, + else => false, + }; + } + fn isMutable(mcv: MCValue) bool { return switch (mcv) { .none => unreachable, @@ -295,14 +293,15 @@ const MCValue = union(enum) { const Branch = struct { inst_table: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, MCValue) = .{}, - fn deinit(self: *Branch, gpa: Allocator) void { - self.inst_table.deinit(gpa); - self.* = undefined; + fn deinit(func: *Branch, gpa: Allocator) void { + func.inst_table.deinit(gpa); + func.* = undefined; } }; const InstTrackingMap = std.AutoArrayHashMapUnmanaged(Air.Inst.Index, InstTracking); const ConstTrackingMap = std.AutoArrayHashMapUnmanaged(InternPool.Index, InstTracking); + const InstTracking = struct { long: MCValue, short: MCValue, @@ -331,33 +330,37 @@ const InstTracking = struct { }, .short = result }; } - fn getReg(self: InstTracking) ?Register { - return self.short.getReg(); + fn getReg(inst_tracking: InstTracking) ?Register { + return inst_tracking.short.getReg(); } - fn getRegs(self: *const InstTracking) []const Register { - return self.short.getRegs(); + fn getRegs(inst_tracking: *const InstTracking) []const Register { + return inst_tracking.short.getRegs(); } - fn spill(self: 
*InstTracking, function: *Self, inst: Air.Inst.Index) !void { - if (std.meta.eql(self.long, self.short)) return; // Already spilled + fn spill(inst_tracking: *InstTracking, function: *Func, inst: Air.Inst.Index) !void { + if (std.meta.eql(inst_tracking.long, inst_tracking.short)) return; // Already spilled // Allocate or reuse frame index - switch (self.long) { - .none => self.long = try function.allocRegOrMem(inst, false), + switch (inst_tracking.long) { + .none => inst_tracking.long = try function.allocRegOrMem( + function.typeOfIndex(inst), + inst, + false, + ), .load_frame => {}, - .reserved_frame => |index| self.long = .{ .load_frame = .{ .index = index } }, + .reserved_frame => |index| inst_tracking.long = .{ .load_frame = .{ .index = index } }, else => unreachable, } - tracking_log.debug("spill %{d} from {} to {}", .{ inst, self.short, self.long }); - try function.genCopy(function.typeOfIndex(inst), self.long, self.short); + tracking_log.debug("spill %{d} from {} to {}", .{ inst, inst_tracking.short, inst_tracking.long }); + try function.genCopy(function.typeOfIndex(inst), inst_tracking.long, inst_tracking.short); } - fn reuseFrame(self: *InstTracking) void { - switch (self.long) { - .reserved_frame => |index| self.long = .{ .load_frame = .{ .index = index } }, + fn reuseFrame(inst_tracking: *InstTracking) void { + switch (inst_tracking.long) { + .reserved_frame => |index| inst_tracking.long = .{ .load_frame = .{ .index = index } }, else => {}, } - self.short = switch (self.long) { + inst_tracking.short = switch (inst_tracking.long) { .none, .unreach, .undef, @@ -367,7 +370,7 @@ const InstTracking = struct { .lea_frame, .load_symbol, .lea_symbol, - => self.long, + => inst_tracking.long, .dead, .register, .register_pair, @@ -379,14 +382,14 @@ const InstTracking = struct { }; } - fn trackSpill(self: *InstTracking, function: *Self, inst: Air.Inst.Index) !void { - try function.freeValue(self.short); - self.reuseFrame(); - tracking_log.debug("%{d} => {} (spilled)", .{ inst, self.* }); + fn trackSpill(inst_tracking: *InstTracking, function: *Func, inst: Air.Inst.Index) !void { + try function.freeValue(inst_tracking.short); + inst_tracking.reuseFrame(); + tracking_log.debug("%{d} => {} (spilled)", .{ inst, inst_tracking.* }); } - fn verifyMaterialize(self: InstTracking, target: InstTracking) void { - switch (self.long) { + fn verifyMaterialize(inst_tracking: InstTracking, target: InstTracking) void { + switch (inst_tracking.long) { .none, .unreach, .undef, @@ -395,7 +398,7 @@ const InstTracking = struct { .lea_frame, .load_symbol, .lea_symbol, - => assert(std.meta.eql(self.long, target.long)), + => assert(std.meta.eql(inst_tracking.long, target.long)), .load_frame, .reserved_frame, => switch (target.long) { @@ -416,73 +419,73 @@ const InstTracking = struct { } fn materialize( - self: *InstTracking, - function: *Self, + inst_tracking: *InstTracking, + function: *Func, inst: Air.Inst.Index, target: InstTracking, ) !void { - self.verifyMaterialize(target); - try self.materializeUnsafe(function, inst, target); + inst_tracking.verifyMaterialize(target); + try inst_tracking.materializeUnsafe(function, inst, target); } fn materializeUnsafe( - self: InstTracking, - function: *Self, + inst_tracking: InstTracking, + function: *Func, inst: Air.Inst.Index, target: InstTracking, ) !void { const ty = function.typeOfIndex(inst); - if ((self.long == .none or self.long == .reserved_frame) and target.long == .load_frame) - try function.genCopy(ty, target.long, self.short); - try function.genCopy(ty, 
target.short, self.short); + if ((inst_tracking.long == .none or inst_tracking.long == .reserved_frame) and target.long == .load_frame) + try function.genCopy(ty, target.long, inst_tracking.short); + try function.genCopy(ty, target.short, inst_tracking.short); } - fn trackMaterialize(self: *InstTracking, inst: Air.Inst.Index, target: InstTracking) void { - self.verifyMaterialize(target); + fn trackMaterialize(inst_tracking: *InstTracking, inst: Air.Inst.Index, target: InstTracking) void { + inst_tracking.verifyMaterialize(target); // Don't clobber reserved frame indices - self.long = if (target.long == .none) switch (self.long) { + inst_tracking.long = if (target.long == .none) switch (inst_tracking.long) { .load_frame => |addr| .{ .reserved_frame = addr.index }, - .reserved_frame => self.long, + .reserved_frame => inst_tracking.long, else => target.long, } else target.long; - self.short = target.short; - tracking_log.debug("%{d} => {} (materialize)", .{ inst, self.* }); + inst_tracking.short = target.short; + tracking_log.debug("%{d} => {} (materialize)", .{ inst, inst_tracking.* }); } - fn resurrect(self: *InstTracking, inst: Air.Inst.Index, scope_generation: u32) void { - switch (self.short) { + fn resurrect(inst_tracking: *InstTracking, inst: Air.Inst.Index, scope_generation: u32) void { + switch (inst_tracking.short) { .dead => |die_generation| if (die_generation >= scope_generation) { - self.reuseFrame(); - tracking_log.debug("%{d} => {} (resurrect)", .{ inst, self.* }); + inst_tracking.reuseFrame(); + tracking_log.debug("%{d} => {} (resurrect)", .{ inst, inst_tracking.* }); }, else => {}, } } - fn die(self: *InstTracking, function: *Self, inst: Air.Inst.Index) !void { - if (self.short == .dead) return; - try function.freeValue(self.short); - self.short = .{ .dead = function.scope_generation }; - tracking_log.debug("%{d} => {} (death)", .{ inst, self.* }); + fn die(inst_tracking: *InstTracking, function: *Func, inst: Air.Inst.Index) !void { + if (inst_tracking.short == .dead) return; + try function.freeValue(inst_tracking.short); + inst_tracking.short = .{ .dead = function.scope_generation }; + tracking_log.debug("%{d} => {} (death)", .{ inst, inst_tracking.* }); } fn reuse( - self: *InstTracking, - function: *Self, + inst_tracking: *InstTracking, + function: *Func, new_inst: ?Air.Inst.Index, old_inst: Air.Inst.Index, ) void { - self.short = .{ .dead = function.scope_generation }; + inst_tracking.short = .{ .dead = function.scope_generation }; if (new_inst) |inst| - tracking_log.debug("%{d} => {} (reuse %{d})", .{ inst, self.*, old_inst }) + tracking_log.debug("%{d} => {} (reuse %{d})", .{ inst, inst_tracking.*, old_inst }) else - tracking_log.debug("tmp => {} (reuse %{d})", .{ self.*, old_inst }); + tracking_log.debug("tmp => {} (reuse %{d})", .{ inst_tracking.*, old_inst }); } - fn liveOut(self: *InstTracking, function: *Self, inst: Air.Inst.Index) void { - for (self.getRegs()) |reg| { + fn liveOut(inst_tracking: *InstTracking, function: *Func, inst: Air.Inst.Index) void { + for (inst_tracking.getRegs()) |reg| { if (function.register_manager.isRegFree(reg)) { - tracking_log.debug("%{d} => {} (live-out)", .{ inst, self.* }); + tracking_log.debug("%{d} => {} (live-out)", .{ inst, inst_tracking.* }); continue; } @@ -509,18 +512,18 @@ const InstTracking = struct { // Perform side-effects of freeValue manually. 
function.register_manager.freeReg(reg); - tracking_log.debug("%{d} => {} (live-out %{d})", .{ inst, self.*, tracked_inst }); + tracking_log.debug("%{d} => {} (live-out %{d})", .{ inst, inst_tracking.*, tracked_inst }); } } pub fn format( - self: InstTracking, + inst_tracking: InstTracking, comptime _: []const u8, _: std.fmt.FormatOptions, writer: anytype, ) @TypeOf(writer).Error!void { - if (!std.meta.eql(self.long, self.short)) try writer.print("|{}| ", .{self.long}); - try writer.print("{}", .{self.short}); + if (!std.meta.eql(inst_tracking.long, inst_tracking.short)) try writer.print("|{}| ", .{inst_tracking.long}); + try writer.print("{}", .{inst_tracking.short}); } }; @@ -560,19 +563,13 @@ const FrameAlloc = struct { } }; -const StackAllocation = struct { - inst: ?Air.Inst.Index, - /// TODO: make the size inferred from the bits of the inst - size: u32, -}; - const BlockData = struct { relocs: std.ArrayListUnmanaged(Mir.Inst.Index) = .{}, state: State, - fn deinit(self: *BlockData, gpa: Allocator) void { - self.relocs.deinit(gpa); - self.* = undefined; + fn deinit(bd: *BlockData, gpa: Allocator) void { + bd.relocs.deinit(gpa); + bd.* = undefined; } }; @@ -584,31 +581,31 @@ const State = struct { scope_generation: u32, }; -fn initRetroactiveState(self: *Self) State { +fn initRetroactiveState(func: *Func) State { var state: State = undefined; - state.inst_tracking_len = @intCast(self.inst_tracking.count()); - state.scope_generation = self.scope_generation; + state.inst_tracking_len = @intCast(func.inst_tracking.count()); + state.scope_generation = func.scope_generation; return state; } -fn saveRetroactiveState(self: *Self, state: *State) !void { - const free_registers = self.register_manager.free_registers; +fn saveRetroactiveState(func: *Func, state: *State) !void { + const free_registers = func.register_manager.free_registers; var it = free_registers.iterator(.{ .kind = .unset }); while (it.next()) |index| { - const tracked_inst = self.register_manager.registers[index]; + const tracked_inst = func.register_manager.registers[index]; state.registers[index] = tracked_inst; - state.reg_tracking[index] = self.inst_tracking.get(tracked_inst).?; + state.reg_tracking[index] = func.inst_tracking.get(tracked_inst).?; } state.free_registers = free_registers; } -fn saveState(self: *Self) !State { - var state = self.initRetroactiveState(); - try self.saveRetroactiveState(&state); +fn saveState(func: *Func) !State { + var state = func.initRetroactiveState(); + try func.saveRetroactiveState(&state); return state; } -fn restoreState(self: *Self, state: State, deaths: []const Air.Inst.Index, comptime opts: struct { +fn restoreState(func: *Func, state: State, deaths: []const Air.Inst.Index, comptime opts: struct { emit_instructions: bool, update_tracking: bool, resurrect: bool, @@ -616,80 +613,81 @@ fn restoreState(self: *Self, state: State, deaths: []const Air.Inst.Index, compt }) !void { if (opts.close_scope) { for ( - self.inst_tracking.keys()[state.inst_tracking_len..], - self.inst_tracking.values()[state.inst_tracking_len..], - ) |inst, *tracking| try tracking.die(self, inst); - self.inst_tracking.shrinkRetainingCapacity(state.inst_tracking_len); + func.inst_tracking.keys()[state.inst_tracking_len..], + func.inst_tracking.values()[state.inst_tracking_len..], + ) |inst, *tracking| try tracking.die(func, inst); + func.inst_tracking.shrinkRetainingCapacity(state.inst_tracking_len); } if (opts.resurrect) for ( - self.inst_tracking.keys()[0..state.inst_tracking_len], - 
self.inst_tracking.values()[0..state.inst_tracking_len], + func.inst_tracking.keys()[0..state.inst_tracking_len], + func.inst_tracking.values()[0..state.inst_tracking_len], ) |inst, *tracking| tracking.resurrect(inst, state.scope_generation); - for (deaths) |death| try self.processDeath(death); + for (deaths) |death| try func.processDeath(death); const ExpectedContents = [@typeInfo(RegisterManager.TrackedRegisters).Array.len]RegisterLock; var stack align(@max(@alignOf(ExpectedContents), @alignOf(std.heap.StackFallbackAllocator(0)))) = if (opts.update_tracking) - {} else std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa); + {} else std.heap.stackFallback(@sizeOf(ExpectedContents), func.gpa); var reg_locks = if (opts.update_tracking) {} else try std.ArrayList(RegisterLock).initCapacity( stack.get(), @typeInfo(ExpectedContents).Array.len, ); defer if (!opts.update_tracking) { - for (reg_locks.items) |lock| self.register_manager.unlockReg(lock); + for (reg_locks.items) |lock| func.register_manager.unlockReg(lock); reg_locks.deinit(); }; for (0..state.registers.len) |index| { - const current_maybe_inst = if (self.register_manager.free_registers.isSet(index)) + const current_maybe_inst = if (func.register_manager.free_registers.isSet(index)) null else - self.register_manager.registers[index]; + func.register_manager.registers[index]; const target_maybe_inst = if (state.free_registers.isSet(index)) null else state.registers[index]; if (std.debug.runtime_safety) if (target_maybe_inst) |target_inst| - assert(self.inst_tracking.getIndex(target_inst).? < state.inst_tracking_len); + assert(func.inst_tracking.getIndex(target_inst).? < state.inst_tracking_len); if (opts.emit_instructions) { if (current_maybe_inst) |current_inst| { - try self.inst_tracking.getPtr(current_inst).?.spill(self, current_inst); + try func.inst_tracking.getPtr(current_inst).?.spill(func, current_inst); } if (target_maybe_inst) |target_inst| { - const target_tracking = self.inst_tracking.getPtr(target_inst).?; - try target_tracking.materialize(self, target_inst, state.reg_tracking[index]); + const target_tracking = func.inst_tracking.getPtr(target_inst).?; + try target_tracking.materialize(func, target_inst, state.reg_tracking[index]); } } if (opts.update_tracking) { if (current_maybe_inst) |current_inst| { - try self.inst_tracking.getPtr(current_inst).?.trackSpill(self, current_inst); + try func.inst_tracking.getPtr(current_inst).?.trackSpill(func, current_inst); } - { + blk: { + const inst = target_maybe_inst orelse break :blk; const reg = RegisterManager.regAtTrackedIndex(@intCast(index)); - self.register_manager.freeReg(reg); - self.register_manager.getRegAssumeFree(reg, target_maybe_inst); + func.register_manager.freeReg(reg); + func.register_manager.getRegAssumeFree(reg, inst); } if (target_maybe_inst) |target_inst| { - self.inst_tracking.getPtr(target_inst).?.trackMaterialize( + func.inst_tracking.getPtr(target_inst).?.trackMaterialize( target_inst, state.reg_tracking[index], ); } } else if (target_maybe_inst) |_| - try reg_locks.append(self.register_manager.lockRegIndexAssumeUnused(@intCast(index))); + try reg_locks.append(func.register_manager.lockRegIndexAssumeUnused(@intCast(index))); } if (opts.update_tracking and std.debug.runtime_safety) { - assert(self.register_manager.free_registers.eql(state.free_registers)); + assert(func.register_manager.free_registers.eql(state.free_registers)); var used_reg_it = state.free_registers.iterator(.{ .kind = .unset }); while (used_reg_it.next()) |index| - 
assert(self.register_manager.registers[index] == state.registers[index]); + assert(func.register_manager.registers[index] == state.registers[index]); } } -const Self = @This(); +const Func = @This(); const CallView = enum(u1) { callee, @@ -725,7 +723,7 @@ pub fn generate( } try branch_stack.append(.{}); - var function = Self{ + var function = Func{ .gpa = gpa, .air = air, .mod = mod, @@ -760,6 +758,8 @@ pub fn generate( function.mir_extra.deinit(gpa); } + wip_mir_log.debug("{}:", .{function.fmtDecl(func.owner_decl)}); + try function.frame_allocs.resize(gpa, FrameIndex.named_count); function.frame_allocs.set( @intFromEnum(FrameIndex.stack_frame), @@ -774,7 +774,7 @@ pub fn generate( ); const fn_info = zcu.typeToFunc(fn_type).?; - var call_info = function.resolveCallingConventionValues(fn_info) catch |err| switch (err) { + var call_info = function.resolveCallingConventionValues(fn_info, &.{}) catch |err| switch (err) { error.CodegenFail => return Result{ .fail = function.err_msg.? }, error.OutOfRegisters => return Result{ .fail = try ErrorMsg.create(gpa, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}), @@ -865,51 +865,171 @@ pub fn generate( } } -fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index { - const gpa = self.gpa; +const FormatWipMirData = struct { + func: *Func, + inst: Mir.Inst.Index, +}; +fn formatWipMir( + data: FormatWipMirData, + comptime _: []const u8, + _: std.fmt.FormatOptions, + writer: anytype, +) @TypeOf(writer).Error!void { + const comp = data.func.bin_file.comp; + const mod = comp.root_mod; + var lower = Lower{ + .bin_file = data.func.bin_file, + .allocator = data.func.gpa, + .mir = .{ + .instructions = data.func.mir_instructions.slice(), + .extra = data.func.mir_extra.items, + .frame_locs = data.func.frame_locs.slice(), + }, + .cc = .Unspecified, + .src_loc = data.func.src_loc, + .output_mode = comp.config.output_mode, + .link_mode = comp.config.link_mode, + .pic = mod.pic, + }; + var first = true; + for ((lower.lowerMir(data.inst) catch |err| switch (err) { + error.LowerFail => { + defer { + lower.err_msg.?.deinit(data.func.gpa); + lower.err_msg = null; + } + try writer.writeAll(lower.err_msg.?.msg); + return; + }, + error.OutOfMemory, error.InvalidInstruction => |e| { + try writer.writeAll(switch (e) { + error.OutOfMemory => "Out of memory", + error.InvalidInstruction => "CodeGen failed to find a viable instruction.", + }); + return; + }, + else => |e| return e, + }).insts) |lowered_inst| { + if (!first) try writer.writeAll("\ndebug(wip_mir): "); + try writer.print(" | {}", .{lowered_inst}); + first = false; + } +} +fn fmtWipMir(func: *Func, inst: Mir.Inst.Index) std.fmt.Formatter(formatWipMir) { + return .{ .data = .{ .func = func, .inst = inst } }; +} - try self.mir_instructions.ensureUnusedCapacity(gpa, 1); +const FormatDeclData = struct { + mod: *Module, + decl_index: InternPool.DeclIndex, +}; +fn formatDecl( + data: FormatDeclData, + comptime _: []const u8, + _: std.fmt.FormatOptions, + writer: anytype, +) @TypeOf(writer).Error!void { + try data.mod.declPtr(data.decl_index).renderFullyQualifiedName(data.mod, writer); +} +fn fmtDecl(func: *Func, decl_index: InternPool.DeclIndex) std.fmt.Formatter(formatDecl) { + return .{ .data = .{ + .mod = func.bin_file.comp.module.?, + .decl_index = decl_index, + } }; +} - const result_index: Mir.Inst.Index = @intCast(self.mir_instructions.len); - self.mir_instructions.appendAssumeCapacity(inst); +const FormatAirData = struct { + func: *Func, + inst: Air.Inst.Index, +}; 
+fn formatAir( + data: FormatAirData, + comptime _: []const u8, + _: std.fmt.FormatOptions, + writer: anytype, +) @TypeOf(writer).Error!void { + @import("../../print_air.zig").dumpInst( + data.inst, + data.func.bin_file.comp.module.?, + data.func.air, + data.func.liveness, + ); +} +fn fmtAir(func: *Func, inst: Air.Inst.Index) std.fmt.Formatter(formatAir) { + return .{ .data = .{ .func = func, .inst = inst } }; +} + +const FormatTrackingData = struct { + func: *Func, +}; +fn formatTracking( + data: FormatTrackingData, + comptime _: []const u8, + _: std.fmt.FormatOptions, + writer: anytype, +) @TypeOf(writer).Error!void { + var it = data.func.inst_tracking.iterator(); + while (it.next()) |entry| try writer.print("\n%{d} = {}", .{ entry.key_ptr.*, entry.value_ptr.* }); +} +fn fmtTracking(func: *Func) std.fmt.Formatter(formatTracking) { + return .{ .data = .{ .func = func } }; +} + +fn addInst(func: *Func, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index { + const gpa = func.gpa; + try func.mir_instructions.ensureUnusedCapacity(gpa, 1); + const result_index: Mir.Inst.Index = @intCast(func.mir_instructions.len); + func.mir_instructions.appendAssumeCapacity(inst); + if (inst.tag != .pseudo or switch (inst.ops) { + else => true, + .pseudo_dbg_prologue_end, + .pseudo_dbg_line_column, + .pseudo_dbg_epilogue_begin, + .pseudo_store_rm, + .pseudo_load_rm, + .pseudo_lea_rm, + .pseudo_mv, + .pseudo_dead, + => false, + }) wip_mir_log.debug("{}", .{func.fmtWipMir(result_index)}) else wip_mir_log.debug(" | uses-mem", .{}); return result_index; } -fn addNop(self: *Self) error{OutOfMemory}!Mir.Inst.Index { - return self.addInst(.{ +fn addNop(func: *Func) error{OutOfMemory}!Mir.Inst.Index { + return func.addInst(.{ .tag = .nop, .ops = .none, .data = undefined, }); } -fn addPseudoNone(self: *Self, ops: Mir.Inst.Ops) !void { - _ = try self.addInst(.{ +fn addPseudoNone(func: *Func, ops: Mir.Inst.Ops) !void { + _ = try func.addInst(.{ .tag = .pseudo, .ops = ops, .data = undefined, }); } -fn addPseudo(self: *Self, ops: Mir.Inst.Ops) !Mir.Inst.Index { - return self.addInst(.{ +fn addPseudo(func: *Func, ops: Mir.Inst.Ops) !Mir.Inst.Index { + return func.addInst(.{ .tag = .pseudo, .ops = ops, .data = undefined, }); } -pub fn addExtra(self: *Self, extra: anytype) Allocator.Error!u32 { +pub fn addExtra(func: *Func, extra: anytype) Allocator.Error!u32 { const fields = std.meta.fields(@TypeOf(extra)); - try self.mir_extra.ensureUnusedCapacity(self.gpa, fields.len); - return self.addExtraAssumeCapacity(extra); + try func.mir_extra.ensureUnusedCapacity(func.gpa, fields.len); + return func.addExtraAssumeCapacity(extra); } -pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 { +pub fn addExtraAssumeCapacity(func: *Func, extra: anytype) u32 { const fields = std.meta.fields(@TypeOf(extra)); - const result: u32 = @intCast(self.mir_extra.items.len); + const result: u32 = @intCast(func.mir_extra.items.len); inline for (fields) |field| { - self.mir_extra.appendAssumeCapacity(switch (field.type) { + func.mir_extra.appendAssumeCapacity(switch (field.type) { u32 => @field(extra, field.name), i32 => @bitCast(@field(extra, field.name)), else => @compileError("bad field type"), @@ -918,38 +1038,71 @@ pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 { return result; } -fn gen(self: *Self) !void { - const mod = self.bin_file.comp.module.?; - const fn_info = mod.typeToFunc(self.fn_type).?; +const required_features = [_]Target.riscv.Feature{ + .d, + .m, +}; + +fn gen(func: *Func) !void { + const mod = 
func.bin_file.comp.module.?; + const fn_info = mod.typeToFunc(func.fn_type).?; + + inline for (required_features) |feature| { + if (!func.hasFeature(feature)) { + return func.fail( + "target missing required feature {s}", + .{@tagName(feature)}, + ); + } + } if (fn_info.cc != .Naked) { - try self.addPseudoNone(.pseudo_dbg_prologue_end); + try func.addPseudoNone(.pseudo_dbg_prologue_end); - const backpatch_stack_alloc = try self.addPseudo(.pseudo_dead); - const backpatch_ra_spill = try self.addPseudo(.pseudo_dead); - const backpatch_fp_spill = try self.addPseudo(.pseudo_dead); - const backpatch_fp_add = try self.addPseudo(.pseudo_dead); - const backpatch_spill_callee_preserved_regs = try self.addPseudo(.pseudo_dead); + const backpatch_stack_alloc = try func.addPseudo(.pseudo_dead); + const backpatch_ra_spill = try func.addPseudo(.pseudo_dead); + const backpatch_fp_spill = try func.addPseudo(.pseudo_dead); + const backpatch_fp_add = try func.addPseudo(.pseudo_dead); + const backpatch_spill_callee_preserved_regs = try func.addPseudo(.pseudo_dead); - try self.genBody(self.air.getMainBody()); - - for (self.exitlude_jump_relocs.items) |jmp_reloc| { - self.mir_instructions.items(.data)[jmp_reloc].inst = - @intCast(self.mir_instructions.len); + switch (func.ret_mcv.long) { + .none, .unreach => {}, + .indirect => { + // The address where to store the return value for the caller is in a + // register which the callee is free to clobber. Therefore, we purposely + // spill it to stack immediately. + const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(Type.usize, mod)); + try func.genSetMem( + .{ .frame = frame_index }, + 0, + Type.usize, + func.ret_mcv.long.address().offset(-func.ret_mcv.short.indirect.off), + ); + func.ret_mcv.long = .{ .load_frame = .{ .index = frame_index } }; + tracking_log.debug("spill {} to {}", .{ func.ret_mcv.long, frame_index }); + }, + else => unreachable, } - try self.addPseudoNone(.pseudo_dbg_epilogue_begin); + try func.genBody(func.air.getMainBody()); - const backpatch_restore_callee_preserved_regs = try self.addPseudo(.pseudo_dead); - const backpatch_ra_restore = try self.addPseudo(.pseudo_dead); - const backpatch_fp_restore = try self.addPseudo(.pseudo_dead); - const backpatch_stack_alloc_restore = try self.addPseudo(.pseudo_dead); - try self.addPseudoNone(.pseudo_ret); + for (func.exitlude_jump_relocs.items) |jmp_reloc| { + func.mir_instructions.items(.data)[jmp_reloc].inst = + @intCast(func.mir_instructions.len); + } - const frame_layout = try self.computeFrameLayout(); + try func.addPseudoNone(.pseudo_dbg_epilogue_begin); + + const backpatch_restore_callee_preserved_regs = try func.addPseudo(.pseudo_dead); + const backpatch_ra_restore = try func.addPseudo(.pseudo_dead); + const backpatch_fp_restore = try func.addPseudo(.pseudo_dead); + const backpatch_stack_alloc_restore = try func.addPseudo(.pseudo_dead); + try func.addPseudoNone(.pseudo_ret); + + const frame_layout = try func.computeFrameLayout(); const need_save_reg = frame_layout.save_reg_list.count() > 0; - self.mir_instructions.set(backpatch_stack_alloc, .{ + func.mir_instructions.set(backpatch_stack_alloc, .{ .tag = .addi, .ops = .rri, .data = .{ .i_type = .{ @@ -958,51 +1111,51 @@ fn gen(self: *Self) !void { .imm12 = Immediate.s(-@as(i32, @intCast(frame_layout.stack_adjust))), } }, }); - self.mir_instructions.set(backpatch_ra_spill, .{ + func.mir_instructions.set(backpatch_ra_spill, .{ .tag = .pseudo, .ops = .pseudo_store_rm, .data = .{ .rm = .{ .r = .ra, .m = .{ .base = .{ .frame = .ret_addr }, 
- .mod = .{ .rm = .{ .size = .dword } }, + .mod = .{ .size = .dword, .unsigned = false }, }, } }, }); - self.mir_instructions.set(backpatch_ra_restore, .{ + func.mir_instructions.set(backpatch_ra_restore, .{ .tag = .pseudo, .ops = .pseudo_load_rm, .data = .{ .rm = .{ .r = .ra, .m = .{ .base = .{ .frame = .ret_addr }, - .mod = .{ .rm = .{ .size = .dword } }, + .mod = .{ .size = .dword, .unsigned = false }, }, } }, }); - self.mir_instructions.set(backpatch_fp_spill, .{ + func.mir_instructions.set(backpatch_fp_spill, .{ .tag = .pseudo, .ops = .pseudo_store_rm, .data = .{ .rm = .{ .r = .s0, .m = .{ .base = .{ .frame = .base_ptr }, - .mod = .{ .rm = .{ .size = .dword } }, + .mod = .{ .size = .dword, .unsigned = false }, }, } }, }); - self.mir_instructions.set(backpatch_fp_restore, .{ + func.mir_instructions.set(backpatch_fp_restore, .{ .tag = .pseudo, .ops = .pseudo_load_rm, .data = .{ .rm = .{ .r = .s0, .m = .{ .base = .{ .frame = .base_ptr }, - .mod = .{ .rm = .{ .size = .dword } }, + .mod = .{ .size = .dword, .unsigned = false }, }, } }, }); - self.mir_instructions.set(backpatch_fp_add, .{ + func.mir_instructions.set(backpatch_fp_add, .{ .tag = .addi, .ops = .rri, .data = .{ .i_type = .{ @@ -1011,7 +1164,7 @@ fn gen(self: *Self) !void { .imm12 = Immediate.s(@intCast(frame_layout.stack_adjust)), } }, }); - self.mir_instructions.set(backpatch_stack_alloc_restore, .{ + func.mir_instructions.set(backpatch_stack_alloc_restore, .{ .tag = .addi, .ops = .rri, .data = .{ .i_type = .{ @@ -1022,72 +1175,83 @@ fn gen(self: *Self) !void { }); if (need_save_reg) { - self.mir_instructions.set(backpatch_spill_callee_preserved_regs, .{ + func.mir_instructions.set(backpatch_spill_callee_preserved_regs, .{ .tag = .pseudo, .ops = .pseudo_spill_regs, .data = .{ .reg_list = frame_layout.save_reg_list }, }); - self.mir_instructions.set(backpatch_restore_callee_preserved_regs, .{ + func.mir_instructions.set(backpatch_restore_callee_preserved_regs, .{ .tag = .pseudo, .ops = .pseudo_restore_regs, .data = .{ .reg_list = frame_layout.save_reg_list }, }); } } else { - try self.addPseudoNone(.pseudo_dbg_prologue_end); - try self.genBody(self.air.getMainBody()); - try self.addPseudoNone(.pseudo_dbg_epilogue_begin); + try func.addPseudoNone(.pseudo_dbg_prologue_end); + try func.genBody(func.air.getMainBody()); + try func.addPseudoNone(.pseudo_dbg_epilogue_begin); } // Drop them off at the rbrace. 
- _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = .pseudo, .ops = .pseudo_dbg_line_column, .data = .{ .pseudo_dbg_line_column = .{ - .line = self.end_di_line, - .column = self.end_di_column, + .line = func.end_di_line, + .column = func.end_di_column, } }, }); } -fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { - const zcu = self.bin_file.comp.module.?; +fn genBody(func: *Func, body: []const Air.Inst.Index) InnerError!void { + const zcu = func.bin_file.comp.module.?; const ip = &zcu.intern_pool; - const air_tags = self.air.instructions.items(.tag); + const air_tags = func.air.instructions.items(.tag); for (body) |inst| { - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) continue; + if (func.liveness.isUnused(inst) and !func.air.mustLower(inst, ip)) continue; + wip_mir_log.debug("{}", .{func.fmtAir(inst)}); + verbose_tracking_log.debug("{}", .{func.fmtTracking()}); - const old_air_bookkeeping = self.air_bookkeeping; - try self.inst_tracking.ensureUnusedCapacity(self.gpa, 1); - switch (air_tags[@intFromEnum(inst)]) { + const old_air_bookkeeping = func.air_bookkeeping; + try func.inst_tracking.ensureUnusedCapacity(func.gpa, 1); + const tag: Air.Inst.Tag = air_tags[@intFromEnum(inst)]; + switch (tag) { // zig fmt: off - .ptr_add => try self.airPtrArithmetic(inst, .ptr_add), - .ptr_sub => try self.airPtrArithmetic(inst, .ptr_sub), + .add, + .add_wrap, + .sub, + .sub_wrap, - .add => try self.airBinOp(inst, .add), - .sub => try self.airBinOp(inst, .sub), + .mul, + .mul_wrap, + .div_trunc, - .add_safe, - .sub_safe, - .mul_safe, - => return self.fail("TODO implement safety_checked_instructions", .{}), + .shl, .shl_exact, + .shr, .shr_exact, - .add_wrap => try self.airAddWrap(inst), - .add_sat => try self.airAddSat(inst), - .sub_wrap => try self.airSubWrap(inst), - .sub_sat => try self.airSubSat(inst), - .mul => try self.airMul(inst), - .mul_wrap => try self.airMulWrap(inst), - .mul_sat => try self.airMulSat(inst), - .rem => try self.airRem(inst), - .mod => try self.airMod(inst), - .shl, .shl_exact => try self.airShl(inst), - .shl_sat => try self.airShlSat(inst), - .min => try self.airMinMax(inst, .min), - .max => try self.airMinMax(inst, .max), - .slice => try self.airSlice(inst), + .bool_and, + .bool_or, + .bit_and, + .bit_or, + + .xor, + + .min, + .max, + => try func.airBinOp(inst, tag), + + + .ptr_add, + .ptr_sub => try func.airPtrArithmetic(inst, tag), + + .rem, + .mod, + .div_float, + .div_floor, + .div_exact, + => return func.fail("TODO: {s}", .{@tagName(tag)}), .sqrt, .sin, @@ -1103,157 +1267,164 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .round, .trunc_float, .neg, - => try self.airUnaryMath(inst), + => try func.airUnaryMath(inst), - .add_with_overflow => try self.airAddWithOverflow(inst), - .sub_with_overflow => try self.airSubWithOverflow(inst), - .mul_with_overflow => try self.airMulWithOverflow(inst), - .shl_with_overflow => try self.airShlWithOverflow(inst), + .add_with_overflow => try func.airAddWithOverflow(inst), + .sub_with_overflow => try func.airSubWithOverflow(inst), + .mul_with_overflow => try func.airMulWithOverflow(inst), + .shl_with_overflow => try func.airShlWithOverflow(inst), - .div_float, .div_trunc, .div_floor, .div_exact => try self.airDiv(inst), - .cmp_lt => try self.airCmp(inst), - .cmp_lte => try self.airCmp(inst), - .cmp_eq => try self.airCmp(inst), - .cmp_gte => try self.airCmp(inst), - .cmp_gt => try self.airCmp(inst), - .cmp_neq => try self.airCmp(inst), + .add_sat => try 
func.airAddSat(inst), + .sub_sat => try func.airSubSat(inst), + .mul_sat => try func.airMulSat(inst), + .shl_sat => try func.airShlSat(inst), - .cmp_vector => try self.airCmpVector(inst), - .cmp_lt_errors_len => try self.airCmpLtErrorsLen(inst), + .add_safe, + .sub_safe, + .mul_safe, + => return func.fail("TODO implement safety_checked_instructions", .{}), - .bool_and => try self.airBoolOp(inst), - .bool_or => try self.airBoolOp(inst), - .bit_and => try self.airBitAnd(inst), - .bit_or => try self.airBitOr(inst), - .xor => try self.airXor(inst), - .shr, .shr_exact => try self.airShr(inst), + .cmp_lt, + .cmp_lte, + .cmp_eq, + .cmp_gte, + .cmp_gt, + .cmp_neq, + => try func.airCmp(inst, tag), - .alloc => try self.airAlloc(inst), - .ret_ptr => try self.airRetPtr(inst), - .arg => try self.airArg(inst), - .assembly => try self.airAsm(inst), - .bitcast => try self.airBitCast(inst), - .block => try self.airBlock(inst), - .br => try self.airBr(inst), - .trap => try self.airTrap(), - .breakpoint => try self.airBreakpoint(), - .ret_addr => try self.airRetAddr(inst), - .frame_addr => try self.airFrameAddress(inst), - .fence => try self.airFence(), - .cond_br => try self.airCondBr(inst), - .dbg_stmt => try self.airDbgStmt(inst), - .fptrunc => try self.airFptrunc(inst), - .fpext => try self.airFpext(inst), - .intcast => try self.airIntCast(inst), - .trunc => try self.airTrunc(inst), - .int_from_bool => try self.airIntFromBool(inst), - .is_non_null => try self.airIsNonNull(inst), - .is_non_null_ptr => try self.airIsNonNullPtr(inst), - .is_null => try self.airIsNull(inst), - .is_null_ptr => try self.airIsNullPtr(inst), - .is_non_err => try self.airIsNonErr(inst), - .is_non_err_ptr => try self.airIsNonErrPtr(inst), - .is_err => try self.airIsErr(inst), - .is_err_ptr => try self.airIsErrPtr(inst), - .load => try self.airLoad(inst), - .loop => try self.airLoop(inst), - .not => try self.airNot(inst), - .int_from_ptr => try self.airIntFromPtr(inst), - .ret => try self.airRet(inst, false), - .ret_safe => try self.airRet(inst, true), - .ret_load => try self.airRetLoad(inst), - .store => try self.airStore(inst, false), - .store_safe => try self.airStore(inst, true), - .struct_field_ptr=> try self.airStructFieldPtr(inst), - .struct_field_val=> try self.airStructFieldVal(inst), - .array_to_slice => try self.airArrayToSlice(inst), - .float_from_int => try self.airFloatFromInt(inst), - .int_from_float => try self.airIntFromFloat(inst), - .cmpxchg_strong => try self.airCmpxchg(inst), - .cmpxchg_weak => try self.airCmpxchg(inst), - .atomic_rmw => try self.airAtomicRmw(inst), - .atomic_load => try self.airAtomicLoad(inst), - .memcpy => try self.airMemcpy(inst), - .memset => try self.airMemset(inst, false), - .memset_safe => try self.airMemset(inst, true), - .set_union_tag => try self.airSetUnionTag(inst), - .get_union_tag => try self.airGetUnionTag(inst), - .clz => try self.airClz(inst), - .ctz => try self.airCtz(inst), - .popcount => try self.airPopcount(inst), - .abs => try self.airAbs(inst), - .byte_swap => try self.airByteSwap(inst), - .bit_reverse => try self.airBitReverse(inst), - .tag_name => try self.airTagName(inst), - .error_name => try self.airErrorName(inst), - .splat => try self.airSplat(inst), - .select => try self.airSelect(inst), - .shuffle => try self.airShuffle(inst), - .reduce => try self.airReduce(inst), - .aggregate_init => try self.airAggregateInit(inst), - .union_init => try self.airUnionInit(inst), - .prefetch => try self.airPrefetch(inst), - .mul_add => try self.airMulAdd(inst), - .addrspace_cast 
=> return self.fail("TODO: addrspace_cast", .{}), + .cmp_vector => try func.airCmpVector(inst), + .cmp_lt_errors_len => try func.airCmpLtErrorsLen(inst), - .@"try" => try self.airTry(inst), - .try_ptr => return self.fail("TODO: try_ptr", .{}), + .slice => try func.airSlice(inst), + .array_to_slice => try func.airArrayToSlice(inst), + + .slice_ptr => try func.airSlicePtr(inst), + .slice_len => try func.airSliceLen(inst), + + .alloc => try func.airAlloc(inst), + .ret_ptr => try func.airRetPtr(inst), + .arg => try func.airArg(inst), + .assembly => try func.airAsm(inst), + .bitcast => try func.airBitCast(inst), + .block => try func.airBlock(inst), + .br => try func.airBr(inst), + .trap => try func.airTrap(), + .breakpoint => try func.airBreakpoint(), + .ret_addr => try func.airRetAddr(inst), + .frame_addr => try func.airFrameAddress(inst), + .fence => try func.airFence(), + .cond_br => try func.airCondBr(inst), + .dbg_stmt => try func.airDbgStmt(inst), + .fptrunc => try func.airFptrunc(inst), + .fpext => try func.airFpext(inst), + .intcast => try func.airIntCast(inst), + .trunc => try func.airTrunc(inst), + .int_from_bool => try func.airIntFromBool(inst), + .is_non_null => try func.airIsNonNull(inst), + .is_non_null_ptr => try func.airIsNonNullPtr(inst), + .is_null => try func.airIsNull(inst), + .is_null_ptr => try func.airIsNullPtr(inst), + .is_non_err => try func.airIsNonErr(inst), + .is_non_err_ptr => try func.airIsNonErrPtr(inst), + .is_err => try func.airIsErr(inst), + .is_err_ptr => try func.airIsErrPtr(inst), + .load => try func.airLoad(inst), + .loop => try func.airLoop(inst), + .not => try func.airNot(inst), + .int_from_ptr => try func.airIntFromPtr(inst), + .ret => try func.airRet(inst, false), + .ret_safe => try func.airRet(inst, true), + .ret_load => try func.airRetLoad(inst), + .store => try func.airStore(inst, false), + .store_safe => try func.airStore(inst, true), + .struct_field_ptr=> try func.airStructFieldPtr(inst), + .struct_field_val=> try func.airStructFieldVal(inst), + .float_from_int => try func.airFloatFromInt(inst), + .int_from_float => try func.airIntFromFloat(inst), + .cmpxchg_strong => try func.airCmpxchg(inst), + .cmpxchg_weak => try func.airCmpxchg(inst), + .atomic_rmw => try func.airAtomicRmw(inst), + .atomic_load => try func.airAtomicLoad(inst), + .memcpy => try func.airMemcpy(inst), + .memset => try func.airMemset(inst, false), + .memset_safe => try func.airMemset(inst, true), + .set_union_tag => try func.airSetUnionTag(inst), + .get_union_tag => try func.airGetUnionTag(inst), + .clz => try func.airClz(inst), + .ctz => try func.airCtz(inst), + .popcount => try func.airPopcount(inst), + .abs => try func.airAbs(inst), + .byte_swap => try func.airByteSwap(inst), + .bit_reverse => try func.airBitReverse(inst), + .tag_name => try func.airTagName(inst), + .error_name => try func.airErrorName(inst), + .splat => try func.airSplat(inst), + .select => try func.airSelect(inst), + .shuffle => try func.airShuffle(inst), + .reduce => try func.airReduce(inst), + .aggregate_init => try func.airAggregateInit(inst), + .union_init => try func.airUnionInit(inst), + .prefetch => try func.airPrefetch(inst), + .mul_add => try func.airMulAdd(inst), + .addrspace_cast => return func.fail("TODO: addrspace_cast", .{}), + + .@"try" => try func.airTry(inst), + .try_ptr => return func.fail("TODO: try_ptr", .{}), .dbg_var_ptr, .dbg_var_val, - => try self.airDbgVar(inst), + => try func.airDbgVar(inst), - .dbg_inline_block => try self.airDbgInlineBlock(inst), + .dbg_inline_block => try 
func.airDbgInlineBlock(inst), - .call => try self.airCall(inst, .auto), - .call_always_tail => try self.airCall(inst, .always_tail), - .call_never_tail => try self.airCall(inst, .never_tail), - .call_never_inline => try self.airCall(inst, .never_inline), + .call => try func.airCall(inst, .auto), + .call_always_tail => try func.airCall(inst, .always_tail), + .call_never_tail => try func.airCall(inst, .never_tail), + .call_never_inline => try func.airCall(inst, .never_inline), - .atomic_store_unordered => try self.airAtomicStore(inst, .unordered), - .atomic_store_monotonic => try self.airAtomicStore(inst, .monotonic), - .atomic_store_release => try self.airAtomicStore(inst, .release), - .atomic_store_seq_cst => try self.airAtomicStore(inst, .seq_cst), + .atomic_store_unordered => try func.airAtomicStore(inst, .unordered), + .atomic_store_monotonic => try func.airAtomicStore(inst, .monotonic), + .atomic_store_release => try func.airAtomicStore(inst, .release), + .atomic_store_seq_cst => try func.airAtomicStore(inst, .seq_cst), + .struct_field_ptr_index_0 => try func.airStructFieldPtrIndex(inst, 0), + .struct_field_ptr_index_1 => try func.airStructFieldPtrIndex(inst, 1), + .struct_field_ptr_index_2 => try func.airStructFieldPtrIndex(inst, 2), + .struct_field_ptr_index_3 => try func.airStructFieldPtrIndex(inst, 3), - .struct_field_ptr_index_0 => try self.airStructFieldPtrIndex(inst, 0), - .struct_field_ptr_index_1 => try self.airStructFieldPtrIndex(inst, 1), - .struct_field_ptr_index_2 => try self.airStructFieldPtrIndex(inst, 2), - .struct_field_ptr_index_3 => try self.airStructFieldPtrIndex(inst, 3), + .field_parent_ptr => try func.airFieldParentPtr(inst), - .field_parent_ptr => try self.airFieldParentPtr(inst), + .switch_br => try func.airSwitchBr(inst), - .switch_br => try self.airSwitch(inst), - .slice_ptr => try self.airSlicePtr(inst), - .slice_len => try self.airSliceLen(inst), + .ptr_slice_len_ptr => try func.airPtrSliceLenPtr(inst), + .ptr_slice_ptr_ptr => try func.airPtrSlicePtrPtr(inst), - .ptr_slice_len_ptr => try self.airPtrSliceLenPtr(inst), - .ptr_slice_ptr_ptr => try self.airPtrSlicePtrPtr(inst), + .array_elem_val => try func.airArrayElemVal(inst), + + .slice_elem_val => try func.airSliceElemVal(inst), + .slice_elem_ptr => try func.airSliceElemPtr(inst), - .array_elem_val => try self.airArrayElemVal(inst), - .slice_elem_val => try self.airSliceElemVal(inst), - .slice_elem_ptr => try self.airSliceElemPtr(inst), - .ptr_elem_val => try self.airPtrElemVal(inst), - .ptr_elem_ptr => try self.airPtrElemPtr(inst), + .ptr_elem_val => try func.airPtrElemVal(inst), + .ptr_elem_ptr => try func.airPtrElemPtr(inst), .inferred_alloc, .inferred_alloc_comptime => unreachable, - .unreach => self.finishAirBookkeeping(), + .unreach => func.finishAirBookkeeping(), - .optional_payload => try self.airOptionalPayload(inst), - .optional_payload_ptr => try self.airOptionalPayloadPtr(inst), - .optional_payload_ptr_set => try self.airOptionalPayloadPtrSet(inst), - .unwrap_errunion_err => try self.airUnwrapErrErr(inst), - .unwrap_errunion_payload => try self.airUnwrapErrPayload(inst), - .unwrap_errunion_err_ptr => try self.airUnwrapErrErrPtr(inst), - .unwrap_errunion_payload_ptr=> try self.airUnwrapErrPayloadPtr(inst), - .errunion_payload_ptr_set => try self.airErrUnionPayloadPtrSet(inst), - .err_return_trace => try self.airErrReturnTrace(inst), - .set_err_return_trace => try self.airSetErrReturnTrace(inst), - .save_err_return_trace_index=> try self.airSaveErrReturnTraceIndex(inst), + .optional_payload => 
try func.airOptionalPayload(inst), + .optional_payload_ptr => try func.airOptionalPayloadPtr(inst), + .optional_payload_ptr_set => try func.airOptionalPayloadPtrSet(inst), + .unwrap_errunion_err => try func.airUnwrapErrErr(inst), + .unwrap_errunion_payload => try func.airUnwrapErrPayload(inst), + .unwrap_errunion_err_ptr => try func.airUnwrapErrErrPtr(inst), + .unwrap_errunion_payload_ptr=> try func.airUnwrapErrPayloadPtr(inst), + .errunion_payload_ptr_set => try func.airErrUnionPayloadPtrSet(inst), + .err_return_trace => try func.airErrReturnTrace(inst), + .set_err_return_trace => try func.airSetErrReturnTrace(inst), + .save_err_return_trace_index=> try func.airSaveErrReturnTraceIndex(inst), - .wrap_optional => try self.airWrapOptional(inst), - .wrap_errunion_payload => try self.airWrapErrUnionPayload(inst), - .wrap_errunion_err => try self.airWrapErrUnionErr(inst), + .wrap_optional => try func.airWrapOptional(inst), + .wrap_errunion_payload => try func.airWrapErrUnionPayload(inst), + .wrap_errunion_err => try func.airWrapErrUnionErr(inst), .add_optimized, .sub_optimized, @@ -1274,16 +1445,16 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .cmp_vector_optimized, .reduce_optimized, .int_from_float_optimized, - => return self.fail("TODO implement optimized float mode", .{}), + => return func.fail("TODO implement optimized float mode", .{}), - .is_named_enum_value => return self.fail("TODO implement is_named_enum_value", .{}), - .error_set_has_value => return self.fail("TODO implement error_set_has_value", .{}), - .vector_store_elem => return self.fail("TODO implement vector_store_elem", .{}), + .is_named_enum_value => return func.fail("TODO implement is_named_enum_value", .{}), + .error_set_has_value => return func.fail("TODO implement error_set_has_value", .{}), + .vector_store_elem => return func.fail("TODO implement vector_store_elem", .{}), - .c_va_arg => return self.fail("TODO implement c_va_arg", .{}), - .c_va_copy => return self.fail("TODO implement c_va_copy", .{}), - .c_va_end => return self.fail("TODO implement c_va_end", .{}), - .c_va_start => return self.fail("TODO implement c_va_start", .{}), + .c_va_arg => return func.fail("TODO implement c_va_arg", .{}), + .c_va_copy => return func.fail("TODO implement c_va_copy", .{}), + .c_va_end => return func.fail("TODO implement c_va_end", .{}), + .c_va_start => return func.fail("TODO implement c_va_start", .{}), .wasm_memory_size => unreachable, .wasm_memory_grow => unreachable, @@ -1294,95 +1465,97 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { // zig fmt: on } - assert(!self.register_manager.lockedRegsExist()); + assert(!func.register_manager.lockedRegsExist()); if (std.debug.runtime_safety) { - if (self.air_bookkeeping < old_air_bookkeeping + 1) { + if (func.air_bookkeeping < old_air_bookkeeping + 1) { std.debug.panic("in codegen.zig, handling of AIR instruction %{d} ('{}') did not do proper bookkeeping. 
Look for a missing call to finishAir.", .{ inst, air_tags[@intFromEnum(inst)] }); } { // check consistency of tracked registers - var it = self.register_manager.free_registers.iterator(.{ .kind = .unset }); + var it = func.register_manager.free_registers.iterator(.{ .kind = .unset }); while (it.next()) |index| { - const tracked_inst = self.register_manager.registers[index]; - const tracking = self.getResolvedInstValue(tracked_inst); + const tracked_inst = func.register_manager.registers[index]; + tracking_log.debug("tracked inst: {}", .{tracked_inst}); + const tracking = func.getResolvedInstValue(tracked_inst); for (tracking.getRegs()) |reg| { if (RegisterManager.indexOfRegIntoTracked(reg).? == index) break; - } else return self.fail( - \\%{} takes up these regs: {any}, however those regs don't use it - , .{ index, tracking.getRegs() }); + } else return std.debug.panic( + \\%{} takes up these regs: {any}, however reg {any} doesn't use it + , .{ tracked_inst, tracking.getRegs(), RegisterManager.regAtTrackedIndex(@intCast(index)) }); } } } } + verbose_tracking_log.debug("{}", .{func.fmtTracking()}); } -fn getValue(self: *Self, value: MCValue, inst: ?Air.Inst.Index) !void { - for (value.getRegs()) |reg| try self.register_manager.getReg(reg, inst); +fn getValue(func: *Func, value: MCValue, inst: ?Air.Inst.Index) !void { + for (value.getRegs()) |reg| try func.register_manager.getReg(reg, inst); } -fn getValueIfFree(self: *Self, value: MCValue, inst: ?Air.Inst.Index) void { - for (value.getRegs()) |reg| if (self.register_manager.isRegFree(reg)) - self.register_manager.getRegAssumeFree(reg, inst); +fn getValueIfFree(func: *Func, value: MCValue, inst: ?Air.Inst.Index) void { + for (value.getRegs()) |reg| if (func.register_manager.isRegFree(reg)) + func.register_manager.getRegAssumeFree(reg, inst); } -fn freeValue(self: *Self, value: MCValue) !void { +fn freeValue(func: *Func, value: MCValue) !void { switch (value) { - .register => |reg| self.register_manager.freeReg(reg), - .register_pair => |regs| for (regs) |reg| self.register_manager.freeReg(reg), - .register_offset => |reg_off| self.register_manager.freeReg(reg_off.reg), + .register => |reg| func.register_manager.freeReg(reg), + .register_pair => |regs| for (regs) |reg| func.register_manager.freeReg(reg), + .register_offset => |reg_off| func.register_manager.freeReg(reg_off.reg), else => {}, // TODO process stack allocation death } } -fn feed(self: *Self, bt: *Liveness.BigTomb, operand: Air.Inst.Ref) !void { +fn feed(func: *Func, bt: *Liveness.BigTomb, operand: Air.Inst.Ref) !void { if (bt.feed()) if (operand.toIndex()) |inst| { log.debug("feed inst: %{}", .{inst}); - try self.processDeath(inst); + try func.processDeath(inst); }; } /// Asserts there is already capacity to insert into top branch inst_table. -fn processDeath(self: *Self, inst: Air.Inst.Index) !void { - try self.inst_tracking.getPtr(inst).?.die(self, inst); +fn processDeath(func: *Func, inst: Air.Inst.Index) !void { + try func.inst_tracking.getPtr(inst).?.die(func, inst); } /// Called when there are no operands, and the instruction is always unreferenced. 
-fn finishAirBookkeeping(self: *Self) void { +fn finishAirBookkeeping(func: *Func) void { if (std.debug.runtime_safety) { - self.air_bookkeeping += 1; + func.air_bookkeeping += 1; } } -fn finishAirResult(self: *Self, inst: Air.Inst.Index, result: MCValue) void { - if (self.liveness.isUnused(inst)) switch (result) { +fn finishAirResult(func: *Func, inst: Air.Inst.Index, result: MCValue) void { + if (func.liveness.isUnused(inst)) switch (result) { .none, .dead, .unreach => {}, else => unreachable, // Why didn't the result die? } else { tracking_log.debug("%{d} => {} (birth)", .{ inst, result }); - self.inst_tracking.putAssumeCapacityNoClobber(inst, InstTracking.init(result)); + func.inst_tracking.putAssumeCapacityNoClobber(inst, InstTracking.init(result)); // In some cases, an operand may be reused as the result. // If that operand died and was a register, it was freed by // processDeath, so we have to "re-allocate" the register. - self.getValueIfFree(result, inst); + func.getValueIfFree(result, inst); } - self.finishAirBookkeeping(); + func.finishAirBookkeeping(); } fn finishAir( - self: *Self, + func: *Func, inst: Air.Inst.Index, result: MCValue, operands: [Liveness.bpi - 1]Air.Inst.Ref, ) !void { - var tomb_bits = self.liveness.getTombBits(inst); + var tomb_bits = func.liveness.getTombBits(inst); for (operands) |op| { const dies = @as(u1, @truncate(tomb_bits)) != 0; tomb_bits >>= 1; if (!dies) continue; - try self.processDeath(op.toIndexAllowNone() orelse continue); + try func.processDeath(op.toIndexAllowNone() orelse continue); } - self.finishAirResult(inst, result); + func.finishAirResult(inst, result); } const FrameLayout = struct { @@ -1391,7 +1564,7 @@ const FrameLayout = struct { }; fn setFrameLoc( - self: *Self, + func: *Func, frame_index: FrameIndex, base: Register, offset: *i32, @@ -1399,24 +1572,24 @@ fn setFrameLoc( ) void { const frame_i = @intFromEnum(frame_index); if (aligned) { - const alignment: InternPool.Alignment = self.frame_allocs.items(.abi_align)[frame_i]; + const alignment: InternPool.Alignment = func.frame_allocs.items(.abi_align)[frame_i]; offset.* = if (math.sign(offset.*) < 0) -1 * @as(i32, @intCast(alignment.backward(@intCast(@abs(offset.*))))) else @intCast(alignment.forward(@intCast(@abs(offset.*)))); } - self.frame_locs.set(frame_i, .{ .base = base, .disp = offset.* }); - offset.* += self.frame_allocs.items(.abi_size)[frame_i]; + func.frame_locs.set(frame_i, .{ .base = base, .disp = offset.* }); + offset.* += func.frame_allocs.items(.abi_size)[frame_i]; } -fn computeFrameLayout(self: *Self) !FrameLayout { - const frame_allocs_len = self.frame_allocs.len; - try self.frame_locs.resize(self.gpa, frame_allocs_len); - const stack_frame_order = try self.gpa.alloc(FrameIndex, frame_allocs_len - FrameIndex.named_count); - defer self.gpa.free(stack_frame_order); +fn computeFrameLayout(func: *Func) !FrameLayout { + const frame_allocs_len = func.frame_allocs.len; + try func.frame_locs.resize(func.gpa, frame_allocs_len); + const stack_frame_order = try func.gpa.alloc(FrameIndex, frame_allocs_len - FrameIndex.named_count); + defer func.gpa.free(stack_frame_order); - const frame_size = self.frame_allocs.items(.abi_size); - const frame_align = self.frame_allocs.items(.abi_align); + const frame_size = func.frame_allocs.items(.abi_size); + const frame_align = func.frame_allocs.items(.abi_align); for (stack_frame_order, FrameIndex.named_count..) 
|*frame_order, frame_index| frame_order.* = @enumFromInt(frame_index); @@ -1433,9 +1606,9 @@ fn computeFrameLayout(self: *Self) !FrameLayout { } var save_reg_list = Mir.RegisterList{}; - for (callee_preserved_regs) |reg| { - if (self.register_manager.isRegAllocated(reg)) { - save_reg_list.push(&callee_preserved_regs, reg); + for (abi.Registers.all_preserved) |reg| { + if (func.register_manager.isRegAllocated(reg)) { + save_reg_list.push(&abi.Registers.all_preserved, reg); } } @@ -1468,11 +1641,11 @@ fn computeFrameLayout(self: *Self) !FrameLayout { // store the ra at total_size - 8, so it's the very first thing in the stack // relative to the fp - self.frame_locs.set( + func.frame_locs.set( @intFromEnum(FrameIndex.ret_addr), .{ .base = .sp, .disp = acc_frame_size - 8 }, ); - self.frame_locs.set( + func.frame_locs.set( @intFromEnum(FrameIndex.base_ptr), .{ .base = .sp, .disp = acc_frame_size - 16 }, ); @@ -1481,11 +1654,11 @@ fn computeFrameLayout(self: *Self) !FrameLayout { // not need to know the size of the first allocation. Stack offsets point at the "bottom" // of variables. var s0_offset: i32 = -acc_frame_size; - self.setFrameLoc(.stack_frame, .s0, &s0_offset, true); - for (stack_frame_order) |frame_index| self.setFrameLoc(frame_index, .s0, &s0_offset, true); - self.setFrameLoc(.args_frame, .s0, &s0_offset, true); - self.setFrameLoc(.call_frame, .s0, &s0_offset, true); - self.setFrameLoc(.spill_frame, .s0, &s0_offset, true); + func.setFrameLoc(.stack_frame, .s0, &s0_offset, true); + for (stack_frame_order) |frame_index| func.setFrameLoc(frame_index, .s0, &s0_offset, true); + func.setFrameLoc(.args_frame, .s0, &s0_offset, true); + func.setFrameLoc(.call_frame, .s0, &s0_offset, true); + func.setFrameLoc(.spill_frame, .s0, &s0_offset, true); return .{ .stack_adjust = @intCast(acc_frame_size), @@ -1493,21 +1666,21 @@ fn computeFrameLayout(self: *Self) !FrameLayout { }; } -fn ensureProcessDeathCapacity(self: *Self, additional_count: usize) !void { - const table = &self.branch_stack.items[self.branch_stack.items.len - 1].inst_table; - try table.ensureUnusedCapacity(self.gpa, additional_count); +fn ensureProcessDeathCapacity(func: *Func, additional_count: usize) !void { + const table = &func.branch_stack.items[func.branch_stack.items.len - 1].inst_table; + try table.ensureUnusedCapacity(func.gpa, additional_count); } -fn memSize(self: *Self, ty: Type) Memory.Size { - const mod = self.bin_file.comp.module.?; +fn memSize(func: *Func, ty: Type) Memory.Size { + const mod = func.bin_file.comp.module.?; return switch (ty.zigTypeTag(mod)) { - .Float => Memory.Size.fromBitSize(ty.floatBits(self.target.*)), + .Float => Memory.Size.fromBitSize(ty.floatBits(func.target.*)), else => Memory.Size.fromByteSize(ty.abiSize(mod)), }; } -fn splitType(self: *Self, ty: Type) ![2]Type { - const zcu = self.bin_file.comp.module.?; +fn splitType(func: *Func, ty: Type) ![2]Type { + const zcu = func.bin_file.comp.module.?; const classes = mem.sliceTo(&abi.classifySystem(ty, zcu), .none); var parts: [2]Type = undefined; if (classes.len == 2) for (&parts, classes, 0..) 
|*part, class, part_i| { @@ -1524,186 +1697,317 @@ fn splitType(self: *Self, ty: Type) ![2]Type { }, else => unreachable, }, - else => return self.fail("TODO: splitType class {}", .{class}), + else => return func.fail("TODO: splitType class {}", .{class}), }; } else if (parts[0].abiSize(zcu) + parts[1].abiSize(zcu) == ty.abiSize(zcu)) return parts; - return self.fail("TODO implement splitType for {}", .{ty.fmt(zcu)}); + return func.fail("TODO implement splitType for {}", .{ty.fmt(zcu)}); } -fn symbolIndex(self: *Self) !u32 { - const zcu = self.bin_file.comp.module.?; - const decl_index = zcu.funcOwnerDeclIndex(self.func_index); - return switch (self.bin_file.tag) { +/// Truncates the value in the register in place. +/// Clobbers any remaining bits. +fn truncateRegister(func: *Func, ty: Type, reg: Register) !void { + const mod = func.bin_file.comp.module.?; + const int_info = if (ty.isAbiInt(mod)) ty.intInfo(mod) else std.builtin.Type.Int{ + .signedness = .unsigned, + .bits = @intCast(ty.bitSize(mod)), + }; + const shift = math.cast(u6, 64 - int_info.bits % 64) orelse return; + switch (int_info.signedness) { + .signed => { + _ = try func.addInst(.{ + .tag = .slli, + .ops = .rri, + .data = .{ + .i_type = .{ + .rd = reg, + .rs1 = reg, + .imm12 = Immediate.u(shift), + }, + }, + }); + _ = try func.addInst(.{ + .tag = .srai, + .ops = .rri, + .data = .{ + .i_type = .{ + .rd = reg, + .rs1 = reg, + .imm12 = Immediate.u(shift), + }, + }, + }); + }, + .unsigned => { + const mask = ~@as(u64, 0) >> shift; + if (mask < 256) { + _ = try func.addInst(.{ + .tag = .andi, + .ops = .rri, + .data = .{ + .i_type = .{ + .rd = reg, + .rs1 = reg, + .imm12 = Immediate.u(@intCast(mask)), + }, + }, + }); + } else { + _ = try func.addInst(.{ + .tag = .slli, + .ops = .rri, + .data = .{ + .i_type = .{ + .rd = reg, + .rs1 = reg, + .imm12 = Immediate.u(shift), + }, + }, + }); + _ = try func.addInst(.{ + .tag = .srli, + .ops = .rri, + .data = .{ + .i_type = .{ + .rd = reg, + .rs1 = reg, + .imm12 = Immediate.u(shift), + }, + }, + }); + } + }, + } +} + +fn symbolIndex(func: *Func) !u32 { + const zcu = func.bin_file.comp.module.?; + const decl_index = zcu.funcOwnerDeclIndex(func.func_index); + return switch (func.bin_file.tag) { .elf => blk: { - const elf_file = self.bin_file.cast(link.File.Elf).?; + const elf_file = func.bin_file.cast(link.File.Elf).?; const atom_index = try elf_file.zigObjectPtr().?.getOrCreateMetadataForDecl(elf_file, decl_index); break :blk atom_index; }, - else => return self.fail("TODO genSetReg load_symbol for {s}", .{@tagName(self.bin_file.tag)}), + else => return func.fail("TODO symbolIndex {s}", .{@tagName(func.bin_file.tag)}), }; } -fn allocFrameIndex(self: *Self, alloc: FrameAlloc) !FrameIndex { - const frame_allocs_slice = self.frame_allocs.slice(); +fn allocFrameIndex(func: *Func, alloc: FrameAlloc) !FrameIndex { + const frame_allocs_slice = func.frame_allocs.slice(); const frame_size = frame_allocs_slice.items(.abi_size); const frame_align = frame_allocs_slice.items(.abi_align); const stack_frame_align = &frame_align[@intFromEnum(FrameIndex.stack_frame)]; stack_frame_align.* = stack_frame_align.max(alloc.abi_align); - for (self.free_frame_indices.keys(), 0..) |frame_index, free_i| { + for (func.free_frame_indices.keys(), 0..) 
|frame_index, free_i| { const abi_size = frame_size[@intFromEnum(frame_index)]; if (abi_size != alloc.abi_size) continue; const abi_align = &frame_align[@intFromEnum(frame_index)]; abi_align.* = abi_align.max(alloc.abi_align); - _ = self.free_frame_indices.swapRemoveAt(free_i); + _ = func.free_frame_indices.swapRemoveAt(free_i); return frame_index; } - const frame_index: FrameIndex = @enumFromInt(self.frame_allocs.len); - try self.frame_allocs.append(self.gpa, alloc); + const frame_index: FrameIndex = @enumFromInt(func.frame_allocs.len); + try func.frame_allocs.append(func.gpa, alloc); log.debug("allocated frame {}", .{frame_index}); return frame_index; } /// Use a pointer instruction as the basis for allocating stack memory. -fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !FrameIndex { - const zcu = self.bin_file.comp.module.?; - const ptr_ty = self.typeOfIndex(inst); +fn allocMemPtr(func: *Func, inst: Air.Inst.Index) !FrameIndex { + const zcu = func.bin_file.comp.module.?; + const ptr_ty = func.typeOfIndex(inst); const val_ty = ptr_ty.childType(zcu); - return self.allocFrameIndex(FrameAlloc.init(.{ + return func.allocFrameIndex(FrameAlloc.init(.{ .size = math.cast(u32, val_ty.abiSize(zcu)) orelse { - return self.fail("type '{}' too big to fit into stack frame", .{val_ty.fmt(zcu)}); + return func.fail("type '{}' too big to fit into stack frame", .{val_ty.fmt(zcu)}); }, .alignment = ptr_ty.ptrAlignment(zcu).max(.@"1"), })); } -fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue { - const zcu = self.bin_file.comp.module.?; - const elem_ty = self.typeOfIndex(inst); +fn typeRegClass(func: *Func, ty: Type) abi.RegisterClass { + const zcu = func.bin_file.comp.module.?; + return switch (ty.zigTypeTag(zcu)) { + .Float => .float, + .Vector => @panic("TODO: typeRegClass for Vectors"), + inline else => .int, + }; +} + +fn regGeneralClassForType(func: *Func, ty: Type) RegisterManager.RegisterBitSet { + const zcu = func.bin_file.comp.module.?; + return switch (ty.zigTypeTag(zcu)) { + .Float => abi.Registers.Float.general_purpose, + .Vector => @panic("TODO: regGeneralClassForType for Vectors"), + else => abi.Registers.Integer.general_purpose, + }; +} + +fn regTempClassForType(func: *Func, ty: Type) RegisterManager.RegisterBitSet { + const zcu = func.bin_file.comp.module.?; + return switch (ty.zigTypeTag(zcu)) { + .Float => abi.Registers.Float.temporary, + .Vector => @panic("TODO: regTempClassForType for Vectors"), + else => abi.Registers.Integer.temporary, + }; +} + +fn allocRegOrMem(func: *Func, elem_ty: Type, inst: ?Air.Inst.Index, reg_ok: bool) !MCValue { + const zcu = func.bin_file.comp.module.?; const abi_size = math.cast(u32, elem_ty.abiSize(zcu)) orelse { - return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(zcu)}); + return func.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(zcu)}); }; - if (reg_ok) { - // Make sure the type can fit in a register before we try to allocate one. 
- const ptr_bits = self.target.ptrBitWidth(); - const ptr_bytes: u64 = @divExact(ptr_bits, 8); - if (abi_size <= ptr_bytes) { - if (self.register_manager.tryAllocReg(inst, gp)) |reg| { - return .{ .register = reg }; - } + const min_size: u32 = switch (elem_ty.zigTypeTag(zcu)) { + .Float => 4, + .Vector => @panic("allocRegOrMem Vector"), + else => 8, + }; + + if (reg_ok and abi_size <= min_size) { + if (func.register_manager.tryAllocReg(inst, func.regGeneralClassForType(elem_ty))) |reg| { + return .{ .register = reg }; } } - const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(elem_ty, zcu)); + const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(elem_ty, zcu)); return .{ .load_frame = .{ .index = frame_index } }; } /// Allocates a register from the general purpose set and returns the Register and the Lock. /// -/// Up to the user to unlock the register later. -fn allocReg(self: *Self) !struct { Register, RegisterLock } { - const reg = try self.register_manager.allocReg(null, gp); - const lock = self.register_manager.lockRegAssumeUnused(reg); +/// Up to the caller to unlock the register later. +fn allocReg(func: *Func, reg_class: abi.RegisterClass) !struct { Register, RegisterLock } { + if (reg_class == .float and !func.hasFeature(.f)) + std.debug.panic("allocReg class == float where F isn't enabled", .{}); + + const class = switch (reg_class) { + .int => abi.Registers.Integer.general_purpose, + .float => abi.Registers.Float.general_purpose, + }; + + const reg = try func.register_manager.allocReg(null, class); + const lock = func.register_manager.lockRegAssumeUnused(reg); return .{ reg, lock }; } -fn elemOffset(self: *Self, index_ty: Type, index: MCValue, elem_size: u64) !Register { +/// Similar to `allocReg` but will copy the MCValue into the Register unless `operand` is already +/// a register, in which case it will return a possible lock to that register. +fn promoteReg(func: *Func, ty: Type, operand: MCValue) !struct { Register, ?RegisterLock } { + if (operand == .register) { + const op_reg = operand.register; + return .{ op_reg, func.register_manager.lockReg(operand.register) }; + } + + const reg, const lock = try func.allocReg(func.typeRegClass(ty)); + try func.genSetReg(ty, reg, operand); + return .{ reg, lock }; +} + +fn elemOffset(func: *Func, index_ty: Type, index: MCValue, elem_size: u64) !Register { const reg: Register = blk: { switch (index) { .immediate => |imm| { // Optimisation: if index MCValue is an immediate, we can multiply in `comptime` // and set the register directly to the scaled offset as an immediate. 
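                 // For example, with `imm = 3` and `elem_size = 4` (indexing a u32 array), this
                 // branch folds to a direct `.{ .immediate = 12 }` instead of the runtime multiply below.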
-                const reg = try self.register_manager.allocReg(null, gp);
-                try self.genSetReg(index_ty, reg, .{ .immediate = imm * elem_size });
+                const reg = try func.register_manager.allocReg(null, func.regGeneralClassForType(index_ty));
+                try func.genSetReg(index_ty, reg, .{ .immediate = imm * elem_size });
                 break :blk reg;
             },
             else => {
-                const reg = try self.copyToTmpRegister(index_ty, index);
-                const lock = self.register_manager.lockRegAssumeUnused(reg);
-                defer self.register_manager.unlockReg(lock);
+                const reg = try func.copyToTmpRegister(index_ty, index);
+                const lock = func.register_manager.lockRegAssumeUnused(reg);
+                defer func.register_manager.unlockReg(lock);

-                const result = try self.binOp(
+                const result_reg, const result_lock = try func.allocReg(.int);
+                defer func.register_manager.unlockReg(result_lock);
+
+                try func.genBinOp(
                     .mul,
                     .{ .register = reg },
                     index_ty,
                     .{ .immediate = elem_size },
                     index_ty,
+                    result_reg,
                 );
-                break :blk result.register;
+
+                break :blk result_reg;
             },
         }
     };
     return reg;
 }

-pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void {
-    const tracking = self.inst_tracking.getPtr(inst) orelse return;
+pub fn spillInstruction(func: *Func, reg: Register, inst: Air.Inst.Index) !void {
+    const tracking = func.inst_tracking.getPtr(inst) orelse return;
     for (tracking.getRegs()) |tracked_reg| {
         if (tracked_reg.id() == reg.id()) break;
     } else unreachable; // spilled reg not tracked with spilled instruction
-    try tracking.spill(self, inst);
-    try tracking.trackSpill(self, inst);
+    try tracking.spill(func, inst);
+    try tracking.trackSpill(func, inst);
 }

 /// Copies a value to a register without tracking the register. The register is not considered
 /// allocated. A second call to `copyToTmpRegister` may return the same register.
 /// This can have a side effect of spilling instructions to the stack to free up a register.
-fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register {
-    const reg = try self.register_manager.allocReg(null, tp);
-    try self.genSetReg(ty, reg, mcv);
+fn copyToTmpRegister(func: *Func, ty: Type, mcv: MCValue) !Register {
+    log.debug("copyToTmpRegister ty: {}", .{ty.fmt(func.bin_file.comp.module.?)});
+    const reg = try func.register_manager.allocReg(null, func.regTempClassForType(ty));
+    try func.genSetReg(ty, reg, mcv);
     return reg;
 }

 /// Allocates a new register and copies `mcv` into it.
 /// `reg_owner` is the instruction that gets associated with the register in the register table.
 /// This can have a side effect of spilling instructions to the stack to free up a register.
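 /// Unlike `copyToTmpRegister`, the register allocated here is tracked: it stays associated
 /// with `reg_owner` in the register table and will not be handed out again behind its back.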
-fn copyToNewRegister(self: *Self, reg_owner: Air.Inst.Index, mcv: MCValue) !MCValue { - const reg = try self.register_manager.allocReg(reg_owner, gp); - try self.genSetReg(self.typeOfIndex(reg_owner), reg, mcv); +fn copyToNewRegister(func: *Func, reg_owner: Air.Inst.Index, mcv: MCValue) !MCValue { + const ty = func.typeOfIndex(reg_owner); + const reg = try func.register_manager.allocReg(reg_owner, func.regGeneralClassForType(ty)); + try func.genSetReg(func.typeOfIndex(reg_owner), reg, mcv); return MCValue{ .register = reg }; } -fn airAlloc(self: *Self, inst: Air.Inst.Index) !void { - const result = MCValue{ .lea_frame = .{ .index = try self.allocMemPtr(inst) } }; - return self.finishAir(inst, result, .{ .none, .none, .none }); +fn airAlloc(func: *Func, inst: Air.Inst.Index) !void { + const result = MCValue{ .lea_frame = .{ .index = try func.allocMemPtr(inst) } }; + return func.finishAir(inst, result, .{ .none, .none, .none }); } -fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void { - const result: MCValue = switch (self.ret_mcv.long) { - .none => .{ .lea_frame = .{ .index = try self.allocMemPtr(inst) } }, +fn airRetPtr(func: *Func, inst: Air.Inst.Index) !void { + const result: MCValue = switch (func.ret_mcv.long) { + .none => .{ .lea_frame = .{ .index = try func.allocMemPtr(inst) } }, .load_frame => .{ .register_offset = .{ - .reg = (try self.copyToNewRegister( + .reg = (try func.copyToNewRegister( inst, - self.ret_mcv.long, + func.ret_mcv.long, )).register, - .off = self.ret_mcv.short.indirect.off, + .off = func.ret_mcv.short.indirect.off, } }, - else => |t| return self.fail("TODO: airRetPtr {s}", .{@tagName(t)}), + else => |t| return func.fail("TODO: airRetPtr {s}", .{@tagName(t)}), }; - return self.finishAir(inst, result, .{ .none, .none, .none }); + return func.finishAir(inst, result, .{ .none, .none, .none }); } -fn airFptrunc(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement airFptrunc for {}", .{self.target.cpu.arch}); - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); +fn airFptrunc(func: *Func, inst: Air.Inst.Index) !void { + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement airFptrunc for {}", .{func.target.cpu.arch}); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airFpext(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement airFpext for {}", .{self.target.cpu.arch}); - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); +fn airFpext(func: *Func, inst: Air.Inst.Index) !void { + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement airFpext for {}", .{func.target.cpu.arch}); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { - const zcu = self.bin_file.comp.module.?; - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const src_ty = self.typeOf(ty_op.operand); - const dst_ty = 
self.typeOfIndex(inst); +fn airIntCast(func: *Func, inst: Air.Inst.Index) !void { + const zcu = func.bin_file.comp.module.?; + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const src_ty = func.typeOf(ty_op.operand); + const dst_ty = func.typeOfIndex(inst); const result: MCValue = result: { const src_int_info = src_ty.intInfo(zcu); @@ -1711,20 +2015,20 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { const min_ty = if (dst_int_info.bits < src_int_info.bits) dst_ty else src_ty; - const src_mcv = try self.resolveInst(ty_op.operand); + const src_mcv = try func.resolveInst(ty_op.operand); const src_storage_bits: u16 = switch (src_mcv) { .register => 64, .load_frame => src_int_info.bits, - else => return self.fail("airIntCast from {s}", .{@tagName(src_mcv)}), + else => return func.fail("airIntCast from {s}", .{@tagName(src_mcv)}), }; const dst_mcv = if (dst_int_info.bits <= src_storage_bits and math.divCeil(u16, dst_int_info.bits, 64) catch unreachable == math.divCeil(u32, src_storage_bits, 64) catch unreachable and - self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) src_mcv else dst: { - const dst_mcv = try self.allocRegOrMem(inst, true); - try self.genCopy(min_ty, dst_mcv, src_mcv); + func.reuseOperand(inst, ty_op.operand, 0, src_mcv)) src_mcv else dst: { + const dst_mcv = try func.allocRegOrMem(dst_ty, inst, true); + try func.genCopy(min_ty, dst_mcv, src_mcv); break :dst dst_mcv; }; @@ -1735,53 +2039,53 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { break :result null; // TODO break :result dst_mcv; - } orelse return self.fail("TODO implement airIntCast from {} to {}", .{ + } orelse return func.fail("TODO: implement airIntCast from {} to {}", .{ src_ty.fmt(zcu), dst_ty.fmt(zcu), }); - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - if (self.liveness.isUnused(inst)) - return self.finishAir(inst, .unreach, .{ ty_op.operand, .none, .none }); +fn airTrunc(func: *Func, inst: Air.Inst.Index) !void { + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + if (func.liveness.isUnused(inst)) + return func.finishAir(inst, .unreach, .{ ty_op.operand, .none, .none }); - const operand = try self.resolveInst(ty_op.operand); + const operand = try func.resolveInst(ty_op.operand); _ = operand; - return self.fail("TODO implement trunc for {}", .{self.target.cpu.arch}); - // return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); + return func.fail("TODO implement trunc for {}", .{func.target.cpu.arch}); + // return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airIntFromBool(self: *Self, inst: Air.Inst.Index) !void { - const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const operand = try self.resolveInst(un_op); - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else operand; - return self.finishAir(inst, result, .{ un_op, .none, .none }); +fn airIntFromBool(func: *Func, inst: Air.Inst.Index) !void { + const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; + const operand = try func.resolveInst(un_op); + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else operand; + return func.finishAir(inst, result, .{ un_op, .none, .none }); } -fn airNot(self: *Self, inst: Air.Inst.Index) 
!void { - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { - const zcu = self.bin_file.comp.module.?; +fn airNot(func: *Func, inst: Air.Inst.Index) !void { + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { + const zcu = func.bin_file.comp.module.?; - const operand = try self.resolveInst(ty_op.operand); - const ty = self.typeOf(ty_op.operand); + const operand = try func.resolveInst(ty_op.operand); + const ty = func.typeOf(ty_op.operand); switch (ty.zigTypeTag(zcu)) { .Bool => { const operand_reg = blk: { if (operand == .register) break :blk operand.register; - break :blk try self.copyToTmpRegister(ty, operand); + break :blk try func.copyToTmpRegister(ty, operand); }; const dst_reg: Register = - if (self.reuseOperand(inst, ty_op.operand, 0, operand) and operand == .register) + if (func.reuseOperand(inst, ty_op.operand, 0, operand) and operand == .register) operand.register else - try self.register_manager.allocReg(inst, gp); + (try func.allocRegOrMem(func.typeOfIndex(inst), inst, true)).register; - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = .pseudo, .ops = .pseudo_not, .data = .{ @@ -1794,718 +2098,906 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void { break :result .{ .register = dst_reg }; }, - .Int => return self.fail("TODO: airNot ints", .{}), + .Int => return func.fail("TODO: airNot ints", .{}), else => unreachable, } }; - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airMinMax( - self: *Self, - inst: Air.Inst.Index, - comptime tag: enum { - max, - min, - }, -) !void { - const zcu = self.bin_file.comp.module.?; - const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; +fn airSlice(func: *Func, inst: Air.Inst.Index) !void { + const zcu = func.bin_file.comp.module.?; + const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; + const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { - const lhs = try self.resolveInst(bin_op.lhs); - const rhs = try self.resolveInst(bin_op.rhs); - const lhs_ty = self.typeOf(bin_op.lhs); - const rhs_ty = self.typeOf(bin_op.rhs); + const slice_ty = func.typeOfIndex(inst); + const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(slice_ty, zcu)); - const int_info = lhs_ty.intInfo(zcu); + const ptr_ty = func.typeOf(bin_op.lhs); + try func.genSetMem(.{ .frame = frame_index }, 0, ptr_ty, .{ .air_ref = bin_op.lhs }); - if (int_info.bits > 64) return self.fail("TODO: > 64 bit @min", .{}); + const len_ty = func.typeOf(bin_op.rhs); + try func.genSetMem( + .{ .frame = frame_index }, + @intCast(ptr_ty.abiSize(zcu)), + len_ty, + .{ .air_ref = bin_op.rhs }, + ); - const lhs_reg, const lhs_lock = blk: { - if (lhs == .register) break :blk .{ lhs.register, null }; - - const lhs_reg, const lhs_lock = try self.allocReg(); - try self.genSetReg(lhs_ty, lhs_reg, lhs); - break :blk .{ lhs_reg, lhs_lock }; - }; - defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock); - - const rhs_reg, const rhs_lock = blk: { - if (rhs == .register) break :blk .{ rhs.register, null }; - - const rhs_reg, const rhs_lock = try self.allocReg(); - try self.genSetReg(rhs_ty, rhs_reg, rhs); - break :blk .{ rhs_reg, 
rhs_lock }; - }; - defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock); - - const mask_reg, const mask_lock = try self.allocReg(); - defer self.register_manager.unlockReg(mask_lock); - - const result_reg, const result_lock = try self.allocReg(); - defer self.register_manager.unlockReg(result_lock); - - _ = try self.addInst(.{ - .tag = if (int_info.signedness == .unsigned) .sltu else .slt, - .ops = .rrr, - .data = .{ .r_type = .{ - .rd = mask_reg, - .rs1 = lhs_reg, - .rs2 = rhs_reg, - } }, - }); - - _ = try self.addInst(.{ - .tag = .sub, - .ops = .rrr, - .data = .{ .r_type = .{ - .rd = mask_reg, - .rs1 = .zero, - .rs2 = mask_reg, - } }, - }); - - _ = try self.addInst(.{ - .tag = .xor, - .ops = .rrr, - .data = .{ .r_type = .{ - .rd = result_reg, - .rs1 = lhs_reg, - .rs2 = rhs_reg, - } }, - }); - - _ = try self.addInst(.{ - .tag = .@"and", - .ops = .rrr, - .data = .{ .r_type = .{ - .rd = mask_reg, - .rs1 = result_reg, - .rs2 = mask_reg, - } }, - }); - - _ = try self.addInst(.{ - .tag = .xor, - .ops = .rrr, - .data = .{ .r_type = .{ - .rd = result_reg, - .rs1 = if (tag == .min) rhs_reg else lhs_reg, - .rs2 = mask_reg, - } }, - }); - - break :result .{ .register = result_reg }; - }; - return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); + const result = MCValue{ .load_frame = .{ .index = frame_index } }; + return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } -fn airSlice(self: *Self, inst: Air.Inst.Index) !void { - const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; - const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement slice for {}", .{self.target.cpu.arch}); - return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); +fn airBinOp(func: *Func, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { + const zcu = func.bin_file.comp.module.?; + const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; + const dst_mcv = try func.binOp(inst, tag, bin_op.lhs, bin_op.rhs); + + const dst_ty = func.typeOfIndex(inst); + if (dst_ty.isAbiInt(zcu)) { + const abi_size: u32 = @intCast(dst_ty.abiSize(zcu)); + const bit_size: u32 = @intCast(dst_ty.bitSize(zcu)); + if (abi_size * 8 > bit_size) { + const dst_lock = switch (dst_mcv) { + .register => |dst_reg| func.register_manager.lockRegAssumeUnused(dst_reg), + else => null, + }; + defer if (dst_lock) |lock| func.register_manager.unlockReg(lock); + + if (dst_mcv.isRegister()) { + try func.truncateRegister(dst_ty, dst_mcv.getReg().?); + } else { + const tmp_reg, const tmp_lock = try func.allocReg(.int); + defer func.register_manager.unlockReg(tmp_lock); + + const hi_ty = try zcu.intType(.unsigned, @intCast((dst_ty.bitSize(zcu) - 1) % 64 + 1)); + const hi_mcv = dst_mcv.address().offset(@intCast(bit_size / 64 * 8)).deref(); + try func.genSetReg(hi_ty, tmp_reg, hi_mcv); + try func.truncateRegister(dst_ty, tmp_reg); + try func.genCopy(hi_ty, hi_mcv, .{ .register = tmp_reg }); + } + } + } + + return func.finishAir(inst, dst_mcv, .{ bin_op.lhs, bin_op.rhs, .none }); } -fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { - const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const lhs = try self.resolveInst(bin_op.lhs); - const rhs = try self.resolveInst(bin_op.rhs); - const lhs_ty = self.typeOf(bin_op.lhs); - const rhs_ty = self.typeOf(bin_op.rhs); - - const result: MCValue = if 
(self.liveness.isUnused(inst)) .unreach else result: {
-        break :result try self.binOp(tag, lhs, lhs_ty, rhs, rhs_ty);
-    };
-    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
-}
-
-/// For all your binary operation needs, this function will generate
-/// the corresponding Mir instruction(s). Returns the location of the
-/// result.
-///
-/// If the binary operation itself happens to be an Air instruction,
-/// pass the corresponding index in the inst parameter. That helps
-/// this function do stuff like reusing operands.
-///
-/// This function does not do any lowering to Mir itself, but instead
-/// looks at the lhs and rhs and determines which kind of lowering
-/// would be best suitable and then delegates the lowering to other
-/// functions.
-///
-/// `maybe_inst` **needs** to be a bin_op, make sure of that.
 fn binOp(
-    self: *Self,
-    tag: Air.Inst.Tag,
-    lhs: MCValue,
-    lhs_ty: Type,
-    rhs: MCValue,
-    rhs_ty: Type,
-) InnerError!MCValue {
-    const zcu = self.bin_file.comp.module.?;
+    func: *Func,
+    maybe_inst: ?Air.Inst.Index,
+    air_tag: Air.Inst.Tag,
+    lhs_air: Air.Inst.Ref,
+    rhs_air: Air.Inst.Ref,
+) !MCValue {
+    _ = maybe_inst;
+    const zcu = func.bin_file.comp.module.?;
+    const lhs_ty = func.typeOf(lhs_air);
+    const rhs_ty = func.typeOf(rhs_air);

-    switch (tag) {
-        // Arithmetic operations on integers and floats
-        .add,
-        .sub,
-        .mul,
+    if (lhs_ty.isRuntimeFloat()) libcall: {
+        const float_bits = lhs_ty.floatBits(func.target.*);
+        const type_needs_libcall = switch (float_bits) {
+            16 => true,
+            32, 64 => false,
+            80, 128 => true,
+            else => unreachable,
+        };
+        switch (air_tag) {
+            .rem, .mod => {},
+            else => if (!type_needs_libcall) break :libcall,
+        }
+        return func.fail("binOp libcall runtime-float ops", .{});
+    }
+
+    if (lhs_ty.bitSize(zcu) > 64) return func.fail("TODO: binOp > 64 bits", .{});
+
+    const lhs_mcv = try func.resolveInst(lhs_air);
+    const rhs_mcv = try func.resolveInst(rhs_air);
+
+    const class_for_dst_ty: abi.RegisterClass = switch (air_tag) {
+        // comparisons always return an int register, no matter the input
         .cmp_eq,
         .cmp_neq,
-        .cmp_gt,
-        .cmp_gte,
         .cmp_lt,
         .cmp_lte,
+        .cmp_gt,
+        .cmp_gte,
+        => .int,
+
+        else => func.typeRegClass(lhs_ty),
+    };
+
+    const dst_reg, const dst_lock = try func.allocReg(class_for_dst_ty);
+    defer func.register_manager.unlockReg(dst_lock);
+
+    try func.genBinOp(
+        air_tag,
+        lhs_mcv,
+        lhs_ty,
+        rhs_mcv,
+        rhs_ty,
+        dst_reg,
+    );
+
+    return .{ .register = dst_reg };
+}
+
+/// Does the same thing as binOp, but is meant to be used internally by the backend.
+///
+/// The `dst_reg` argument is meant to be caller-locked. Asserts that the binOp result
+/// fits into the register.
+///
+/// Assumes that the `dst_reg` class is correct.
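+///
+/// A typical call site looks like this sketch (it mirrors what `binOp` itself does above;
+/// the caller allocates and locks `dst_reg` first):
+///
+///     const dst_reg, const dst_lock = try func.allocReg(.int);
+///     defer func.register_manager.unlockReg(dst_lock);
+///     try func.genBinOp(.add, lhs_mcv, lhs_ty, rhs_mcv, rhs_ty, dst_reg);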
+fn genBinOp( + func: *Func, + tag: Air.Inst.Tag, + lhs_mcv: MCValue, + lhs_ty: Type, + rhs_mcv: MCValue, + rhs_ty: Type, + dst_reg: Register, +) !void { + const zcu = func.bin_file.comp.module.?; + const bit_size = lhs_ty.bitSize(zcu); + assert(bit_size <= 64); + + const is_unsigned = lhs_ty.isUnsignedInt(zcu); + + const lhs_reg, const maybe_lhs_lock = try func.promoteReg(lhs_ty, lhs_mcv); + const rhs_reg, const maybe_rhs_lock = try func.promoteReg(rhs_ty, rhs_mcv); + + defer if (maybe_lhs_lock) |lock| func.register_manager.unlockReg(lock); + defer if (maybe_rhs_lock) |lock| func.register_manager.unlockReg(lock); + + switch (tag) { + .add, + .add_wrap, + .sub, + .sub_wrap, + .mul, + .mul_wrap, => { + if (!math.isPowerOfTwo(bit_size)) + return func.fail( + "TODO: genBinOp {s} non-pow 2, found {}", + .{ @tagName(tag), bit_size }, + ); + switch (lhs_ty.zigTypeTag(zcu)) { - .Float => return self.fail("TODO binary operations on floats", .{}), - .Vector => return self.fail("TODO binary operations on vectors", .{}), .Int => { - assert(lhs_ty.eql(rhs_ty, zcu)); - const int_info = lhs_ty.intInfo(zcu); - if (int_info.bits <= 64) { - return self.binOpRegister(tag, lhs, lhs_ty, rhs, rhs_ty); - } else { - return self.fail("TODO binary operations on int with bits > 64", .{}); + const mir_tag: Mir.Inst.Tag = switch (tag) { + .add, .add_wrap => switch (bit_size) { + 8, 16, 64 => .add, + 32 => .addw, + else => unreachable, + }, + .sub, .sub_wrap => switch (bit_size) { + 8, 16, 32 => .subw, + 64 => .sub, + else => unreachable, + }, + .mul, .mul_wrap => switch (bit_size) { + 8, 16, 64 => .mul, + 32 => .mulw, + else => unreachable, + }, + else => unreachable, + }; + + _ = try func.addInst(.{ + .tag = mir_tag, + .ops = .rrr, + .data = .{ + .r_type = .{ + .rd = dst_reg, + .rs1 = lhs_reg, + .rs2 = rhs_reg, + }, + }, + }); + + // truncate when the instruction is larger than the bit size. 
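+                    // (add/sub/mul write all 64 bits of rd, so an 8/16-bit result keeps stale high
+                    // bits until truncateRegister clears them; the W-suffix ops already sign-extend
+                    // their 32-bit result, and 64-bit results need no fixup.)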
+ switch (bit_size) { + 8, 16 => try func.truncateRegister(lhs_ty, dst_reg), + 32 => {}, // addw/subw affects the first 32-bits + 64 => {}, // add/sub affects the entire register + else => unreachable, } }, - else => |x| return self.fail("TOOD: binOp {s}", .{@tagName(x)}), + .Float => { + const mir_tag: Mir.Inst.Tag = switch (tag) { + .add => switch (bit_size) { + 32 => .fadds, + 64 => .faddd, + else => unreachable, + }, + .sub => switch (bit_size) { + 32 => .fsubs, + 64 => .fsubd, + else => unreachable, + }, + .mul => switch (bit_size) { + 32 => .fmuls, + 64 => .fmuld, + else => unreachable, + }, + else => unreachable, + }; + + _ = try func.addInst(.{ + .tag = mir_tag, + .ops = .rrr, + .data = .{ + .r_type = .{ + .rd = dst_reg, + .rs1 = lhs_reg, + .rs2 = rhs_reg, + }, + }, + }); + }, + else => unreachable, } }, .ptr_add, .ptr_sub, => { - switch (lhs_ty.zigTypeTag(zcu)) { - .Pointer => { - const ptr_ty = lhs_ty; - const elem_ty = switch (ptr_ty.ptrSize(zcu)) { - .One => ptr_ty.childType(zcu).childType(zcu), // ptr to array, so get array element type - else => ptr_ty.childType(zcu), - }; - const elem_size = elem_ty.abiSize(zcu); + const tmp_reg = try func.copyToTmpRegister(rhs_ty, .{ .register = rhs_reg }); + const tmp_mcv = MCValue{ .register = tmp_reg }; + const tmp_lock = func.register_manager.lockRegAssumeUnused(tmp_reg); + defer func.register_manager.unlockReg(tmp_lock); - if (elem_size == 1) { - const base_tag: Air.Inst.Tag = switch (tag) { - .ptr_add => .add, - .ptr_sub => .sub, - else => unreachable, - }; + // RISC-V has no immediate mul, so we copy the size to a temporary register + const elem_size = lhs_ty.elemType2(zcu).abiSize(zcu); + const elem_size_reg = try func.copyToTmpRegister(Type.usize, .{ .immediate = elem_size }); - return try self.binOpRegister(base_tag, lhs, lhs_ty, rhs, rhs_ty); - } else { - const offset = try self.binOp( - .mul, - rhs, - Type.usize, - .{ .immediate = elem_size }, - Type.usize, - ); + try func.genBinOp( + .mul, + tmp_mcv, + rhs_ty, + .{ .register = elem_size_reg }, + Type.usize, + tmp_reg, + ); - const addr = try self.binOp( - tag, - lhs, - Type.manyptr_u8, - offset, - Type.usize, - ); - return addr; - } + try func.genBinOp( + switch (tag) { + .ptr_add => .add, + .ptr_sub => .sub, + else => unreachable, }, - else => unreachable, + lhs_mcv, + Type.usize, // we know it's a pointer, so it'll be usize. + tmp_mcv, + Type.usize, + dst_reg, + ); + }, + + .bit_and, + .bit_or, + .bool_and, + .bool_or, + => { + _ = try func.addInst(.{ + .tag = switch (tag) { + .bit_and, .bool_and => .@"and", + .bit_or, .bool_or => .@"or", + else => unreachable, + }, + .ops = .rrr, + .data = .{ + .r_type = .{ + .rd = dst_reg, + .rs1 = lhs_reg, + .rs2 = rhs_reg, + }, + }, + }); + + switch (tag) { + .bool_and, + .bool_or, + => try func.truncateRegister(Type.bool, dst_reg), + else => {}, } }, - // These instructions have unsymteric bit sizes on RHS and LHS. 
- .shr, - .shl, + .div_trunc, => { - switch (lhs_ty.zigTypeTag(zcu)) { - .Float => return self.fail("TODO binary operations on floats", .{}), - .Vector => return self.fail("TODO binary operations on vectors", .{}), - .Int => { - const int_info = lhs_ty.intInfo(zcu); - if (int_info.bits <= 64) { - return self.binOpRegister(tag, lhs, lhs_ty, rhs, rhs_ty); - } else { - return self.fail("TODO binary operations on int with bits > 64", .{}); - } + if (!math.isPowerOfTwo(bit_size)) + return func.fail( + "TODO: genBinOp {s} non-pow 2, found {}", + .{ @tagName(tag), bit_size }, + ); + + const mir_tag: Mir.Inst.Tag = switch (tag) { + .div_trunc => switch (bit_size) { + 8, 16, 32 => if (is_unsigned) .divuw else .divw, + 64 => if (is_unsigned) .divu else .div, + else => unreachable, }, else => unreachable, - } - }, - else => return self.fail("TODO binOp {}", .{tag}), - } -} -/// Don't call this function directly. Use binOp instead. -/// -/// Calling this function signals an intention to generate a Mir -/// instruction of the form -/// -/// op dest, lhs, rhs -/// -/// Asserts that generating an instruction of that form is possible. -fn binOpRegister( - self: *Self, - tag: Air.Inst.Tag, - lhs: MCValue, - lhs_ty: Type, - rhs: MCValue, - rhs_ty: Type, -) !MCValue { - const lhs_reg, const lhs_lock = blk: { - if (lhs == .register) break :blk .{ lhs.register, null }; + }; - const lhs_reg, const lhs_lock = try self.allocReg(); - try self.genSetReg(lhs_ty, lhs_reg, lhs); - break :blk .{ lhs_reg, lhs_lock }; - }; - defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock); - - const rhs_reg, const rhs_lock = blk: { - if (rhs == .register) break :blk .{ rhs.register, null }; - - const rhs_reg, const rhs_lock = try self.allocReg(); - try self.genSetReg(rhs_ty, rhs_reg, rhs); - break :blk .{ rhs_reg, rhs_lock }; - }; - defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock); - - const dest_reg, const dest_lock = try self.allocReg(); - defer self.register_manager.unlockReg(dest_lock); - - const mir_tag: Mir.Inst.Tag = switch (tag) { - .add => .add, - .sub => .sub, - .mul => .mul, - - .shl => .sllw, - .shr => .srlw, - - .cmp_eq, - .cmp_neq, - .cmp_gt, - .cmp_gte, - .cmp_lt, - .cmp_lte, - => .pseudo, - - else => return self.fail("TODO: binOpRegister {s}", .{@tagName(tag)}), - }; - - switch (mir_tag) { - .add, - .sub, - .mul, - .sllw, - .srlw, - => { - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = mir_tag, .ops = .rrr, .data = .{ .r_type = .{ - .rd = dest_reg, + .rd = dst_reg, .rs1 = lhs_reg, .rs2 = rhs_reg, }, }, }); + + if (!is_unsigned) { + // truncate when the instruction is larger than the bit size. 
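+                // (Only the signed quotient needs this: div/divw return it sign-extended, which can
+                // set the high bits for a narrow type. An unsigned quotient never exceeds its
+                // zero-extended dividend, so its high bits are already zero.)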
+                switch (bit_size) {
+                    8, 16 => try func.truncateRegister(lhs_ty, dst_reg),
+                    32 => {}, // divw affects the first 32-bits
+                    64 => {}, // div affects the entire register
+                    else => unreachable,
+                }
+            }
         },

-        .pseudo => {
-            const pseudo_op = switch (tag) {
-                .cmp_eq,
-                .cmp_neq,
-                .cmp_gt,
-                .cmp_gte,
-                .cmp_lt,
-                .cmp_lte,
-                => .pseudo_compare,
+        .shr,
+        .shr_exact,
+        .shl,
+        .shl_exact,
+        => {
+            if (!math.isPowerOfTwo(bit_size))
+                return func.fail(
+                    "TODO: genBinOp {s} non-pow 2, found {}",
+                    .{ @tagName(tag), bit_size },
+                );
+
+            // it's important that the shift amount is exact
+            try func.truncateRegister(rhs_ty, rhs_reg);
+
+            const mir_tag: Mir.Inst.Tag = switch (tag) {
+                .shl, .shl_exact => switch (bit_size) {
+                    8, 16, 64 => .sll,
+                    32 => .sllw,
+                    else => unreachable,
+                },
+                .shr, .shr_exact => switch (bit_size) {
+                    8, 16, 64 => .srl,
+                    32 => .srlw,
+                    else => unreachable,
+                },
                 else => unreachable,
             };

-            _ = try self.addInst(.{
+            _ = try func.addInst(.{
+                .tag = mir_tag,
+                .ops = .rrr,
+                .data = .{ .r_type = .{
+                    .rd = dst_reg,
+                    .rs1 = lhs_reg,
+                    .rs2 = rhs_reg,
+                } },
+            });
+
+            switch (bit_size) {
+                8, 16 => try func.truncateRegister(lhs_ty, dst_reg),
+                32 => {},
+                64 => {},
+                else => unreachable,
+            }
+        },
+
+        // TODO: move the isel logic out of lower and into here.
+        .cmp_eq,
+        .cmp_neq,
+        .cmp_lt,
+        .cmp_lte,
+        .cmp_gt,
+        .cmp_gte,
+        => {
+            _ = try func.addInst(.{
                 .tag = .pseudo,
-                .ops = pseudo_op,
+                .ops = .pseudo_compare,
                 .data = .{
                     .compare = .{
-                        .rd = dest_reg,
-                        .rs1 = lhs_reg,
-                        .rs2 = rhs_reg,
                         .op = switch (tag) {
                             .cmp_eq => .eq,
                             .cmp_neq => .neq,
-                            .cmp_gt => .gt,
-                            .cmp_gte => .gte,
                             .cmp_lt => .lt,
                             .cmp_lte => .lte,
+                            .cmp_gt => .gt,
+                            .cmp_gte => .gte,
                             else => unreachable,
                         },
+                        .rd = dst_reg,
+                        .rs1 = lhs_reg,
+                        .rs2 = rhs_reg,
+                        .ty = lhs_ty,
                     },
                 },
             });
         },
-        else => unreachable,

+        // A branchless @min/@max sequence.
+        //
+        // Assume that a0 and a1 are the lhs and rhs respectively.
+        // Also assume that a2 is the destination register.
+        //
+        // Algorithm:
+        //   slt s0, a0, a1
+        //   sub s0, zero, s0
+        //   xor a2, a0, a1
+        //   and s0, a2, s0
+        //   xor a2, a0, s0 # XORing with a0 gives @max; "xor a2, a1, s0" gives @min
+        //
+        // "slt s0, a0, a1" will set s0 to 1 if a0 is less than a1, and 0 otherwise.
+        //
+        // "sub s0, zero, s0" will set all the bits of s0 to 1 if it was 1, otherwise it'll remain at 0.
+        //
+        // "xor a2, a0, a1" stores the bitwise XOR of a0 and a1 in a2, effectively getting the difference between them.
+        //
+        // "and s0, a2, s0" masks that XOR with s0. If a0 < a1, s0 is -1 and keeps every bit of a2.
+        // If a0 >= a1, s0 is 0 and the mask is nullified.
+        //
+        // "xor a2, a0, s0" selects the result. If a0 < a1, s0 holds a0 ^ a1, so the XOR yields a1.
+        // If a0 >= a1, s0 is 0, leaving a2 as a0. Either way a2 ends up as @max; XORing a1 with s0
+        // instead (as the rs1 choice below does for .min) yields @min.
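+        //
+        // Worked example with illustrative values (unsigned, a0 = 3, a1 = 7, computing @max):
+        //   slt  s0, a0, a1    ->  s0 = 1          (3 < 7)
+        //   sub  s0, zero, s0  ->  s0 = all ones   (-1)
+        //   xor  a2, a0, a1    ->  a2 = 3 ^ 7 = 4
+        //   and  s0, a2, s0    ->  s0 = 4
+        //   xor  a2, a0, s0    ->  a2 = 3 ^ 4 = 7  (@max; "xor a2, a1, s0" gives 3, the @min)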
+ .min, .max => { + const int_info = lhs_ty.intInfo(zcu); + + const mask_reg, const mask_lock = try func.allocReg(.int); + defer func.register_manager.unlockReg(mask_lock); + + _ = try func.addInst(.{ + .tag = if (int_info.signedness == .unsigned) .sltu else .slt, + .ops = .rrr, + .data = .{ .r_type = .{ + .rd = mask_reg, + .rs1 = lhs_reg, + .rs2 = rhs_reg, + } }, + }); + + _ = try func.addInst(.{ + .tag = .sub, + .ops = .rrr, + .data = .{ .r_type = .{ + .rd = mask_reg, + .rs1 = .zero, + .rs2 = mask_reg, + } }, + }); + + _ = try func.addInst(.{ + .tag = .xor, + .ops = .rrr, + .data = .{ .r_type = .{ + .rd = dst_reg, + .rs1 = lhs_reg, + .rs2 = rhs_reg, + } }, + }); + + _ = try func.addInst(.{ + .tag = .@"and", + .ops = .rrr, + .data = .{ .r_type = .{ + .rd = mask_reg, + .rs1 = dst_reg, + .rs2 = mask_reg, + } }, + }); + + _ = try func.addInst(.{ + .tag = .xor, + .ops = .rrr, + .data = .{ .r_type = .{ + .rd = dst_reg, + .rs1 = if (tag == .min) rhs_reg else lhs_reg, + .rs2 = mask_reg, + } }, + }); + }, + else => return func.fail("TODO: genBinOp {}", .{tag}), } - - // generate the struct for OF checks - - return MCValue{ .register = dest_reg }; } -fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { - const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; - const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; - const lhs = try self.resolveInst(bin_op.lhs); - const rhs = try self.resolveInst(bin_op.rhs); - const lhs_ty = self.typeOf(bin_op.lhs); - const rhs_ty = self.typeOf(bin_op.rhs); - - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { - break :result try self.binOp(tag, lhs, lhs_ty, rhs, rhs_ty); - }; - return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); +fn airPtrArithmetic(func: *Func, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { + const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; + const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data; + const dst_mcv = try func.binOp(inst, tag, bin_op.lhs, bin_op.rhs); + return func.finishAir(inst, dst_mcv, .{ bin_op.lhs, bin_op.rhs, .none }); } -fn airAddWrap(self: *Self, inst: Air.Inst.Index) !void { - const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement addwrap for {}", .{self.target.cpu.arch}); - return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); -} +fn airAddWithOverflow(func: *Func, inst: Air.Inst.Index) !void { + const zcu = func.bin_file.comp.module.?; + const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; + const extra = func.air.extraData(Air.Bin, ty_pl.payload).data; -fn airAddSat(self: *Self, inst: Air.Inst.Index) !void { - const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement add_sat for {}", .{self.target.cpu.arch}); - return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); -} - -fn airSubWrap(self: *Self, inst: Air.Inst.Index) !void { - const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { - // RISCV arthemtic instructions already wrap, so this is simply a sub binOp with - // no overflow checks. 
- const lhs = try self.resolveInst(bin_op.lhs); - const rhs = try self.resolveInst(bin_op.rhs); - const lhs_ty = self.typeOf(bin_op.lhs); - const rhs_ty = self.typeOf(bin_op.rhs); - - break :result try self.binOp(.sub, lhs, lhs_ty, rhs, rhs_ty); - }; - return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); -} - -fn airSubSat(self: *Self, inst: Air.Inst.Index) !void { - const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement sub_sat for {}", .{self.target.cpu.arch}); - return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); -} - -fn airMul(self: *Self, inst: Air.Inst.Index) !void { - const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement mul for {}", .{self.target.cpu.arch}); - return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); -} - -fn airMulWrap(self: *Self, inst: Air.Inst.Index) !void { - const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement mulwrap for {}", .{self.target.cpu.arch}); - return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); -} - -fn airMulSat(self: *Self, inst: Air.Inst.Index) !void { - const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement mul_sat for {}", .{self.target.cpu.arch}); - return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); -} - -fn airAddWithOverflow(self: *Self, inst: Air.Inst.Index) !void { - const zcu = self.bin_file.comp.module.?; - const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; - const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; - - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { - const lhs = try self.resolveInst(extra.lhs); - const rhs = try self.resolveInst(extra.rhs); - const lhs_ty = self.typeOf(extra.lhs); - const rhs_ty = self.typeOf(extra.rhs); + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { + const lhs_ty = func.typeOf(extra.lhs); const int_info = lhs_ty.intInfo(zcu); - const tuple_ty = self.typeOfIndex(inst); - const result_mcv = try self.allocRegOrMem(inst, false); + const tuple_ty = func.typeOfIndex(inst); + const result_mcv = try func.allocRegOrMem(tuple_ty, inst, false); const offset = result_mcv.load_frame; if (int_info.bits >= 8 and math.isPowerOfTwo(int_info.bits)) { - const add_result = try self.binOp(.add, lhs, lhs_ty, rhs, rhs_ty); - const add_result_reg = try self.copyToTmpRegister(lhs_ty, add_result); - const add_result_reg_lock = self.register_manager.lockRegAssumeUnused(add_result_reg); - defer self.register_manager.unlockReg(add_result_reg_lock); + const add_result = try func.binOp(null, .add, extra.lhs, extra.rhs); + const add_result_reg = try func.copyToTmpRegister(lhs_ty, add_result); + const add_result_reg_lock = func.register_manager.lockRegAssumeUnused(add_result_reg); + defer func.register_manager.unlockReg(add_result_reg_lock); const shift_amount: u6 = @intCast(Type.usize.bitSize(zcu) - int_info.bits); - const shift_reg, const shift_lock = try self.allocReg(); - defer 
self.register_manager.unlockReg(shift_lock); + const shift_reg, const shift_lock = try func.allocReg(.int); + defer func.register_manager.unlockReg(shift_lock); - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = .slli, .ops = .rri, .data = .{ .i_type = .{ .rd = shift_reg, .rs1 = add_result_reg, - .imm12 = Immediate.s(shift_amount), + .imm12 = Immediate.u(shift_amount), }, }, }); - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = if (int_info.signedness == .unsigned) .srli else .srai, .ops = .rri, .data = .{ .i_type = .{ .rd = shift_reg, .rs1 = shift_reg, - .imm12 = Immediate.s(shift_amount), + .imm12 = Immediate.u(shift_amount), }, }, }); - const add_result_frame: FrameAddr = .{ - .index = offset.index, - .off = offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(0, zcu))), - }; - try self.genSetStack( + try func.genSetMem( + .{ .frame = offset.index }, + offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(0, zcu))), lhs_ty, - add_result_frame, add_result, ); - const overflow_mcv = try self.binOp( + const overflow_reg, const overflow_lock = try func.allocReg(.int); + defer func.register_manager.unlockReg(overflow_lock); + + try func.genBinOp( .cmp_neq, .{ .register = shift_reg }, lhs_ty, .{ .register = add_result_reg }, lhs_ty, + overflow_reg, ); - const overflow_frame: FrameAddr = .{ - .index = offset.index, - .off = offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(1, zcu))), - }; - try self.genSetStack( + try func.genSetMem( + .{ .frame = offset.index }, + offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(1, zcu))), Type.u1, - overflow_frame, - overflow_mcv, + .{ .register = overflow_reg }, ); break :result result_mcv; } else { - return self.fail("TODO: less than 8 bit or non-pow 2 addition", .{}); + return func.fail("TODO: less than 8 bit or non-pow 2 addition", .{}); } }; - return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none }); + return func.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none }); } -fn airSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { - _ = inst; - return self.fail("TODO implement airSubWithOverflow for {}", .{self.target.cpu.arch}); -} +fn airSubWithOverflow(func: *Func, inst: Air.Inst.Index) !void { + const zcu = func.bin_file.comp.module.?; + const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; + const extra = func.air.extraData(Air.Bin, ty_pl.payload).data; -fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { - //const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)]; - const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; - const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; - const zcu = self.bin_file.comp.module.?; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { - const lhs = try self.resolveInst(extra.lhs); - const rhs = try self.resolveInst(extra.rhs); - const lhs_ty = self.typeOf(extra.lhs); - const rhs_ty = self.typeOf(extra.rhs); + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { + const lhs = try func.resolveInst(extra.lhs); + const rhs = try func.resolveInst(extra.rhs); + const lhs_ty = func.typeOf(extra.lhs); + const rhs_ty = func.typeOf(extra.rhs); - switch (lhs_ty.zigTypeTag(zcu)) { - else => |x| return self.fail("TODO: airMulWithOverflow {s}", .{@tagName(x)}), - .Int => { - assert(lhs_ty.eql(rhs_ty, zcu)); - const int_info = lhs_ty.intInfo(zcu); + const int_info = lhs_ty.intInfo(zcu); + + if (!math.isPowerOfTwo(int_info.bits) or 
int_info.bits < 8) { + return func.fail("TODO: airSubWithOverflow non-power of 2 and less than 8 bits", .{}); + } + + if (int_info.bits > 64) { + return func.fail("TODO: airSubWithOverflow > 64 bits", .{}); + } + + const tuple_ty = func.typeOfIndex(inst); + const result_mcv = try func.allocRegOrMem(tuple_ty, inst, false); + const offset = result_mcv.load_frame; + + const dest_mcv = try func.binOp(null, .sub, extra.lhs, extra.rhs); + assert(dest_mcv == .register); + const dest_reg = dest_mcv.register; + + try func.genSetMem( + .{ .frame = offset.index }, + offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(0, zcu))), + lhs_ty, + .{ .register = dest_reg }, + ); + + const lhs_reg, const lhs_lock = try func.promoteReg(lhs_ty, lhs); + defer if (lhs_lock) |lock| func.register_manager.unlockReg(lock); + + const rhs_reg, const rhs_lock = try func.promoteReg(rhs_ty, rhs); + defer if (rhs_lock) |lock| func.register_manager.unlockReg(lock); + + const overflow_reg = try func.copyToTmpRegister(Type.usize, .{ .immediate = 0 }); + + const overflow_lock = func.register_manager.lockRegAssumeUnused(overflow_reg); + defer func.register_manager.unlockReg(overflow_lock); + + switch (int_info.signedness) { + .unsigned => { + _ = try func.addInst(.{ + .tag = .sltu, + .ops = .rrr, + .data = .{ .r_type = .{ + .rd = overflow_reg, + .rs1 = lhs_reg, + .rs2 = rhs_reg, + } }, + }); + + try func.genSetMem( + .{ .frame = offset.index }, + offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(1, zcu))), + Type.u1, + .{ .register = overflow_reg }, + ); + + break :result result_mcv; + }, + .signed => { switch (int_info.bits) { - 1...32 => { - if (self.hasFeature(.m)) { - const dest = try self.binOp(.mul, lhs, lhs_ty, rhs, rhs_ty); + 64 => { + _ = try func.addInst(.{ + .tag = .slt, + .ops = .rrr, + .data = .{ .r_type = .{ + .rd = overflow_reg, + .rs1 = overflow_reg, + .rs2 = rhs_reg, + } }, + }); - const add_result_lock = self.register_manager.lockRegAssumeUnused(dest.register); - defer self.register_manager.unlockReg(add_result_lock); + _ = try func.addInst(.{ + .tag = .slt, + .ops = .rrr, + .data = .{ .r_type = .{ + .rd = rhs_reg, + .rs1 = rhs_reg, + .rs2 = lhs_reg, + } }, + }); - const tuple_ty = self.typeOfIndex(inst); + _ = try func.addInst(.{ + .tag = .xor, + .ops = .rrr, + .data = .{ .r_type = .{ + .rd = lhs_reg, + .rs1 = overflow_reg, + .rs2 = rhs_reg, + } }, + }); - // TODO: optimization, set this to true. needs the other struct access stuff to support - // accessing registers. 
- const result_mcv = try self.allocRegOrMem(inst, false); + try func.genBinOp( + .cmp_neq, + .{ .register = overflow_reg }, + Type.usize, + .{ .register = rhs_reg }, + Type.usize, + overflow_reg, + ); - const result_off: i32 = @intCast(tuple_ty.structFieldOffset(0, zcu)); - const overflow_off: i32 = @intCast(tuple_ty.structFieldOffset(1, zcu)); + try func.genSetMem( + .{ .frame = offset.index }, + offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(1, zcu))), + Type.u1, + .{ .register = overflow_reg }, + ); - try self.genSetStack(lhs_ty, result_mcv.offset(result_off).load_frame, dest); - - if (int_info.bits >= 8 and math.isPowerOfTwo(int_info.bits)) { - if (int_info.signedness == .unsigned) { - switch (int_info.bits) { - 1...8 => { - const max_val = std.math.pow(u16, 2, int_info.bits) - 1; - - const overflow_reg, const overflow_lock = try self.allocReg(); - defer self.register_manager.unlockReg(overflow_lock); - - const add_reg, const add_lock = blk: { - if (dest == .register) break :blk .{ dest.register, null }; - - const add_reg, const add_lock = try self.allocReg(); - try self.genSetReg(lhs_ty, add_reg, dest); - break :blk .{ add_reg, add_lock }; - }; - defer if (add_lock) |lock| self.register_manager.unlockReg(lock); - - _ = try self.addInst(.{ - .tag = .andi, - .ops = .rri, - .data = .{ .i_type = .{ - .rd = overflow_reg, - .rs1 = add_reg, - .imm12 = Immediate.s(max_val), - } }, - }); - - const overflow_mcv = try self.binOp( - .cmp_neq, - .{ .register = overflow_reg }, - lhs_ty, - .{ .register = add_reg }, - lhs_ty, - ); - - try self.genSetStack( - lhs_ty, - result_mcv.offset(overflow_off).load_frame, - overflow_mcv, - ); - - break :result result_mcv; - }, - - else => return self.fail("TODO: airMulWithOverflow check for size {d}", .{int_info.bits}), - } - } else { - return self.fail("TODO: airMulWithOverflow calculate carry for signed addition", .{}); - } - } else { - return self.fail("TODO: airMulWithOverflow with < 8 bits or non-pow of 2", .{}); - } - } else { - return self.fail("TODO: emulate mul for targets without M feature", .{}); - } + break :result result_mcv; }, - else => return self.fail("TODO: airMulWithOverflow larger than 32-bit mul", .{}), + else => |int_bits| return func.fail("TODO: airSubWithOverflow signed {}", .{int_bits}), } }, } }; - return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none }); + return func.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none }); } -fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { - _ = inst; - return self.fail("TODO implement airShlWithOverflow for {}", .{self.target.cpu.arch}); -} +fn airMulWithOverflow(func: *Func, inst: Air.Inst.Index) !void { + const zcu = func.bin_file.comp.module.?; + const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; + const extra = func.air.extraData(Air.Bin, ty_pl.payload).data; -fn airDiv(self: *Self, inst: Air.Inst.Index) !void { - const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement div for {}", .{self.target.cpu.arch}); - return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); -} + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { + const lhs = try func.resolveInst(extra.lhs); + const rhs = try func.resolveInst(extra.rhs); + const lhs_ty = func.typeOf(extra.lhs); + const rhs_ty = func.typeOf(extra.rhs); -fn airRem(self: *Self, inst: Air.Inst.Index) !void { - const 
bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement rem for {}", .{self.target.cpu.arch}); - return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); -} + const tuple_ty = func.typeOfIndex(inst); -fn airMod(self: *Self, inst: Air.Inst.Index) !void { - const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement zcu for {}", .{self.target.cpu.arch}); - return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); -} + // genSetReg needs to support register_offset src_mcv for this to be true. + const result_mcv = try func.allocRegOrMem(tuple_ty, inst, false); -fn airBitAnd(self: *Self, inst: Air.Inst.Index) !void { - const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement bitwise and for {}", .{self.target.cpu.arch}); - return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); -} + const result_off: i32 = @intCast(tuple_ty.structFieldOffset(0, zcu)); + const overflow_off: i32 = @intCast(tuple_ty.structFieldOffset(1, zcu)); -fn airBitOr(self: *Self, inst: Air.Inst.Index) !void { - const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement bitwise or for {}", .{self.target.cpu.arch}); - return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); -} + const dest_reg, const dest_lock = try func.allocReg(.int); + defer func.register_manager.unlockReg(dest_lock); -fn airXor(self: *Self, inst: Air.Inst.Index) !void { - const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement xor for {}", .{self.target.cpu.arch}); - return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); -} + try func.genBinOp( + .mul, + lhs, + lhs_ty, + rhs, + rhs_ty, + dest_reg, + ); -fn airShl(self: *Self, inst: Air.Inst.Index) !void { - const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { - const lhs = try self.resolveInst(bin_op.lhs); - const rhs = try self.resolveInst(bin_op.rhs); - const lhs_ty = self.typeOf(bin_op.lhs); - const rhs_ty = self.typeOf(bin_op.rhs); + try func.genCopy( + lhs_ty, + result_mcv.offset(result_off), + .{ .register = dest_reg }, + ); - break :result try self.binOp(.shl, lhs, lhs_ty, rhs, rhs_ty); + switch (lhs_ty.zigTypeTag(zcu)) { + else => |x| return func.fail("TODO: airMulWithOverflow {s}", .{@tagName(x)}), + .Int => { + assert(lhs_ty.eql(rhs_ty, zcu)); + const int_info = lhs_ty.intInfo(zcu); + switch (int_info.bits) { + 1...32 => { + if (int_info.bits >= 8 and math.isPowerOfTwo(int_info.bits)) { + if (int_info.signedness == .unsigned) { + switch (int_info.bits) { + 1...8 => { + const max_val = std.math.pow(u16, 2, int_info.bits) - 1; + + const add_reg, const add_lock = try func.promoteReg(lhs_ty, lhs); + defer if (add_lock) |lock| func.register_manager.unlockReg(lock); + + const overflow_reg, const overflow_lock = try func.allocReg(.int); + defer func.register_manager.unlockReg(overflow_lock); + + _ 
= try func.addInst(.{
+                                        .tag = .andi,
+                                        .ops = .rri,
+                                        .data = .{ .i_type = .{
+                                            .rd = overflow_reg,
+                                            .rs1 = add_reg,
+                                            .imm12 = Immediate.s(max_val),
+                                        } },
+                                    });
+
+                                    try func.genBinOp(
+                                        .cmp_neq,
+                                        .{ .register = overflow_reg },
+                                        lhs_ty,
+                                        .{ .register = add_reg },
+                                        lhs_ty,
+                                        overflow_reg,
+                                    );
+
+                                    try func.genCopy(
+                                        lhs_ty,
+                                        result_mcv.offset(overflow_off),
+                                        .{ .register = overflow_reg },
+                                    );
+
+                                    break :result result_mcv;
+                                },
+
+                                else => return func.fail("TODO: airMulWithOverflow check for size {d}", .{int_info.bits}),
+                            }
+                        } else {
+                            return func.fail("TODO: airMulWithOverflow calculate carry for signed multiplication", .{});
+                        }
+                    } else {
+                        return func.fail("TODO: airMulWithOverflow with < 8 bits or non-pow of 2", .{});
+                    }
+                },
+                else => return func.fail("TODO: airMulWithOverflow larger than 32-bit mul", .{}),
+            }
+        },
+    }
    };
-    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+
+    return func.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none });
}

-fn airShlSat(self: *Self, inst: Air.Inst.Index) !void {
-    const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
-    const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement shl_sat for {}", .{self.target.cpu.arch});
-    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+fn airShlWithOverflow(func: *Func, inst: Air.Inst.Index) !void {
+    const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+    const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement airShlWithOverflow", .{});
+    return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}

-fn airShr(self: *Self, inst: Air.Inst.Index) !void {
-    const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
-    const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement shr for {}", .{self.target.cpu.arch});
-    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+fn airAddSat(func: *Func, inst: Air.Inst.Index) !void {
+    const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+    const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement airAddSat", .{});
+    return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}

-fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void {
-    const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
-    const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement .optional_payload for {}", .{self.target.cpu.arch});
-    return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+fn airSubSat(func: *Func, inst: Air.Inst.Index) !void {
+    const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+    const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement airSubSat", .{});
+    return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}

-fn airOptionalPayloadPtr(self: *Self, inst: Air.Inst.Index) !void {
-    const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
-    const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement .optional_payload_ptr for {}", .{self.target.cpu.arch});
-    return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
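
// [Editor's aside — illustrative Zig, not part of the patch] The
// airMulWithOverflow hunk above flags unsigned overflow for narrow integers
// with a mask-and-compare idiom: `andi` truncates a register to the operand
// width, and `cmp_neq` against the untruncated register reports whether any
// high bits were set. A minimal source-level sketch of that idiom, applied
// to a widened product (the helper name and the u8 width are assumptions):
fn mulOverflows8(a: u8, b: u8) bool {
    const wide = @as(u16, a) * @as(u16, b); // full product, no wraparound
    const max_val: u16 = (1 << 8) - 1; // mask for the 8-bit operand width
    return (wide & max_val) != wide; // high bits set => the multiply overflowed
}
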
+fn airMulSat(func: *Func, inst: Air.Inst.Index) !void {
+    const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+    const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement airMulSat", .{});
+    return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}

-fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
-    const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
-    const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement .optional_payload_ptr_set for {}", .{self.target.cpu.arch});
-    return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+fn airShlSat(func: *Func, inst: Air.Inst.Index) !void {
+    const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+    const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement airShlSat", .{});
+    return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}

-fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void {
-    const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
-    const zcu = self.bin_file.comp.module.?;
-    const err_union_ty = self.typeOf(ty_op.operand);
+fn airOptionalPayload(func: *Func, inst: Air.Inst.Index) !void {
+    const zcu = func.bin_file.comp.module.?;
+    const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+    const result: MCValue = result: {
+        const pl_ty = func.typeOfIndex(inst);
+        if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .none;
+
+        const opt_mcv = try func.resolveInst(ty_op.operand);
+        if (func.reuseOperand(inst, ty_op.operand, 0, opt_mcv)) {
+            switch (opt_mcv) {
+                .register => |pl_reg| try func.truncateRegister(pl_ty, pl_reg),
+                else => {},
+            }
+            break :result opt_mcv;
+        }
+
+        const pl_mcv = try func.allocRegOrMem(pl_ty, inst, true);
+        try func.genCopy(pl_ty, pl_mcv, opt_mcv);
+        break :result pl_mcv;
+    };
+    return func.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+}
+
+fn airOptionalPayloadPtr(func: *Func, inst: Air.Inst.Index) !void {
+    const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+    const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement .optional_payload_ptr for {}", .{func.target.cpu.arch});
+    return func.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+}
+
+fn airOptionalPayloadPtrSet(func: *Func, inst: Air.Inst.Index) !void {
+    const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+    const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement .optional_payload_ptr_set for {}", .{func.target.cpu.arch});
+    return func.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+}
+
+fn airUnwrapErrErr(func: *Func, inst: Air.Inst.Index) !void {
+    const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+    const zcu = func.bin_file.comp.module.?;
+    const err_union_ty = func.typeOf(ty_op.operand);
    const err_ty = err_union_ty.errorUnionSet(zcu);
    const payload_ty = err_union_ty.errorUnionPayload(zcu);
-    const operand = try self.resolveInst(ty_op.operand);
+    const operand = try func.resolveInst(ty_op.operand);

    const result: MCValue = result: {
        if (err_ty.errorSetIsEmpty(zcu)) {
@@ -2520,43 +3012,47 @@ fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void {

        switch (operand) {
            .register => |reg| {
-                const eu_lock = self.register_manager.lockReg(reg);
-
defer if (eu_lock) |lock| self.register_manager.unlockReg(lock); - - var result = try self.copyToNewRegister(inst, operand); + const eu_lock = func.register_manager.lockReg(reg); + defer if (eu_lock) |lock| func.register_manager.unlockReg(lock); + const result = try func.copyToNewRegister(inst, operand); if (err_off > 0) { - result = try self.binOp( + try func.genBinOp( .shr, result, err_union_ty, .{ .immediate = @as(u6, @intCast(err_off * 8)) }, Type.u8, + result.register, ); } break :result result; }, - else => return self.fail("TODO implement unwrap_err_err for {}", .{operand}), + .load_frame => |frame_addr| break :result .{ .load_frame = .{ + .index = frame_addr.index, + .off = frame_addr.off + @as(i32, @intCast(err_off)), + } }, + else => return func.fail("TODO implement unwrap_err_err for {}", .{operand}), } }; - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const operand_ty = self.typeOf(ty_op.operand); - const operand = try self.resolveInst(ty_op.operand); - const result = try self.genUnwrapErrUnionPayloadMir(operand_ty, operand); - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); +fn airUnwrapErrPayload(func: *Func, inst: Air.Inst.Index) !void { + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const operand_ty = func.typeOf(ty_op.operand); + const operand = try func.resolveInst(ty_op.operand); + const result = try func.genUnwrapErrUnionPayloadMir(operand_ty, operand); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } fn genUnwrapErrUnionPayloadMir( - self: *Self, + func: *Func, err_union_ty: Type, err_union: MCValue, ) !MCValue { - const zcu = self.bin_file.comp.module.?; + const zcu = func.bin_file.comp.module.?; const payload_ty = err_union_ty.errorUnionPayload(zcu); const result: MCValue = result: { @@ -2569,24 +3065,23 @@ fn genUnwrapErrUnionPayloadMir( .off = frame_addr.off + payload_off, } }, .register => |reg| { - const eu_lock = self.register_manager.lockReg(reg); - defer if (eu_lock) |lock| self.register_manager.unlockReg(lock); - - var result: MCValue = .{ .register = try self.copyToTmpRegister(err_union_ty, err_union) }; + const eu_lock = func.register_manager.lockReg(reg); + defer if (eu_lock) |lock| func.register_manager.unlockReg(lock); + const result_reg = try func.copyToTmpRegister(err_union_ty, err_union); if (payload_off > 0) { - result = try self.binOp( + try func.genBinOp( .shr, - result, + .{ .register = result_reg }, err_union_ty, .{ .immediate = @as(u6, @intCast(payload_off * 8)) }, Type.u8, + result_reg, ); } - - break :result result; + break :result .{ .register = result_reg }; }, - else => return self.fail("TODO implement genUnwrapErrUnionPayloadMir for {}", .{err_union}), + else => return func.fail("TODO implement genUnwrapErrUnionPayloadMir for {}", .{err_union}), } }; @@ -2594,99 +3089,140 @@ fn genUnwrapErrUnionPayloadMir( } // *(E!T) -> E -fn airUnwrapErrErrPtr(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement unwrap error union error ptr for {}", .{self.target.cpu.arch}); - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); +fn 
airUnwrapErrErrPtr(func: *Func, inst: Air.Inst.Index) !void { + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement unwrap error union error ptr for {}", .{func.target.cpu.arch}); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } // *(E!T) -> *T -fn airUnwrapErrPayloadPtr(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement unwrap error union payload ptr for {}", .{self.target.cpu.arch}); - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); +fn airUnwrapErrPayloadPtr(func: *Func, inst: Air.Inst.Index) !void { + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement unwrap error union payload ptr for {}", .{func.target.cpu.arch}); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement .errunion_payload_ptr_set for {}", .{self.target.cpu.arch}); - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); +fn airErrUnionPayloadPtrSet(func: *Func, inst: Air.Inst.Index) !void { + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement .errunion_payload_ptr_set for {}", .{func.target.cpu.arch}); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airErrReturnTrace(self: *Self, inst: Air.Inst.Index) !void { - const result: MCValue = if (self.liveness.isUnused(inst)) +fn airErrReturnTrace(func: *Func, inst: Air.Inst.Index) !void { + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else - return self.fail("TODO implement airErrReturnTrace for {}", .{self.target.cpu.arch}); - return self.finishAir(inst, result, .{ .none, .none, .none }); + return func.fail("TODO implement airErrReturnTrace for {}", .{func.target.cpu.arch}); + return func.finishAir(inst, result, .{ .none, .none, .none }); } -fn airSetErrReturnTrace(self: *Self, inst: Air.Inst.Index) !void { +fn airSetErrReturnTrace(func: *Func, inst: Air.Inst.Index) !void { _ = inst; - return self.fail("TODO implement airSetErrReturnTrace for {}", .{self.target.cpu.arch}); + return func.fail("TODO implement airSetErrReturnTrace for {}", .{func.target.cpu.arch}); } -fn airSaveErrReturnTraceIndex(self: *Self, inst: Air.Inst.Index) !void { +fn airSaveErrReturnTraceIndex(func: *Func, inst: Air.Inst.Index) !void { _ = inst; - return self.fail("TODO implement airSaveErrReturnTraceIndex for {}", .{self.target.cpu.arch}); + return func.fail("TODO implement airSaveErrReturnTraceIndex for {}", .{func.target.cpu.arch}); } -fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { - const zcu = self.bin_file.comp.module.?; - const optional_ty = self.typeOfIndex(inst); +fn 
airWrapOptional(func: *Func, inst: Air.Inst.Index) !void {
+    const zcu = func.bin_file.comp.module.?;
+    const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+    const result: MCValue = result: {
+        const pl_ty = func.typeOf(ty_op.operand);
+        if (!pl_ty.hasRuntimeBits(zcu)) break :result .{ .immediate = 1 };

-    // Optional with a zero-bit payload type is just a boolean true
-    if (optional_ty.abiSize(zcu) == 1)
-        break :result MCValue{ .immediate = 1 };
+        const opt_ty = func.typeOfIndex(inst);
+        const pl_mcv = try func.resolveInst(ty_op.operand);
+        const same_repr = opt_ty.optionalReprIsPayload(zcu);
+        if (same_repr and func.reuseOperand(inst, ty_op.operand, 0, pl_mcv)) break :result pl_mcv;

-    return self.fail("TODO implement wrap optional for {}", .{self.target.cpu.arch});
+        const pl_lock: ?RegisterLock = switch (pl_mcv) {
+            .register => |reg| func.register_manager.lockRegAssumeUnused(reg),
+            else => null,
+        };
+        defer if (pl_lock) |lock| func.register_manager.unlockReg(lock);
+
+        const opt_mcv = try func.allocRegOrMem(opt_ty, inst, true);
+        try func.genCopy(pl_ty, opt_mcv, pl_mcv);
+
+        if (!same_repr) {
+            const pl_abi_size: i32 = @intCast(pl_ty.abiSize(zcu));
+            switch (opt_mcv) {
+                .load_frame => |frame_addr| try func.genSetMem(
+                    .{ .frame = frame_addr.index },
+                    frame_addr.off + pl_abi_size,
+                    Type.u8,
+                    .{ .immediate = 1 },
+                ),
+                .register => return func.fail("TODO: airWrapOptional opt_mcv register", .{}),
+                else => unreachable,
+            }
+        }
+        break :result opt_mcv;
    };
-    return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+    return func.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}

/// T to E!T
-fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
-    const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
-    const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement wrap errunion payload for {}", .{self.target.cpu.arch});
-    return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+fn airWrapErrUnionPayload(func: *Func, inst: Air.Inst.Index) !void {
+    const zcu = func.bin_file.comp.module.?;
+    const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+
+    const eu_ty = ty_op.ty.toType();
+    const pl_ty = eu_ty.errorUnionPayload(zcu);
+    const err_ty = eu_ty.errorUnionSet(zcu);
+    const operand = try func.resolveInst(ty_op.operand);
+
+    const result: MCValue = result: {
+        if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .{ .immediate = 0 };
+
+        const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(eu_ty, zcu));
+        const pl_off: i32 = @intCast(errUnionPayloadOffset(pl_ty, zcu));
+        const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, zcu));
+        try func.genSetMem(.{ .frame = frame_index }, pl_off, pl_ty, operand);
+        try func.genSetMem(.{ .frame = frame_index }, err_off, err_ty, .{ .immediate = 0 });
+        break :result .{ .load_frame = .{ .index = frame_index } };
+    };
+
+    return func.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}

/// E to E!T
-fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
-    const zcu = self.bin_file.comp.module.?;
-    const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+fn airWrapErrUnionErr(func: *Func, inst: Air.Inst.Index) !void {
+    const zcu = func.bin_file.comp.module.?;
+    const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;

    const eu_ty = ty_op.ty.toType();
    const pl_ty = eu_ty.errorUnionPayload(zcu);
    const err_ty =
eu_ty.errorUnionSet(zcu); const result: MCValue = result: { - if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result try self.resolveInst(ty_op.operand); + if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result try func.resolveInst(ty_op.operand); - const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(eu_ty, zcu)); + const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(eu_ty, zcu)); const pl_off: i32 = @intCast(errUnionPayloadOffset(pl_ty, zcu)); const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, zcu)); - try self.genSetStack(pl_ty, .{ .index = frame_index, .off = pl_off }, .undef); - const operand = try self.resolveInst(ty_op.operand); - try self.genSetStack(err_ty, .{ .index = frame_index, .off = err_off }, operand); + try func.genSetMem(.{ .frame = frame_index }, pl_off, pl_ty, .undef); + const operand = try func.resolveInst(ty_op.operand); + try func.genSetMem(.{ .frame = frame_index }, err_off, err_ty, operand); break :result .{ .load_frame = .{ .index = frame_index } }; }; - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airTry(self: *Self, inst: Air.Inst.Index) !void { - const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; - const extra = self.air.extraData(Air.Try, pl_op.payload); - const body: []const Air.Inst.Index = @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]); - const operand_ty = self.typeOf(pl_op.operand); - const result = try self.genTry(inst, pl_op.operand, body, operand_ty, false); - return self.finishAir(inst, result, .{ .none, .none, .none }); +fn airTry(func: *Func, inst: Air.Inst.Index) !void { + const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; + const extra = func.air.extraData(Air.Try, pl_op.payload); + const body: []const Air.Inst.Index = @ptrCast(func.air.extra[extra.end..][0..extra.data.body_len]); + const operand_ty = func.typeOf(pl_op.operand); + const result = try func.genTry(inst, pl_op.operand, body, operand_ty, false); + return func.finishAir(inst, result, .{ .none, .none, .none }); } fn genTry( - self: *Self, + func: *Func, inst: Air.Inst.Index, operand: Air.Inst.Ref, body: []const Air.Inst.Index, @@ -2695,180 +3231,204 @@ fn genTry( ) !MCValue { _ = operand_is_ptr; - const liveness_cond_br = self.liveness.getCondBr(inst); + const liveness_cond_br = func.liveness.getCondBr(inst); - const operand_mcv = try self.resolveInst(operand); - const is_err_mcv = try self.isErr(null, operand_ty, operand_mcv); + const operand_mcv = try func.resolveInst(operand); + const is_err_mcv = try func.isErr(null, operand_ty, operand_mcv); // A branch to the false section. Uses beq. 1 is the default "true" state. 
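
// [Editor's aside — illustrative Zig, not part of the patch] genTry emits a
// conditional branch on the error bit, the error-branch body, then the payload
// unwrap on the fall-through path. A source-level sketch of the semantics
// being lowered (the function name is hypothetical):
fn tryEquivalent(eu: anyerror!u32) anyerror!u32 {
    if (eu) |payload| {
        return payload; // fall-through path: unwrap the payload
    } else |err| {
        return err; // the "false section" reached via beq
    }
}
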
- const reloc = try self.condBr(Type.anyerror, is_err_mcv); + const reloc = try func.condBr(Type.anyerror, is_err_mcv); - if (self.liveness.operandDies(inst, 0)) { - if (operand.toIndex()) |operand_inst| try self.processDeath(operand_inst); + if (func.liveness.operandDies(inst, 0)) { + if (operand.toIndex()) |operand_inst| try func.processDeath(operand_inst); } - self.scope_generation += 1; - const state = try self.saveState(); + func.scope_generation += 1; + const state = try func.saveState(); - for (liveness_cond_br.else_deaths) |death| try self.processDeath(death); - try self.genBody(body); - try self.restoreState(state, &.{}, .{ + for (liveness_cond_br.else_deaths) |death| try func.processDeath(death); + try func.genBody(body); + try func.restoreState(state, &.{}, .{ .emit_instructions = false, .update_tracking = true, .resurrect = true, .close_scope = true, }); - self.performReloc(reloc); + func.performReloc(reloc); - for (liveness_cond_br.then_deaths) |death| try self.processDeath(death); + for (liveness_cond_br.then_deaths) |death| try func.processDeath(death); - const result = if (self.liveness.isUnused(inst)) + const result = if (func.liveness.isUnused(inst)) .unreach else - try self.genUnwrapErrUnionPayloadMir(operand_ty, operand_mcv); + try func.genUnwrapErrUnionPayloadMir(operand_ty, operand_mcv); return result; } -fn airSlicePtr(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; +fn airSlicePtr(func: *Func, inst: Air.Inst.Index) !void { + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result = result: { - const src_mcv = try self.resolveInst(ty_op.operand); - if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result src_mcv; + const src_mcv = try func.resolveInst(ty_op.operand); + if (func.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result src_mcv; - const dst_mcv = try self.allocRegOrMem(inst, true); - const dst_ty = self.typeOfIndex(inst); - try self.genCopy(dst_ty, dst_mcv, src_mcv); + const dst_mcv = try func.allocRegOrMem(func.typeOfIndex(inst), inst, true); + const dst_ty = func.typeOfIndex(inst); + try func.genCopy(dst_ty, dst_mcv, src_mcv); break :result dst_mcv; }; - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { - const src_mcv = try self.resolveInst(ty_op.operand); +fn airSliceLen(func: *Func, inst: Air.Inst.Index) !void { + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { + const src_mcv = try func.resolveInst(ty_op.operand); + const ty = func.typeOfIndex(inst); + switch (src_mcv) { .load_frame => |frame_addr| { const len_mcv: MCValue = .{ .load_frame = .{ .index = frame_addr.index, .off = frame_addr.off + 8, } }; - if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result len_mcv; + if (func.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result len_mcv; - const dst_mcv = try self.allocRegOrMem(inst, true); - try self.genCopy(Type.usize, dst_mcv, len_mcv); + const dst_mcv = try func.allocRegOrMem(ty, inst, true); + try func.genCopy(Type.usize, dst_mcv, len_mcv); break :result dst_mcv; }, 
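
// [Editor's aside — illustrative Zig, not part of the patch] The
// `frame_addr.off + 8` above leans on the 64-bit slice layout: the pointer
// word comes first, the length word second. A sketch of that assumption,
// modeled with an explicit-layout struct (SliceRepr is hypothetical):
const std = @import("std");
const SliceRepr = extern struct { ptr: [*]const u8, len: usize };
comptime {
    std.debug.assert(@offsetOf(SliceRepr, "ptr") == 0);
    std.debug.assert(@offsetOf(SliceRepr, "len") == 8); // on 64-bit targets
}
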
.register_pair => |pair| { const len_mcv: MCValue = .{ .register = pair[1] }; - if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result len_mcv; + if (func.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result len_mcv; - const dst_mcv = try self.allocRegOrMem(inst, true); - try self.genCopy(Type.usize, dst_mcv, len_mcv); + const dst_mcv = try func.allocRegOrMem(ty, inst, true); + try func.genCopy(Type.usize, dst_mcv, len_mcv); break :result dst_mcv; }, - else => return self.fail("TODO airSliceLen for {}", .{src_mcv}), + else => return func.fail("TODO airSliceLen for {}", .{src_mcv}), } }; - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement ptr_slice_len_ptr for {}", .{self.target.cpu.arch}); - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); +fn airPtrSliceLenPtr(func: *Func, inst: Air.Inst.Index) !void { + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement ptr_slice_len_ptr for {}", .{func.target.cpu.arch}); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement ptr_slice_ptr_ptr for {}", .{self.target.cpu.arch}); - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); +fn airPtrSlicePtrPtr(func: *Func, inst: Air.Inst.Index) !void { + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement ptr_slice_ptr_ptr for {}", .{func.target.cpu.arch}); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { - const zcu = self.bin_file.comp.module.?; - const is_volatile = false; // TODO - const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; +fn airSliceElemVal(func: *Func, inst: Air.Inst.Index) !void { + const mod = func.bin_file.comp.module.?; + const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - if (!is_volatile and self.liveness.isUnused(inst)) return self.finishAir( - inst, - .unreach, - .{ bin_op.lhs, bin_op.rhs, .none }, - ); const result: MCValue = result: { - const slice_mcv = try self.resolveInst(bin_op.lhs); - const index_mcv = try self.resolveInst(bin_op.rhs); + const elem_ty = func.typeOfIndex(inst); + if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none; - const slice_ty = self.typeOf(bin_op.lhs); - - const slice_ptr_field_type = slice_ty.slicePtrFieldType(zcu); - - const index_lock: ?RegisterLock = if (index_mcv == .register) - self.register_manager.lockRegAssumeUnused(index_mcv.register) - else - null; - defer if (index_lock) |reg| self.register_manager.unlockReg(reg); - - const base_mcv: MCValue = switch (slice_mcv) { - .load_frame, - .load_symbol, - => .{ .register = try self.copyToTmpRegister(slice_ptr_field_type, 
slice_mcv) }, - else => return self.fail("TODO slice_elem_val when slice is {}", .{slice_mcv}), - }; - - const dest = try self.allocRegOrMem(inst, true); - const addr = try self.binOp(.ptr_add, base_mcv, slice_ptr_field_type, index_mcv, Type.usize); - try self.load(dest, addr, slice_ptr_field_type); - - break :result dest; + const slice_ty = func.typeOf(bin_op.lhs); + const slice_ptr_field_type = slice_ty.slicePtrFieldType(mod); + const elem_ptr = try func.genSliceElemPtr(bin_op.lhs, bin_op.rhs); + const dst_mcv = try func.allocRegOrMem(elem_ty, inst, false); + try func.load(dst_mcv, elem_ptr, slice_ptr_field_type); + break :result dst_mcv; }; - return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); + return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } -fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) !void { - const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; - const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement slice_elem_ptr for {}", .{self.target.cpu.arch}); - return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none }); +fn airSliceElemPtr(func: *Func, inst: Air.Inst.Index) !void { + const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; + const extra = func.air.extraData(Air.Bin, ty_pl.payload).data; + const dst_mcv = try func.genSliceElemPtr(extra.lhs, extra.rhs); + return func.finishAir(inst, dst_mcv, .{ extra.lhs, extra.rhs, .none }); } -fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { - const zcu = self.bin_file.comp.module.?; - const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { - const array_ty = self.typeOf(bin_op.lhs); - const array_mcv = try self.resolveInst(bin_op.lhs); +fn genSliceElemPtr(func: *Func, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue { + const zcu = func.bin_file.comp.module.?; + const slice_ty = func.typeOf(lhs); + const slice_mcv = try func.resolveInst(lhs); + const slice_mcv_lock: ?RegisterLock = switch (slice_mcv) { + .register => |reg| func.register_manager.lockRegAssumeUnused(reg), + else => null, + }; + defer if (slice_mcv_lock) |lock| func.register_manager.unlockReg(lock); - const index_mcv = try self.resolveInst(bin_op.rhs); - const index_ty = self.typeOf(bin_op.rhs); + const elem_ty = slice_ty.childType(zcu); + const elem_size = elem_ty.abiSize(zcu); + + const index_ty = func.typeOf(rhs); + const index_mcv = try func.resolveInst(rhs); + const index_mcv_lock: ?RegisterLock = switch (index_mcv) { + .register => |reg| func.register_manager.lockRegAssumeUnused(reg), + else => null, + }; + defer if (index_mcv_lock) |lock| func.register_manager.unlockReg(lock); + + const offset_reg = try func.elemOffset(index_ty, index_mcv, elem_size); + const offset_reg_lock = func.register_manager.lockRegAssumeUnused(offset_reg); + defer func.register_manager.unlockReg(offset_reg_lock); + + const addr_reg, const addr_lock = try func.allocReg(.int); + defer func.register_manager.unlockReg(addr_lock); + try func.genSetReg(Type.usize, addr_reg, slice_mcv); + + _ = try func.addInst(.{ + .tag = .add, + .ops = .rrr, + .data = .{ .r_type = .{ + .rd = addr_reg, + .rs1 = addr_reg, + .rs2 = offset_reg, + } }, + }); + + return .{ .register = addr_reg }; +} + +fn airArrayElemVal(func: *Func, inst: Air.Inst.Index) !void { + const zcu = 
func.bin_file.comp.module.?; + const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { + const result_ty = func.typeOfIndex(inst); + + const array_ty = func.typeOf(bin_op.lhs); + const array_mcv = try func.resolveInst(bin_op.lhs); + + const index_mcv = try func.resolveInst(bin_op.rhs); + const index_ty = func.typeOf(bin_op.rhs); const elem_ty = array_ty.childType(zcu); const elem_abi_size = elem_ty.abiSize(zcu); - const addr_reg, const addr_reg_lock = try self.allocReg(); - defer self.register_manager.unlockReg(addr_reg_lock); + const addr_reg, const addr_reg_lock = try func.allocReg(.int); + defer func.register_manager.unlockReg(addr_reg_lock); switch (array_mcv) { .register => { - const frame_index = try self.allocFrameIndex(FrameAlloc.initType(array_ty, zcu)); - try self.genSetStack(array_ty, .{ .index = frame_index }, array_mcv); - try self.genSetReg(Type.usize, addr_reg, .{ .lea_frame = .{ .index = frame_index } }); + const frame_index = try func.allocFrameIndex(FrameAlloc.initType(array_ty, zcu)); + try func.genSetMem(.{ .frame = frame_index }, 0, array_ty, array_mcv); + try func.genSetReg(Type.usize, addr_reg, .{ .lea_frame = .{ .index = frame_index } }); }, .load_frame => |frame_addr| { - try self.genSetReg(Type.usize, addr_reg, .{ .lea_frame = frame_addr }); + try func.genSetReg(Type.usize, addr_reg, .{ .lea_frame = frame_addr }); }, - else => try self.genSetReg(Type.usize, addr_reg, array_mcv.address()), + else => try func.genSetReg(Type.usize, addr_reg, array_mcv.address()), } - const offset_reg = try self.elemOffset(index_ty, index_mcv, elem_abi_size); - const offset_lock = self.register_manager.lockRegAssumeUnused(offset_reg); - defer self.register_manager.unlockReg(offset_lock); + const offset_reg = try func.elemOffset(index_ty, index_mcv, elem_abi_size); + const offset_lock = func.register_manager.lockRegAssumeUnused(offset_reg); + defer func.register_manager.unlockReg(offset_lock); - const dst_mcv = try self.allocRegOrMem(inst, false); - _ = try self.addInst(.{ + const dst_mcv = try func.allocRegOrMem(result_ty, inst, false); + _ = try func.addInst(.{ .tag = .add, .ops = .rrr, .data = .{ .r_type = .{ @@ -2877,163 +3437,225 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { .rs2 = addr_reg, } }, }); - try self.genCopy(elem_ty, dst_mcv, .{ .indirect = .{ .reg = addr_reg } }); + try func.genCopy(elem_ty, dst_mcv, .{ .indirect = .{ .reg = addr_reg } }); break :result dst_mcv; }; - return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); + return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } -fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void { +fn airPtrElemVal(func: *Func, inst: Air.Inst.Index) !void { const is_volatile = false; // TODO - const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (!is_volatile and self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement ptr_elem_val for {}", .{self.target.cpu.arch}); - return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); + const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; + const result: MCValue = if (!is_volatile and func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement ptr_elem_val for {}", .{func.target.cpu.arch}); + return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } -fn airPtrElemPtr(self: *Self, 
inst: Air.Inst.Index) !void { - const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; - const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement ptr_elem_ptr for {}", .{self.target.cpu.arch}); - return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none }); -} +fn airPtrElemPtr(func: *Func, inst: Air.Inst.Index) !void { + const zcu = func.bin_file.comp.module.?; + const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; + const extra = func.air.extraData(Air.Bin, ty_pl.payload).data; -fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void { - const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - _ = bin_op; - return self.fail("TODO implement airSetUnionTag for {}", .{self.target.cpu.arch}); - // return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); -} + const result = result: { + const elem_ptr_ty = func.typeOfIndex(inst); + const base_ptr_ty = func.typeOf(extra.lhs); -fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement airGetUnionTag for {}", .{self.target.cpu.arch}); - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); -} - -fn airClz(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement airClz for {}", .{self.target.cpu.arch}); - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); -} - -fn airCtz(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { - const operand = try self.resolveInst(ty_op.operand); - const operand_ty = self.typeOf(ty_op.operand); - - const dest_reg = try self.register_manager.allocReg(inst, gp); - - const source_reg, const source_lock = blk: { - if (operand == .register) break :blk .{ operand.register, null }; - - const source_reg, const source_lock = try self.allocReg(); - try self.genSetReg(operand_ty, source_reg, operand); - break :blk .{ source_reg, source_lock }; + const base_ptr_mcv = try func.resolveInst(extra.lhs); + const base_ptr_lock: ?RegisterLock = switch (base_ptr_mcv) { + .register => |reg| func.register_manager.lockRegAssumeUnused(reg), + else => null, }; - defer if (source_lock) |lock| self.register_manager.unlockReg(lock); + defer if (base_ptr_lock) |lock| func.register_manager.unlockReg(lock); - // TODO: the B extension for RISCV should have the ctz instruction, and we should use it. 
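
// [Editor's aside — illustrative Zig, not part of the patch] As the removed
// comment notes, the B extension provides a hardware `ctz`; without it the
// usual fallback is a shift loop. A minimal sketch of that fallback (the
// helper is hypothetical, not the backend's actual lowering):
fn softCtz(x: u64) u7 {
    if (x == 0) return 64;
    var n: u7 = 0;
    var v = x;
    while (v & 1 == 0) : (v >>= 1) n += 1;
    return n;
}
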
+ if (elem_ptr_ty.ptrInfo(zcu).flags.vector_index != .none) { + break :result if (func.reuseOperand(inst, extra.lhs, 0, base_ptr_mcv)) + base_ptr_mcv + else + try func.copyToNewRegister(inst, base_ptr_mcv); + } - try self.ctz(source_reg, dest_reg, operand_ty); + const elem_ty = base_ptr_ty.elemType2(zcu); + const elem_abi_size = elem_ty.abiSize(zcu); + const index_ty = func.typeOf(extra.rhs); + const index_mcv = try func.resolveInst(extra.rhs); + const index_lock: ?RegisterLock = switch (index_mcv) { + .register => |reg| func.register_manager.lockRegAssumeUnused(reg), + else => null, + }; + defer if (index_lock) |lock| func.register_manager.unlockReg(lock); - break :result .{ .register = dest_reg }; + const offset_reg = try func.elemOffset(index_ty, index_mcv, elem_abi_size); + const offset_reg_lock = func.register_manager.lockRegAssumeUnused(offset_reg); + defer func.register_manager.unlockReg(offset_reg_lock); + + const result_reg, const result_lock = try func.allocReg(.int); + defer func.register_manager.unlockReg(result_lock); + + try func.genBinOp( + .ptr_add, + base_ptr_mcv, + base_ptr_ty, + .{ .register = offset_reg }, + Type.usize, + result_reg, + ); + + break :result MCValue{ .register = result_reg }; }; - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); + return func.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none }); } -fn ctz(self: *Self, src: Register, dst: Register, ty: Type) !void { - const zcu = self.bin_file.comp.module.?; - const length = (ty.abiSize(zcu) * 8) - 1; - - const count_reg, const count_lock = try self.allocReg(); - defer self.register_manager.unlockReg(count_lock); - - const len_reg, const len_lock = try self.allocReg(); - defer self.register_manager.unlockReg(len_lock); - - try self.genSetReg(Type.usize, count_reg, .{ .immediate = 0 }); - try self.genSetReg(Type.usize, len_reg, .{ .immediate = length }); - - _ = src; - _ = dst; - - return self.fail("TODO: finish ctz", .{}); +fn airSetUnionTag(func: *Func, inst: Air.Inst.Index) !void { + const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; + _ = bin_op; + return func.fail("TODO implement airSetUnionTag for {}", .{func.target.cpu.arch}); + // return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } -fn airPopcount(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement airPopcount for {}", .{self.target.cpu.arch}); - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); +fn airGetUnionTag(func: *Func, inst: Air.Inst.Index) !void { + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement airGetUnionTag for {}", .{func.target.cpu.arch}); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airAbs(self: *Self, inst: Air.Inst.Index) !void { - const zcu = self.bin_file.comp.module.?; - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { - const ty = self.typeOf(ty_op.operand); +fn airClz(func: *Func, inst: Air.Inst.Index) !void { + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO 
implement airClz for {}", .{func.target.cpu.arch}); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); +} + +fn airCtz(func: *Func, inst: Air.Inst.Index) !void { + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + _ = ty_op; + return func.fail("TODO: finish ctz", .{}); +} + +fn airPopcount(func: *Func, inst: Air.Inst.Index) !void { + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement airPopcount for {}", .{func.target.cpu.arch}); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); +} + +fn airAbs(func: *Func, inst: Air.Inst.Index) !void { + const zcu = func.bin_file.comp.module.?; + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { + const ty = func.typeOf(ty_op.operand); const scalar_ty = ty.scalarType(zcu); - const operand = try self.resolveInst(ty_op.operand); + const operand = try func.resolveInst(ty_op.operand); switch (scalar_ty.zigTypeTag(zcu)) { .Int => if (ty.zigTypeTag(zcu) == .Vector) { - return self.fail("TODO implement airAbs for {}", .{ty.fmt(zcu)}); + return func.fail("TODO implement airAbs for {}", .{ty.fmt(zcu)}); } else { - const int_bits = ty.intInfo(zcu).bits; + const return_mcv = try func.copyToNewRegister(inst, operand); + const operand_reg = return_mcv.register; - if (int_bits > 32) { - return self.fail("TODO: airAbs for larger than 32 bits", .{}); + const temp_reg, const temp_lock = try func.allocReg(.int); + defer func.register_manager.unlockReg(temp_lock); + + _ = try func.addInst(.{ + .tag = .srai, + .ops = .rri, + .data = .{ .i_type = .{ + .rd = temp_reg, + .rs1 = operand_reg, + .imm12 = Immediate.u(63), + } }, + }); + + _ = try func.addInst(.{ + .tag = .xor, + .ops = .rrr, + .data = .{ .r_type = .{ + .rd = operand_reg, + .rs1 = operand_reg, + .rs2 = temp_reg, + } }, + }); + + _ = try func.addInst(.{ + .tag = .sub, + .ops = .rrr, + .data = .{ .r_type = .{ + .rd = operand_reg, + .rs1 = operand_reg, + .rs2 = temp_reg, + } }, + }); + + break :result return_mcv; + }, + .Float => { + const float_bits = scalar_ty.floatBits(zcu.getTarget()); + switch (float_bits) { + 16 => return func.fail("TODO: airAbs 16-bit float", .{}), + 32 => {}, + 64 => {}, + 80 => return func.fail("TODO: airAbs 80-bit float", .{}), + 128 => return func.fail("TODO: airAbs 128-bit float", .{}), + else => unreachable, } - // promote the src into a register - const src_mcv = try self.copyToNewRegister(inst, operand); - // temp register for shift - const temp_reg = try self.register_manager.allocReg(inst, gp); + const return_mcv = try func.copyToNewRegister(inst, operand); + const operand_reg = return_mcv.register; - _ = try self.addInst(.{ - .tag = .abs, - .ops = .rri, + assert(operand_reg.class() == .float); + + _ = try func.addInst(.{ + .tag = .pseudo, + .ops = .pseudo_fabs, .data = .{ - .i_type = .{ - .rs1 = src_mcv.register, - .rd = temp_reg, - .imm12 = Immediate.s(int_bits - 1), + .fabs = .{ + .rd = operand_reg, + .rs = operand_reg, + .bits = float_bits, }, }, }); - break :result src_mcv; + break :result return_mcv; }, - else => return self.fail("TODO: implement airAbs {}", .{scalar_ty.fmt(zcu)}), + else => return func.fail("TODO: implement airAbs {}", .{scalar_ty.fmt(zcu)}), } + + break :result .unreach; }; - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); + 
return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { - const zcu = self.bin_file.comp.module.?; - const ty = self.typeOf(ty_op.operand); - const operand = try self.resolveInst(ty_op.operand); +fn airByteSwap(func: *Func, inst: Air.Inst.Index) !void { + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { + const zcu = func.bin_file.comp.module.?; + const ty = func.typeOf(ty_op.operand); + const operand = try func.resolveInst(ty_op.operand); const int_bits = ty.intInfo(zcu).bits; // bytes are no-op - if (int_bits == 8 and self.reuseOperand(inst, ty_op.operand, 0, operand)) { - return self.finishAir(inst, operand, .{ ty_op.operand, .none, .none }); + if (int_bits == 8 and func.reuseOperand(inst, ty_op.operand, 0, operand)) { + return func.finishAir(inst, operand, .{ ty_op.operand, .none, .none }); } - const dest_reg = try self.register_manager.allocReg(null, gp); - try self.genSetReg(ty, dest_reg, operand); - - const dest_mcv: MCValue = .{ .register = dest_reg }; + const dest_mcv = try func.copyToNewRegister(inst, operand); + const dest_reg = dest_mcv.register; switch (int_bits) { 16 => { - const temp = try self.binOp(.shr, dest_mcv, ty, .{ .immediate = 8 }, Type.u8); - assert(temp == .register); - _ = try self.addInst(.{ + const temp_reg, const temp_lock = try func.allocReg(.int); + defer func.register_manager.unlockReg(temp_lock); + + _ = try func.addInst(.{ + .tag = .srli, + .ops = .rri, + .data = .{ .i_type = .{ + .imm12 = Immediate.s(8), + .rd = temp_reg, + .rs1 = dest_reg, + } }, + }); + + _ = try func.addInst(.{ .tag = .slli, .ops = .rri, .data = .{ .i_type = .{ @@ -3042,58 +3664,59 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void { .rs1 = dest_reg, } }, }); - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = .@"or", .ops = .rri, .data = .{ .r_type = .{ .rd = dest_reg, .rs1 = dest_reg, - .rs2 = temp.register, + .rs2 = temp_reg, } }, }); }, - else => return self.fail("TODO: {d} bits for airByteSwap", .{int_bits}), + else => return func.fail("TODO: {d} bits for airByteSwap", .{int_bits}), } break :result dest_mcv; }; - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement airBitReverse for {}", .{self.target.cpu.arch}); - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); +fn airBitReverse(func: *Func, inst: Air.Inst.Index) !void { + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement airBitReverse for {}", .{func.target.cpu.arch}); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airUnaryMath(self: *Self, inst: Air.Inst.Index) !void { - const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const result: MCValue = if (self.liveness.isUnused(inst)) +fn airUnaryMath(func: *Func, inst: 
Air.Inst.Index) !void {
+    const tag = func.air.instructions.items(.tag)[@intFromEnum(inst)];
+    const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
+    const result: MCValue = if (func.liveness.isUnused(inst))
        .unreach
    else
-        return self.fail("TODO implement airUnaryMath for {}", .{self.target.cpu.arch});
-    return self.finishAir(inst, result, .{ un_op, .none, .none });
+        return func.fail("TODO implement airUnaryMath {s} for {}", .{ @tagName(tag), func.target.cpu.arch });
+    return func.finishAir(inst, result, .{ un_op, .none, .none });
}

fn reuseOperand(
-    self: *Self,
+    func: *Func,
    inst: Air.Inst.Index,
    operand: Air.Inst.Ref,
    op_index: Liveness.OperandInt,
    mcv: MCValue,
) bool {
-    return self.reuseOperandAdvanced(inst, operand, op_index, mcv, inst);
+    return func.reuseOperandAdvanced(inst, operand, op_index, mcv, inst);
}

fn reuseOperandAdvanced(
-    self: *Self,
+    func: *Func,
    inst: Air.Inst.Index,
    operand: Air.Inst.Ref,
    op_index: Liveness.OperandInt,
    mcv: MCValue,
    maybe_tracked_inst: ?Air.Inst.Index,
) bool {
-    if (!self.liveness.operandDies(inst, op_index))
+    if (!func.liveness.operandDies(inst, op_index))
        return false;

    switch (mcv) {
@@ -3103,55 +3726,59 @@ fn reuseOperandAdvanced(
            // If it's in the registers table, need to associate the register(s) with the
            // new instruction.
            if (maybe_tracked_inst) |tracked_inst| {
-                if (!self.register_manager.isRegFree(reg)) {
+                if (!func.register_manager.isRegFree(reg)) {
                    if (RegisterManager.indexOfRegIntoTracked(reg)) |index| {
-                        self.register_manager.registers[index] = tracked_inst;
+                        func.register_manager.registers[index] = tracked_inst;
                    }
                }
-            } else self.register_manager.freeReg(reg);
        },
        .load_frame => |frame_addr| if (frame_addr.index.isNamed()) return false,
        else => return false,
    }

    // Prevent the operand deaths processing code from deallocating it.
-    self.liveness.clearOperandDeath(inst, op_index);
+    func.liveness.clearOperandDeath(inst, op_index);
    const op_inst = operand.toIndex().?;
-    self.getResolvedInstValue(op_inst).reuse(self, maybe_tracked_inst, op_inst);
+    func.getResolvedInstValue(op_inst).reuse(func, maybe_tracked_inst, op_inst);

    return true;
}

-fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
-    const zcu = self.bin_file.comp.module.?;
-    const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
-    const elem_ty = self.typeOfIndex(inst);
+fn airLoad(func: *Func, inst: Air.Inst.Index) !void {
+    const zcu = func.bin_file.comp.module.?;
+    const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+    const elem_ty = func.typeOfIndex(inst);
+
    const result: MCValue = result: {
        if (!elem_ty.hasRuntimeBits(zcu)) break :result .none;

-        const ptr = try self.resolveInst(ty_op.operand);
-        const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(zcu);
-        if (self.liveness.isUnused(inst) and !is_volatile)
+        const ptr = try func.resolveInst(ty_op.operand);
+        const is_volatile = func.typeOf(ty_op.operand).isVolatilePtr(zcu);
+        if (func.liveness.isUnused(inst) and !is_volatile)
            break :result .unreach;

+        const elem_size = elem_ty.abiSize(zcu);
+
        const dst_mcv: MCValue = blk: {
-            if (self.reuseOperand(inst, ty_op.operand, 0, ptr)) {
+            // The pointer is 8 bytes; if the element is larger than that, we cannot reuse it.
+            if (elem_size <= 8 and func.reuseOperand(inst, ty_op.operand, 0, ptr)) {
                // The MCValue that holds the pointer can be re-used as the value.
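
// [Editor's aside — illustrative Zig, not part of the patch] Reuse is gated
// on `elem_size <= 8` because the loaded value must fit in the one register
// that currently holds the pointer. A sketch of the guard (the helper name
// is hypothetical):
fn canReuseAsValue(elem_abi_size: u64) bool {
    return elem_abi_size <= 8; // a single 64-bit GPR can hold the result
}
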
break :blk ptr; } else { - break :blk try self.allocRegOrMem(inst, true); + break :blk try func.allocRegOrMem(elem_ty, inst, true); } }; - try self.load(dst_mcv, ptr, self.typeOf(ty_op.operand)); + try func.load(dst_mcv, ptr, func.typeOf(ty_op.operand)); break :result dst_mcv; }; - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn load(self: *Self, dst_mcv: MCValue, ptr_mcv: MCValue, ptr_ty: Type) InnerError!void { - const zcu = self.bin_file.comp.module.?; +fn load(func: *Func, dst_mcv: MCValue, ptr_mcv: MCValue, ptr_ty: Type) InnerError!void { + const zcu = func.bin_file.comp.module.?; const dst_ty = ptr_ty.childType(zcu); log.debug("loading {}:{} into {}", .{ ptr_mcv, ptr_ty.fmt(zcu), dst_mcv }); @@ -3170,43 +3797,43 @@ fn load(self: *Self, dst_mcv: MCValue, ptr_mcv: MCValue, ptr_ty: Type) InnerErro .register_offset, .lea_frame, .lea_symbol, - => try self.genCopy(dst_ty, dst_mcv, ptr_mcv.deref()), + => try func.genCopy(dst_ty, dst_mcv, ptr_mcv.deref()), .memory, .indirect, .load_symbol, .load_frame, => { - const addr_reg = try self.copyToTmpRegister(ptr_ty, ptr_mcv); - const addr_lock = self.register_manager.lockRegAssumeUnused(addr_reg); - defer self.register_manager.unlockReg(addr_lock); + const addr_reg = try func.copyToTmpRegister(ptr_ty, ptr_mcv); + const addr_lock = func.register_manager.lockRegAssumeUnused(addr_reg); + defer func.register_manager.unlockReg(addr_lock); - try self.genCopy(dst_ty, dst_mcv, .{ .indirect = .{ .reg = addr_reg } }); + try func.genCopy(dst_ty, dst_mcv, .{ .indirect = .{ .reg = addr_reg } }); }, - .air_ref => |ptr_ref| try self.load(dst_mcv, try self.resolveInst(ptr_ref), ptr_ty), + .air_ref => |ptr_ref| try func.load(dst_mcv, try func.resolveInst(ptr_ref), ptr_ty), } } -fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void { +fn airStore(func: *Func, inst: Air.Inst.Index, safety: bool) !void { if (safety) { // TODO if the value is undef, write 0xaa bytes to dest } else { // TODO if the value is undef, don't lower this instruction } - const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const ptr = try self.resolveInst(bin_op.lhs); - const value = try self.resolveInst(bin_op.rhs); - const ptr_ty = self.typeOf(bin_op.lhs); - const value_ty = self.typeOf(bin_op.rhs); + const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; + const ptr = try func.resolveInst(bin_op.lhs); + const value = try func.resolveInst(bin_op.rhs); + const ptr_ty = func.typeOf(bin_op.lhs); + const value_ty = func.typeOf(bin_op.rhs); - try self.store(ptr, value, ptr_ty, value_ty); + try func.store(ptr, value, ptr_ty, value_ty); - return self.finishAir(inst, .none, .{ bin_op.lhs, bin_op.rhs, .none }); + return func.finishAir(inst, .none, .{ bin_op.lhs, bin_op.rhs, .none }); } /// Loads `value` into the "payload" of `pointer`. 
-fn store(self: *Self, ptr_mcv: MCValue, src_mcv: MCValue, ptr_ty: Type, src_ty: Type) !void { - const zcu = self.bin_file.comp.module.?; +fn store(func: *Func, ptr_mcv: MCValue, src_mcv: MCValue, ptr_ty: Type, src_ty: Type) !void { + const zcu = func.bin_file.comp.module.?; log.debug("storing {}:{} in {}:{}", .{ src_mcv, src_ty.fmt(zcu), ptr_mcv, ptr_ty.fmt(zcu) }); @@ -3223,40 +3850,40 @@ fn store(self: *Self, ptr_mcv: MCValue, src_mcv: MCValue, ptr_ty: Type, src_ty: .register_offset, .lea_symbol, .lea_frame, - => try self.genCopy(src_ty, ptr_mcv.deref(), src_mcv), + => try func.genCopy(src_ty, ptr_mcv.deref(), src_mcv), .memory, .indirect, .load_symbol, .load_frame, => { - const addr_reg = try self.copyToTmpRegister(ptr_ty, ptr_mcv); - const addr_lock = self.register_manager.lockRegAssumeUnused(addr_reg); - defer self.register_manager.unlockReg(addr_lock); + const addr_reg = try func.copyToTmpRegister(ptr_ty, ptr_mcv); + const addr_lock = func.register_manager.lockRegAssumeUnused(addr_reg); + defer func.register_manager.unlockReg(addr_lock); - try self.genCopy(src_ty, .{ .indirect = .{ .reg = addr_reg } }, src_mcv); + try func.genCopy(src_ty, .{ .indirect = .{ .reg = addr_reg } }, src_mcv); }, - .air_ref => |ptr_ref| try self.store(try self.resolveInst(ptr_ref), src_mcv, ptr_ty, src_ty), + .air_ref => |ptr_ref| try func.store(try func.resolveInst(ptr_ref), src_mcv, ptr_ty, src_ty), } } -fn airStructFieldPtr(self: *Self, inst: Air.Inst.Index) !void { - const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; - const extra = self.air.extraData(Air.StructField, ty_pl.payload).data; - const result = try self.structFieldPtr(inst, extra.struct_operand, extra.field_index); - return self.finishAir(inst, result, .{ extra.struct_operand, .none, .none }); +fn airStructFieldPtr(func: *Func, inst: Air.Inst.Index) !void { + const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; + const extra = func.air.extraData(Air.StructField, ty_pl.payload).data; + const result = try func.structFieldPtr(inst, extra.struct_operand, extra.field_index); + return func.finishAir(inst, result, .{ extra.struct_operand, .none, .none }); } -fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void { - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result = try self.structFieldPtr(inst, ty_op.operand, index); - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); +fn airStructFieldPtrIndex(func: *Func, inst: Air.Inst.Index, index: u8) !void { + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const result = try func.structFieldPtr(inst, ty_op.operand, index); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue { - const zcu = self.bin_file.comp.module.?; - const ptr_field_ty = self.typeOfIndex(inst); - const ptr_container_ty = self.typeOf(operand); +fn structFieldPtr(func: *Func, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue { + const zcu = func.bin_file.comp.module.?; + const ptr_field_ty = func.typeOfIndex(inst); + const ptr_container_ty = func.typeOf(operand); const ptr_container_ty_info = ptr_container_ty.ptrInfo(zcu); const container_ty = ptr_container_ty.childType(zcu); @@ -3269,27 +3896,27 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde else @intCast(container_ty.structFieldOffset(index, zcu)); - const 
src_mcv = try self.resolveInst(operand); + const src_mcv = try func.resolveInst(operand); const dst_mcv = if (switch (src_mcv) { .immediate, .lea_frame => true, - .register, .register_offset => self.reuseOperand(inst, operand, 0, src_mcv), + .register, .register_offset => func.reuseOperand(inst, operand, 0, src_mcv), else => false, - }) src_mcv else try self.copyToNewRegister(inst, src_mcv); + }) src_mcv else try func.copyToNewRegister(inst, src_mcv); return dst_mcv.offset(field_offset); } -fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; +fn airStructFieldVal(func: *Func, inst: Air.Inst.Index) !void { + const mod = func.bin_file.comp.module.?; - const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; - const extra = self.air.extraData(Air.StructField, ty_pl.payload).data; + const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; + const extra = func.air.extraData(Air.StructField, ty_pl.payload).data; const operand = extra.struct_operand; const index = extra.field_index; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { - const zcu = self.bin_file.comp.module.?; - const src_mcv = try self.resolveInst(operand); - const struct_ty = self.typeOf(operand); + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { + const zcu = func.bin_file.comp.module.?; + const src_mcv = try func.resolveInst(operand); + const struct_ty = func.typeOf(operand); const field_ty = struct_ty.structFieldType(index, zcu); if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .none; @@ -3304,33 +3931,31 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { switch (src_mcv) { .dead, .unreach => unreachable, .register => |src_reg| { - const src_reg_lock = self.register_manager.lockRegAssumeUnused(src_reg); - defer self.register_manager.unlockReg(src_reg_lock); + const src_reg_lock = func.register_manager.lockRegAssumeUnused(src_reg); + defer func.register_manager.unlockReg(src_reg_lock); const dst_reg = if (field_off == 0) - (try self.copyToNewRegister(inst, src_mcv)).register + (try func.copyToNewRegister(inst, src_mcv)).register else - try self.copyToTmpRegister(Type.usize, .{ .register = src_reg }); + try func.copyToTmpRegister(Type.usize, .{ .register = src_reg }); const dst_mcv: MCValue = .{ .register = dst_reg }; - const dst_lock = self.register_manager.lockReg(dst_reg); - defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); + const dst_lock = func.register_manager.lockReg(dst_reg); + defer if (dst_lock) |lock| func.register_manager.unlockReg(lock); if (field_off > 0) { - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = .srli, .ops = .rri, .data = .{ .i_type = .{ - .imm12 = Immediate.s(@intCast(field_off)), + .imm12 = Immediate.u(@intCast(field_off)), .rd = dst_reg, .rs1 = dst_reg, } }, }); - - return self.fail("TODO: airStructFieldVal register with field_off > 0", .{}); } - break :result if (field_off == 0) dst_mcv else try self.copyToNewRegister(inst, dst_mcv); + break :result if (field_off == 0) dst_mcv else try func.copyToNewRegister(inst, dst_mcv); }, .load_frame => { const field_abi_size: u32 = @intCast(field_ty.abiSize(mod)); @@ -3345,57 +3970,57 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { @intCast(field_bit_size), ); - const dst_reg, const dst_lock = try self.allocReg(); + const dst_reg, const dst_lock = try func.allocReg(.int); const dst_mcv = MCValue{ .register = dst_reg }; - defer 
self.register_manager.unlockReg(dst_lock); + defer func.register_manager.unlockReg(dst_lock); - try self.genCopy(int_ty, dst_mcv, off_mcv); - break :result try self.copyToNewRegister(inst, dst_mcv); + try func.genCopy(int_ty, dst_mcv, off_mcv); + break :result try func.copyToNewRegister(inst, dst_mcv); } const container_abi_size: u32 = @intCast(struct_ty.abiSize(mod)); const dst_mcv = if (field_byte_off + field_abi_size <= container_abi_size and - self.reuseOperand(inst, operand, 0, src_mcv)) + func.reuseOperand(inst, operand, 0, src_mcv)) off_mcv else dst: { - const dst_mcv = try self.allocRegOrMem(inst, true); - try self.genCopy(field_ty, dst_mcv, off_mcv); + const dst_mcv = try func.allocRegOrMem(func.typeOfIndex(inst), inst, true); + try func.genCopy(field_ty, dst_mcv, off_mcv); break :dst dst_mcv; }; if (field_abi_size * 8 > field_bit_size and dst_mcv.isMemory()) { - const tmp_reg, const tmp_lock = try self.allocReg(); - defer self.register_manager.unlockReg(tmp_lock); + const tmp_reg, const tmp_lock = try func.allocReg(.int); + defer func.register_manager.unlockReg(tmp_lock); const hi_mcv = dst_mcv.address().offset(@intCast(field_bit_size / 64 * 8)).deref(); - try self.genSetReg(Type.usize, tmp_reg, hi_mcv); - try self.genCopy(Type.usize, hi_mcv, .{ .register = tmp_reg }); + try func.genSetReg(Type.usize, tmp_reg, hi_mcv); + try func.genCopy(Type.usize, hi_mcv, .{ .register = tmp_reg }); } break :result dst_mcv; } - return self.fail("TODO: airStructFieldVal load_frame field_off non multiple of 8", .{}); + return func.fail("TODO: airStructFieldVal load_frame field_off non multiple of 8", .{}); }, - else => return self.fail("TODO: airStructField {s}", .{@tagName(src_mcv)}), + else => return func.fail("TODO: airStructField {s}", .{@tagName(src_mcv)}), } }; - return self.finishAir(inst, result, .{ extra.struct_operand, .none, .none }); + return func.finishAir(inst, result, .{ extra.struct_operand, .none, .none }); } -fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void { +fn airFieldParentPtr(func: *Func, inst: Air.Inst.Index) !void { _ = inst; - return self.fail("TODO implement codegen airFieldParentPtr", .{}); + return func.fail("TODO implement codegen airFieldParentPtr", .{}); } -fn genArgDbgInfo(self: Self, inst: Air.Inst.Index, mcv: MCValue) !void { - const zcu = self.bin_file.comp.module.?; - const arg = self.air.instructions.items(.data)[@intFromEnum(inst)].arg; +fn genArgDbgInfo(func: Func, inst: Air.Inst.Index, mcv: MCValue) !void { + const zcu = func.bin_file.comp.module.?; + const arg = func.air.instructions.items(.data)[@intFromEnum(inst)].arg; const ty = arg.ty.toType(); - const owner_decl = zcu.funcOwnerDeclIndex(self.func_index); - const name = zcu.getParamName(self.func_index, arg.src_index); + const owner_decl = zcu.funcOwnerDeclIndex(func.func_index); + const name = zcu.getParamName(func.func_index, arg.src_index); - switch (self.debug_output) { + switch (func.debug_output) { .dwarf => |dw| switch (mcv) { .register => |reg| try dw.genArgDbgInfo(name, ty, owner_decl, .{ .register = reg.dwarfLocOp(), @@ -3408,119 +4033,100 @@ fn genArgDbgInfo(self: Self, inst: Air.Inst.Index, mcv: MCValue) !void { } } -fn airArg(self: *Self, inst: Air.Inst.Index) !void { - const zcu = self.bin_file.comp.module.?; - var arg_index = self.arg_index; +fn airArg(func: *Func, inst: Air.Inst.Index) !void { + var arg_index = func.arg_index; // we skip over args that have no bits - while (self.args[arg_index] == .none) arg_index += 1; - self.arg_index = arg_index + 1; + while 
(func.args[arg_index] == .none) arg_index += 1; + func.arg_index = arg_index + 1; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { - const src_mcv = self.args[arg_index]; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { + const src_mcv = func.args[arg_index]; + const arg_ty = func.typeOfIndex(inst); - const arg_ty = self.typeOfIndex(inst); + const dst_mcv = try func.allocRegOrMem(arg_ty, inst, false); - const dst_mcv = switch (src_mcv) { - .register => dst: { - const frame = try self.allocFrameIndex(FrameAlloc.init(.{ - .size = Type.usize.abiSize(zcu), - .alignment = Type.usize.abiAlignment(zcu), - })); - const dst_mcv: MCValue = .{ .load_frame = .{ .index = frame } }; - try self.genCopy(Type.usize, dst_mcv, src_mcv); - break :dst dst_mcv; - }, - .register_pair => dst: { - const frame = try self.allocFrameIndex(FrameAlloc.init(.{ - .size = Type.usize.abiSize(zcu) * 2, - .alignment = Type.usize.abiAlignment(zcu), - })); - const dst_mcv: MCValue = .{ .load_frame = .{ .index = frame } }; - try self.genCopy(arg_ty, dst_mcv, src_mcv); - break :dst dst_mcv; - }, - .load_frame => src_mcv, - else => return self.fail("TODO: airArg {s}", .{@tagName(src_mcv)}), - }; + log.debug("airArg {} -> {}", .{ src_mcv, dst_mcv }); - try self.genArgDbgInfo(inst, src_mcv); + try func.genCopy(arg_ty, dst_mcv, src_mcv); + + try func.genArgDbgInfo(inst, src_mcv); break :result dst_mcv; }; - return self.finishAir(inst, result, .{ .none, .none, .none }); + return func.finishAir(inst, result, .{ .none, .none, .none }); } -fn airTrap(self: *Self) !void { - _ = try self.addInst(.{ +fn airTrap(func: *Func) !void { + _ = try func.addInst(.{ .tag = .unimp, .ops = .none, .data = undefined, }); - return self.finishAirBookkeeping(); + return func.finishAirBookkeeping(); } -fn airBreakpoint(self: *Self) !void { - _ = try self.addInst(.{ +fn airBreakpoint(func: *Func) !void { + _ = try func.addInst(.{ .tag = .ebreak, .ops = .none, .data = undefined, }); - return self.finishAirBookkeeping(); + return func.finishAirBookkeeping(); } -fn airRetAddr(self: *Self, inst: Air.Inst.Index) !void { - const dst_mcv = try self.allocRegOrMem(inst, true); - try self.genCopy(Type.usize, dst_mcv, .{ .load_frame = .{ .index = .ret_addr } }); - return self.finishAir(inst, dst_mcv, .{ .none, .none, .none }); +fn airRetAddr(func: *Func, inst: Air.Inst.Index) !void { + const dst_mcv = try func.allocRegOrMem(func.typeOfIndex(inst), inst, true); + try func.genCopy(Type.usize, dst_mcv, .{ .load_frame = .{ .index = .ret_addr } }); + return func.finishAir(inst, dst_mcv, .{ .none, .none, .none }); } -fn airFrameAddress(self: *Self, inst: Air.Inst.Index) !void { - const dst_mcv = try self.allocRegOrMem(inst, true); - try self.genCopy(Type.usize, dst_mcv, .{ .lea_frame = .{ .index = .base_ptr } }); - return self.finishAir(inst, dst_mcv, .{ .none, .none, .none }); +fn airFrameAddress(func: *Func, inst: Air.Inst.Index) !void { + const dst_mcv = try func.allocRegOrMem(func.typeOfIndex(inst), inst, true); + try func.genCopy(Type.usize, dst_mcv, .{ .lea_frame = .{ .index = .base_ptr } }); + return func.finishAir(inst, dst_mcv, .{ .none, .none, .none }); } -fn airFence(self: *Self) !void { - return self.fail("TODO implement fence() for {}", .{self.target.cpu.arch}); - //return self.finishAirBookkeeping(); +fn airFence(func: *Func) !void { + return func.fail("TODO implement fence() for {}", .{func.target.cpu.arch}); + //return func.finishAirBookkeeping(); } -fn airCall(self: *Self, inst: 
Air.Inst.Index, modifier: std.builtin.CallModifier) !void { - if (modifier == .always_tail) return self.fail("TODO implement tail calls for riscv64", .{}); - const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; +fn airCall(func: *Func, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) !void { + if (modifier == .always_tail) return func.fail("TODO implement tail calls for riscv64", .{}); + const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const callee = pl_op.operand; - const extra = self.air.extraData(Air.Call, pl_op.payload); - const arg_refs: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]); + const extra = func.air.extraData(Air.Call, pl_op.payload); + const arg_refs: []const Air.Inst.Ref = @ptrCast(func.air.extra[extra.end..][0..extra.data.args_len]); const expected_num_args = 8; const ExpectedContents = extern struct { vals: [expected_num_args][@sizeOf(MCValue)]u8 align(@alignOf(MCValue)), }; var stack align(@max(@alignOf(ExpectedContents), @alignOf(std.heap.StackFallbackAllocator(0)))) = - std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa); + std.heap.stackFallback(@sizeOf(ExpectedContents), func.gpa); const allocator = stack.get(); const arg_tys = try allocator.alloc(Type, arg_refs.len); defer allocator.free(arg_tys); - for (arg_tys, arg_refs) |*arg_ty, arg_ref| arg_ty.* = self.typeOf(arg_ref); + for (arg_tys, arg_refs) |*arg_ty, arg_ref| arg_ty.* = func.typeOf(arg_ref); const arg_vals = try allocator.alloc(MCValue, arg_refs.len); defer allocator.free(arg_vals); for (arg_vals, arg_refs) |*arg_val, arg_ref| arg_val.* = .{ .air_ref = arg_ref }; - const call_ret = try self.genCall(.{ .air = callee }, arg_tys, arg_vals); + const call_ret = try func.genCall(.{ .air = callee }, arg_tys, arg_vals); - var bt = self.liveness.iterateBigTomb(inst); - try self.feed(&bt, pl_op.operand); - for (arg_refs) |arg_ref| try self.feed(&bt, arg_ref); + var bt = func.liveness.iterateBigTomb(inst); + try func.feed(&bt, pl_op.operand); + for (arg_refs) |arg_ref| try func.feed(&bt, arg_ref); - const result = if (self.liveness.isUnused(inst)) .unreach else call_ret; - return self.finishAirResult(inst, result); + const result = if (func.liveness.isUnused(inst)) .unreach else call_ret; + return func.finishAirResult(inst, result); } fn genCall( - self: *Self, + func: *Func, info: union(enum) { air: Air.Inst.Ref, lib: struct { @@ -3533,11 +4139,11 @@ fn genCall( arg_tys: []const Type, args: []const MCValue, ) !MCValue { - const zcu = self.bin_file.comp.module.?; + const zcu = func.bin_file.comp.module.?; const fn_ty = switch (info) { .air => |callee| fn_info: { - const callee_ty = self.typeOf(callee); + const callee_ty = func.typeOf(callee); break :fn_info switch (callee_ty.zigTypeTag(zcu)) { .Fn => callee_ty, .Pointer => callee_ty.childType(zcu), @@ -3552,8 +4158,15 @@ fn genCall( }; const fn_info = zcu.typeToFunc(fn_ty).?; - var call_info = try self.resolveCallingConventionValues(fn_info); - defer call_info.deinit(self); + + const allocator = func.gpa; + + const var_args = try allocator.alloc(Type, args.len - fn_info.param_types.len); + defer allocator.free(var_args); + for (var_args, arg_tys[fn_info.param_types.len..]) |*var_arg, arg_ty| var_arg.* = arg_ty; + + var call_info = try func.resolveCallingConventionValues(fn_info, var_args); + defer call_info.deinit(func); // We need a properly aligned and sized call frame to be able to call this function. 
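// What the block below does, in rough pseudocode (field names taken from
// this hunk; nothing here is new API): every call site widens the single
// shared FrameIndex.call_frame allocation to the largest size/alignment any
// callee needs,
//
//   stack_frame_size.*  = @max(stack_frame_size.*, needed_call_frame.abi_size);
//   stack_frame_align.* = stack_frame_align.max(needed_call_frame.abi_align);
//
// so one reserved region at the bottom of the caller's frame serves every call.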
{ @@ -3561,7 +4174,7 @@ fn genCall( .size = call_info.stack_byte_count, .alignment = call_info.stack_align, }); - const frame_allocs_slice = self.frame_allocs.slice(); + const frame_allocs_slice = func.frame_allocs.slice(); const stack_frame_size = &frame_allocs_slice.items(.abi_size)[@intFromEnum(FrameIndex.call_frame)]; stack_frame_size.* = @max(stack_frame_size.*, needed_call_frame.abi_size); @@ -3570,13 +4183,75 @@ fn genCall( stack_frame_align.* = stack_frame_align.max(needed_call_frame.abi_align); } - for (call_info.args, 0..) |mc_arg, arg_i| try self.genCopy(arg_tys[arg_i], mc_arg, args[arg_i]); + var reg_locks = std.ArrayList(?RegisterLock).init(allocator); + defer reg_locks.deinit(); + try reg_locks.ensureTotalCapacity(8); + defer for (reg_locks.items) |reg_lock| if (reg_lock) |lock| func.register_manager.unlockReg(lock); + + const frame_indices = try allocator.alloc(FrameIndex, args.len); + defer allocator.free(frame_indices); + + switch (call_info.return_value.long) { + .none, .unreach => {}, + .indirect => |reg_off| try func.register_manager.getReg(reg_off.reg, null), + else => unreachable, + } + for (call_info.args, args, arg_tys, frame_indices) |dst_arg, src_arg, arg_ty, *frame_index| { + switch (dst_arg) { + .none => {}, + .register => |reg| { + try func.register_manager.getReg(reg, null); + try reg_locks.append(func.register_manager.lockReg(reg)); + }, + .register_pair => |regs| { + for (regs) |reg| try func.register_manager.getReg(reg, null); + try reg_locks.appendSlice(&func.register_manager.lockRegs(2, regs)); + }, + .indirect => |reg_off| { + frame_index.* = try func.allocFrameIndex(FrameAlloc.initType(arg_ty, zcu)); + try func.genSetMem(.{ .frame = frame_index.* }, 0, arg_ty, src_arg); + try func.register_manager.getReg(reg_off.reg, null); + try reg_locks.append(func.register_manager.lockReg(reg_off.reg)); + }, + else => return func.fail("TODO: genCall set arg {s}", .{@tagName(dst_arg)}), + } + } + + switch (call_info.return_value.long) { + .none, .unreach => {}, + .indirect => |reg_off| { + const ret_ty = Type.fromInterned(fn_info.return_type); + const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(ret_ty, zcu)); + try func.genSetReg(Type.usize, reg_off.reg, .{ + .lea_frame = .{ .index = frame_index, .off = -reg_off.off }, + }); + call_info.return_value.short = .{ .load_frame = .{ .index = frame_index } }; + try reg_locks.append(func.register_manager.lockReg(reg_off.reg)); + }, + else => unreachable, + } + + for (call_info.args, arg_tys, args, frame_indices) |dst_arg, arg_ty, src_arg, frame_index| { + switch (dst_arg) { + .none, .load_frame => {}, + .register_pair => try func.genCopy(arg_ty, dst_arg, src_arg), + .register => |dst_reg| try func.genSetReg( + arg_ty, + dst_reg, + src_arg, + ), + .indirect => |reg_off| try func.genSetReg(Type.usize, reg_off.reg, .{ + .lea_frame = .{ .index = frame_index, .off = -reg_off.off }, + }), + else => return func.fail("TODO: genCall actual set {s}", .{@tagName(dst_arg)}), + } + } // Due to incremental compilation, how function calls are generated depends // on linking. 
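// The switch below picks one of three call sequences (names as introduced by
// this patch; the exact encodings live in Lower.zig, which is not shown here):
//   - a known Zig function: materialize its address into `ra` via a
//     .load_symbol reloc, then `jalr ra, 0(ra)`;
//   - an extern function: emit a .pseudo_extern_fn_reloc against the ELF
//     global symbol, to be patched at emit time;
//   - a runtime function pointer: copy it into a temporary register and
//     `jalr` through that.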
switch (info) { .air => |callee| { - if (try self.air.value(callee, zcu)) |func_value| { + if (try func.air.value(callee, zcu)) |func_value| { const func_key = zcu.intern_pool.indexToKey(func_value.ip_index); switch (switch (func_key) { else => func_key, @@ -3585,35 +4260,53 @@ fn genCall( else => func_key, } else func_key, }) { - .func => |func| { - if (self.bin_file.cast(link.File.Elf)) |elf_file| { - const sym_index = try elf_file.zigObjectPtr().?.getOrCreateMetadataForDecl(elf_file, func.owner_decl); + .func => |func_val| { + if (func.bin_file.cast(link.File.Elf)) |elf_file| { + const sym_index = try elf_file.zigObjectPtr().?.getOrCreateMetadataForDecl(elf_file, func_val.owner_decl); const sym = elf_file.symbol(sym_index); - _ = try sym.getOrCreateZigGotEntry(sym_index, elf_file); - const got_addr = sym.zigGotAddress(elf_file); - try self.genSetReg(Type.usize, .ra, .{ .memory = @intCast(got_addr) }); + if (func.mod.pic) { + return func.fail("TODO: genCall pic", .{}); + } else { + try func.genSetReg(Type.usize, .ra, .{ .load_symbol = .{ .sym = sym.esym_index } }); + _ = try func.addInst(.{ + .tag = .jalr, + .ops = .rri, + .data = .{ .i_type = .{ + .rd = .ra, + .rs1 = .ra, + .imm12 = Immediate.s(0), + } }, + }); + } + } else unreachable; // not a valid riscv64 format + }, + .extern_func => |extern_func| { + const owner_decl = zcu.declPtr(extern_func.decl); + const lib_name = extern_func.lib_name.toSlice(&zcu.intern_pool); + const decl_name = owner_decl.name.toSlice(&zcu.intern_pool); + const atom_index = try func.symbolIndex(); - _ = try self.addInst(.{ - .tag = .jalr, - .ops = .rri, - .data = .{ .i_type = .{ - .rd = .ra, - .rs1 = .ra, - .imm12 = Immediate.s(0), + if (func.bin_file.cast(link.File.Elf)) |elf_file| { + _ = try func.addInst(.{ + .tag = .pseudo, + .ops = .pseudo_extern_fn_reloc, + .data = .{ .reloc = .{ + .atom_index = atom_index, + .sym_index = try elf_file.getGlobalSymbol(decl_name, lib_name), } }, }); - } else unreachable; + } else unreachable; // not a valid riscv64 format }, - .extern_func => return self.fail("TODO: extern func calls", .{}), - else => return self.fail("TODO implement calling bitcasted functions", .{}), + else => return func.fail("TODO implement calling bitcasted functions", .{}), } } else { - assert(self.typeOf(callee).zigTypeTag(zcu) == .Pointer); - const addr_reg, const addr_lock = try self.allocReg(); - defer self.register_manager.unlockReg(addr_lock); - try self.genSetReg(Type.usize, addr_reg, .{ .air_ref = callee }); - _ = try self.addInst(.{ + assert(func.typeOf(callee).zigTypeTag(zcu) == .Pointer); + const addr_reg, const addr_lock = try func.allocReg(.int); + defer func.register_manager.unlockReg(addr_lock); + try func.genSetReg(Type.usize, addr_reg, .{ .air_ref = callee }); + + _ = try func.addInst(.{ .tag = .jalr, .ops = .rri, .data = .{ .i_type = .{ @@ -3624,15 +4317,15 @@ fn genCall( }); } }, - .lib => return self.fail("TODO: lib func calls", .{}), + .lib => return func.fail("TODO: lib func calls", .{}), } return call_info.return_value.short; } -fn airRet(self: *Self, inst: Air.Inst.Index, safety: bool) !void { - const zcu = self.bin_file.comp.module.?; - const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; +fn airRet(func: *Func, inst: Air.Inst.Index, safety: bool) !void { + const zcu = func.bin_file.comp.module.?; + const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; if (safety) { // safe @@ -3640,123 +4333,138 @@ fn airRet(self: *Self, inst: Air.Inst.Index, safety: bool) !void { // not safe } 
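// The dispatch below follows what resolveCallingConventionValues picked for
// ret_mcv.short (register names assume the standard riscv64 C ABI):
//   .register / .register_pair => the value fits in a0 (and a1): plain copy;
//   .indirect => the caller passed a pointer for the result, so reload that
//     pointer from ret_mcv.long and store the value through it.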
- const ret_ty = self.fn_type.fnReturnType(zcu); - switch (self.ret_mcv.short) { + const ret_ty = func.fn_type.fnReturnType(zcu); + switch (func.ret_mcv.short) { .none => {}, .register, .register_pair, - => try self.genCopy(ret_ty, self.ret_mcv.short, .{ .air_ref = un_op }), + => try func.genCopy(ret_ty, func.ret_mcv.short, .{ .air_ref = un_op }), .indirect => |reg_off| { - try self.register_manager.getReg(reg_off.reg, null); - const lock = self.register_manager.lockRegAssumeUnused(reg_off.reg); - defer self.register_manager.unlockReg(lock); + try func.register_manager.getReg(reg_off.reg, null); + const lock = func.register_manager.lockRegAssumeUnused(reg_off.reg); + defer func.register_manager.unlockReg(lock); - try self.genSetReg(Type.usize, reg_off.reg, self.ret_mcv.long); - try self.genCopy( + try func.genSetReg(Type.usize, reg_off.reg, func.ret_mcv.long); + try func.genSetMem( + .{ .reg = reg_off.reg }, + reg_off.off, ret_ty, - .{ .register_offset = reg_off }, .{ .air_ref = un_op }, ); }, else => unreachable, } - self.ret_mcv.liveOut(self, inst); - try self.finishAir(inst, .unreach, .{ un_op, .none, .none }); + func.ret_mcv.liveOut(func, inst); + try func.finishAir(inst, .unreach, .{ un_op, .none, .none }); // Just add space for an instruction, reloced this later - const index = try self.addInst(.{ + const index = try func.addInst(.{ .tag = .pseudo, .ops = .pseudo_j, .data = .{ .inst = undefined }, }); - try self.exitlude_jump_relocs.append(self.gpa, index); + try func.exitlude_jump_relocs.append(func.gpa, index); } -fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { - const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const ptr = try self.resolveInst(un_op); +fn airRetLoad(func: *Func, inst: Air.Inst.Index) !void { + const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; + const ptr = try func.resolveInst(un_op); - const ptr_ty = self.typeOf(un_op); - switch (self.ret_mcv.short) { + const ptr_ty = func.typeOf(un_op); + switch (func.ret_mcv.short) { .none => {}, - .register, .register_pair => try self.load(self.ret_mcv.short, ptr, ptr_ty), - .indirect => |reg_off| try self.genSetReg(ptr_ty, reg_off.reg, ptr), + .register, .register_pair => try func.load(func.ret_mcv.short, ptr, ptr_ty), + .indirect => |reg_off| try func.genSetReg(ptr_ty, reg_off.reg, ptr), else => unreachable, } - self.ret_mcv.liveOut(self, inst); - try self.finishAir(inst, .unreach, .{ un_op, .none, .none }); + func.ret_mcv.liveOut(func, inst); + try func.finishAir(inst, .unreach, .{ un_op, .none, .none }); // Just add space for an instruction, reloced this later - const index = try self.addInst(.{ + const index = try func.addInst(.{ .tag = .pseudo, .ops = .pseudo_j, .data = .{ .inst = undefined }, }); - try self.exitlude_jump_relocs.append(self.gpa, index); + try func.exitlude_jump_relocs.append(func.gpa, index); } -fn airCmp(self: *Self, inst: Air.Inst.Index) !void { - const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)]; - const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const zcu = self.bin_file.comp.module.?; +fn airCmp(func: *Func, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { + const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; + const zcu = func.bin_file.comp.module.?; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { - const lhs = try self.resolveInst(bin_op.lhs); - const rhs = try self.resolveInst(bin_op.rhs); - const lhs_ty = 
self.typeOf(bin_op.lhs); + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { + const lhs_ty = func.typeOf(bin_op.lhs); - const int_ty = switch (lhs_ty.zigTypeTag(zcu)) { - .Vector => unreachable, // Handled by cmp_vector. - .Enum => lhs_ty.intTagType(zcu), - .Int => lhs_ty, - .Bool => Type.u1, - .Pointer => Type.usize, - .ErrorSet => Type.u16, - .Optional => blk: { - const payload_ty = lhs_ty.optionalChild(zcu); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) { - break :blk Type.u1; - } else if (lhs_ty.isPtrLikeOptional(zcu)) { - break :blk Type.usize; + switch (lhs_ty.zigTypeTag(zcu)) { + .Int, + .Enum, + .Bool, + .Pointer, + .ErrorSet, + .Optional, + => { + const int_ty = switch (lhs_ty.zigTypeTag(zcu)) { + .Enum => lhs_ty.intTagType(zcu), + .Int => lhs_ty, + .Bool => Type.u1, + .Pointer => Type.usize, + .ErrorSet => Type.anyerror, + .Optional => blk: { + const payload_ty = lhs_ty.optionalChild(zcu); + if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) { + break :blk Type.u1; + } else if (lhs_ty.isPtrLikeOptional(zcu)) { + break :blk Type.usize; + } else { + return func.fail("TODO riscv cmp non-pointer optionals", .{}); + } + }, + else => unreachable, + }; + + const int_info = int_ty.intInfo(zcu); + if (int_info.bits <= 64) { + break :result try func.binOp(inst, tag, bin_op.lhs, bin_op.rhs); } else { - return self.fail("TODO riscv cmp non-pointer optionals", .{}); + return func.fail("TODO riscv cmp for ints > 64 bits", .{}); } }, - .Float => return self.fail("TODO riscv cmp floats", .{}), + .Float => { + const float_bits = lhs_ty.floatBits(func.target.*); + const float_reg_size: u32 = if (func.hasFeature(.d)) 64 else 32; + if (float_bits > float_reg_size) { + return func.fail("TODO: airCmp float > 64/32 bits", .{}); + } + break :result try func.binOp(inst, tag, bin_op.lhs, bin_op.rhs); + }, else => unreachable, - }; - - const int_info = int_ty.intInfo(zcu); - if (int_info.bits <= 64) { - break :result try self.binOp(tag, lhs, int_ty, rhs, int_ty); - } else { - return self.fail("TODO riscv cmp for ints > 64 bits", .{}); } }; - return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); + return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } -fn airCmpVector(self: *Self, inst: Air.Inst.Index) !void { +fn airCmpVector(func: *Func, inst: Air.Inst.Index) !void { _ = inst; - return self.fail("TODO implement airCmpVector for {}", .{self.target.cpu.arch}); + return func.fail("TODO implement airCmpVector for {}", .{func.target.cpu.arch}); } -fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void { - const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const operand = try self.resolveInst(un_op); +fn airCmpLtErrorsLen(func: *Func, inst: Air.Inst.Index) !void { + const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; + const operand = try func.resolveInst(un_op); _ = operand; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement airCmpLtErrorsLen for {}", .{self.target.cpu.arch}); - return self.finishAir(inst, result, .{ un_op, .none, .none }); + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement airCmpLtErrorsLen for {}", .{func.target.cpu.arch}); + return func.finishAir(inst, result, .{ un_op, .none, .none }); } -fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void { - const dbg_stmt = self.air.instructions.items(.data)[@intFromEnum(inst)].dbg_stmt; +fn 
airDbgStmt(func: *Func, inst: Air.Inst.Index) !void { + const dbg_stmt = func.air.instructions.items(.data)[@intFromEnum(inst)].dbg_stmt; - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = .pseudo, .ops = .pseudo_dbg_line_column, .data = .{ .pseudo_dbg_line_column = .{ @@ -3765,44 +4473,44 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void { } }, }); - return self.finishAirBookkeeping(); + return func.finishAirBookkeeping(); } -fn airDbgInlineBlock(self: *Self, inst: Air.Inst.Index) !void { - const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; - const extra = self.air.extraData(Air.DbgInlineBlock, ty_pl.payload); - try self.lowerBlock(inst, @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len])); +fn airDbgInlineBlock(func: *Func, inst: Air.Inst.Index) !void { + const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; + const extra = func.air.extraData(Air.DbgInlineBlock, ty_pl.payload); + try func.lowerBlock(inst, @ptrCast(func.air.extra[extra.end..][0..extra.data.body_len])); } -fn airDbgVar(self: *Self, inst: Air.Inst.Index) !void { - const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; +fn airDbgVar(func: *Func, inst: Air.Inst.Index) !void { + const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const operand = pl_op.operand; - const ty = self.typeOf(operand); - const mcv = try self.resolveInst(operand); + const ty = func.typeOf(operand); + const mcv = try func.resolveInst(operand); - const name = self.air.nullTerminatedString(pl_op.payload); + const name = func.air.nullTerminatedString(pl_op.payload); - const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)]; - try self.genVarDbgInfo(tag, ty, mcv, name); + const tag = func.air.instructions.items(.tag)[@intFromEnum(inst)]; + try func.genVarDbgInfo(tag, ty, mcv, name); - return self.finishAir(inst, .unreach, .{ operand, .none, .none }); + return func.finishAir(inst, .unreach, .{ operand, .none, .none }); } fn genVarDbgInfo( - self: Self, + func: Func, tag: Air.Inst.Tag, ty: Type, mcv: MCValue, name: [:0]const u8, ) !void { - const zcu = self.bin_file.comp.module.?; + const zcu = func.bin_file.comp.module.?; const is_ptr = switch (tag) { .dbg_var_ptr => true, .dbg_var_val => false, else => unreachable, }; - switch (self.debug_output) { + switch (func.debug_output) { .dwarf => |dw| { const loc: link.File.Dwarf.DeclState.DbgInfoLoc = switch (mcv) { .register => |reg| .{ .register = reg.dwarfLocOp() }, @@ -3819,47 +4527,47 @@ fn genVarDbgInfo( break :blk .nop; }, }; - try dw.genVarDbgInfo(name, ty, zcu.funcOwnerDeclIndex(self.func_index), is_ptr, loc); + try dw.genVarDbgInfo(name, ty, zcu.funcOwnerDeclIndex(func.func_index), is_ptr, loc); }, .plan9 => {}, .none => {}, } } -fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { - const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; - const cond = try self.resolveInst(pl_op.operand); - const cond_ty = self.typeOf(pl_op.operand); - const extra = self.air.extraData(Air.CondBr, pl_op.payload); - const then_body: []const Air.Inst.Index = @ptrCast(self.air.extra[extra.end..][0..extra.data.then_body_len]); - const else_body: []const Air.Inst.Index = @ptrCast(self.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]); - const liveness_cond_br = self.liveness.getCondBr(inst); +fn airCondBr(func: *Func, inst: Air.Inst.Index) !void { + const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; + const cond = try 
func.resolveInst(pl_op.operand); + const cond_ty = func.typeOf(pl_op.operand); + const extra = func.air.extraData(Air.CondBr, pl_op.payload); + const then_body: []const Air.Inst.Index = @ptrCast(func.air.extra[extra.end..][0..extra.data.then_body_len]); + const else_body: []const Air.Inst.Index = @ptrCast(func.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]); + const liveness_cond_br = func.liveness.getCondBr(inst); // If the condition dies here in this condbr instruction, process // that death now instead of later as this has an effect on // whether it needs to be spilled in the branches - if (self.liveness.operandDies(inst, 0)) { - if (pl_op.operand.toIndex()) |op_inst| try self.processDeath(op_inst); + if (func.liveness.operandDies(inst, 0)) { + if (pl_op.operand.toIndex()) |op_inst| try func.processDeath(op_inst); } - self.scope_generation += 1; - const state = try self.saveState(); - const reloc = try self.condBr(cond_ty, cond); + func.scope_generation += 1; + const state = try func.saveState(); + const reloc = try func.condBr(cond_ty, cond); - for (liveness_cond_br.then_deaths) |death| try self.processDeath(death); - try self.genBody(then_body); - try self.restoreState(state, &.{}, .{ + for (liveness_cond_br.then_deaths) |death| try func.processDeath(death); + try func.genBody(then_body); + try func.restoreState(state, &.{}, .{ .emit_instructions = false, .update_tracking = true, .resurrect = true, .close_scope = true, }); - self.performReloc(reloc); + func.performReloc(reloc); - for (liveness_cond_br.else_deaths) |death| try self.processDeath(death); - try self.genBody(else_body); - try self.restoreState(state, &.{}, .{ + for (liveness_cond_br.else_deaths) |death| try func.processDeath(death); + try func.genBody(else_body); + try func.restoreState(state, &.{}, .{ .emit_instructions = false, .update_tracking = true, .resurrect = true, @@ -3867,13 +4575,13 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { }); // We already took care of pl_op.operand earlier, so there's nothing left to do. 
-    self.finishAirBookkeeping();
+    func.finishAirBookkeeping();
 }
 
-fn condBr(self: *Self, cond_ty: Type, condition: MCValue) !Mir.Inst.Index {
-    const cond_reg = try self.copyToTmpRegister(cond_ty, condition);
+fn condBr(func: *Func, cond_ty: Type, condition: MCValue) !Mir.Inst.Index {
+    const cond_reg = try func.copyToTmpRegister(cond_ty, condition);
 
-    return try self.addInst(.{
+    return try func.addInst(.{
         .tag = .beq,
         .ops = .rr_inst,
         .data = .{
@@ -3886,168 +4594,249 @@ fn condBr(self: *Self, cond_ty: Type, condition: MCValue) !Mir.Inst.Index {
     });
 }
 
-fn airIsNull(self: *Self, inst: Air.Inst.Index) !void {
-    const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
-    const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: {
-        const operand = try self.resolveInst(un_op);
-        break :result try self.isNull(operand);
-    };
-    return self.finishAir(inst, result, .{ un_op, .none, .none });
-}
+fn isNull(func: *Func, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MCValue {
+    const zcu = func.bin_file.comp.module.?;
+    const pl_ty = opt_ty.optionalChild(zcu);
 
-fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void {
-    const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
-    const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: {
-        const operand_ptr = try self.resolveInst(un_op);
-        const operand: MCValue = blk: {
-            if (self.reuseOperand(inst, un_op, 0, operand_ptr)) {
-                // The MCValue that holds the pointer can be re-used as the value.
-                break :blk operand_ptr;
-            } else {
-                break :blk try self.allocRegOrMem(inst, true);
+    const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(zcu))
+        .{ .off = 0, .ty = if (pl_ty.isSlice(zcu)) pl_ty.slicePtrFieldType(zcu) else pl_ty }
+    else
+        .{ .off = @intCast(pl_ty.abiSize(zcu)), .ty = Type.bool };
+
+    const return_mcv = try func.allocRegOrMem(func.typeOfIndex(inst), inst, true);
+    assert(return_mcv == .register); // should not be larger than 8 bytes
+    const return_reg = return_mcv.register;
+
+    switch (opt_mcv) {
+        .none,
+        .unreach,
+        .dead,
+        .undef,
+        .immediate,
+        .register_pair,
+        .register_offset,
+        .lea_frame,
+        .lea_symbol,
+        .reserved_frame,
+        .air_ref,
+        => return func.fail("TODO: isNull for {}", .{opt_mcv}),
+
+        .register => |opt_reg| {
+            if (some_info.off == 0) {
+                _ = try func.addInst(.{
+                    .tag = .pseudo,
+                    .ops = .pseudo_compare,
+                    .data = .{
+                        .compare = .{
+                            .op = .eq,
+                            .rd = return_reg,
+                            .rs1 = opt_reg,
+                            .rs2 = try func.copyToTmpRegister(
+                                some_info.ty,
+                                .{ .immediate = 0 },
+                            ),
+                            .ty = Type.bool,
+                        },
+                    },
+                });
+                return return_mcv;
            }
-        };
-        try self.load(operand, operand_ptr, self.typeOf(un_op));
-        break :result try self.isNull(operand);
-    };
-    return self.finishAir(inst, result, .{ un_op, .none, .none });
+            assert(some_info.ty.ip_index == .bool_type);
+            const opt_abi_size: u32 = @intCast(opt_ty.abiSize(zcu));
+            _ = opt_abi_size;
+            return func.fail("TODO: isNull some_info.off != 0 register", .{});
+        },
+
+        .load_frame => {
+            const opt_reg = try func.copyToTmpRegister(
+                some_info.ty,
+                opt_mcv.address().offset(some_info.off).deref(),
+            );
+            const opt_reg_lock = func.register_manager.lockRegAssumeUnused(opt_reg);
+            defer func.register_manager.unlockReg(opt_reg_lock);
+
+            _ = try func.addInst(.{
+                .tag = .pseudo,
+                .ops = .pseudo_compare,
+                .data = .{
+                    .compare = .{
+                        .op = .eq,
+                        .rd = return_reg,
+                        .rs1 = opt_reg,
+                        .rs2 = try func.copyToTmpRegister(
+                            some_info.ty,
+                            .{ .immediate = 0 },
+                        ),
+                        .ty = Type.bool,
+                    },
+                },
+            });
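// How .pseudo_compare with .op = .eq lowers is a Lower.zig detail not shown
// in this diff; a plausible RV64 expansion, since there is no
// register-register set-if-equal instruction, is
//   xor  rd, rs1, rs2
//   seqz rd, rd        // seqz is sltiu rd, rd, 1
// which leaves rd = 1 exactly when the loaded "some" bits are zero, i.e.
// when the optional is null.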
+ return return_mcv; + }, + + else => return func.fail("TODO: isNull {}", .{opt_mcv}), + } } -fn isNull(self: *Self, operand: MCValue) !MCValue { +fn airIsNull(func: *Func, inst: Air.Inst.Index) !void { + const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; + const operand = try func.resolveInst(un_op); + const ty = func.typeOf(un_op); + const result = try func.isNull(inst, ty, operand); + return func.finishAir(inst, result, .{ un_op, .none, .none }); +} + +fn airIsNullPtr(func: *Func, inst: Air.Inst.Index) !void { + const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; + const operand = try func.resolveInst(un_op); _ = operand; - // Here you can specialize this instruction if it makes sense to, otherwise the default - // will call isNonNull and invert the result. - return self.fail("TODO call isNonNull and invert the result", .{}); + const ty = func.typeOf(un_op); + _ = ty; + + if (true) return func.fail("TODO: airIsNullPtr", .{}); + + return func.finishAir(inst, .unreach, .{ un_op, .none, .none }); } -fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void { - const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { - const operand = try self.resolveInst(un_op); - break :result try self.isNonNull(operand); - }; - return self.finishAir(inst, result, .{ un_op, .none, .none }); +fn airIsNonNull(func: *Func, inst: Air.Inst.Index) !void { + const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; + const operand = try func.resolveInst(un_op); + const ty = func.typeOf(un_op); + const result = try func.isNull(inst, ty, operand); + assert(result == .register); + + _ = try func.addInst(.{ + .tag = .pseudo, + .ops = .pseudo_not, + .data = .{ + .rr = .{ + .rd = result.register, + .rs = result.register, + }, + }, + }); + + return func.finishAir(inst, result, .{ un_op, .none, .none }); } -fn isNonNull(self: *Self, operand: MCValue) !MCValue { +fn airIsNonNullPtr(func: *Func, inst: Air.Inst.Index) !void { + const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; + const operand = try func.resolveInst(un_op); _ = operand; - // Here you can specialize this instruction if it makes sense to, otherwise the default - // will call isNull and invert the result. 
- return self.fail("TODO call isNull and invert the result", .{}); + const ty = func.typeOf(un_op); + _ = ty; + + if (true) return func.fail("TODO: airIsNonNullPtr", .{}); + + return func.finishAir(inst, .unreach, .{ un_op, .none, .none }); } -fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void { - const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { - const operand_ptr = try self.resolveInst(un_op); +fn airIsErr(func: *Func, inst: Air.Inst.Index) !void { + const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { + const operand = try func.resolveInst(un_op); + const operand_ty = func.typeOf(un_op); + break :result try func.isErr(inst, operand_ty, operand); + }; + return func.finishAir(inst, result, .{ un_op, .none, .none }); +} + +fn airIsErrPtr(func: *Func, inst: Air.Inst.Index) !void { + const zcu = func.bin_file.comp.module.?; + const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { + const operand_ptr = try func.resolveInst(un_op); const operand: MCValue = blk: { - if (self.reuseOperand(inst, un_op, 0, operand_ptr)) { + if (func.reuseOperand(inst, un_op, 0, operand_ptr)) { // The MCValue that holds the pointer can be re-used as the value. break :blk operand_ptr; } else { - break :blk try self.allocRegOrMem(inst, true); + break :blk try func.allocRegOrMem(func.typeOfIndex(inst), inst, true); } }; - try self.load(operand, operand_ptr, self.typeOf(un_op)); - break :result try self.isNonNull(operand); - }; - return self.finishAir(inst, result, .{ un_op, .none, .none }); -} - -fn airIsErr(self: *Self, inst: Air.Inst.Index) !void { - const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { - const operand = try self.resolveInst(un_op); - const operand_ty = self.typeOf(un_op); - break :result try self.isErr(inst, operand_ty, operand); - }; - return self.finishAir(inst, result, .{ un_op, .none, .none }); -} - -fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void { - const zcu = self.bin_file.comp.module.?; - const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { - const operand_ptr = try self.resolveInst(un_op); - const operand: MCValue = blk: { - if (self.reuseOperand(inst, un_op, 0, operand_ptr)) { - // The MCValue that holds the pointer can be re-used as the value. - break :blk operand_ptr; - } else { - break :blk try self.allocRegOrMem(inst, true); - } - }; - try self.load(operand, operand_ptr, self.typeOf(un_op)); - const operand_ptr_ty = self.typeOf(un_op); + try func.load(operand, operand_ptr, func.typeOf(un_op)); + const operand_ptr_ty = func.typeOf(un_op); const operand_ty = operand_ptr_ty.childType(zcu); - break :result try self.isErr(inst, operand_ty, operand); + break :result try func.isErr(inst, operand_ty, operand); }; - return self.finishAir(inst, result, .{ un_op, .none, .none }); + return func.finishAir(inst, result, .{ un_op, .none, .none }); } /// Generates a compare instruction which will indicate if `eu_mcv` is an error. /// /// Result is in the return register. 
-fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, eu_ty: Type, eu_mcv: MCValue) !MCValue { - const zcu = self.bin_file.comp.module.?; +fn isErr(func: *Func, maybe_inst: ?Air.Inst.Index, eu_ty: Type, eu_mcv: MCValue) !MCValue { + _ = maybe_inst; + const zcu = func.bin_file.comp.module.?; const err_ty = eu_ty.errorUnionSet(zcu); if (err_ty.errorSetIsEmpty(zcu)) return MCValue{ .immediate = 0 }; // always false + const err_off: u31 = @intCast(errUnionErrorOffset(eu_ty.errorUnionPayload(zcu), zcu)); - _ = maybe_inst; - - const err_off = errUnionErrorOffset(eu_ty.errorUnionPayload(zcu), zcu); + const return_reg, const return_lock = try func.allocReg(.int); + defer func.register_manager.unlockReg(return_lock); switch (eu_mcv) { .register => |reg| { - const eu_lock = self.register_manager.lockReg(reg); - defer if (eu_lock) |lock| self.register_manager.unlockReg(lock); + const eu_lock = func.register_manager.lockReg(reg); + defer if (eu_lock) |lock| func.register_manager.unlockReg(lock); - const return_reg = try self.copyToTmpRegister(eu_ty, eu_mcv); - const return_lock = self.register_manager.lockRegAssumeUnused(return_reg); - defer self.register_manager.unlockReg(return_lock); - - var return_mcv: MCValue = .{ .register = return_reg }; + try func.genCopy(eu_ty, .{ .register = return_reg }, eu_mcv); if (err_off > 0) { - return_mcv = try self.binOp( + try func.genBinOp( .shr, - return_mcv, + .{ .register = return_reg }, eu_ty, .{ .immediate = @as(u6, @intCast(err_off * 8)) }, Type.u8, + return_reg, ); } - return_mcv = try self.binOp( + try func.genBinOp( .cmp_neq, - return_mcv, - Type.u16, + .{ .register = return_reg }, + Type.anyerror, .{ .immediate = 0 }, - Type.u16, + Type.u8, + return_reg, ); - - return return_mcv; }, - else => return self.fail("TODO implement isErr for {}", .{eu_mcv}), + .load_frame => |frame_addr| { + try func.genBinOp( + .cmp_neq, + .{ .load_frame = .{ + .index = frame_addr.index, + .off = frame_addr.off + err_off, + } }, + Type.anyerror, + .{ .immediate = 0 }, + Type.anyerror, + return_reg, + ); + }, + else => return func.fail("TODO implement isErr for {}", .{eu_mcv}), } + + return .{ .register = return_reg }; } -fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void { - const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { - const operand = try self.resolveInst(un_op); - const ty = self.typeOf(un_op); - break :result try self.isNonErr(inst, ty, operand); +fn airIsNonErr(func: *Func, inst: Air.Inst.Index) !void { + const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { + const operand = try func.resolveInst(un_op); + const ty = func.typeOf(un_op); + break :result try func.isNonErr(inst, ty, operand); }; - return self.finishAir(inst, result, .{ un_op, .none, .none }); + return func.finishAir(inst, result, .{ un_op, .none, .none }); } -fn isNonErr(self: *Self, inst: Air.Inst.Index, eu_ty: Type, eu_mcv: MCValue) !MCValue { - const is_err_res = try self.isErr(inst, eu_ty, eu_mcv); +fn isNonErr(func: *Func, inst: Air.Inst.Index, eu_ty: Type, eu_mcv: MCValue) !MCValue { + const is_err_res = try func.isErr(inst, eu_ty, eu_mcv); switch (is_err_res) { .register => |reg| { - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = .pseudo, .ops = .pseudo_not, .data = .{ @@ -4068,53 +4857,53 @@ fn isNonErr(self: *Self, inst: Air.Inst.Index, eu_ty: Type, eu_mcv: MCValue) !MC 
} } -fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void { - const zcu = self.bin_file.comp.module.?; - const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { - const operand_ptr = try self.resolveInst(un_op); +fn airIsNonErrPtr(func: *Func, inst: Air.Inst.Index) !void { + const zcu = func.bin_file.comp.module.?; + const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { + const operand_ptr = try func.resolveInst(un_op); const operand: MCValue = blk: { - if (self.reuseOperand(inst, un_op, 0, operand_ptr)) { + if (func.reuseOperand(inst, un_op, 0, operand_ptr)) { // The MCValue that holds the pointer can be re-used as the value. break :blk operand_ptr; } else { - break :blk try self.allocRegOrMem(inst, true); + break :blk try func.allocRegOrMem(func.typeOfIndex(inst), inst, true); } }; - const operand_ptr_ty = self.typeOf(un_op); + const operand_ptr_ty = func.typeOf(un_op); const operand_ty = operand_ptr_ty.childType(zcu); - try self.load(operand, operand_ptr, self.typeOf(un_op)); - break :result try self.isNonErr(inst, operand_ty, operand); + try func.load(operand, operand_ptr, func.typeOf(un_op)); + break :result try func.isNonErr(inst, operand_ty, operand); }; - return self.finishAir(inst, result, .{ un_op, .none, .none }); + return func.finishAir(inst, result, .{ un_op, .none, .none }); } -fn airLoop(self: *Self, inst: Air.Inst.Index) !void { +fn airLoop(func: *Func, inst: Air.Inst.Index) !void { // A loop is a setup to be able to jump back to the beginning. - const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; - const loop = self.air.extraData(Air.Block, ty_pl.payload); - const body: []const Air.Inst.Index = @ptrCast(self.air.extra[loop.end..][0..loop.data.body_len]); + const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; + const loop = func.air.extraData(Air.Block, ty_pl.payload); + const body: []const Air.Inst.Index = @ptrCast(func.air.extra[loop.end..][0..loop.data.body_len]); - self.scope_generation += 1; - const state = try self.saveState(); + func.scope_generation += 1; + const state = try func.saveState(); - const jmp_target: Mir.Inst.Index = @intCast(self.mir_instructions.len); - try self.genBody(body); - try self.restoreState(state, &.{}, .{ + const jmp_target: Mir.Inst.Index = @intCast(func.mir_instructions.len); + try func.genBody(body); + try func.restoreState(state, &.{}, .{ .emit_instructions = true, .update_tracking = false, .resurrect = false, .close_scope = true, }); - _ = try self.jump(jmp_target); + _ = try func.jump(jmp_target); - self.finishAirBookkeeping(); + func.finishAirBookkeeping(); } -/// Send control flow to the `index` of `self.code`. -fn jump(self: *Self, index: Mir.Inst.Index) !Mir.Inst.Index { - return self.addInst(.{ +/// Send control flow to the `index` of `func.code`. 
+fn jump(func: *Func, index: Mir.Inst.Index) !Mir.Inst.Index { + return func.addInst(.{ .tag = .pseudo, .ops = .pseudo_j, .data = .{ @@ -4123,113 +4912,200 @@ fn jump(self: *Self, index: Mir.Inst.Index) !Mir.Inst.Index { }); } -fn airBlock(self: *Self, inst: Air.Inst.Index) !void { - const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; - const extra = self.air.extraData(Air.Block, ty_pl.payload); - try self.lowerBlock(inst, @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len])); +fn airBlock(func: *Func, inst: Air.Inst.Index) !void { + const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; + const extra = func.air.extraData(Air.Block, ty_pl.payload); + try func.lowerBlock(inst, @ptrCast(func.air.extra[extra.end..][0..extra.data.body_len])); } -fn lowerBlock(self: *Self, inst: Air.Inst.Index, body: []const Air.Inst.Index) !void { +fn lowerBlock(func: *Func, inst: Air.Inst.Index, body: []const Air.Inst.Index) !void { // A block is a setup to be able to jump to the end. - const inst_tracking_i = self.inst_tracking.count(); - self.inst_tracking.putAssumeCapacityNoClobber(inst, InstTracking.init(.unreach)); + const inst_tracking_i = func.inst_tracking.count(); + func.inst_tracking.putAssumeCapacityNoClobber(inst, InstTracking.init(.unreach)); - self.scope_generation += 1; - try self.blocks.putNoClobber(self.gpa, inst, .{ .state = self.initRetroactiveState() }); - const liveness = self.liveness.getBlock(inst); + func.scope_generation += 1; + try func.blocks.putNoClobber(func.gpa, inst, .{ .state = func.initRetroactiveState() }); + const liveness = func.liveness.getBlock(inst); // TODO emit debug info lexical block - try self.genBody(body); + try func.genBody(body); - var block_data = self.blocks.fetchRemove(inst).?; - defer block_data.value.deinit(self.gpa); + var block_data = func.blocks.fetchRemove(inst).?; + defer block_data.value.deinit(func.gpa); if (block_data.value.relocs.items.len > 0) { - try self.restoreState(block_data.value.state, liveness.deaths, .{ + try func.restoreState(block_data.value.state, liveness.deaths, .{ .emit_instructions = false, .update_tracking = true, .resurrect = true, .close_scope = true, }); - for (block_data.value.relocs.items) |reloc| self.performReloc(reloc); + for (block_data.value.relocs.items) |reloc| func.performReloc(reloc); } - if (std.debug.runtime_safety) assert(self.inst_tracking.getIndex(inst).? == inst_tracking_i); - const tracking = &self.inst_tracking.values()[inst_tracking_i]; - if (self.liveness.isUnused(inst)) try tracking.die(self, inst); - self.getValueIfFree(tracking.short, inst); - self.finishAirBookkeeping(); + if (std.debug.runtime_safety) assert(func.inst_tracking.getIndex(inst).? 
== inst_tracking_i);
+    const tracking = &func.inst_tracking.values()[inst_tracking_i];
+    if (func.liveness.isUnused(inst)) try tracking.die(func, inst);
+    func.getValueIfFree(tracking.short, inst);
+    func.finishAirBookkeeping();
 }
 
-fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {
-    const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
-    const condition = pl_op.operand;
-    _ = condition;
-    return self.fail("TODO airSwitch for {}", .{self.target.cpu.arch});
-    // return self.finishAir(inst, .dead, .{ condition, .none, .none });
+fn airSwitchBr(func: *Func, inst: Air.Inst.Index) !void {
+    const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
+    const condition = try func.resolveInst(pl_op.operand);
+    const condition_ty = func.typeOf(pl_op.operand);
+    const switch_br = func.air.extraData(Air.SwitchBr, pl_op.payload);
+    var extra_index: usize = switch_br.end;
+    var case_i: u32 = 0;
+    const liveness = try func.liveness.getSwitchBr(func.gpa, inst, switch_br.data.cases_len + 1);
+    defer func.gpa.free(liveness.deaths);
+
+    // If the condition dies here in this switch instruction, process
+    // that death now instead of later as this has an effect on
+    // whether it needs to be spilled in the branches
+    if (func.liveness.operandDies(inst, 0)) {
+        if (pl_op.operand.toIndex()) |op_inst| try func.processDeath(op_inst);
+    }
+
+    func.scope_generation += 1;
+    const state = try func.saveState();
+
+    while (case_i < switch_br.data.cases_len) : (case_i += 1) {
+        const case = func.air.extraData(Air.SwitchBr.Case, extra_index);
+        const items: []const Air.Inst.Ref =
+            @ptrCast(func.air.extra[case.end..][0..case.data.items_len]);
+        const case_body: []const Air.Inst.Index =
+            @ptrCast(func.air.extra[case.end + items.len ..][0..case.data.body_len]);
+        extra_index = case.end + items.len + case_body.len;
+
+        const relocs = try func.gpa.alloc(Mir.Inst.Index, items.len);
+        defer func.gpa.free(relocs);
+
+        for (items, relocs, 0..) |item, *reloc, i| {
+            // switch case items are comptime-known, so each one resolves to an immediate
+            const item_mcv = try func.resolveInst(item);
+
+            const cmp_reg, const cmp_lock = try func.allocReg(.int);
+            defer func.register_manager.unlockReg(cmp_lock);
+
+            try func.genBinOp(
+                .cmp_neq,
+                condition,
+                condition_ty,
+                item_mcv,
+                condition_ty,
+                cmp_reg,
+            );
+
+            if (i == relocs.len - 1) {
+                _ = try func.addInst(.{
+                    .tag = .pseudo,
+                    .ops = .pseudo_not,
+                    .data = .{ .rr = .{
+                        .rd = cmp_reg,
+                        .rs = cmp_reg,
+                    } },
+                });
+            }
+
+            reloc.* = try func.condBr(condition_ty, .{ .register = cmp_reg });
+        }
+
+        for (liveness.deaths[case_i]) |operand| try func.processDeath(operand);
+
+        for (relocs[0 ..
relocs.len - 1]) |reloc| func.performReloc(reloc); + try func.genBody(case_body); + try func.restoreState(state, &.{}, .{ + .emit_instructions = false, + .update_tracking = true, + .resurrect = true, + .close_scope = true, + }); + + func.performReloc(relocs[relocs.len - 1]); + } + + if (switch_br.data.else_body_len > 0) { + const else_body: []const Air.Inst.Index = + @ptrCast(func.air.extra[extra_index..][0..switch_br.data.else_body_len]); + + const else_deaths = liveness.deaths.len - 1; + for (liveness.deaths[else_deaths]) |operand| try func.processDeath(operand); + + try func.genBody(else_body); + try func.restoreState(state, &.{}, .{ + .emit_instructions = false, + .update_tracking = true, + .resurrect = true, + .close_scope = true, + }); + } + + // We already took care of pl_op.operand earlier, so there's nothing left to do + func.finishAirBookkeeping(); } -fn performReloc(self: *Self, inst: Mir.Inst.Index) void { - const tag = self.mir_instructions.items(.tag)[inst]; - const ops = self.mir_instructions.items(.ops)[inst]; - const target: Mir.Inst.Index = @intCast(self.mir_instructions.len); +fn performReloc(func: *Func, inst: Mir.Inst.Index) void { + const tag = func.mir_instructions.items(.tag)[inst]; + const ops = func.mir_instructions.items(.ops)[inst]; + const target: Mir.Inst.Index = @intCast(func.mir_instructions.len); switch (tag) { .bne, .beq, - => self.mir_instructions.items(.data)[inst].b_type.inst = target, - .jal => self.mir_instructions.items(.data)[inst].j_type.inst = target, + => func.mir_instructions.items(.data)[inst].b_type.inst = target, + .jal => func.mir_instructions.items(.data)[inst].j_type.inst = target, .pseudo => switch (ops) { - .pseudo_j => self.mir_instructions.items(.data)[inst].inst = target, + .pseudo_j => func.mir_instructions.items(.data)[inst].inst = target, else => std.debug.panic("TODO: performReloc {s}", .{@tagName(ops)}), }, else => std.debug.panic("TODO: performReloc {s}", .{@tagName(tag)}), } } -fn airBr(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; - const br = self.air.instructions.items(.data)[@intFromEnum(inst)].br; +fn airBr(func: *Func, inst: Air.Inst.Index) !void { + const mod = func.bin_file.comp.module.?; + const br = func.air.instructions.items(.data)[@intFromEnum(inst)].br; - const block_ty = self.typeOfIndex(br.block_inst); + const block_ty = func.typeOfIndex(br.block_inst); const block_unused = - !block_ty.hasRuntimeBitsIgnoreComptime(mod) or self.liveness.isUnused(br.block_inst); - const block_tracking = self.inst_tracking.getPtr(br.block_inst).?; - const block_data = self.blocks.getPtr(br.block_inst).?; + !block_ty.hasRuntimeBitsIgnoreComptime(mod) or func.liveness.isUnused(br.block_inst); + const block_tracking = func.inst_tracking.getPtr(br.block_inst).?; + const block_data = func.blocks.getPtr(br.block_inst).?; const first_br = block_data.relocs.items.len == 0; const block_result = result: { if (block_unused) break :result .none; - if (!first_br) try self.getValue(block_tracking.short, null); - const src_mcv = try self.resolveInst(br.operand); + if (!first_br) try func.getValue(block_tracking.short, null); + const src_mcv = try func.resolveInst(br.operand); - if (self.reuseOperandAdvanced(inst, br.operand, 0, src_mcv, br.block_inst)) { + if (func.reuseOperandAdvanced(inst, br.operand, 0, src_mcv, br.block_inst)) { if (first_br) break :result src_mcv; - try self.getValue(block_tracking.short, br.block_inst); + try func.getValue(block_tracking.short, br.block_inst); // .long = .none to 
avoid merging operand and block result stack frames. const current_tracking: InstTracking = .{ .long = .none, .short = src_mcv }; - try current_tracking.materializeUnsafe(self, br.block_inst, block_tracking.*); - for (current_tracking.getRegs()) |src_reg| self.register_manager.freeReg(src_reg); + try current_tracking.materializeUnsafe(func, br.block_inst, block_tracking.*); + for (current_tracking.getRegs()) |src_reg| func.register_manager.freeReg(src_reg); break :result block_tracking.short; } - const dst_mcv = if (first_br) try self.allocRegOrMem(br.block_inst, true) else dst: { - try self.getValue(block_tracking.short, br.block_inst); + const dst_mcv = if (first_br) try func.allocRegOrMem(block_ty, br.block_inst, true) else dst: { + try func.getValue(block_tracking.short, br.block_inst); break :dst block_tracking.short; }; - try self.genCopy(block_ty, dst_mcv, try self.resolveInst(br.operand)); + try func.genCopy(block_ty, dst_mcv, try func.resolveInst(br.operand)); break :result dst_mcv; }; // Process operand death so that it is properly accounted for in the State below. - if (self.liveness.operandDies(inst, 0)) { - if (br.operand.toIndex()) |op_inst| try self.processDeath(op_inst); + if (func.liveness.operandDies(inst, 0)) { + if (br.operand.toIndex()) |op_inst| try func.processDeath(op_inst); } if (first_br) { block_tracking.* = InstTracking.init(block_result); - try self.saveRetroactiveState(&block_data.state); - } else try self.restoreState(block_data.state, &.{}, .{ + try func.saveRetroactiveState(&block_data.state); + } else try func.restoreState(block_data.state, &.{}, .{ .emit_instructions = true, .update_tracking = false, .resurrect = false, @@ -4238,49 +5114,88 @@ fn airBr(self: *Self, inst: Air.Inst.Index) !void { // Emit a jump with a relocation. It will be patched up after the block ends. 
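+    // (A sketch of the mechanism, for orientation: `jump(undefined)` emits a `j`
+    // whose target is garbage for now and returns its Mir index; once the block
+    // has finished lowering, each index saved in `block_data.relocs` is handed to
+    // `performReloc`, which patches the target to the first Mir instruction
+    // emitted after the block.)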
// Leave the jump offset undefined - const jmp_reloc = try self.jump(undefined); - try block_data.relocs.append(self.gpa, jmp_reloc); + const jmp_reloc = try func.jump(undefined); + try block_data.relocs.append(func.gpa, jmp_reloc); // Stop tracking block result without forgetting tracking info - try self.freeValue(block_tracking.short); + try func.freeValue(block_tracking.short); - self.finishAirBookkeeping(); + func.finishAirBookkeeping(); } -fn airBoolOp(self: *Self, inst: Air.Inst.Index) !void { - const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const air_tags = self.air.instructions.items(.tag); - _ = air_tags; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement boolean operations for {}", .{self.target.cpu.arch}); - return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); +fn airBoolOp(func: *Func, inst: Air.Inst.Index) !void { + const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; + const tag: Air.Inst.Tag = func.air.instructions.items(.tag)[@intFromEnum(inst)]; + + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { + const lhs = try func.resolveInst(bin_op.lhs); + const rhs = try func.resolveInst(bin_op.rhs); + const lhs_ty = Type.bool; + const rhs_ty = Type.bool; + + const lhs_reg, const lhs_lock = try func.promoteReg(lhs_ty, lhs); + defer if (lhs_lock) |lock| func.register_manager.unlockReg(lock); + + const rhs_reg, const rhs_lock = try func.promoteReg(rhs_ty, rhs); + defer if (rhs_lock) |lock| func.register_manager.unlockReg(lock); + + const result_reg, const result_lock = try func.allocReg(.int); + defer func.register_manager.unlockReg(result_lock); + + _ = try func.addInst(.{ + .tag = if (tag == .bool_or) .@"or" else .@"and", + .ops = .rrr, + .data = .{ .r_type = .{ + .rd = result_reg, + .rs1 = lhs_reg, + .rs2 = rhs_reg, + } }, + }); + + // safety truncate + if (func.wantSafety()) { + _ = try func.addInst(.{ + .tag = .andi, + .ops = .rri, + .data = .{ .i_type = .{ + .rd = result_reg, + .rs1 = result_reg, + .imm12 = Immediate.s(1), + } }, + }); + } + + break :result .{ .register = result_reg }; + }; + return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } -fn airAsm(self: *Self, inst: Air.Inst.Index) !void { - const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; - const extra = self.air.extraData(Air.Asm, ty_pl.payload); +fn airAsm(func: *Func, inst: Air.Inst.Index) !void { + const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; + const extra = func.air.extraData(Air.Asm, ty_pl.payload); const is_volatile = @as(u1, @truncate(extra.data.flags >> 31)) != 0; const clobbers_len: u31 = @truncate(extra.data.flags); var extra_i: usize = extra.end; const outputs: []const Air.Inst.Ref = - @ptrCast(self.air.extra[extra_i..][0..extra.data.outputs_len]); + @ptrCast(func.air.extra[extra_i..][0..extra.data.outputs_len]); extra_i += outputs.len; - const inputs: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra_i..][0..extra.data.inputs_len]); + const inputs: []const Air.Inst.Ref = @ptrCast(func.air.extra[extra_i..][0..extra.data.inputs_len]); extra_i += inputs.len; log.debug("airAsm input: {any}", .{inputs}); - const dead = !is_volatile and self.liveness.isUnused(inst); + const dead = !is_volatile and func.liveness.isUnused(inst); const result: MCValue = if (dead) .unreach else result: { if (outputs.len > 1) { - return self.fail("TODO implement codegen for asm with more 
than 1 output", .{}); + return func.fail("TODO implement codegen for asm with more than 1 output", .{}); } const output_constraint: ?[]const u8 = for (outputs) |output| { if (output != .none) { - return self.fail("TODO implement codegen for non-expr asm", .{}); + return func.fail("TODO implement codegen for non-expr asm", .{}); } - const extra_bytes = std.mem.sliceAsBytes(self.air.extra[extra_i..]); - const constraint = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra[extra_i..]), 0); + const extra_bytes = std.mem.sliceAsBytes(func.air.extra[extra_i..]); + const constraint = std.mem.sliceTo(std.mem.sliceAsBytes(func.air.extra[extra_i..]), 0); const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0); // This equation accounts for the fact that even if we have exactly 4 bytes // for the string, we still use the next u32 for the null terminator. @@ -4290,7 +5205,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { } else null; for (inputs) |input| { - const input_bytes = std.mem.sliceAsBytes(self.air.extra[extra_i..]); + const input_bytes = std.mem.sliceAsBytes(func.air.extra[extra_i..]); const constraint = std.mem.sliceTo(input_bytes, 0); const name = std.mem.sliceTo(input_bytes[constraint.len + 1 ..], 0); // This equation accounts for the fact that even if we have exactly 4 bytes @@ -4298,21 +5213,21 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { extra_i += (constraint.len + name.len + (2 + 3)) / 4; if (constraint.len < 3 or constraint[0] != '{' or constraint[constraint.len - 1] != '}') { - return self.fail("unrecognized asm input constraint: '{s}'", .{constraint}); + return func.fail("unrecognized asm input constraint: '{s}'", .{constraint}); } const reg_name = constraint[1 .. constraint.len - 1]; const reg = parseRegName(reg_name) orelse - return self.fail("unrecognized register: '{s}'", .{reg_name}); + return func.fail("unrecognized register: '{s}'", .{reg_name}); - const arg_mcv = try self.resolveInst(input); - try self.register_manager.getReg(reg, null); - try self.genSetReg(self.typeOf(input), reg, arg_mcv); + const arg_mcv = try func.resolveInst(input); + try func.register_manager.getReg(reg, null); + try func.genSetReg(func.typeOf(input), reg, arg_mcv); } { var clobber_i: u32 = 0; while (clobber_i < clobbers_len) : (clobber_i += 1) { - const clobber = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra[extra_i..]), 0); + const clobber = std.mem.sliceTo(std.mem.sliceAsBytes(func.air.extra[extra_i..]), 0); // This equation accounts for the fact that even if we have exactly 4 bytes // for the string, we still use the next u32 for the null terminator. 
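+            // e.g. a 3-byte clobber fits its NUL terminator in the same u32
+            // (3 / 4 + 1 == 1), while a 4-byte clobber spills the terminator
+            // into a second u32 (4 / 4 + 1 == 2).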
extra_i += clobber.len / 4 + 1; @@ -4320,31 +5235,31 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { if (std.mem.eql(u8, clobber, "") or std.mem.eql(u8, clobber, "memory")) { // nothing really to do } else { - try self.register_manager.getReg(parseRegName(clobber) orelse - return self.fail("invalid clobber: '{s}'", .{clobber}), null); + try func.register_manager.getReg(parseRegName(clobber) orelse + return func.fail("invalid clobber: '{s}'", .{clobber}), null); } } } - const asm_source = std.mem.sliceAsBytes(self.air.extra[extra_i..])[0..extra.data.source_len]; + const asm_source = std.mem.sliceAsBytes(func.air.extra[extra_i..])[0..extra.data.source_len]; if (std.meta.stringToEnum(Mir.Inst.Tag, asm_source)) |tag| { - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = tag, .ops = .none, .data = undefined, }); } else { - return self.fail("TODO: asm_source {s}", .{asm_source}); + return func.fail("TODO: asm_source {s}", .{asm_source}); } if (output_constraint) |output| { if (output.len < 4 or output[0] != '=' or output[1] != '{' or output[output.len - 1] != '}') { - return self.fail("unrecognized asm output constraint: '{s}'", .{output}); + return func.fail("unrecognized asm output constraint: '{s}'", .{output}); } const reg_name = output[2 .. output.len - 1]; const reg = parseRegName(reg_name) orelse - return self.fail("unrecognized register: '{s}'", .{reg_name}); + return func.fail("unrecognized register: '{s}'", .{reg_name}); break :result .{ .register = reg }; } else { break :result .{ .none = {} }; @@ -4363,29 +5278,29 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { } if (buf_index + inputs.len > buf.len) break :simple; @memcpy(buf[buf_index..][0..inputs.len], inputs); - return self.finishAir(inst, result, buf); + return func.finishAir(inst, result, buf); } - var bt = self.liveness.iterateBigTomb(inst); - for (outputs) |output| if (output != .none) try self.feed(&bt, output); - for (inputs) |input| try self.feed(&bt, input); - return self.finishAirResult(inst, result); + var bt = func.liveness.iterateBigTomb(inst); + for (outputs) |output| if (output != .none) try func.feed(&bt, output); + for (inputs) |input| try func.feed(&bt, input); + return func.finishAirResult(inst, result); } -/// Sets the value without any modifications to register allocation metadata or stack allocation metadata. -fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) !void { - const zcu = self.bin_file.comp.module.?; +/// Sets the value of `dst_mcv` to the value of `src_mcv`. 
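+/// Roughly (and not exhaustively): register destinations are forwarded to
+/// `genSetReg`, while `indirect` and `load_frame` destinations are forwarded to
+/// `genSetMem`. For example, a hypothetical call (assuming `a0` is free)
+///     try func.genCopy(Type.usize, .{ .register = .a0 }, .{ .immediate = 42 });
+/// lowers to a single `addi a0, zero, 42` through the immediate path of `genSetReg`.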
+fn genCopy(func: *Func, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) !void { + const zcu = func.bin_file.comp.module.?; // There isn't anything to store if (dst_mcv == .none) return; if (!dst_mcv.isMutable()) { // panic so we can see the trace - return self.fail("tried to genCopy immutable: {s}", .{@tagName(dst_mcv)}); + return std.debug.panic("tried to genCopy immutable: {s}", .{@tagName(dst_mcv)}); } switch (dst_mcv) { - .register => |reg| return self.genSetReg(ty, reg, src_mcv), - .register_offset => |dst_reg_off| try self.genSetReg(ty, dst_reg_off.reg, switch (src_mcv) { + .register => |reg| return func.genSetReg(ty, reg, src_mcv), + .register_offset => |dst_reg_off| try func.genSetReg(ty, dst_reg_off.reg, switch (src_mcv) { .none, .unreach, .dead, @@ -4396,49 +5311,49 @@ fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) !void { .register_offset, => src_mcv.offset(-dst_reg_off.off), else => .{ .register_offset = .{ - .reg = try self.copyToTmpRegister(ty, src_mcv), + .reg = try func.copyToTmpRegister(ty, src_mcv), .off = -dst_reg_off.off, } }, }), - .indirect => |ro| { - const src_reg = try self.copyToTmpRegister(ty, src_mcv); - - _ = try self.addInst(.{ - .tag = .pseudo, - .ops = .pseudo_store_rm, - .data = .{ .rm = .{ - .r = src_reg, - .m = .{ - .base = .{ .reg = ro.reg }, - .mod = .{ .rm = .{ .disp = ro.off, .size = self.memSize(ty) } }, - }, - } }, - }); - }, - .load_frame => |frame| return self.genSetStack(ty, frame, src_mcv), - .memory => return self.fail("TODO: genCopy memory", .{}), + .indirect => |reg_off| try func.genSetMem( + .{ .reg = reg_off.reg }, + reg_off.off, + ty, + src_mcv, + ), + .load_frame => |frame_addr| try func.genSetMem( + .{ .frame = frame_addr.index }, + frame_addr.off, + ty, + src_mcv, + ), + .memory => return func.fail("TODO: genCopy memory", .{}), .register_pair => |dst_regs| { - const src_info: ?struct { addr_reg: Register, addr_lock: RegisterLock } = switch (src_mcv) { + const src_info: ?struct { addr_reg: Register, addr_lock: ?RegisterLock } = switch (src_mcv) { .register_pair, .memory, .indirect, .load_frame => null, .load_symbol => src: { - const src_addr_reg, const src_addr_lock = try self.allocReg(); - errdefer self.register_manager.unlockReg(src_addr_lock); + const src_addr_reg, const src_addr_lock = try func.promoteReg(Type.usize, src_mcv.address()); + errdefer if (src_addr_lock) |lock| func.register_manager.unlockReg(lock); - try self.genSetReg(Type.usize, src_addr_reg, src_mcv.address()); break :src .{ .addr_reg = src_addr_reg, .addr_lock = src_addr_lock }; }, - .air_ref => |src_ref| return self.genCopy( + .air_ref => |src_ref| return func.genCopy( ty, dst_mcv, - try self.resolveInst(src_ref), + try func.resolveInst(src_ref), ), - else => unreachable, + else => return func.fail("genCopy register_pair src: {}", .{src_mcv}), + }; + + defer if (src_info) |info| { + if (info.addr_lock) |lock| { + func.register_manager.unlockReg(lock); + } }; - defer if (src_info) |info| self.register_manager.unlockReg(info.addr_lock); var part_disp: i32 = 0; - for (dst_regs, try self.splitType(ty), 0..) |dst_reg, dst_ty, part_i| { - try self.genSetReg(dst_ty, dst_reg, switch (src_mcv) { + for (dst_regs, try func.splitType(ty), 0..) 
|dst_reg, dst_ty, part_i| { + try func.genSetReg(dst_ty, dst_reg, switch (src_mcv) { .register_pair => |src_regs| .{ .register = src_regs[part_i] }, .memory, .indirect, .load_frame => src_mcv.address().offset(part_disp).deref(), .load_symbol => .{ .indirect = .{ @@ -4450,113 +5365,31 @@ fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) !void { part_disp += @intCast(dst_ty.abiSize(zcu)); } }, - else => return self.fail("TODO: genCopy to {s} from {s}", .{ @tagName(dst_mcv), @tagName(src_mcv) }), - } -} - -fn genSetStack( - self: *Self, - ty: Type, - frame: FrameAddr, - src_mcv: MCValue, -) InnerError!void { - const zcu = self.bin_file.comp.module.?; - const abi_size: u32 = @intCast(ty.abiSize(zcu)); - - switch (src_mcv) { - .none => return, - .dead => unreachable, - .undef => { - if (!self.wantSafety()) return; - try self.genSetStack(ty, frame, .{ .immediate = 0xaaaaaaaaaaaaaaaa }); - }, - .immediate, - .lea_frame, - => { - // TODO: remove this lock in favor of a copyToTmpRegister when we load 64 bit immediates with - // a register allocation. - const reg, const reg_lock = try self.allocReg(); - defer self.register_manager.unlockReg(reg_lock); - - try self.genSetReg(ty, reg, src_mcv); - - return self.genSetStack(ty, frame, .{ .register = reg }); - }, - .register => |reg| { - switch (abi_size) { - 1, 2, 4, 8 => { - _ = try self.addInst(.{ - .tag = .pseudo, - .ops = .pseudo_store_rm, - .data = .{ .rm = .{ - .r = reg, - .m = .{ - .base = .{ .frame = frame.index }, - .mod = .{ - .rm = .{ - .size = self.memSize(ty), - .disp = frame.off, - }, - }, - }, - } }, - }); - }, - else => unreachable, // register can hold a max of 8 bytes - } - }, - .register_pair => |pair| { - var part_disp: i32 = frame.off; - for (try self.splitType(ty), pair) |src_ty, src_reg| { - try self.genSetStack( - src_ty, - .{ .index = frame.index, .off = part_disp }, - .{ .register = src_reg }, - ); - part_disp += @intCast(src_ty.abiSize(zcu)); - } - }, - .load_frame, - .indirect, - .load_symbol, - => { - if (abi_size <= 8) { - const reg = try self.copyToTmpRegister(ty, src_mcv); - return self.genSetStack(ty, frame, .{ .register = reg }); - } - - try self.genInlineMemcpy( - .{ .lea_frame = frame }, - src_mcv.address(), - .{ .immediate = abi_size }, - ); - }, - .air_ref => |ref| try self.genSetStack(ty, frame, try self.resolveInst(ref)), - else => return self.fail("TODO: genSetStack {s}", .{@tagName(src_mcv)}), + else => return func.fail("TODO: genCopy to {s} from {s}", .{ @tagName(dst_mcv), @tagName(src_mcv) }), } } fn genInlineMemcpy( - self: *Self, + func: *Func, dst_ptr: MCValue, src_ptr: MCValue, len: MCValue, ) !void { - const regs = try self.register_manager.allocRegs(4, .{null} ** 4, tp); - const locks = self.register_manager.lockRegsAssumeUnused(4, regs); - defer for (locks) |lock| self.register_manager.unlockReg(lock); + const regs = try func.register_manager.allocRegs(4, .{null} ** 4, abi.Registers.Integer.temporary); + const locks = func.register_manager.lockRegsAssumeUnused(4, regs); + defer for (locks) |lock| func.register_manager.unlockReg(lock); const count = regs[0]; const tmp = regs[1]; const src = regs[2]; const dst = regs[3]; - try self.genSetReg(Type.usize, count, len); - try self.genSetReg(Type.usize, src, src_ptr); - try self.genSetReg(Type.usize, dst, dst_ptr); + try func.genSetReg(Type.usize, count, len); + try func.genSetReg(Type.usize, src, src_ptr); + try func.genSetReg(Type.usize, dst, dst_ptr); // lb tmp, 0(src) - const first_inst = try self.addInst(.{ + const first_inst = try 
func.addInst(.{ .tag = .lb, .ops = .rri, .data = .{ @@ -4569,7 +5402,7 @@ fn genInlineMemcpy( }); // sb tmp, 0(dst) - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = .sb, .ops = .rri, .data = .{ @@ -4582,7 +5415,7 @@ fn genInlineMemcpy( }); // dec count by 1 - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = .addi, .ops = .rri, .data = .{ @@ -4595,12 +5428,12 @@ fn genInlineMemcpy( }); // branch if count is 0 - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = .beq, .ops = .rr_inst, .data = .{ .b_type = .{ - .inst = @intCast(self.mir_instructions.len + 4), // points after the last inst + .inst = @intCast(func.mir_instructions.len + 4), // points after the last inst .rs1 = count, .rs2 = .zero, }, @@ -4608,7 +5441,7 @@ fn genInlineMemcpy( }); // increment the pointers - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = .addi, .ops = .rri, .data = .{ @@ -4620,7 +5453,7 @@ fn genInlineMemcpy( }, }); - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = .addi, .ops = .rri, .data = .{ @@ -4633,7 +5466,85 @@ fn genInlineMemcpy( }); // jump back to start of loop - _ = try self.addInst(.{ + _ = try func.addInst(.{ + .tag = .pseudo, + .ops = .pseudo_j, + .data = .{ .inst = first_inst }, + }); +} + +fn genInlineMemset( + func: *Func, + dst_ptr: MCValue, + src_value: MCValue, + len: MCValue, +) !void { + const regs = try func.register_manager.allocRegs(3, .{null} ** 3, abi.Registers.Integer.temporary); + const locks = func.register_manager.lockRegsAssumeUnused(3, regs); + defer for (locks) |lock| func.register_manager.unlockReg(lock); + + const count = regs[0]; + const src = regs[1]; + const dst = regs[2]; + + try func.genSetReg(Type.usize, count, len); + try func.genSetReg(Type.usize, src, src_value); + try func.genSetReg(Type.usize, dst, dst_ptr); + + // sb src, 0(dst) + const first_inst = try func.addInst(.{ + .tag = .sb, + .ops = .rri, + .data = .{ + .i_type = .{ + .rd = dst, + .rs1 = src, + .imm12 = Immediate.s(0), + }, + }, + }); + + // decrement count by 1 + _ = try func.addInst(.{ + .tag = .addi, + .ops = .rri, + .data = .{ + .i_type = .{ + .rd = count, + .rs1 = count, + .imm12 = Immediate.s(-1), + }, + }, + }); + + // branch if count is 0 + _ = try func.addInst(.{ + .tag = .beq, + .ops = .rr_inst, + .data = .{ + .b_type = .{ + .inst = @intCast(func.mir_instructions.len + 3), // points after the last inst; only one pointer increment here, unlike memcpy + .rs1 = count, + .rs2 = .zero, + }, + }, + }); + + // increment the destination pointer + _ = try func.addInst(.{ + .tag = .addi, + .ops = .rri, + .data = .{ + .i_type = .{ + .rd = dst, + .rs1 = dst, + .imm12 = Immediate.s(1), + }, + }, + }); + + // jump back to start of loop + _ = try func.addInst(.{ .tag = .pseudo, .ops = .pseudo_j, .data = .{ @@ -4643,25 +5554,29 @@ fn genInlineMemcpy( } } /// Sets the value of `src_mcv` into `reg`. Assumes you have a lock on it. -fn genSetReg(self: *Self, ty: Type, reg: Register, src_mcv: MCValue) InnerError!void { - const zcu = self.bin_file.comp.module.?; +fn genSetReg(func: *Func, ty: Type, reg: Register, src_mcv: MCValue) InnerError!void { + const zcu = func.bin_file.comp.module.?; const abi_size: u32 = @intCast(ty.abiSize(zcu)); - if (abi_size > 8) return self.fail("tried to set reg with size {}", .{abi_size}); + if (abi_size > 8) return std.debug.panic("tried to set reg with size {}", .{abi_size}); + + const dst_reg_class = reg.class(); switch (src_mcv) { .dead => unreachable, .unreach, .none => return, // Nothing to do. 
.undef => { - if (!self.wantSafety()) + if (!func.wantSafety()) return; // The already existing value will do just fine. // Write the debug undefined value. - return self.genSetReg(ty, reg, .{ .immediate = 0xaaaaaaaaaaaaaaaa }); + return func.genSetReg(ty, reg, .{ .immediate = 0xaaaaaaaaaaaaaaaa }); }, .immediate => |unsigned_x| { + assert(dst_reg_class == .int); + const x: i64 = @bitCast(unsigned_x); if (math.minInt(i12) <= x and x <= math.maxInt(i12)) { - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = .addi, .ops = .rri, .data = .{ .i_type = .{ @@ -4675,7 +5590,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_mcv: MCValue) InnerError! const carry: i32 = if (lo12 < 0) 1 else 0; const hi20: i20 = @truncate((x >> 12) +% carry); - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = .lui, .ops = .ri, .data = .{ .u_type = .{ @@ -4683,7 +5598,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_mcv: MCValue) InnerError! .imm20 = Immediate.s(hi20), } }, }); - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = .addi, .ops = .rri, .data = .{ .i_type = .{ @@ -4696,27 +5611,27 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_mcv: MCValue) InnerError! // TODO: use a more advanced myriad seq to do this without a reg. // see: https://github.com/llvm/llvm-project/blob/081a66ffacfe85a37ff775addafcf3371e967328/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp#L224 - const temp, const temp_lock = try self.allocReg(); - defer self.register_manager.unlockReg(temp_lock); + const temp, const temp_lock = try func.allocReg(.int); + defer func.register_manager.unlockReg(temp_lock); const lo32: i32 = @truncate(x); const carry: i32 = if (lo32 < 0) 1 else 0; const hi32: i32 = @truncate((x >> 32) +% carry); - try self.genSetReg(Type.i32, temp, .{ .immediate = @bitCast(@as(i64, lo32)) }); - try self.genSetReg(Type.i32, reg, .{ .immediate = @bitCast(@as(i64, hi32)) }); + try func.genSetReg(Type.i32, temp, .{ .immediate = @bitCast(@as(i64, lo32)) }); + try func.genSetReg(Type.i32, reg, .{ .immediate = @bitCast(@as(i64, hi32)) }); - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = .slli, .ops = .rri, .data = .{ .i_type = .{ .rd = reg, .rs1 = reg, - .imm12 = Immediate.s(32), + .imm12 = Immediate.u(32), } }, }); - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = .add, .ops = .rrr, .data = .{ .r_type = .{ @@ -4732,8 +5647,15 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_mcv: MCValue) InnerError! if (src_reg.id() == reg.id()) return; - // mov reg, src_reg - _ = try self.addInst(.{ + const src_reg_class = src_reg.class(); + + if (src_reg_class == .float and dst_reg_class == .int) { + // to move from float -> int, we use FMV.X.W + return func.fail("TODO: genSetReg float -> int", .{}); + } + + // mv reg, src_reg + _ = try func.addInst(.{ .tag = .pseudo, .ops = .pseudo_mv, .data = .{ .rr = .{ @@ -4742,22 +5664,9 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_mcv: MCValue) InnerError! 
} }, }); }, - .register_pair => |pair| try self.genSetReg(ty, reg, .{ .register = pair[0] }), - .memory => |addr| { - try self.genSetReg(ty, reg, .{ .immediate = addr }); - - _ = try self.addInst(.{ - .tag = .ld, - .ops = .rri, - .data = .{ .i_type = .{ - .rd = reg, - .rs1 = reg, - .imm12 = Immediate.s(0), - } }, - }); - }, + .register_pair => return func.fail("genSetReg should we allow reg -> reg_pair?", .{}), .load_frame => |frame| { - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = .pseudo, .ops = .pseudo_load_rm, .data = .{ .rm = .{ @@ -4765,47 +5674,73 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_mcv: MCValue) InnerError! .m = .{ .base = .{ .frame = frame.index }, .mod = .{ - .rm = .{ - .size = self.memSize(ty), - .disp = frame.off, - }, + .size = func.memSize(ty), + .unsigned = ty.isUnsignedInt(zcu), + .disp = frame.off, }, }, } }, }); }, - .lea_frame => |frame| { - _ = try self.addInst(.{ + .memory => |addr| { + try func.genSetReg(ty, reg, .{ .immediate = addr }); + + _ = try func.addInst(.{ + .tag = .ld, + .ops = .rri, + .data = .{ .i_type = .{ + .rd = reg, + .rs1 = reg, + .imm12 = Immediate.u(0), + } }, + }); + }, + .lea_frame, .register_offset => { + _ = try func.addInst(.{ .tag = .pseudo, .ops = .pseudo_lea_rm, .data = .{ .rm = .{ .r = reg, - .m = .{ - .base = .{ .frame = frame.index }, - .mod = .{ - .rm = .{ - .size = self.memSize(ty), - .disp = frame.off, + .m = switch (src_mcv) { + .register_offset => |reg_off| .{ + .base = .{ .reg = reg_off.reg }, + .mod = .{ + .size = func.memSize(ty), + .disp = reg_off.off, + .unsigned = false, }, }, + .lea_frame => |frame| .{ + .base = .{ .frame = frame.index }, + .mod = .{ + .size = func.memSize(ty), + .disp = frame.off, + .unsigned = false, + }, + }, + else => unreachable, }, } }, }); }, - .load_symbol => { - try self.genSetReg(ty, reg, src_mcv.address()); - try self.genSetReg(ty, reg, .{ .indirect = .{ .reg = reg } }); - }, .indirect => |reg_off| { + const float_class = dst_reg_class == .float; + const load_tag: Mir.Inst.Tag = switch (abi_size) { - 1 => .lb, - 2 => .lh, - 4 => .lw, - 8 => .ld, - else => return self.fail("TODO: genSetReg for size {d}", .{abi_size}), + 1 => if (float_class) + unreachable // Zig does not support 8-bit floats + else + .lb, + 2 => if (float_class) + return func.fail("TODO: genSetReg indirect 16-bit float", .{}) + else + .lh, + 4 => if (float_class) .flw else .lw, + 8 => if (float_class) .fld else .ld, + else => return std.debug.panic("TODO: genSetReg for size {d}", .{abi_size}), }; - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = load_tag, .ops = .rri, .data = .{ .i_type = .{ @@ -4818,54 +5753,183 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_mcv: MCValue) InnerError! 
.lea_symbol => |sym_off| { assert(sym_off.off == 0); - const atom_index = try self.symbolIndex(); + const atom_index = try func.symbolIndex(); - _ = try self.addInst(.{ + _ = try func.addInst(.{ .tag = .pseudo, .ops = .pseudo_load_symbol, - .data = .{ .payload = try self.addExtra(Mir.LoadSymbolPayload{ - .register = reg.id(), + .data = .{ .payload = try func.addExtra(Mir.LoadSymbolPayload{ + .register = reg.encodeId(), .atom_index = atom_index, .sym_index = sym_off.sym, }) }, }); }, - .air_ref => |ref| try self.genSetReg(ty, reg, try self.resolveInst(ref)), - else => return self.fail("TODO: genSetReg {s}", .{@tagName(src_mcv)}), + .load_symbol => { + const addr_reg, const addr_lock = try func.allocReg(.int); + defer func.register_manager.unlockReg(addr_lock); + + try func.genSetReg(ty, addr_reg, src_mcv.address()); + try func.genSetReg(ty, reg, .{ .indirect = .{ .reg = addr_reg } }); + }, + .air_ref => |ref| try func.genSetReg(ty, reg, try func.resolveInst(ref)), + else => return func.fail("TODO: genSetReg {s}", .{@tagName(src_mcv)}), } } -fn airIntFromPtr(self: *Self, inst: Air.Inst.Index) !void { - const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const result = result: { - const src_mcv = try self.resolveInst(un_op); - if (self.reuseOperand(inst, un_op, 0, src_mcv)) break :result src_mcv; - - const dst_mcv = try self.allocRegOrMem(inst, true); - const dst_ty = self.typeOfIndex(inst); - try self.genCopy(dst_ty, dst_mcv, src_mcv); - break :result dst_mcv; +fn genSetMem( + func: *Func, + base: Memory.Base, + disp: i32, + ty: Type, + src_mcv: MCValue, +) InnerError!void { + const mod = func.bin_file.comp.module.?; + const abi_size: u32 = @intCast(ty.abiSize(mod)); + const dst_ptr_mcv: MCValue = switch (base) { + .reg => |base_reg| .{ .register_offset = .{ .reg = base_reg, .off = disp } }, + .frame => |base_frame_index| .{ .lea_frame = .{ .index = base_frame_index, .off = disp } }, + .reloc => |base_symbol| .{ .lea_symbol = .{ .sym = base_symbol.sym_index, .off = disp } }, }; - return self.finishAir(inst, result, .{ un_op, .none, .none }); + switch (src_mcv) { + .none, + .unreach, + .dead, + .reserved_frame, + => unreachable, + .undef => try func.genInlineMemset( + dst_ptr_mcv, + src_mcv, + .{ .immediate = abi_size }, + ), + .register_offset, + .memory, + .indirect, + .load_frame, + .lea_frame, + .load_symbol, + .lea_symbol, + => switch (abi_size) { + 0 => {}, + 1, 2, 4, 8 => { + // no matter what type, it should use an integer register + const src_reg = try func.copyToTmpRegister(Type.usize, src_mcv); + const src_lock = func.register_manager.lockRegAssumeUnused(src_reg); + defer func.register_manager.unlockReg(src_lock); + + try func.genSetMem(base, disp, ty, .{ .register = src_reg }); + }, + else => try func.genInlineMemcpy( + dst_ptr_mcv, + src_mcv.address(), + .{ .immediate = abi_size }, + ), + }, + .register => |reg| { + const mem_size = switch (base) { + .frame => |base_fi| mem_size: { + assert(disp >= 0); + const frame_abi_size = func.frame_allocs.items(.abi_size)[@intFromEnum(base_fi)]; + const frame_spill_pad = func.frame_allocs.items(.spill_pad)[@intFromEnum(base_fi)]; + assert(frame_abi_size - frame_spill_pad - disp >= abi_size); + break :mem_size if (frame_abi_size - frame_spill_pad - disp == abi_size) + frame_abi_size + else + abi_size; + }, + else => abi_size, + }; + const src_size = math.ceilPowerOfTwoAssert(u32, abi_size); + const src_align = Alignment.fromNonzeroByteUnits(math.ceilPowerOfTwoAssert(u32, src_size)); + if (src_size > mem_size) { + 
const frame_index = try func.allocFrameIndex(FrameAlloc.init(.{ + .size = src_size, + .alignment = src_align, + })); + const frame_mcv: MCValue = .{ .load_frame = .{ .index = frame_index } }; + _ = try func.addInst(.{ + .tag = .pseudo, + .ops = .pseudo_store_rm, + .data = .{ .rm = .{ + .r = reg, + .m = .{ + .base = .{ .frame = frame_index }, + .mod = .{ + .size = Memory.Size.fromByteSize(src_size), + .unsigned = false, + }, + }, + } }, + }); + try func.genSetMem(base, disp, ty, frame_mcv); + try func.freeValue(frame_mcv); + } else _ = try func.addInst(.{ + .tag = .pseudo, + .ops = .pseudo_store_rm, + .data = .{ .rm = .{ + .r = reg, + .m = .{ + .base = base, + .mod = .{ + .size = func.memSize(ty), + .disp = disp, + .unsigned = false, + }, + }, + } }, + }); + }, + .register_pair => |src_regs| { + var part_disp: i32 = disp; + for (try func.splitType(ty), src_regs) |src_ty, src_reg| { + try func.genSetMem(base, part_disp, src_ty, .{ .register = src_reg }); + part_disp += @intCast(src_ty.abiSize(mod)); + } + }, + .immediate => { + // TODO: remove this lock in favor of a copyToTmpRegister when we load 64 bit immediates with + // a register allocation. + const reg, const reg_lock = try func.promoteReg(ty, src_mcv); + defer if (reg_lock) |lock| func.register_manager.unlockReg(lock); + + return func.genSetMem(base, disp, ty, .{ .register = reg }); + }, + .air_ref => |src_ref| try func.genSetMem(base, disp, ty, try func.resolveInst(src_ref)), + } } -fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { - const zcu = self.bin_file.comp.module.?; +fn airIntFromPtr(func: *Func, inst: Air.Inst.Index) !void { + const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; + const result = result: { + const src_mcv = try func.resolveInst(un_op); + const src_ty = func.typeOfIndex(inst); + if (func.reuseOperand(inst, un_op, 0, src_mcv)) break :result src_mcv; - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result = if (self.liveness.isUnused(inst)) .unreach else result: { - const src_mcv = try self.resolveInst(ty_op.operand); + const dst_mcv = try func.allocRegOrMem(src_ty, inst, true); + const dst_ty = func.typeOfIndex(inst); + try func.genCopy(dst_ty, dst_mcv, src_mcv); + break :result dst_mcv; + }; + return func.finishAir(inst, result, .{ un_op, .none, .none }); +} - const dst_ty = self.typeOfIndex(inst); - const src_ty = self.typeOf(ty_op.operand); +fn airBitCast(func: *Func, inst: Air.Inst.Index) !void { + const zcu = func.bin_file.comp.module.?; - const src_lock = if (src_mcv.getReg()) |reg| self.register_manager.lockReg(reg) else null; - defer if (src_lock) |lock| self.register_manager.unlockReg(lock); + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const result = if (func.liveness.isUnused(inst)) .unreach else result: { + const src_mcv = try func.resolveInst(ty_op.operand); + + const dst_ty = func.typeOfIndex(inst); + const src_ty = func.typeOf(ty_op.operand); + + const src_lock = if (src_mcv.getReg()) |reg| func.register_manager.lockReg(reg) else null; + defer if (src_lock) |lock| func.register_manager.unlockReg(lock); const dst_mcv = if (dst_ty.abiSize(zcu) <= src_ty.abiSize(zcu) and - self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) src_mcv else dst: { - const dst_mcv = try self.allocRegOrMem(inst, true); - try self.genCopy(switch (math.order(dst_ty.abiSize(zcu), src_ty.abiSize(zcu))) { + func.reuseOperand(inst, ty_op.operand, 0, src_mcv)) src_mcv else dst: { + const dst_mcv = try func.allocRegOrMem(dst_ty, 
inst, true); + try func.genCopy(switch (math.order(dst_ty.abiSize(zcu), src_ty.abiSize(zcu))) { .lt => dst_ty, .eq => if (!dst_mcv.isMemory() or src_mcv.isMemory()) dst_ty else src_ty, .gt => src_ty, @@ -4880,230 +5944,440 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { const bit_size = dst_ty.bitSize(zcu); if (abi_size * 8 <= bit_size) break :result dst_mcv; - return self.fail("TODO: airBitCast {} to {}", .{ src_ty.fmt(zcu), dst_ty.fmt(zcu) }); + return func.fail("TODO: airBitCast {} to {}", .{ src_ty.fmt(zcu), dst_ty.fmt(zcu) }); }; - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement airArrayToSlice for {}", .{ - self.target.cpu.arch, +fn airArrayToSlice(func: *Func, inst: Air.Inst.Index) !void { + const zcu = func.bin_file.comp.module.?; + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + + const slice_ty = func.typeOfIndex(inst); + const ptr_ty = func.typeOf(ty_op.operand); + const ptr = try func.resolveInst(ty_op.operand); + const array_ty = ptr_ty.childType(zcu); + const array_len = array_ty.arrayLen(zcu); + + const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(slice_ty, zcu)); + try func.genSetMem(.{ .frame = frame_index }, 0, ptr_ty, ptr); + try func.genSetMem( + .{ .frame = frame_index }, + @intCast(ptr_ty.abiSize(zcu)), + Type.usize, + .{ .immediate = array_len }, + ); + + const result = MCValue{ .load_frame = .{ .index = frame_index } }; + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); +} + +fn airFloatFromInt(func: *Func, inst: Air.Inst.Index) !void { + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement airFloatFromInt for {}", .{ + func.target.cpu.arch, }); - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airFloatFromInt(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement airFloatFromInt for {}", .{ - self.target.cpu.arch, +fn airIntFromFloat(func: *Func, inst: Air.Inst.Index) !void { + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement airIntFromFloat for {}", .{ + func.target.cpu.arch, }); - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airIntFromFloat(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement airIntFromFloat for {}", .{ - self.target.cpu.arch, - }); - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); -} - -fn airCmpxchg(self: *Self, inst: Air.Inst.Index) !void { - const ty_pl = 
self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; - const extra = self.air.extraData(Air.Block, ty_pl.payload); +fn airCmpxchg(func: *Func, inst: Air.Inst.Index) !void { + const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; + const extra = func.air.extraData(Air.Block, ty_pl.payload); _ = extra; - return self.fail("TODO implement airCmpxchg for {}", .{ - self.target.cpu.arch, + return func.fail("TODO implement airCmpxchg for {}", .{ + func.target.cpu.arch, }); - // return self.finishAir(inst, result, .{ extra.ptr, extra.expected_value, extra.new_value }); + // return func.finishAir(inst, result, .{ extra.ptr, extra.expected_value, extra.new_value }); } -fn airAtomicRmw(self: *Self, inst: Air.Inst.Index) !void { +fn airAtomicRmw(func: *Func, inst: Air.Inst.Index) !void { _ = inst; - return self.fail("TODO implement airCmpxchg for {}", .{self.target.cpu.arch}); + return func.fail("TODO implement airAtomicRmw for {}", .{func.target.cpu.arch}); } -fn airAtomicLoad(self: *Self, inst: Air.Inst.Index) !void { +fn airAtomicLoad(func: *Func, inst: Air.Inst.Index) !void { _ = inst; - return self.fail("TODO implement airAtomicLoad for {}", .{self.target.cpu.arch}); + return func.fail("TODO implement airAtomicLoad for {}", .{func.target.cpu.arch}); } -fn airAtomicStore(self: *Self, inst: Air.Inst.Index, order: std.builtin.AtomicOrder) !void { +fn airAtomicStore(func: *Func, inst: Air.Inst.Index, order: std.builtin.AtomicOrder) !void { _ = inst; _ = order; - return self.fail("TODO implement airAtomicStore for {}", .{self.target.cpu.arch}); + return func.fail("TODO implement airAtomicStore for {}", .{func.target.cpu.arch}); } -fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void { - _ = inst; - if (safety) { - // TODO if the value is undef, write 0xaa bytes to dest - } else { - // TODO if the value is undef, don't lower this instruction +fn airMemset(func: *Func, inst: Air.Inst.Index, safety: bool) !void { + const zcu = func.bin_file.comp.module.?; + const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; + + result: { + if (!safety and (try func.resolveInst(bin_op.rhs)) == .undef) break :result; + + const dst_ptr = try func.resolveInst(bin_op.lhs); + const dst_ptr_ty = func.typeOf(bin_op.lhs); + const dst_ptr_lock: ?RegisterLock = switch (dst_ptr) { + .register => |reg| func.register_manager.lockRegAssumeUnused(reg), + else => null, + }; + defer if (dst_ptr_lock) |lock| func.register_manager.unlockReg(lock); + + const src_val = try func.resolveInst(bin_op.rhs); + const elem_ty = func.typeOf(bin_op.rhs); + const src_val_lock: ?RegisterLock = switch (src_val) { + .register => |reg| func.register_manager.lockRegAssumeUnused(reg), + else => null, + }; + defer if (src_val_lock) |lock| func.register_manager.unlockReg(lock); + + const elem_abi_size: u31 = @intCast(elem_ty.abiSize(zcu)); + + if (elem_abi_size == 1) { + const ptr: MCValue = switch (dst_ptr_ty.ptrSize(zcu)) { + // TODO: this only handles slices stored in the stack + .Slice => dst_ptr, + .One => dst_ptr, + .C, .Many => unreachable, + }; + const len: MCValue = switch (dst_ptr_ty.ptrSize(zcu)) { + // TODO: this only handles slices stored in the stack + .Slice => dst_ptr.address().offset(8).deref(), + .One => .{ .immediate = dst_ptr_ty.childType(zcu).arrayLen(zcu) }, + .C, .Many => unreachable, + }; + const len_lock: ?RegisterLock = switch (len) { + .register => |reg| func.register_manager.lockRegAssumeUnused(reg), + else => null, + }; + defer if (len_lock) |lock| 
func.register_manager.unlockReg(lock); + + try func.genInlineMemset(ptr, src_val, len); + break :result; + } + + // Store the first element, and then rely on memcpy copying forwards. + // Length zero requires a runtime check - so we handle arrays specially + // here to elide it. + switch (dst_ptr_ty.ptrSize(zcu)) { + .Slice => return func.fail("TODO: airMemset Slices", .{}), + .One => { + const elem_ptr_ty = try zcu.singleMutPtrType(elem_ty); + + const len = dst_ptr_ty.childType(zcu).arrayLen(zcu); + + assert(len != 0); // prevented by Sema + try func.store(dst_ptr, src_val, elem_ptr_ty, elem_ty); + + const second_elem_ptr_reg, const second_elem_ptr_lock = try func.allocReg(.int); + defer func.register_manager.unlockReg(second_elem_ptr_lock); + + const second_elem_ptr_mcv: MCValue = .{ .register = second_elem_ptr_reg }; + + try func.genSetReg(Type.usize, second_elem_ptr_reg, .{ .register_offset = .{ + .reg = try func.copyToTmpRegister(Type.usize, dst_ptr), + .off = elem_abi_size, + } }); + + const bytes_to_copy: MCValue = .{ .immediate = elem_abi_size * (len - 1) }; + try func.genInlineMemcpy(second_elem_ptr_mcv, dst_ptr, bytes_to_copy); + }, + .C, .Many => unreachable, + } } - return self.fail("TODO implement airMemset for {}", .{self.target.cpu.arch}); + return func.finishAir(inst, .unreach, .{ bin_op.lhs, bin_op.rhs, .none }); } -fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void { +fn airMemcpy(func: *Func, inst: Air.Inst.Index) !void { _ = inst; - return self.fail("TODO implement airMemcpy for {}", .{self.target.cpu.arch}); + return func.fail("TODO implement airMemcpy for {}", .{func.target.cpu.arch}); } -fn airTagName(self: *Self, inst: Air.Inst.Index) !void { - const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const operand = try self.resolveInst(un_op); - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else { +fn airTagName(func: *Func, inst: Air.Inst.Index) !void { + const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; + const operand = try func.resolveInst(un_op); + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else { _ = operand; - return self.fail("TODO implement airTagName for riscv64", .{}); + return func.fail("TODO implement airTagName for riscv64", .{}); }; - return self.finishAir(inst, result, .{ un_op, .none, .none }); + return func.finishAir(inst, result, .{ un_op, .none, .none }); } -fn airErrorName(self: *Self, inst: Air.Inst.Index) !void { - const zcu = self.bin_file.comp.module.?; - const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; +fn airErrorName(func: *Func, inst: Air.Inst.Index) !void { + const zcu = func.bin_file.comp.module.?; + const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const err_ty = self.typeOf(un_op); - const err_mcv = try self.resolveInst(un_op); + const err_ty = func.typeOf(un_op); + const err_mcv = try func.resolveInst(un_op); - const err_reg = try self.copyToTmpRegister(err_ty, err_mcv); - const err_lock = self.register_manager.lockRegAssumeUnused(err_reg); - defer self.register_manager.unlockReg(err_lock); + const err_reg = try func.copyToTmpRegister(err_ty, err_mcv); + const err_lock = func.register_manager.lockRegAssumeUnused(err_reg); + defer func.register_manager.unlockReg(err_lock); - const addr_reg, const addr_lock = try self.allocReg(); - defer self.register_manager.unlockReg(addr_lock); + const addr_reg, const addr_lock = try func.allocReg(.int); + defer 
func.register_manager.unlockReg(addr_lock); + // this is now the base address of the error name table const lazy_sym = link.File.LazySymbol.initDecl(.const_data, null, zcu); - if (self.bin_file.cast(link.File.Elf)) |elf_file| { + if (func.bin_file.cast(link.File.Elf)) |elf_file| { const sym_index = elf_file.zigObjectPtr().?.getOrCreateMetadataForLazySymbol(elf_file, lazy_sym) catch |err| - return self.fail("{s} creating lazy symbol", .{@errorName(err)}); + return func.fail("{s} creating lazy symbol", .{@errorName(err)}); const sym = elf_file.symbol(sym_index); - try self.genSetReg(Type.usize, addr_reg, .{ .load_symbol = .{ .sym = sym.esym_index } }); + try func.genSetReg(Type.usize, addr_reg, .{ .load_symbol = .{ .sym = sym.esym_index } }); } else { - return self.fail("TODO: riscv non-elf", .{}); + return func.fail("TODO: riscv non-elf", .{}); } - const start_reg, const start_lock = try self.allocReg(); - defer self.register_manager.unlockReg(start_lock); + const start_reg, const start_lock = try func.allocReg(.int); + defer func.register_manager.unlockReg(start_lock); - const end_reg, const end_lock = try self.allocReg(); - defer self.register_manager.unlockReg(end_lock); + const end_reg, const end_lock = try func.allocReg(.int); + defer func.register_manager.unlockReg(end_lock); - _ = start_reg; - _ = end_reg; + // const tmp_reg, const tmp_lock = try func.allocReg(.int); + // defer func.register_manager.unlockReg(tmp_lock); - return self.fail("TODO: airErrorName", .{}); + // we move the base address forward by the following formula: base + (errno * 16), + // since each error name table entry is a 16-byte slice (pointer + length) + + // shifting left by 4 is the same as multiplying by 16 + _ = try func.addInst(.{ + .tag = .slli, + .ops = .rri, + .data = .{ .i_type = .{ + .imm12 = Immediate.u(4), + .rd = err_reg, + .rs1 = err_reg, + } }, + }); + + _ = try func.addInst(.{ + .tag = .add, + .ops = .rrr, + .data = .{ .r_type = .{ + .rd = addr_reg, + .rs1 = addr_reg, + .rs2 = err_reg, + } }, + }); + + _ = try func.addInst(.{ + .tag = .pseudo, + .ops = .pseudo_load_rm, + .data = .{ + .rm = .{ + .r = start_reg, + .m = .{ + .base = .{ .reg = addr_reg }, + .mod = .{ .size = .dword, .unsigned = true }, + }, + }, + }, + }); + + _ = try func.addInst(.{ + .tag = .pseudo, + .ops = .pseudo_load_rm, + .data = .{ + .rm = .{ + .r = end_reg, + .m = .{ + .base = .{ .reg = addr_reg }, + // the name's length is stored in the second dword of the entry + .mod = .{ .size = .dword, .disp = 8, .unsigned = true }, + }, + }, + }, + }); + + const dst_mcv = try func.allocRegOrMem(func.typeOfIndex(inst), inst, false); + const frame = dst_mcv.load_frame; + try func.genSetMem( + .{ .frame = frame.index }, + frame.off, + Type.usize, + .{ .register = start_reg }, + ); + + try func.genSetMem( + .{ .frame = frame.index }, + frame.off + 8, + Type.usize, + .{ .register = end_reg }, + ); + + return func.finishAir(inst, dst_mcv, .{ un_op, .none, .none }); } -fn airSplat(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement airSplat for riscv64", .{}); - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); +fn airSplat(func: *Func, inst: Air.Inst.Index) !void { + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement airSplat for riscv64", .{}); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airSelect(self: *Self, inst: Air.Inst.Index) 
!void { - const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; - const extra = self.air.extraData(Air.Bin, pl_op.payload).data; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement airSelect for riscv64", .{}); - return self.finishAir(inst, result, .{ pl_op.operand, extra.lhs, extra.rhs }); +fn airSelect(func: *Func, inst: Air.Inst.Index) !void { + const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; + const extra = func.air.extraData(Air.Bin, pl_op.payload).data; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement airSelect for riscv64", .{}); + return func.finishAir(inst, result, .{ pl_op.operand, extra.lhs, extra.rhs }); } -fn airShuffle(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement airShuffle for riscv64", .{}); - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); +fn airShuffle(func: *Func, inst: Air.Inst.Index) !void { + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement airShuffle for riscv64", .{}); + return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airReduce(self: *Self, inst: Air.Inst.Index) !void { - const reduce = self.air.instructions.items(.data)[@intFromEnum(inst)].reduce; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement airReduce for riscv64", .{}); - return self.finishAir(inst, result, .{ reduce.operand, .none, .none }); +fn airReduce(func: *Func, inst: Air.Inst.Index) !void { + const reduce = func.air.instructions.items(.data)[@intFromEnum(inst)].reduce; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement airReduce for riscv64", .{}); + return func.finishAir(inst, result, .{ reduce.operand, .none, .none }); } -fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { - const zcu = self.bin_file.comp.module.?; - const result_ty = self.typeOfIndex(inst); +fn airAggregateInit(func: *Func, inst: Air.Inst.Index) !void { + const zcu = func.bin_file.comp.module.?; + const result_ty = func.typeOfIndex(inst); const len: usize = @intCast(result_ty.arrayLen(zcu)); - const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; - const elements: []const Air.Inst.Ref = @ptrCast(self.air.extra[ty_pl.payload..][0..len]); + const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; + const elements: []const Air.Inst.Ref = @ptrCast(func.air.extra[ty_pl.payload..][0..len]); + const result: MCValue = result: { switch (result_ty.zigTypeTag(zcu)) { .Struct => { - const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(result_ty, zcu)); + const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(result_ty, zcu)); + if (result_ty.containerLayout(zcu) == .@"packed") { + const struct_obj = zcu.typeToStruct(result_ty).?; + try func.genInlineMemset( + .{ .lea_frame = .{ .index = frame_index } }, + .{ .immediate = 0 }, + .{ .immediate = result_ty.abiSize(zcu) }, + ); - if (result_ty.containerLayout(zcu) == .@"packed") {} else for (elements, 0..) |elem, elem_i| { + for (elements, 0..) 
|elem, elem_i_usize| { + const elem_i: u32 = @intCast(elem_i_usize); + if ((try result_ty.structFieldValueComptime(zcu, elem_i)) != null) continue; + + const elem_ty = result_ty.structFieldType(elem_i, zcu); + const elem_bit_size: u32 = @intCast(elem_ty.bitSize(zcu)); + if (elem_bit_size > 64) { + return func.fail( + "TODO airAggregateInit implement packed structs with large fields", + .{}, + ); + } + + const elem_abi_size: u32 = @intCast(elem_ty.abiSize(zcu)); + const elem_abi_bits = elem_abi_size * 8; + const elem_off = zcu.structPackedFieldBitOffset(struct_obj, elem_i); + const elem_byte_off: i32 = @intCast(elem_off / elem_abi_bits * elem_abi_size); + const elem_bit_off = elem_off % elem_abi_bits; + const elem_mcv = try func.resolveInst(elem); + + _ = elem_byte_off; + _ = elem_bit_off; + + const elem_lock = switch (elem_mcv) { + .register => |reg| func.register_manager.lockReg(reg), + .immediate => |imm| lock: { + if (imm == 0) continue; + break :lock null; + }, + else => null, + }; + defer if (elem_lock) |lock| func.register_manager.unlockReg(lock); + + return func.fail("TODO: airAggregateInit packed structs", .{}); + } + } else for (elements, 0..) |elem, elem_i| { if ((try result_ty.structFieldValueComptime(zcu, elem_i)) != null) continue; const elem_ty = result_ty.structFieldType(elem_i, zcu); const elem_off: i32 = @intCast(result_ty.structFieldOffset(elem_i, zcu)); - const elem_mcv = try self.resolveInst(elem); + const elem_mcv = try func.resolveInst(elem); + try func.genSetMem(.{ .frame = frame_index }, elem_off, elem_ty, elem_mcv); + } + break :result .{ .load_frame = .{ .index = frame_index } }; + }, + .Array => { + const elem_ty = result_ty.childType(zcu); + const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(result_ty, zcu)); + const elem_size: u32 = @intCast(elem_ty.abiSize(zcu)); - const elem_frame: FrameAddr = .{ - .index = frame_index, - .off = elem_off, - }; - try self.genSetStack( + for (elements, 0..) 
|elem, elem_i| { + const elem_mcv = try func.resolveInst(elem); + const elem_off: i32 = @intCast(elem_size * elem_i); + try func.genSetMem( + .{ .frame = frame_index }, + elem_off, elem_ty, - elem_frame, elem_mcv, ); } + if (result_ty.sentinel(zcu)) |sentinel| try func.genSetMem( + .{ .frame = frame_index }, + @intCast(elem_size * elements.len), + elem_ty, + try func.genTypedValue(sentinel), + ); + break :result .{ .load_frame = .{ .index = frame_index } }; }, - else => return self.fail("TODO: airAggregateInit {}", .{result_ty.fmt(zcu)}), + else => return func.fail("TODO: airAggregateInit {}", .{result_ty.fmt(zcu)}), } - break :result .{ .register = .zero }; }; if (elements.len <= Liveness.bpi - 1) { var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1); @memcpy(buf[0..elements.len], elements); - return self.finishAir(inst, result, buf); + return func.finishAir(inst, result, buf); } - var bt = self.liveness.iterateBigTomb(inst); - for (elements) |elem| try self.feed(&bt, elem); - return self.finishAirResult(inst, result); + var bt = func.liveness.iterateBigTomb(inst); + for (elements) |elem| try func.feed(&bt, elem); + return func.finishAirResult(inst, result); } -fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void { - const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; - const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data; +fn airUnionInit(func: *Func, inst: Air.Inst.Index) !void { + const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; + const extra = func.air.extraData(Air.UnionInit, ty_pl.payload).data; _ = extra; - return self.fail("TODO implement airUnionInit for riscv64", .{}); - // return self.finishAir(inst, result, .{ extra.ptr, extra.expected_value, extra.new_value }); + return func.fail("TODO implement airUnionInit for riscv64", .{}); + // return func.finishAir(inst, result, .{ extra.ptr, extra.expected_value, extra.new_value }); } -fn airPrefetch(self: *Self, inst: Air.Inst.Index) !void { - const prefetch = self.air.instructions.items(.data)[@intFromEnum(inst)].prefetch; +fn airPrefetch(func: *Func, inst: Air.Inst.Index) !void { + const prefetch = func.air.instructions.items(.data)[@intFromEnum(inst)].prefetch; // TODO: RISC-V does have prefetch instruction variants. 
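+    // (prefetch.i, prefetch.r, and prefetch.w from the Zicbop extension; e.g.
+    // `prefetch.r 0(a0)` hints a read of the cache block at the address in a0.)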
// see here: https://raw.githubusercontent.com/riscv/riscv-CMOs/master/specifications/cmobase-v1.0.1.pdf - return self.finishAir(inst, .unreach, .{ prefetch.ptr, .none, .none }); + return func.finishAir(inst, .unreach, .{ prefetch.ptr, .none, .none }); } -fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { - const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; - const extra = self.air.extraData(Air.Bin, pl_op.payload).data; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else { - return self.fail("TODO implement airMulAdd for riscv64", .{}); +fn airMulAdd(func: *Func, inst: Air.Inst.Index) !void { + const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; + const extra = func.air.extraData(Air.Bin, pl_op.payload).data; + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else { + return func.fail("TODO implement airMulAdd for riscv64", .{}); }; - return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, pl_op.operand }); + return func.finishAir(inst, result, .{ extra.lhs, extra.rhs, pl_op.operand }); } -fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { - const zcu = self.bin_file.comp.module.?; +fn resolveInst(func: *Func, ref: Air.Inst.Ref) InnerError!MCValue { + const zcu = func.bin_file.comp.module.?; // If the type has no codegen bits, no need to store it. - const inst_ty = self.typeOf(ref); + const inst_ty = func.typeOf(ref); if (!inst_ty.hasRuntimeBits(zcu)) return .none; const mcv = if (ref.toIndex()) |inst| mcv: { - break :mcv self.inst_tracking.getPtr(inst).?.short; + break :mcv func.inst_tracking.getPtr(inst).?.short; } else mcv: { const ip_index = ref.toInterned().?; - const gop = try self.const_tracking.getOrPut(self.gpa, ip_index); + const gop = try func.const_tracking.getOrPut(func.gpa, ip_index); if (!gop.found_existing) gop.value_ptr.* = InstTracking.init( - try self.genTypedValue(Value.fromInterned(ip_index)), + try func.genTypedValue(Value.fromInterned(ip_index)), ); break :mcv gop.value_ptr.short; }; @@ -5111,21 +6385,21 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { return mcv; } -fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) *InstTracking { - const tracking = self.inst_tracking.getPtr(inst).?; +fn getResolvedInstValue(func: *Func, inst: Air.Inst.Index) *InstTracking { + const tracking = func.inst_tracking.getPtr(inst).?; return switch (tracking.short) { .none, .unreach, .dead => unreachable, else => tracking, }; } -fn genTypedValue(self: *Self, val: Value) InnerError!MCValue { - const zcu = self.bin_file.comp.module.?; +fn genTypedValue(func: *Func, val: Value) InnerError!MCValue { + const zcu = func.bin_file.comp.module.?; const result = try codegen.genTypedValue( - self.bin_file, - self.src_loc, + func.bin_file, + func.src_loc, val, - zcu.funcOwnerDeclIndex(self.func_index), + zcu.funcOwnerDeclIndex(func.func_index), ); const mcv: MCValue = switch (result) { .mcv => |mcv| switch (mcv) { @@ -5135,11 +6409,11 @@ fn genTypedValue(self: *Self, val: Value) InnerError!MCValue { .immediate => |imm| .{ .immediate = imm }, .memory => |addr| .{ .memory = addr }, .load_got, .load_direct, .load_tlv => { - return self.fail("TODO: genTypedValue {s}", .{@tagName(mcv)}); + return func.fail("TODO: genTypedValue {s}", .{@tagName(mcv)}); }, }, .fail => |msg| { - self.err_msg = msg; + func.err_msg = msg; return error.CodegenFail; }, }; @@ -5152,36 +6426,39 @@ const CallMCValues = struct { stack_byte_count: u31, stack_align: Alignment, - fn 
deinit(self: *CallMCValues, func: *Self) void { - func.gpa.free(self.args); - self.* = undefined; + fn deinit(call: *CallMCValues, func: *Func) void { + func.gpa.free(call.args); + call.* = undefined; } }; /// Caller must call `CallMCValues.deinit`. fn resolveCallingConventionValues( - self: *Self, + func: *Func, fn_info: InternPool.Key.FuncType, + var_args: []const Type, ) !CallMCValues { - const zcu = self.bin_file.comp.module.?; + const zcu = func.bin_file.comp.module.?; const ip = &zcu.intern_pool; - const param_types = try self.gpa.alloc(Type, fn_info.param_types.len); - defer self.gpa.free(param_types); + const param_types = try func.gpa.alloc(Type, fn_info.param_types.len + var_args.len); + defer func.gpa.free(param_types); for (param_types[0..fn_info.param_types.len], fn_info.param_types.get(ip)) |*dest, src| { dest.* = Type.fromInterned(src); } + for (param_types[fn_info.param_types.len..], var_args) |*param_ty, arg_ty| + param_ty.* = func.promoteVarArg(arg_ty); const cc = fn_info.cc; var result: CallMCValues = .{ - .args = try self.gpa.alloc(MCValue, param_types.len), + .args = try func.gpa.alloc(MCValue, param_types.len), // These undefined values must be populated before returning from this function. .return_value = undefined, .stack_byte_count = 0, .stack_align = undefined, }; - errdefer self.gpa.free(result.args); + errdefer func.gpa.free(result.args); const ret_ty = Type.fromInterned(fn_info.return_type); @@ -5193,7 +6470,7 @@ fn resolveCallingConventionValues( }, .C, .Unspecified => { if (result.args.len > 8) { - return self.fail("RISC-V calling convention does not support more than 8 arguments", .{}); + return func.fail("RISC-V calling convention does not support more than 8 arguments", .{}); } var ret_int_reg_i: u32 = 0; @@ -5209,21 +6486,29 @@ fn resolveCallingConventionValues( } else { var ret_tracking: [2]InstTracking = undefined; var ret_tracking_i: usize = 0; + var ret_float_reg_i: usize = 0; const classes = mem.sliceTo(&abi.classifySystem(ret_ty, zcu), .none); for (classes) |class| switch (class) { .integer => { - const ret_int_reg = abi.function_ret_regs[ret_int_reg_i]; + const ret_int_reg = abi.Registers.Integer.function_ret_regs[ret_int_reg_i]; ret_int_reg_i += 1; ret_tracking[ret_tracking_i] = InstTracking.init(.{ .register = ret_int_reg }); ret_tracking_i += 1; }, + .float => { + const ret_float_reg = abi.Registers.Float.function_ret_regs[ret_float_reg_i]; + ret_float_reg_i += 1; + + ret_tracking[ret_tracking_i] = InstTracking.init(.{ .register = ret_float_reg }); + ret_tracking_i += 1; + }, .memory => { - const ret_int_reg = abi.function_ret_regs[ret_int_reg_i]; + const ret_int_reg = abi.Registers.Integer.function_ret_regs[ret_int_reg_i]; ret_int_reg_i += 1; - const ret_indirect_reg = abi.function_arg_regs[param_int_reg_i]; + const ret_indirect_reg = abi.Registers.Integer.function_arg_regs[param_int_reg_i]; param_int_reg_i += 1; ret_tracking[ret_tracking_i] = .{ @@ -5232,11 +6517,11 @@ fn resolveCallingConventionValues( }; ret_tracking_i += 1; }, - else => return self.fail("TODO: C calling convention return class {}", .{class}), + else => return func.fail("TODO: C calling convention return class {}", .{class}), }; result.return_value = switch (ret_tracking_i) { - else => return self.fail("ty {} took {} tracking return indices", .{ ret_ty.fmt(zcu), ret_tracking_i }), + else => return func.fail("ty {} took {} tracking return indices", .{ ret_ty.fmt(zcu), ret_tracking_i }), 1 => ret_tracking[0], 2 => InstTracking.init(.{ .register_pair = .{ 
ret_tracking[0].short.register, ret_tracking[1].short.register, @@ -5244,8 +6529,14 @@ fn resolveCallingConventionValues( }; } + var param_float_reg_i: usize = 0; + for (param_types, result.args) |ty, *arg| { - assert(ty.hasRuntimeBitsIgnoreComptime(zcu)); + if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) { + assert(cc == .Unspecified); + arg.* = .none; + continue; + } var arg_mcv: [2]MCValue = undefined; var arg_mcv_i: usize = 0; @@ -5254,7 +6545,7 @@ fn resolveCallingConventionValues( for (classes) |class| switch (class) { .integer => { - const param_int_regs = abi.function_arg_regs; + const param_int_regs = abi.Registers.Integer.function_arg_regs; if (param_int_reg_i >= param_int_regs.len) break; const param_int_reg = param_int_regs[param_int_reg_i]; @@ -5263,36 +6554,47 @@ fn resolveCallingConventionValues( arg_mcv[arg_mcv_i] = .{ .register = param_int_reg }; arg_mcv_i += 1; }, + .float => { + const param_float_regs = abi.Registers.Float.function_arg_regs; + if (param_float_reg_i >= param_float_regs.len) break; + + const param_float_reg = param_float_regs[param_float_reg_i]; + param_float_reg_i += 1; + + arg_mcv[arg_mcv_i] = .{ .register = param_float_reg }; + arg_mcv_i += 1; + }, .memory => { - const param_int_regs = abi.function_arg_regs; + const param_int_regs = abi.Registers.Integer.function_arg_regs; + const param_int_reg = param_int_regs[param_int_reg_i]; + param_int_reg_i += 1; arg_mcv[arg_mcv_i] = .{ .indirect = .{ .reg = param_int_reg } }; arg_mcv_i += 1; }, - else => return self.fail("TODO: C calling convention arg class {}", .{class}), + else => return func.fail("TODO: C calling convention arg class {}", .{class}), } else { arg.* = switch (arg_mcv_i) { - else => return self.fail("ty {} took {} tracking arg indices", .{ ty.fmt(zcu), arg_mcv_i }), + else => return func.fail("ty {} took {} tracking arg indices", .{ ty.fmt(zcu), arg_mcv_i }), 1 => arg_mcv[0], 2 => .{ .register_pair = .{ arg_mcv[0].register, arg_mcv[1].register } }, }; continue; } - return self.fail("TODO: pass args by stack", .{}); + return func.fail("TODO: pass args by stack", .{}); } }, - else => return self.fail("TODO implement function parameters for {} on riscv64", .{cc}), + else => return func.fail("TODO implement function parameters for {} on riscv64", .{cc}), } result.stack_byte_count = @intCast(result.stack_align.forward(result.stack_byte_count)); return result; } -/// TODO support scope overrides. Also note this logic is duplicated with `Module.wantSafety`. 
-fn wantSafety(self: *Self) bool { - return switch (self.bin_file.comp.root_mod.optimize_mode) { +fn wantSafety(func: *Func) bool { + return switch (func.mod.optimize_mode) { .Debug => true, .ReleaseSafe => true, .ReleaseFast => false, @@ -5300,39 +6602,36 @@ fn wantSafety(self: *Self) bool { }; } -fn fail(self: *Self, comptime format: []const u8, args: anytype) InnerError { +fn fail(func: *Func, comptime format: []const u8, args: anytype) InnerError { @setCold(true); - assert(self.err_msg == null); - self.err_msg = try ErrorMsg.create(self.gpa, self.src_loc, format, args); + assert(func.err_msg == null); + func.err_msg = try ErrorMsg.create(func.gpa, func.src_loc, format, args); return error.CodegenFail; } -fn failSymbol(self: *Self, comptime format: []const u8, args: anytype) InnerError { +fn failSymbol(func: *Func, comptime format: []const u8, args: anytype) InnerError { @setCold(true); - assert(self.err_msg == null); - self.err_msg = try ErrorMsg.create(self.gpa, self.src_loc, format, args); + assert(func.err_msg == null); + func.err_msg = try ErrorMsg.create(func.gpa, func.src_loc, format, args); return error.CodegenFail; } fn parseRegName(name: []const u8) ?Register { - if (@hasDecl(Register, "parseRegName")) { - return Register.parseRegName(name); - } return std.meta.stringToEnum(Register, name); } -fn typeOf(self: *Self, inst: Air.Inst.Ref) Type { - const zcu = self.bin_file.comp.module.?; - return self.air.typeOf(inst, &zcu.intern_pool); +fn typeOf(func: *Func, inst: Air.Inst.Ref) Type { + const zcu = func.bin_file.comp.module.?; + return func.air.typeOf(inst, &zcu.intern_pool); } -fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type { - const zcu = self.bin_file.comp.module.?; - return self.air.typeOfIndex(inst, &zcu.intern_pool); +fn typeOfIndex(func: *Func, inst: Air.Inst.Index) Type { + const zcu = func.bin_file.comp.module.?; + return func.air.typeOfIndex(inst, &zcu.intern_pool); } -fn hasFeature(self: *Self, feature: Target.riscv.Feature) bool { - return Target.riscv.featureSetHas(self.target.cpu.features, feature); +fn hasFeature(func: *Func, feature: Target.riscv.Feature) bool { + return Target.riscv.featureSetHas(func.target.cpu.features, feature); } pub fn errUnionPayloadOffset(payload_ty: Type, zcu: *Module) u64 { @@ -5356,3 +6655,33 @@ pub fn errUnionErrorOffset(payload_ty: Type, zcu: *Module) u64 { return 0; } } + +fn promoteInt(func: *Func, ty: Type) Type { + const mod = func.bin_file.comp.module.?; + const int_info: InternPool.Key.IntType = switch (ty.toIntern()) { + .bool_type => .{ .signedness = .unsigned, .bits = 1 }, + else => if (ty.isAbiInt(mod)) ty.intInfo(mod) else return ty, + }; + for ([_]Type{ + Type.c_int, Type.c_uint, + Type.c_long, Type.c_ulong, + Type.c_longlong, Type.c_ulonglong, + }) |promote_ty| { + const promote_info = promote_ty.intInfo(mod); + if (int_info.signedness == .signed and promote_info.signedness == .unsigned) continue; + if (int_info.bits + @intFromBool(int_info.signedness == .unsigned and + promote_info.signedness == .signed) <= promote_info.bits) return promote_ty; + } + return ty; +} + +fn promoteVarArg(func: *Func, ty: Type) Type { + if (!ty.isRuntimeFloat()) return func.promoteInt(ty); + switch (ty.floatBits(func.target.*)) { + 32, 64 => return Type.f64, + else => |float_bits| { + assert(float_bits == func.target.c_type_bit_size(.longdouble)); + return Type.c_longdouble; + }, + } +} diff --git a/src/arch/riscv64/Emit.zig b/src/arch/riscv64/Emit.zig index ec256fefb3..8107c6350f 100644 --- a/src/arch/riscv64/Emit.zig +++ 
b/src/arch/riscv64/Emit.zig @@ -42,6 +42,12 @@ pub fn emitMir(emit: *Emit) Error!void { .enc = std.meta.activeTag(lowered_inst.encoding.data), }), .load_symbol_reloc => |symbol| { + const is_obj_or_static_lib = switch (emit.lower.output_mode) { + .Exe => false, + .Obj => true, + .Lib => emit.lower.link_mode == .static, + }; + if (emit.lower.bin_file.cast(link.File.Elf)) |elf_file| { const atom_ptr = elf_file.symbol(symbol.atom_index).atom(elf_file).?; const sym_index = elf_file.zigObjectPtr().?.symbol(symbol.sym_index); @@ -50,7 +56,7 @@ pub fn emitMir(emit: *Emit) Error!void { var hi_r_type: u32 = @intFromEnum(std.elf.R_RISCV.HI20); var lo_r_type: u32 = @intFromEnum(std.elf.R_RISCV.LO12_I); - if (sym.flags.needs_zig_got) { + if (sym.flags.needs_zig_got and !is_obj_or_static_lib) { _ = try sym.getOrCreateZigGotEntry(sym_index, elf_file); hi_r_type = Elf.R_ZIG_GOT_HI20; @@ -70,6 +76,19 @@ pub fn emitMir(emit: *Emit) Error!void { }); } else return emit.fail("TODO: load_symbol_reloc non-ELF", .{}); }, + .call_extern_fn_reloc => |symbol| { + if (emit.lower.bin_file.cast(link.File.Elf)) |elf_file| { + const atom_ptr = elf_file.symbol(symbol.atom_index).atom(elf_file).?; + + const r_type: u32 = @intFromEnum(std.elf.R_RISCV.CALL_PLT); + + try atom_ptr.addReloc(elf_file, .{ + .r_offset = start_offset, + .r_info = (@as(u64, @intCast(symbol.sym_index)) << 32) | r_type, + .r_addend = 0, + }); + } else return emit.fail("TODO: call_extern_fn_reloc non-ELF", .{}); + }, }; } std.debug.assert(lowered_relocs.len == 0); diff --git a/src/arch/riscv64/Encoding.zig b/src/arch/riscv64/Encoding.zig index 91f100993b..b280b8a483 100644 --- a/src/arch/riscv64/Encoding.zig +++ b/src/arch/riscv64/Encoding.zig @@ -1,7 +1,71 @@ mnemonic: Mnemonic, data: Data, +const OpCode = enum(u7) { + OP = 0b0110011, + OP_IMM = 0b0010011, + OP_IMM_32 = 0b0011011, + OP_32 = 0b0111011, + + BRANCH = 0b1100011, + LOAD = 0b0000011, + STORE = 0b0100011, + SYSTEM = 0b1110011, + + OP_FP = 0b1010011, + LOAD_FP = 0b0000111, + STORE_FP = 0b0100111, + + JALR = 0b1100111, + AUIPC = 0b0010111, + LUI = 0b0110111, + JAL = 0b1101111, + NONE = 0b0000000, +}; + +const Fmt = enum(u2) { + /// 32-bit single-precision + S = 0b00, + /// 64-bit double-precision + D = 0b01, + _reserved = 0b10, + /// 128-bit quad-precision + Q = 0b11, +}; + +const Enc = struct { + opcode: OpCode, + + data: union(enum) { + /// funct3 + funct7 + ff: struct { + funct3: u3, + funct7: u7, + }, + /// funct5 + rm + fmt + fmt: struct { + funct5: u5, + rm: u3, + fmt: Fmt, + }, + /// funct3 + f: struct { + funct3: u3, + }, + /// typ + funct3 + has_5 + sh: struct { + typ: u6, + funct3: u3, + has_5: bool, + }, + /// U-type + none, + }, +}; + pub const Mnemonic = enum { + // base mnemonics + // I Type ld, lw, @@ -10,18 +74,25 @@ pub const Mnemonic = enum { lhu, lb, lbu, + sltiu, xori, andi, + slli, srli, srai, + slliw, + srliw, + sraiw, + addi, jalr, // U Type lui, + auipc, // S Type sd, @@ -37,64 +108,265 @@ pub const Mnemonic = enum { // R Type add, - @"and", + addw, sub, + subw, + @"and", + @"or", slt, - mul, sltu, xor, + sll, + srl, + sra, + + sllw, + srlw, + sraw, + // System ecall, ebreak, unimp, + // M extension + mul, + mulw, + + mulh, + mulhu, + mulhsu, + + div, + divu, + + divw, + divuw, + + rem, + remu, + + remw, + remuw, + + // F extension (32-bit float) + fadds, + fsubs, + fmuls, + fdivs, + + fmins, + fmaxs, + + fsqrts, + + flw, + fsw, + + feqs, + flts, + fles, + + fsgnjns, + fsgnjxs, + + // D extension (64-bit float) + faddd, + fsubd, + fmuld, + fdivd, + + fmind, + fmaxd, + 
+ fsqrtd, + + fld, + fsd, + + feqd, + fltd, + fled, + + fsgnjnd, + fsgnjxd, + pub fn encoding(mnem: Mnemonic) Enc { return switch (mnem) { // zig fmt: off - .add => .{ .opcode = 0b0110011, .funct3 = 0b000, .funct7 = 0b0000000 }, - .sltu => .{ .opcode = 0b0110011, .funct3 = 0b011, .funct7 = 0b0000000 }, - .@"and" => .{ .opcode = 0b0110011, .funct3 = 0b111, .funct7 = 0b0000000 }, - .sub => .{ .opcode = 0b0110011, .funct3 = 0b000, .funct7 = 0b0100000 }, - .ld => .{ .opcode = 0b0000011, .funct3 = 0b011, .funct7 = null }, - .lw => .{ .opcode = 0b0000011, .funct3 = 0b010, .funct7 = null }, - .lwu => .{ .opcode = 0b0000011, .funct3 = 0b110, .funct7 = null }, - .lh => .{ .opcode = 0b0000011, .funct3 = 0b001, .funct7 = null }, - .lhu => .{ .opcode = 0b0000011, .funct3 = 0b101, .funct7 = null }, - .lb => .{ .opcode = 0b0000011, .funct3 = 0b000, .funct7 = null }, - .lbu => .{ .opcode = 0b0000011, .funct3 = 0b100, .funct7 = null }, + // OP - .sltiu => .{ .opcode = 0b0010011, .funct3 = 0b011, .funct7 = null }, + .add => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b000, .funct7 = 0b0000000 } } }, + .sub => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b000, .funct7 = 0b0100000 } } }, - .addi => .{ .opcode = 0b0010011, .funct3 = 0b000, .funct7 = null }, - .andi => .{ .opcode = 0b0010011, .funct3 = 0b111, .funct7 = null }, - .xori => .{ .opcode = 0b0010011, .funct3 = 0b100, .funct7 = null }, - .jalr => .{ .opcode = 0b1100111, .funct3 = 0b000, .funct7 = null }, - .slli => .{ .opcode = 0b0010011, .funct3 = 0b001, .funct7 = null }, - .srli => .{ .opcode = 0b0010011, .funct3 = 0b101, .funct7 = null }, - .srai => .{ .opcode = 0b0010011, .funct3 = 0b101, .funct7 = null, .offset = 1 << 10 }, + .@"and" => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b111, .funct7 = 0b0000000 } } }, + .@"or" => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b110, .funct7 = 0b0000000 } } }, + .xor => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b100, .funct7 = 0b0000000 } } }, - .lui => .{ .opcode = 0b0110111, .funct3 = null, .funct7 = null }, + .sltu => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b011, .funct7 = 0b0000000 } } }, + .slt => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b010, .funct7 = 0b0000000 } } }, - .sd => .{ .opcode = 0b0100011, .funct3 = 0b011, .funct7 = null }, - .sw => .{ .opcode = 0b0100011, .funct3 = 0b010, .funct7 = null }, - .sh => .{ .opcode = 0b0100011, .funct3 = 0b001, .funct7 = null }, - .sb => .{ .opcode = 0b0100011, .funct3 = 0b000, .funct7 = null }, + .mul => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b000, .funct7 = 0b0000001 } } }, + .mulh => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b001, .funct7 = 0b0000001 } } }, + .mulhsu => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b010, .funct7 = 0b0000001 } } }, + .mulhu => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b011, .funct7 = 0b0000001 } } }, + + .div => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b100, .funct7 = 0b0000001 } } }, + .divu => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b101, .funct7 = 0b0000001 } } }, - .jal => .{ .opcode = 0b1101111, .funct3 = null, .funct7 = null }, + .rem => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b110, .funct7 = 0b0000001 } } }, + .remu => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b111, .funct7 = 0b0000001 } } }, - .beq => .{ .opcode = 0b1100011, .funct3 = 0b000, .funct7 = null }, + .sll => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b001, .funct7 = 0b0000000 } } }, + .srl => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b101, 
.funct7 = 0b0000000 } } }, + .sra => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b101, .funct7 = 0b0100000 } } }, - .slt => .{ .opcode = 0b0110011, .funct3 = 0b010, .funct7 = 0b0000000 }, - .xor => .{ .opcode = 0b0110011, .funct3 = 0b100, .funct7 = 0b0000000 }, + // OP_IMM + + .addi => .{ .opcode = .OP_IMM, .data = .{ .f = .{ .funct3 = 0b000 } } }, + .andi => .{ .opcode = .OP_IMM, .data = .{ .f = .{ .funct3 = 0b111 } } }, + .xori => .{ .opcode = .OP_IMM, .data = .{ .f = .{ .funct3 = 0b100 } } }, + + .sltiu => .{ .opcode = .OP_IMM, .data = .{ .f = .{ .funct3 = 0b011 } } }, + + .slli => .{ .opcode = .OP_IMM, .data = .{ .sh = .{ .typ = 0b000000, .funct3 = 0b001, .has_5 = true } } }, + .srli => .{ .opcode = .OP_IMM, .data = .{ .sh = .{ .typ = 0b000000, .funct3 = 0b101, .has_5 = true } } }, + .srai => .{ .opcode = .OP_IMM, .data = .{ .sh = .{ .typ = 0b010000, .funct3 = 0b101, .has_5 = true } } }, + + + // OP_IMM_32 + + .slliw => .{ .opcode = .OP_IMM_32, .data = .{ .sh = .{ .typ = 0b000000, .funct3 = 0b001, .has_5 = false } } }, + .srliw => .{ .opcode = .OP_IMM_32, .data = .{ .sh = .{ .typ = 0b000000, .funct3 = 0b101, .has_5 = false } } }, + .sraiw => .{ .opcode = .OP_IMM_32, .data = .{ .sh = .{ .typ = 0b010000, .funct3 = 0b101, .has_5 = false } } }, + + + // OP_32 + + .addw => .{ .opcode = .OP_32, .data = .{ .ff = .{ .funct3 = 0b000, .funct7 = 0b0000000 } } }, + .subw => .{ .opcode = .OP_32, .data = .{ .ff = .{ .funct3 = 0b000, .funct7 = 0b0100000 } } }, + .mulw => .{ .opcode = .OP_32, .data = .{ .ff = .{ .funct3 = 0b000, .funct7 = 0b0000001 } } }, + + .divw => .{ .opcode = .OP_32, .data = .{ .ff = .{ .funct3 = 0b100, .funct7 = 0b0000001 } } }, + .divuw => .{ .opcode = .OP_32, .data = .{ .ff = .{ .funct3 = 0b101, .funct7 = 0b0000001 } } }, + + .remw => .{ .opcode = .OP_32, .data = .{ .ff = .{ .funct3 = 0b110, .funct7 = 0b0000001 } } }, + .remuw => .{ .opcode = .OP_32, .data = .{ .ff = .{ .funct3 = 0b111, .funct7 = 0b0000001 } } }, + + .sllw => .{ .opcode = .OP_32, .data = .{ .ff = .{ .funct3 = 0b001, .funct7 = 0b0000000 } } }, + .srlw => .{ .opcode = .OP_32, .data = .{ .ff = .{ .funct3 = 0b101, .funct7 = 0b0000000 } } }, + .sraw => .{ .opcode = .OP_32, .data = .{ .ff = .{ .funct3 = 0b101, .funct7 = 0b0100000 } } }, + + + // OP_FP + + .fadds => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00000, .fmt = .S, .rm = 0b111 } } }, + .faddd => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00000, .fmt = .D, .rm = 0b111 } } }, + + .fsubs => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00001, .fmt = .S, .rm = 0b111 } } }, + .fsubd => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00001, .fmt = .D, .rm = 0b111 } } }, + + .fmuls => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00010, .fmt = .S, .rm = 0b111 } } }, + .fmuld => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00010, .fmt = .D, .rm = 0b111 } } }, + + .fdivs => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00011, .fmt = .S, .rm = 0b111 } } }, + .fdivd => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00011, .fmt = .D, .rm = 0b111 } } }, + + .fmins => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00101, .fmt = .S, .rm = 0b000 } } }, + .fmind => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00101, .fmt = .D, .rm = 0b000 } } }, + + .fmaxs => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00101, .fmt = .S, .rm = 0b001 } } }, + .fmaxd => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00101, .fmt = .D, .rm = 0b001 } } }, + + .fsqrts => .{ .opcode = .OP_FP, 
.data = .{ .fmt = .{ .funct5 = 0b01011, .fmt = .S, .rm = 0b111 } } }, + .fsqrtd => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b01011, .fmt = .D, .rm = 0b111 } } }, + + .fles => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b10100, .fmt = .S, .rm = 0b000 } } }, + .fled => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b10100, .fmt = .D, .rm = 0b000 } } }, + + .flts => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b10100, .fmt = .S, .rm = 0b001 } } }, + .fltd => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b10100, .fmt = .D, .rm = 0b001 } } }, + + .feqs => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b10100, .fmt = .S, .rm = 0b010 } } }, + .feqd => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b10100, .fmt = .D, .rm = 0b010 } } }, + + .fsgnjns => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00100, .fmt = .S, .rm = 0b000 } } }, + .fsgnjnd => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00100, .fmt = .D, .rm = 0b000 } } }, + + .fsgnjxs => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00100, .fmt = .S, .rm = 0b0010} } }, + .fsgnjxd => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00100, .fmt = .D, .rm = 0b0010} } }, + + + // LOAD + + .lb => .{ .opcode = .LOAD, .data = .{ .f = .{ .funct3 = 0b000 } } }, + .lh => .{ .opcode = .LOAD, .data = .{ .f = .{ .funct3 = 0b001 } } }, + .lw => .{ .opcode = .LOAD, .data = .{ .f = .{ .funct3 = 0b010 } } }, + .ld => .{ .opcode = .LOAD, .data = .{ .f = .{ .funct3 = 0b011 } } }, + .lbu => .{ .opcode = .LOAD, .data = .{ .f = .{ .funct3 = 0b100 } } }, + .lhu => .{ .opcode = .LOAD, .data = .{ .f = .{ .funct3 = 0b101 } } }, + .lwu => .{ .opcode = .LOAD, .data = .{ .f = .{ .funct3 = 0b110 } } }, + + + // STORE + + .sb => .{ .opcode = .STORE, .data = .{ .f = .{ .funct3 = 0b000 } } }, + .sh => .{ .opcode = .STORE, .data = .{ .f = .{ .funct3 = 0b001 } } }, + .sw => .{ .opcode = .STORE, .data = .{ .f = .{ .funct3 = 0b010 } } }, + .sd => .{ .opcode = .STORE, .data = .{ .f = .{ .funct3 = 0b011 } } }, + + + // LOAD_FP + + .flw => .{ .opcode = .LOAD_FP, .data = .{ .f = .{ .funct3 = 0b010 } } }, + .fld => .{ .opcode = .LOAD_FP, .data = .{ .f = .{ .funct3 = 0b011 } } }, + + + // STORE_FP + + .fsw => .{ .opcode = .STORE_FP, .data = .{ .f = .{ .funct3 = 0b010 } } }, + .fsd => .{ .opcode = .STORE_FP, .data = .{ .f = .{ .funct3 = 0b011 } } }, + + + // JALR + + .jalr => .{ .opcode = .JALR, .data = .{ .f = .{ .funct3 = 0b000 } } }, + + + // LUI + + .lui => .{ .opcode = .LUI, .data = .{ .none = {} } }, + + + // AUIPC + + .auipc => .{ .opcode = .AUIPC, .data = .{ .none = {} } }, + + + // JAL + + .jal => .{ .opcode = .JAL, .data = .{ .none = {} } }, + + + // BRANCH + + .beq => .{ .opcode = .BRANCH, .data = .{ .f = .{ .funct3 = 0b000 } } }, + + + // SYSTEM + + .ecall => .{ .opcode = .SYSTEM, .data = .{ .f = .{ .funct3 = 0b000 } } }, + .ebreak => .{ .opcode = .SYSTEM, .data = .{ .f = .{ .funct3 = 0b000 } } }, + + + // NONE + + .unimp => .{ .opcode = .NONE, .data = .{ .f = .{ .funct3 = 0b000 } } }, - .mul => .{ .opcode = 0b0110011, .funct3 = 0b000, .funct7 = 0b0000001 }, - .ecall => .{ .opcode = 0b1110011, .funct3 = 0b000, .funct7 = null }, - .ebreak => .{ .opcode = 0b1110011, .funct3 = 0b000, .funct7 = null }, - .unimp => .{ .opcode = 0b0000000, .funct3 = 0b000, .funct7 = null }, // zig fmt: on }; } @@ -102,6 +374,7 @@ pub const Mnemonic = enum { pub const InstEnc = enum { R, + R4, I, S, B, @@ -114,6 +387,19 @@ pub const InstEnc = enum { pub fn fromMnemonic(mnem: Mnemonic) InstEnc { return switch (mnem) 
{ .addi, + .jalr, + .sltiu, + .xori, + .andi, + + .slli, + .srli, + .srai, + + .slliw, + .srliw, + .sraiw, + .ld, .lw, .lwu, @@ -121,22 +407,22 @@ pub const InstEnc = enum { .lhu, .lb, .lbu, - .jalr, - .sltiu, - .xori, - .andi, - .slli, - .srli, - .srai, + + .flw, + .fld, => .I, .lui, + .auipc, => .U, .sd, .sw, .sh, .sb, + + .fsd, + .fsw, => .S, .jal, @@ -147,11 +433,76 @@ pub const InstEnc = enum { .slt, .sltu, - .mul, + + .sll, + .srl, + .sra, + + .sllw, + .srlw, + .sraw, + + .div, + .divu, + .divw, + .divuw, + + .rem, + .remu, + .remw, + .remuw, + .xor, - .add, - .sub, .@"and", + .@"or", + + .add, + .addw, + + .sub, + .subw, + + .mul, + .mulw, + .mulh, + .mulhu, + .mulhsu, + + .fadds, + .faddd, + + .fsubs, + .fsubd, + + .fmuls, + .fmuld, + + .fdivs, + .fdivd, + + .fmins, + .fmind, + + .fmaxs, + .fmaxd, + + .fsqrts, + .fsqrtd, + + .fles, + .fled, + + .flts, + .fltd, + + .feqs, + .feqd, + + .fsgnjns, + .fsgnjnd, + + .fsgnjxs, + .fsgnjxd, => .R, .ecall, @@ -161,16 +512,17 @@ pub const InstEnc = enum { }; } - pub fn opsList(enc: InstEnc) [3]std.meta.FieldEnum(Operand) { + pub fn opsList(enc: InstEnc) [4]std.meta.FieldEnum(Operand) { return switch (enc) { // zig fmt: off - .R => .{ .reg, .reg, .reg, }, - .I => .{ .reg, .reg, .imm, }, - .S => .{ .reg, .reg, .imm, }, - .B => .{ .reg, .reg, .imm, }, - .U => .{ .reg, .imm, .none, }, - .J => .{ .reg, .imm, .none, }, - .system => .{ .none, .none, .none, }, + .R => .{ .reg, .reg, .reg, .none }, + .R4 => .{ .reg, .reg, .reg, .reg }, + .I => .{ .reg, .reg, .imm, .none }, + .S => .{ .reg, .reg, .imm, .none }, + .B => .{ .reg, .reg, .imm, .none }, + .U => .{ .reg, .imm, .none, .none }, + .J => .{ .reg, .imm, .none, .none }, + .system => .{ .none, .none, .none, .none }, // zig fmt: on }; } @@ -185,6 +537,15 @@ pub const Data = union(InstEnc) { rs2: u5, funct7: u7, }, + R4: packed struct { + opcode: u7, + rd: u5, + funct3: u3, + rs1: u5, + rs2: u5, + funct2: u2, + rs3: u5, + }, I: packed struct { opcode: u7, rd: u5, @@ -227,19 +588,21 @@ pub const Data = union(InstEnc) { pub fn toU32(self: Data) u32 { return switch (self) { - .R => |v| @as(u32, @bitCast(v)), - .I => |v| @as(u32, @bitCast(v)), - .S => |v| @as(u32, @bitCast(v)), - .B => |v| @as(u32, @intCast(v.opcode)) + (@as(u32, @intCast(v.imm11)) << 7) + (@as(u32, @intCast(v.imm1_4)) << 8) + (@as(u32, @intCast(v.funct3)) << 12) + (@as(u32, @intCast(v.rs1)) << 15) + (@as(u32, @intCast(v.rs2)) << 20) + (@as(u32, @intCast(v.imm5_10)) << 25) + (@as(u32, @intCast(v.imm12)) << 31), - .U => |v| @as(u32, @bitCast(v)), - .J => |v| @as(u32, @bitCast(v)), + // zig fmt: off + .R => |v| @bitCast(v), + .R4 => |v| @bitCast(v), + .I => |v| @bitCast(v), + .S => |v| @bitCast(v), + .B => |v| @as(u32, @intCast(v.opcode)) + (@as(u32, @intCast(v.imm11)) << 7) + (@as(u32, @intCast(v.imm1_4)) << 8) + (@as(u32, @intCast(v.funct3)) << 12) + (@as(u32, @intCast(v.rs1)) << 15) + (@as(u32, @intCast(v.rs2)) << 20) + (@as(u32, @intCast(v.imm5_10)) << 25) + (@as(u32, @intCast(v.imm12)) << 31), + .U => |v| @bitCast(v), + .J => |v| @bitCast(v), .system => unreachable, + // zig fmt: on }; } pub fn construct(mnem: Mnemonic, ops: []const Operand) !Data { const inst_enc = InstEnc.fromMnemonic(mnem); - const enc = mnem.encoding(); // special mnemonics @@ -251,17 +614,17 @@ pub const Data = union(InstEnc) { assert(ops.len == 0); return .{ .I = .{ - .rd = Register.zero.id(), - .rs1 = Register.zero.id(), + .rd = Register.zero.encodeId(), + .rs1 = Register.zero.encodeId(), .imm0_11 = switch (mnem) { .ecall => 0x000, .ebreak => 0x001, - .unimp => 
0, + .unimp => 0x000, else => unreachable, }, - .opcode = enc.opcode, - .funct3 = enc.funct3.?, + .opcode = @intFromEnum(enc.opcode), + .funct3 = enc.data.f.funct3, }, }; }, @@ -272,14 +635,26 @@ pub const Data = union(InstEnc) { .R => { assert(ops.len == 3); return .{ - .R = .{ - .rd = ops[0].reg.id(), - .rs1 = ops[1].reg.id(), - .rs2 = ops[2].reg.id(), + .R = switch (enc.data) { + .ff => |ff| .{ + .rd = ops[0].reg.encodeId(), + .rs1 = ops[1].reg.encodeId(), + .rs2 = ops[2].reg.encodeId(), - .opcode = enc.opcode, - .funct3 = enc.funct3.?, - .funct7 = enc.funct7.?, + .opcode = @intFromEnum(enc.opcode), + .funct3 = ff.funct3, + .funct7 = ff.funct7, + }, + .fmt => |fmt| .{ + .rd = ops[0].reg.encodeId(), + .rs1 = ops[1].reg.encodeId(), + .rs2 = ops[2].reg.encodeId(), + + .opcode = @intFromEnum(enc.opcode), + .funct3 = fmt.rm, + .funct7 = (@as(u7, fmt.funct5) << 2) | @intFromEnum(fmt.fmt), + }, + else => unreachable, }, }; }, @@ -290,25 +665,37 @@ pub const Data = union(InstEnc) { return .{ .S = .{ .imm0_4 = @truncate(umm), - .rs1 = ops[0].reg.id(), - .rs2 = ops[1].reg.id(), + .rs1 = ops[0].reg.encodeId(), + .rs2 = ops[1].reg.encodeId(), .imm5_11 = @truncate(umm >> 5), - .opcode = enc.opcode, - .funct3 = enc.funct3.?, + .opcode = @intFromEnum(enc.opcode), + .funct3 = enc.data.f.funct3, }, }; }, .I => { assert(ops.len == 3); return .{ - .I = .{ - .rd = ops[0].reg.id(), - .rs1 = ops[1].reg.id(), - .imm0_11 = ops[2].imm.asBits(u12) + enc.offset, + .I = switch (enc.data) { + .f => |f| .{ + .rd = ops[0].reg.encodeId(), + .rs1 = ops[1].reg.encodeId(), + .imm0_11 = ops[2].imm.asBits(u12), - .opcode = enc.opcode, - .funct3 = enc.funct3.?, + .opcode = @intFromEnum(enc.opcode), + .funct3 = f.funct3, + }, + .sh => |sh| .{ + .rd = ops[0].reg.encodeId(), + .rs1 = ops[1].reg.encodeId(), + .imm0_11 = (@as(u12, sh.typ) << 6) | + if (sh.has_5) ops[2].imm.asBits(u6) else (@as(u6, 0) | ops[2].imm.asBits(u5)), + + .opcode = @intFromEnum(enc.opcode), + .funct3 = sh.funct3, + }, + else => unreachable, }, }; }, @@ -316,10 +703,10 @@ pub const Data = union(InstEnc) { assert(ops.len == 2); return .{ .U = .{ - .rd = ops[0].reg.id(), + .rd = ops[0].reg.encodeId(), .imm12_31 = ops[1].imm.asBits(u20), - .opcode = enc.opcode, + .opcode = @intFromEnum(enc.opcode), }, }; }, @@ -331,13 +718,13 @@ pub const Data = union(InstEnc) { return .{ .J = .{ - .rd = ops[0].reg.id(), + .rd = ops[0].reg.encodeId(), .imm1_10 = @truncate(umm >> 1), .imm11 = @truncate(umm >> 11), .imm12_19 = @truncate(umm >> 12), .imm20 = @truncate(umm >> 20), - .opcode = enc.opcode, + .opcode = @intFromEnum(enc.opcode), }, }; }, @@ -349,15 +736,15 @@ pub const Data = union(InstEnc) { return .{ .B = .{ - .rs1 = ops[0].reg.id(), - .rs2 = ops[1].reg.id(), + .rs1 = ops[0].reg.encodeId(), + .rs2 = ops[1].reg.encodeId(), .imm1_4 = @truncate(umm >> 1), .imm5_10 = @truncate(umm >> 5), .imm11 = @truncate(umm >> 11), .imm12 = @truncate(umm >> 12), - .opcode = enc.opcode, - .funct3 = enc.funct3.?, + .opcode = @intFromEnum(enc.opcode), + .funct3 = enc.data.f.funct3, }, }; }, @@ -376,13 +763,6 @@ pub fn findByMnemonic(mnem: Mnemonic, ops: []const Operand) !?Encoding { }; } -const Enc = struct { - opcode: u7, - funct3: ?u3, - funct7: ?u7, - offset: u12 = 0, -}; - fn verifyOps(mnem: Mnemonic, ops: []const Operand) bool { const inst_enc = InstEnc.fromMnemonic(mnem); const list = std.mem.sliceTo(&inst_enc.opsList(), .none); diff --git a/src/arch/riscv64/Lower.zig b/src/arch/riscv64/Lower.zig index 4b77f9cdee..247cf64647 100644 --- a/src/arch/riscv64/Lower.zig +++ 
b/src/arch/riscv64/Lower.zig @@ -14,7 +14,7 @@ result_relocs_len: u8 = undefined, result_insts: [ @max( 1, // non-pseudo instruction - abi.callee_preserved_regs.len, // spill / restore regs, + abi.Registers.all_preserved.len, // spill / restore regs, ) ]Instruction = undefined, result_relocs: [1]Reloc = undefined, @@ -32,8 +32,10 @@ pub const Reloc = struct { const Target = union(enum) { inst: Mir.Inst.Index, - /// Relocs the lowered_inst_index and the next one. + /// Relocs the lowered_inst_index and the next instruction. load_symbol_reloc: bits.Symbol, + /// Relocs the lowered_inst_index and the next instruction. + call_extern_fn_reloc: bits.Symbol, }; }; @@ -42,6 +44,8 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { insts: []const Instruction, relocs: []const Reloc, } { + const zcu = lower.bin_file.comp.module.?; + lower.result_insts = undefined; lower.result_relocs = undefined; errdefer lower.result_insts = undefined; @@ -69,11 +73,25 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { switch (inst.ops) { .pseudo_load_rm => { - const tag: Encoding.Mnemonic = switch (rm.m.mod.rm.size) { - .byte => .lb, - .hword => .lh, - .word => .lw, - .dword => .ld, + const dest_reg = rm.r; + const dest_reg_class = dest_reg.class(); + const float = dest_reg_class == .float; + + const src_size = rm.m.mod.size; + const unsigned = rm.m.mod.unsigned; + + const tag: Encoding.Mnemonic = if (!float) + switch (src_size) { + .byte => if (unsigned) .lbu else .lb, + .hword => if (unsigned) .lhu else .lh, + .word => if (unsigned) .lwu else .lw, + .dword => .ld, + } + else switch (src_size) { + .byte => unreachable, // Zig does not support 8-bit floats + .hword => return lower.fail("TODO: lowerMir pseudo_load_rm support 16-bit floats", .{}), + .word => .flw, + .dword => .fld, }; try lower.emit(tag, &.{ @@ -83,11 +101,25 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { }); }, .pseudo_store_rm => { - const tag: Encoding.Mnemonic = switch (rm.m.mod.rm.size) { - .byte => .sb, - .hword => .sh, - .word => .sw, - .dword => .sd, + const src_reg = rm.r; + const src_reg_class = src_reg.class(); + const float = src_reg_class == .float; + + // TODO: do we actually need this? are all stores not usize? 
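+ // Note: the width does matter here; sb/sh/sw/sd (and fsw/fsd) are distinct encodings, so the store mnemonic has to match the destination size.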
+ const dest_size = rm.m.mod.size; + + const tag: Encoding.Mnemonic = if (!float) + switch (dest_size) { + .byte => .sb, + .hword => .sh, + .word => .sw, + .dword => .sd, + } + else switch (dest_size) { + .byte => unreachable, // Zig does not support 8-bit floats + .hword => return lower.fail("TODO: lowerMir pseudo_store_rm support 16-bit floats", .{}), + .word => .fsw, + .dword => .fsd, }; try lower.emit(tag, &.{ @@ -103,11 +135,27 @@ .pseudo_mv => { const rr = inst.data.rr; - try lower.emit(.addi, &.{ - .{ .reg = rr.rd }, - .{ .reg = rr.rs }, - .{ .imm = Immediate.s(0) }, - }); + const dst_class = rr.rd.class(); + const src_class = rr.rs.class(); + + assert(dst_class == src_class); + + switch (dst_class) { + .float => { + try lower.emit(if (lower.hasFeature(.d)) .fsgnjnd else .fsgnjns, &.{ + .{ .reg = rr.rd }, + .{ .reg = rr.rs }, + .{ .reg = rr.rs }, + }); + }, + .int => { + try lower.emit(.addi, &.{ + .{ .reg = rr.rd }, + .{ .reg = rr.rs }, + .{ .imm = Immediate.s(0) }, + }); + }, + } }, .pseudo_ret => { @@ -131,25 +179,31 @@ .pseudo_load_symbol => { const payload = inst.data.payload; const data = lower.mir.extraData(Mir.LoadSymbolPayload, payload).data; + const dst_reg: bits.Register = @enumFromInt(data.register); + assert(dst_reg.class() == .int); try lower.emit(.lui, &.{ - .{ .reg = @enumFromInt(data.register) }, - .{ .imm = lower.reloc(.{ .load_symbol_reloc = .{ - .atom_index = data.atom_index, - .sym_index = data.sym_index, - } }) }, + .{ .reg = dst_reg }, + .{ .imm = lower.reloc(.{ + .load_symbol_reloc = .{ + .atom_index = data.atom_index, + .sym_index = data.sym_index, + }, + }) }, }); // the above reloc implies this one try lower.emit(.addi, &.{ - .{ .reg = @enumFromInt(data.register) }, - .{ .reg = @enumFromInt(data.register) }, + .{ .reg = dst_reg }, + .{ .reg = dst_reg }, .{ .imm = Immediate.s(0) }, }); }, .pseudo_lea_rm => { const rm = inst.data.rm; + assert(rm.r.class() == .int); + const frame = rm.m.toFrameLoc(lower.mir); try lower.emit(.addi, &.{ @@ -159,6 +213,26 @@ }); }, + .pseudo_fabs => { + const fabs = inst.data.fabs; + assert(fabs.rs.class() == .float and fabs.rd.class() == .float); + + const mnem: Encoding.Mnemonic = switch (fabs.bits) { + 16 => return lower.fail("TODO: airAbs Float 16", .{}), + 32 => .fsgnjxs, + 64 => .fsgnjxd, + 80 => return lower.fail("TODO: airAbs Float 80", .{}), + 128 => return lower.fail("TODO: airAbs Float 128", .{}), + else => unreachable, + }; + + try lower.emit(mnem, &.{ + .{ .reg = fabs.rs }, + .{ .reg = fabs.rd }, + .{ .reg = fabs.rd }, + }); + }, + .pseudo_compare => { const compare = inst.data.compare; const op = compare.op; @@ -167,78 +241,142 @@ const rs1 = compare.rs1; const rs2 = compare.rs2; - switch (op) { - .eq => { - try lower.emit(.xor, &.{ - .{ .reg = rd }, - .{ .reg = rs1 }, - .{ .reg = rs2 }, - }); + const class = rs1.class(); + const ty = compare.ty; + const size = std.math.ceilPowerOfTwo(u64, ty.bitSize(zcu)) catch { + return lower.fail("pseudo_compare size {}", .{ty.bitSize(zcu)}); + }; - try lower.emit(.sltiu, &.{ - .{ .reg = rd }, - .{ .reg = rd }, - .{ .imm = Immediate.s(1) }, - }); - }, - .neq => { - try lower.emit(.xor, &.{ - .{ .reg = rd }, - .{ .reg = rs1 }, - .{ .reg = rs2 }, - }); + const is_unsigned = ty.isUnsignedInt(zcu); - try
lower.emit(.sltu, &.{ - .{ .reg = rd }, - .{ .reg = .zero }, - .{ .reg = rd }, - }); - }, - .gt => { - try lower.emit(.sltu, &.{ - .{ .reg = rd }, - .{ .reg = rs1 }, - .{ .reg = rs2 }, - }); - }, - .gte => { - try lower.emit(.sltu, &.{ - .{ .reg = rd }, - .{ .reg = rs1 }, - .{ .reg = rs2 }, - }); + const less_than: Encoding.Mnemonic = if (is_unsigned) .sltu else .slt; - try lower.emit(.xori, &.{ - .{ .reg = rd }, - .{ .reg = rd }, - .{ .imm = Immediate.s(1) }, - }); - }, - .lt => { - try lower.emit(.slt, &.{ - .{ .reg = rd }, - .{ .reg = rs1 }, - .{ .reg = rs2 }, - }); - }, - .lte => { - try lower.emit(.slt, &.{ - .{ .reg = rd }, - .{ .reg = rs2 }, - .{ .reg = rs1 }, - }); + switch (class) { + .int => switch (op) { + .eq => { + try lower.emit(.xor, &.{ + .{ .reg = rd }, + .{ .reg = rs1 }, + .{ .reg = rs2 }, + }); - try lower.emit(.xori, &.{ - .{ .reg = rd }, - .{ .reg = rd }, - .{ .imm = Immediate.s(1) }, - }); + try lower.emit(.sltiu, &.{ + .{ .reg = rd }, + .{ .reg = rd }, + .{ .imm = Immediate.s(1) }, + }); + }, + .neq => { + try lower.emit(.xor, &.{ + .{ .reg = rd }, + .{ .reg = rs1 }, + .{ .reg = rs2 }, + }); + + try lower.emit(.sltu, &.{ + .{ .reg = rd }, + .{ .reg = .zero }, + .{ .reg = rd }, + }); + }, + .gt => { + try lower.emit(less_than, &.{ + .{ .reg = rd }, + .{ .reg = rs1 }, + .{ .reg = rs2 }, + }); + }, + .gte => { + try lower.emit(less_than, &.{ + .{ .reg = rd }, + .{ .reg = rs1 }, + .{ .reg = rs2 }, + }); + + try lower.emit(.xori, &.{ + .{ .reg = rd }, + .{ .reg = rd }, + .{ .imm = Immediate.s(1) }, + }); + }, + .lt => { + try lower.emit(less_than, &.{ + .{ .reg = rd }, + .{ .reg = rs1 }, + .{ .reg = rs2 }, + }); + }, + .lte => { + try lower.emit(less_than, &.{ + .{ .reg = rd }, + .{ .reg = rs2 }, + .{ .reg = rs1 }, + }); + + try lower.emit(.xori, &.{ + .{ .reg = rd }, + .{ .reg = rd }, + .{ .imm = Immediate.s(1) }, + }); + }, + }, + .float => switch (op) { + // eq + .eq => { + try lower.emit(if (size == 64) .feqd else .feqs, &.{ + .{ .reg = rd }, + .{ .reg = rs1 }, + .{ .reg = rs2 }, + }); + }, + // !(eq) + .neq => { + try lower.emit(if (size == 64) .feqd else .feqs, &.{ + .{ .reg = rd }, + .{ .reg = rs1 }, + .{ .reg = rs2 }, + }); + try lower.emit(.xori, &.{ + .{ .reg = rd }, + .{ .reg = rd }, + .{ .imm = Immediate.s(1) }, + }); + }, + .lt => { + try lower.emit(if (size == 64) .fltd else .flts, &.{ + .{ .reg = rd }, + .{ .reg = rs1 }, + .{ .reg = rs2 }, + }); + }, + .lte => { + try lower.emit(if (size == 64) .fled else .fles, &.{ + .{ .reg = rd }, + .{ .reg = rs1 }, + .{ .reg = rs2 }, + }); + }, + .gt => { + try lower.emit(if (size == 64) .fltd else .flts, &.{ + .{ .reg = rd }, + .{ .reg = rs2 }, + .{ .reg = rs1 }, + }); + }, + .gte => { + try lower.emit(if (size == 64) .fled else .fles, &.{ + .{ .reg = rd }, + .{ .reg = rs2 }, + .{ .reg = rs1 }, + }); + }, }, } }, .pseudo_not => { const rr = inst.data.rr; + assert(rr.rs.class() == .int and rr.rd.class() == .int); try lower.emit(.xori, &.{ .{ .reg = rr.rd }, @@ -247,6 +385,26 @@ }); }, + .pseudo_extern_fn_reloc => { + const inst_reloc = inst.data.reloc; + + try lower.emit(.auipc, &.{ + .{ .reg = .ra }, + .{ .imm = lower.reloc( + .{ .call_extern_fn_reloc = .{ + .atom_index = inst_reloc.atom_index, + .sym_index = inst_reloc.sym_index, + } }, + ) }, + }); + + try lower.emit(.jalr, &.{ + .{ .reg = .ra }, + .{ .reg = .ra }, + .{ .imm = Immediate.s(0) }, + }); + }, + else => return lower.fail("TODO lower: pseudo {s}", .{@tagName(inst.ops)}), }, } @@ -314,16
+472,19 @@ fn pushPopRegList(lower: *Lower, comptime spilling: bool, reg_list: Mir.Register var reg_i: u31 = 0; while (it.next()) |i| { const frame = lower.mir.frame_locs.get(@intFromEnum(bits.FrameIndex.spill_frame)); + const reg = abi.Registers.all_preserved[i]; + const reg_class = reg.class(); + const is_float_reg = reg_class == .float; if (spilling) { - try lower.emit(.sd, &.{ + try lower.emit(if (is_float_reg) .fsd else .sd, &.{ .{ .reg = frame.base }, - .{ .reg = abi.callee_preserved_regs[i] }, + .{ .reg = abi.Registers.all_preserved[i] }, .{ .imm = Immediate.s(frame.disp + reg_i) }, }); } else { - try lower.emit(.ld, &.{ - .{ .reg = abi.callee_preserved_regs[i] }, + try lower.emit(if (is_float_reg) .fld else .ld, &.{ + .{ .reg = abi.Registers.all_preserved[i] }, .{ .reg = frame.base }, .{ .imm = Immediate.s(frame.disp + reg_i) }, }); @@ -340,6 +501,12 @@ pub fn fail(lower: *Lower, comptime format: []const u8, args: anytype) Error { return error.LowerFail; } +fn hasFeature(lower: *Lower, feature: std.Target.riscv.Feature) bool { + const target = lower.bin_file.comp.module.?.getTarget(); + const features = target.cpu.features; + return std.Target.riscv.featureSetHas(features, feature); +} + const Lower = @This(); const abi = @import("abi.zig"); diff --git a/src/arch/riscv64/Mir.zig b/src/arch/riscv64/Mir.zig index 0ce2185197..0753b142b1 100644 --- a/src/arch/riscv64/Mir.zig +++ b/src/arch/riscv64/Mir.zig @@ -20,90 +20,119 @@ pub const Inst = struct { pub const Index = u32; pub const Tag = enum(u16) { - /// Add immediate. Uses i_type payload. - addi, - /// Add immediate and produce a sign-extended result. - /// - /// Uses i-type payload. + // base extension + addi, addiw, jalr, lui, - mv, @"and", + andi, + xor, + @"or", ebreak, ecall, unimp, - /// OR instruction. Uses r_type payload. - @"or", - - /// Addition add, - /// Subtraction + addw, sub, - /// Multiply, uses r_type. Needs the M extension. - mul, - - /// Absolute Value, uses i_type payload. - abs, + subw, sltu, slt, - /// Immediate Logical Right Shift, uses i_type payload - srli, - /// Immediate Logical Left Shift, uses i_type payload slli, - /// Immediate Arithmetic Right Shift, uses i_type payload. + srli, srai, - /// Register Logical Left Shift, uses r_type payload - sllw, - /// Register Logical Right Shit, uses r_type payload - srlw, - /// Jumps, but stores the address of the instruction following the - /// jump in `rd`. - /// - /// Uses j_type payload. 
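+ // shift mnemonics; the *w variants below are the RV64 forms that operate on the low 32 bits of a register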
+ slliw, + srliw, + sraiw, + + sll, + srl, + sra, + + sllw, + srlw, + sraw, + jal, - /// Immediate AND, uses i_type payload - andi, - - /// Branch if equal, Uses b_type beq, - /// Branch if not equal, Uses b_type bne, - /// Boolean NOT, Uses rr payload - not, - - /// Generates a NO-OP, uses nop payload nop, - /// Load double (64 bits), uses i_type payload ld, - /// Load word (32 bits), uses i_type payload lw, - /// Load half (16 bits), uses i_type payload lh, - /// Load byte (8 bits), uses i_type payload lb, - /// Store double (64 bits), uses s_type payload sd, - /// Store word (32 bits), uses s_type payload sw, - /// Store half (16 bits), uses s_type payload sh, - /// Store byte (8 bits), uses s_type payload sb, + // M extension + mul, + mulw, + + div, + divu, + divw, + divuw, + + rem, + remu, + remw, + remuw, + + // F extension (32-bit float) + fadds, + fsubs, + fmuls, + fdivs, + + fabss, + + fmins, + fmaxs, + + fsqrts, + + flw, + fsw, + + feqs, + flts, + fles, + + // D extension (64-bit float) + faddd, + fsubd, + fmuld, + fdivd, + + fabsd, + + fmind, + fmaxd, + + fsqrtd, + + fld, + fsd, + + feqd, + fltd, + fled, + /// A pseudo-instruction. Used for anything that isn't 1:1 with an /// assembly instruction. pseudo, @@ -192,6 +221,12 @@ rs: Register, }, + fabs: struct { + rd: Register, + rs: Register, + bits: u16, + }, + compare: struct { rd: Register, rs1: Register, @@ -204,6 +239,12 @@ lt, lte, }, + ty: Type, + }, + + reloc: struct { + atom_index: u32, + sym_index: u32, }, }; @@ -217,10 +258,7 @@ /// Two registers + immediate, uses the i_type payload. rri, - /// Two registers + Two Immediates - rrii, - - /// Two registers + another instruction. + /// Two registers + another instruction. rr_inst, /// Register + Memory @@ -268,6 +306,9 @@ /// Jumps. Uses `inst` payload. pseudo_j, + /// Floating point absolute value. + pseudo_fabs, + /// Dead inst, ignored by the emitter. pseudo_dead, @@ -286,6 +327,9 @@ pseudo_compare, pseudo_not, + + /// Generates an auipc + jalr pair, with an R_RISCV_CALL_PLT reloc. + pseudo_extern_fn_reloc, }; // Make sure we don't accidentally make instructions bigger than expected. @@ -387,6 +431,8 @@ pub const RegisterList = struct { const Mir = @This(); const std = @import("std"); const builtin = @import("builtin"); +const Type = @import("../../type.zig").Type; + const assert = std.debug.assert; const bits = @import("bits.zig"); diff --git a/src/arch/riscv64/abi.zig b/src/arch/riscv64/abi.zig index 35f5659685..a5b54f0a1b 100644 --- a/src/arch/riscv64/abi.zig +++ b/src/arch/riscv64/abi.zig @@ -7,7 +7,7 @@ const InternPool = @import("../../InternPool.zig"); const Module = @import("../../Module.zig"); const assert = std.debug.assert; -pub const Class = enum { memory, byval, integer, double_integer, fields, none }; +pub const Class = enum { memory, byval, integer, double_integer, fields }; pub fn classifyType(ty: Type, mod: *Module) Class { const target = mod.getTarget(); @@ -93,11 +93,13 @@ } } +pub const SystemClass = enum { integer, float, memory, none }; + /// There are a maximum of 8 possible return slots. Returned values are in /// the beginning of the array; unused slots are filled with .none.
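+/// For example, with the classification below a 128-bit integer occupies two .integer slots, while an f64 (given the D extension) occupies a single .float slot.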
-pub fn classifySystem(ty: Type, zcu: *Module) [8]Class { - var result = [1]Class{.none} ** 8; - const memory_class = [_]Class{ +pub fn classifySystem(ty: Type, zcu: *Module) [8]SystemClass { + var result = [1]SystemClass{.none} ** 8; + const memory_class = [_]SystemClass{ .memory, .none, .none, .none, .none, .none, .none, .none, }; @@ -123,6 +125,7 @@ pub fn classifySystem(ty: Type, zcu: *Module) [8]Class { return result; } result[0] = .integer; + if (ty.optionalChild(zcu).abiSize(zcu) == 0) return result; result[1] = .integer; return result; }, @@ -139,6 +142,18 @@ pub fn classifySystem(ty: Type, zcu: *Module) [8]Class { } unreachable; // support > 128 bit int arguments }, + .Float => { + const target = zcu.getTarget(); + const features = target.cpu.features; + + const float_bits = ty.floatBits(zcu.getTarget()); + const float_reg_size: u32 = if (std.Target.riscv.featureSetHas(features, .d)) 64 else 32; + if (float_bits <= float_reg_size) { + result[0] = .float; + return result; + } + unreachable; // support split float args + }, .ErrorUnion => { const payload_ty = ty.errorUnionPayload(zcu); const payload_bits = payload_ty.bitSize(zcu); @@ -149,12 +164,7 @@ pub fn classifySystem(ty: Type, zcu: *Module) [8]Class { // anyerror!void can fit into one register if (payload_bits == 0) return result; - if (payload_bits <= 64) { - result[1] = .integer; - return result; - } - - std.debug.panic("TODO: classifySystem ErrorUnion > 64 bit payload", .{}); + return memory_class; }, .Struct => { const layout = ty.containerLayout(zcu); @@ -169,6 +179,19 @@ pub fn classifySystem(ty: Type, zcu: *Module) [8]Class { return memory_class; }, + .Array => { + const ty_size = ty.abiSize(zcu); + if (ty_size <= 8) { + result[0] = .integer; + return result; + } + if (ty_size <= 16) { + result[0] = .integer; + result[1] = .integer; + return result; + } + return memory_class; + }, else => |bad_ty| std.debug.panic("classifySystem {s}", .{@tagName(bad_ty)}), } } @@ -230,62 +253,81 @@ fn classifyStruct( } } -pub const callee_preserved_regs = [_]Register{ - // .s0 is ommited to be used as a frame pointer - .s1, .s2, .s3, .s4, .s5, .s6, .s7, .s8, .s9, .s10, .s11, -}; - -pub const function_arg_regs = [_]Register{ - .a0, .a1, .a2, .a3, .a4, .a5, .a6, .a7, -}; - -pub const function_ret_regs = [_]Register{ - .a0, .a1, -}; - -pub const temporary_regs = [_]Register{ - .t0, .t1, .t2, .t3, .t4, .t5, .t6, -}; - -const allocatable_registers = callee_preserved_regs ++ function_arg_regs ++ temporary_regs; +const allocatable_registers = Registers.Integer.all_regs ++ Registers.Float.all_regs; pub const RegisterManager = RegisterManagerFn(@import("CodeGen.zig"), Register, &allocatable_registers); // Register classes const RegisterBitSet = RegisterManager.RegisterBitSet; -pub const RegisterClass = struct { - pub const gp: RegisterBitSet = blk: { - var set = RegisterBitSet.initEmpty(); - set.setRangeValue(.{ - .start = 0, - .end = callee_preserved_regs.len, - }, true); - break :blk set; + +pub const RegisterClass = enum { + int, + float, +}; + +pub const Registers = struct { + pub const all_preserved = Integer.callee_preserved_regs ++ Float.callee_preserved_regs; + + pub const Integer = struct { + // zig fmt: off + pub const general_purpose = initRegBitSet(0, callee_preserved_regs.len); + pub const function_arg = initRegBitSet(callee_preserved_regs.len, function_arg_regs.len); + pub const function_ret = initRegBitSet(callee_preserved_regs.len, function_ret_regs.len); + pub const temporary = initRegBitSet(callee_preserved_regs.len + 
function_arg_regs.len, temporary_regs.len); + // zig fmt: on + + pub const callee_preserved_regs = [_]Register{ + // .s0 is omitted to be used as the frame pointer register + .s1, .s2, .s3, .s4, .s5, .s6, .s7, .s8, .s9, .s10, .s11, + }; + + pub const function_arg_regs = [_]Register{ + .a0, .a1, .a2, .a3, .a4, .a5, .a6, .a7, + }; + + pub const function_ret_regs = [_]Register{ + .a0, .a1, + }; + + pub const temporary_regs = [_]Register{ + .t0, .t1, .t2, .t3, .t4, .t5, .t6, + }; + + pub const all_regs = callee_preserved_regs ++ function_arg_regs ++ temporary_regs; }; - pub const fa: RegisterBitSet = blk: { - var set = RegisterBitSet.initEmpty(); - set.setRangeValue(.{ - .start = callee_preserved_regs.len, - .end = callee_preserved_regs.len + function_arg_regs.len, - }, true); - break :blk set; - }; + pub const Float = struct { + // zig fmt: off + pub const general_purpose = initRegBitSet(Integer.all_regs.len, callee_preserved_regs.len); + pub const function_arg = initRegBitSet(Integer.all_regs.len + callee_preserved_regs.len, function_arg_regs.len); + pub const function_ret = initRegBitSet(Integer.all_regs.len + callee_preserved_regs.len, function_ret_regs.len); + pub const temporary = initRegBitSet(Integer.all_regs.len + callee_preserved_regs.len + function_arg_regs.len, temporary_regs.len); + // zig fmt: on - pub const fr: RegisterBitSet = blk: { - var set = RegisterBitSet.initEmpty(); - set.setRangeValue(.{ - .start = callee_preserved_regs.len, - .end = callee_preserved_regs.len + function_ret_regs.len, - }, true); - break :blk set; - }; + pub const callee_preserved_regs = [_]Register{ + .fs0, .fs1, .fs2, .fs3, .fs4, .fs5, .fs6, .fs7, .fs8, .fs9, .fs10, .fs11, + }; - pub const tp: RegisterBitSet = blk: { - var set = RegisterBitSet.initEmpty(); - set.setRangeValue(.{ - .start = callee_preserved_regs.len + function_arg_regs.len, - .end = callee_preserved_regs.len + function_arg_regs.len + temporary_regs.len, - }, true); - break :blk set; + pub const function_arg_regs = [_]Register{ + .fa0, .fa1, .fa2, .fa3, .fa4, .fa5, .fa6, .fa7, + }; + + pub const function_ret_regs = [_]Register{ + .fa0, .fa1, + }; + + pub const temporary_regs = [_]Register{ + .ft0, .ft1, .ft2, .ft3, .ft4, .ft5, .ft6, .ft7, .ft8, .ft9, .ft10, .ft11, + }; + + pub const all_regs = callee_preserved_regs ++ function_arg_regs ++ temporary_regs; }; }; + +fn initRegBitSet(start: usize, length: usize) RegisterBitSet { + var set = RegisterBitSet.initEmpty(); + set.setRangeValue(.{ + .start = start, + .end = start + length, + }, true); + return set; +} diff --git a/src/arch/riscv64/bits.zig b/src/arch/riscv64/bits.zig index eef0828cdb..cb398ef620 100644 --- a/src/arch/riscv64/bits.zig +++ b/src/arch/riscv64/bits.zig @@ -2,8 +2,12 @@ const std = @import("std"); const DW = std.dwarf; const assert = std.debug.assert; const testing = std.testing; +const Target = std.Target; + +const Module = @import("../../Module.zig"); const Encoding = @import("Encoding.zig"); const Mir = @import("Mir.zig"); +const abi = @import("abi.zig"); pub const Memory = struct { base: Base, @@ -15,12 +19,10 @@ pub const Memory = struct { reloc: Symbol, }; - pub const Mod = union(enum(u1)) { - rm: struct { - size: Size, - disp: i32 = 0, - }, - off: i32, + pub const Mod = struct { + size: Size, + unsigned: bool, + disp: i32 = 0, }; pub const Size = enum(u4) { @@ -35,10 +37,10 @@ pub const Memory = struct { pub fn fromByteSize(size: u64) Size { return switch (size) { - 1 => .byte, - 2 => .hword, - 4 => .word, - 8 => .dword, + 1...1 => .byte, + 2...2 => .hword, + 
3...4 => .word, + 5...8 => .dword, else => unreachable, }; } @@ -65,10 +67,7 @@ /// Asserts `mem` can be represented as a `FrameLoc`. pub fn toFrameLoc(mem: Memory, mir: Mir) Mir.FrameLoc { - const offset: i32 = switch (mem.mod) { - .off => |off| off, - .rm => |rm| rm.disp, - }; + const offset: i32 = mem.mod.disp; switch (mem.base) { .reg => |reg| { @@ -91,7 +90,7 @@ pub const Immediate = union(enum) { signed: i32, - unsigned: u32, + unsigned: u64, pub fn u(x: u64) Immediate { return .{ .unsigned = x }; @@ -119,24 +118,6 @@ }; } - pub fn asUnsigned(imm: Immediate, bit_size: u64) u64 { - return switch (imm) { - .signed => |x| switch (bit_size) { - 1, 8 => @as(u8, @bitCast(@as(i8, @intCast(x)))), - 16 => @as(u16, @bitCast(@as(i16, @intCast(x)))), - 32, 64 => @as(u32, @bitCast(x)), - else => unreachable, - }, - .unsigned => |x| switch (bit_size) { - 1, 8 => @as(u8, @intCast(x)), - 16 => @as(u16, @intCast(x)), - 32 => @as(u32, @intCast(x)), - 64 => x, - else => unreachable, - }, - }; - } - pub fn asBits(imm: Immediate, comptime T: type) T { const int_info = @typeInfo(T).Int; if (int_info.signedness != .unsigned) @compileError("Immediate.asBits needs unsigned T"); @@ -147,12 +128,10 @@ } }; -pub const Register = enum(u6) { +pub const Register = enum(u8) { // zig fmt: off - x0, x1, x2, x3, x4, x5, x6, x7, - x8, x9, x10, x11, x12, x13, x14, x15, - x16, x17, x18, x19, x20, x21, x22, x23, - x24, x25, x26, x27, x28, x29, x30, x31, + + // base extension registers zero, // zero ra, // return address. caller saved sp, // stack pointer. callee saved. gp, // global pointer tp, // thread pointer t0, t1, t2, // temporaries. caller saved. s0, // s0/fp, callee saved. a0, a1, // fn args/return values. caller saved. a2, a3, a4, a5, a6, a7, // fn args. caller saved. s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, // saved registers. callee saved. t3, t4, t5, t6, // caller saved + + x0, x1, x2, x3, x4, x5, x6, x7, + x8, x9, x10, x11, x12, x13, x14, x15, + x16, x17, x18, x19, x20, x21, x22, x23, + x24, x25, x26, x27, x28, x29, x30, x31, + + + // F extension registers + + ft0, ft1, ft2, ft3, ft4, ft5, ft6, ft7, // float temporaries. caller saved. + fs0, fs1, // float saved. callee saved. + fa0, fa1, // float arg/ret. caller saved. + fa2, fa3, fa4, fa5, fa6, fa7, // float args. caller saved. + fs2, fs3, fs4, fs5, fs6, fs7, fs8, fs9, fs10, fs11, // float saved. callee saved. + ft8, ft9, ft10, ft11, // float temporaries. caller saved. + + // this register is accessed only through CSR instructions instead of directly + // fcsr, + + f0, f1, f2, f3, f4, f5, f6, f7, + f8, f9, f10, f11, f12, f13, f14, f15, + f16, f17, f18, f19, f20, f21, f22, f23, + f24, f25, f26, f27, f28, f29, f30, f31, + // zig fmt: on - /// Returns the unique 5-bit ID of this register which is used in - /// the machine code - pub fn id(self: Register) u5 { - return @as(u5, @truncate(@intFromEnum(self))); + /// In RISC-V, registers are stored as 5-bit IDs, and a register can have + /// two names. For example, `zero` and `x0` are the same register and have the + /// same ID, but are two different entries in the enum. We store floating point + /// registers in the same enum. RISC-V uses the same IDs for `f0` and `x0`, + /// inferring which register is meant from the instruction it appears in. + /// + /// The goal of this function is to return the same ID for `zero` and `x0` but two + /// separate IDs for `x0` and `f0`. We assume that each register set has 32 registers + /// and is repeated twice, once for the named version, once for the numbered version.
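+ /// + /// For example, `zero.id() == x0.id()`, while `x0.id() != f0.id()`.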
+ pub fn id(reg: Register) u7 { + const base = switch (@intFromEnum(reg)) { + // zig fmt: off + @intFromEnum(Register.zero) ... @intFromEnum(Register.x31) => @intFromEnum(Register.zero), + @intFromEnum(Register.ft0) ... @intFromEnum(Register.f31) => @intFromEnum(Register.ft0), + else => unreachable, + // zig fmt: on + }; + + return @intCast(base + reg.encodeId()); + } + + pub fn encodeId(reg: Register) u5 { + return @truncate(@intFromEnum(reg)); } pub fn dwarfLocOp(reg: Register) u8 { return @as(u8, reg.id()); } + + pub fn bitSize(reg: Register, zcu: *const Module) u32 { + const features = zcu.getTarget().cpu.features; + + return switch (@intFromEnum(reg)) { + // zig fmt: off + @intFromEnum(Register.zero) ... @intFromEnum(Register.x31) => 64, + @intFromEnum(Register.ft0) ... @intFromEnum(Register.f31) => if (Target.riscv.featureSetHas(features, .d)) 64 else 32, + else => unreachable, + // zig fmt: on + }; + } + + pub fn class(reg: Register) abi.RegisterClass { + return switch (@intFromEnum(reg)) { + // zig fmt: off + @intFromEnum(Register.zero) ... @intFromEnum(Register.x31) => .int, + @intFromEnum(Register.ft0) ... @intFromEnum(Register.f31) => .float, + else => unreachable, + // zig fmt: on + }; + } }; pub const FrameIndex = enum(u32) { diff --git a/src/arch/riscv64/encoder.zig b/src/arch/riscv64/encoder.zig index ddd4f5f437..54d1549ebe 100644 --- a/src/arch/riscv64/encoder.zig +++ b/src/arch/riscv64/encoder.zig @@ -11,12 +11,11 @@ pub const Instruction = struct { pub fn new(mnemonic: Encoding.Mnemonic, ops: []const Operand) !Instruction { const encoding = (try Encoding.findByMnemonic(mnemonic, ops)) orelse { - std.log.err("no encoding found for: {s} {s} {s} {s} {s}", .{ + std.log.err("no encoding found for: {s} [{s} {s} {s}]", .{ @tagName(mnemonic), @tagName(if (ops.len > 0) ops[0] else .none), @tagName(if (ops.len > 1) ops[1] else .none), @tagName(if (ops.len > 2) ops[2] else .none), - @tagName(if (ops.len > 3) ops[3] else .none), }); return error.InvalidInstruction; }; @@ -33,6 +32,31 @@ pub const Instruction = struct { pub fn encode(inst: Instruction, writer: anytype) !void { try writer.writeInt(u32, inst.encoding.data.toU32(), .little); } + + pub fn format( + inst: Instruction, + comptime fmt: []const u8, + _: std.fmt.FormatOptions, + writer: anytype, + ) !void { + std.debug.assert(fmt.len == 0); + + const encoding = inst.encoding; + + try writer.print("{s} ", .{@tagName(encoding.mnemonic)}); + + var i: u32 = 0; + while (i < inst.ops.len and inst.ops[i] != .none) : (i += 1) { + if (i != inst.ops.len and i != 0) try writer.writeAll(", "); + + switch (@as(Instruction.Operand, inst.ops[i])) { + .none => unreachable, // it's sliced out above + .reg => |reg| try writer.writeAll(@tagName(reg)), + .imm => |imm| try writer.print("{d}", .{imm.asSigned(64)}), + .mem => unreachable, // there is no "mem" operand in the actual instructions + } + } + } }; const std = @import("std"); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 4f366052f2..d9818887e5 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -11151,7 +11151,6 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Bu } return o.builder.structType(.normal, types[0..types_len]); }, - .none => unreachable, } }, // TODO investigate C ABI for other architectures @@ -11409,7 +11408,6 @@ const ParamTypeIterator = struct { it.llvm_index += it.types_len - 1; return .multiple_llvm_types; }, - .none => unreachable, } }, // TODO investigate C ABI for other architectures diff --git 
+        .riscv64 => phdr.p_vaddr,
+        else => |arch| std.debug.panic("TODO implement getTpAddress for {s}", .{@tagName(arch)}),
     };
     return @intCast(addr);
 }
diff --git a/src/link/Elf/Atom.zig b/src/link/Elf/Atom.zig
index 239186ffaa..1c303980c3 100644
--- a/src/link/Elf/Atom.zig
+++ b/src/link/Elf/Atom.zig
@@ -1409,11 +1409,11 @@ const x86_64 = struct {
             .GOTPC64 => try cwriter.writeInt(i64, GOT + A, .little),
             .SIZE32 => {
                 const size = @as(i64, @intCast(target.elfSym(elf_file).st_size));
-                try cwriter.writeInt(u32, @as(u32, @bitCast(@as(i32, @intCast(size + A)))), .little);
+                try cwriter.writeInt(u32, @bitCast(@as(i32, @intCast(size + A))), .little);
             },
             .SIZE64 => {
                 const size = @as(i64, @intCast(target.elfSym(elf_file).st_size));
-                try cwriter.writeInt(i64, @as(i64, @intCast(size + A)), .little);
+                try cwriter.writeInt(i64, @intCast(size + A), .little);
             },
             else => try atom.reportUnhandledRelocError(rel, elf_file),
         }
@@ -2001,26 +2001,25 @@ const riscv = struct {
         const r_type: elf.R_RISCV = @enumFromInt(rel.r_type());
 
         switch (r_type) {
-            .@"64" => {
-                try atom.scanReloc(symbol, rel, dynAbsRelocAction(symbol, elf_file), elf_file);
-            },
-
-            .HI20 => {
-                try atom.scanReloc(symbol, rel, absRelocAction(symbol, elf_file), elf_file);
-            },
+            .@"32" => try atom.scanReloc(symbol, rel, absRelocAction(symbol, elf_file), elf_file),
+            .@"64" => try atom.scanReloc(symbol, rel, dynAbsRelocAction(symbol, elf_file), elf_file),
+            .HI20 => try atom.scanReloc(symbol, rel, absRelocAction(symbol, elf_file), elf_file),
 
             .CALL_PLT => if (symbol.flags.import) {
                 symbol.flags.needs_plt = true;
             },
+            .GOT_HI20 => symbol.flags.needs_got = true,
 
-            .GOT_HI20 => {
-                symbol.flags.needs_got = true;
-            },
+            .TPREL_HI20,
+            .TPREL_LO12_I,
+            .TPREL_LO12_S,
+            .TPREL_ADD,
             .PCREL_HI20,
             .PCREL_LO12_I,
             .PCREL_LO12_S,
             .LO12_I,
+            .LO12_S,
             .ADD32,
             .SUB32,
             => {},
@@ -2058,6 +2057,8 @@
         switch (r_type) {
             .NONE => unreachable,
 
+            .@"32" => try cwriter.writeInt(u32, @as(u32, @truncate(@as(u64, @intCast(S + A)))), .little),
+
             .@"64" => {
                 try atom.resolveDynAbsReloc(
                     target,
@@ -2076,11 +2077,6 @@
                 riscv_util.writeInstU(code[r_offset..][0..4], value);
             },
 
-            .LO12_I => {
-                const value: u32 = @bitCast(math.cast(i32, S + A) orelse return error.Overflow);
-                riscv_util.writeInstI(code[r_offset..][0..4], value);
-            },
-
             .GOT_HI20 => {
                 assert(target.flags.has_got);
                 const disp: u32 = @bitCast(math.cast(i32, G + GOT + A - P) orelse return error.Overflow);
@@ -2143,6 +2139,39 @@
                 }
             },
 
+            .LO12_I,
+            .LO12_S,
+            => {
+                const disp: u32 = @bitCast(math.cast(i32, S + A) orelse return error.Overflow);
+                switch (r_type) {
+                    .LO12_I => riscv_util.writeInstI(code[r_offset..][0..4], disp),
+                    .LO12_S => riscv_util.writeInstS(code[r_offset..][0..4], disp),
+                    else => unreachable,
+                }
+            },
+
+            .TPREL_HI20 => {
+                const target_addr: u32 = @intCast(target.address(.{}, elf_file));
+                const val: i32 = @intCast(S + A - target_addr);
+                riscv_util.writeInstU(code[r_offset..][0..4], @bitCast(val));
+            },
+
+            .TPREL_LO12_I,
+            .TPREL_LO12_S,
+            => {
+                const target_addr: u32 = @intCast(target.address(.{}, elf_file));
+                const val: i32 = @intCast(S + A - target_addr);
+                switch (r_type) {
+                    .TPREL_LO12_I => riscv_util.writeInstI(code[r_offset..][0..4], @bitCast(val)),
+                    .TPREL_LO12_S => riscv_util.writeInstS(code[r_offset..][0..4], @bitCast(val)),
+                    else => unreachable,
+                }
+            },
+
+            .TPREL_ADD => {
+                // TODO: this relocation annotates an ADD instruction that can be removed once TPREL relaxation is supported
+            },
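+
+            // For reference (an illustrative sketch following the RISC-V psABI,
+            // not part of the original patch), these TPREL relocations annotate
+            // the local-exec TLS sequence:
+            //   lui  a5, %tprel_hi(sym)           -> TPREL_HI20
+            //   add  a5, a5, tp, %tprel_add(sym)  -> TPREL_ADD marks this add
+            //   addi a5, a5, %tprel_lo(sym)       -> TPREL_LO12_I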
+
             else => |x| switch (@intFromEnum(x)) {
                 // Zig custom relocations
                 Elf.R_ZIG_GOT_HI20 => {
diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig
index b27601b420..451c363f56 100644
--- a/src/link/Elf/ZigObject.zig
+++ b/src/link/Elf/ZigObject.zig
@@ -906,7 +906,9 @@ fn updateDeclCode(
 
     log.debug("updateDeclCode {}{*}", .{ decl_name.fmt(&mod.intern_pool), decl });
 
-    const required_alignment = decl.getAlignment(mod);
+    const required_alignment = decl.getAlignment(mod).max(
+        target_util.minFunctionAlignment(mod.getTarget()),
+    );
 
     const sym = elf_file.symbol(sym_index);
     const esym = &self.local_esyms.items(.elf_sym)[sym.esym_index];
@@ -1634,6 +1636,7 @@
 const log = std.log.scoped(.link);
 const mem = std.mem;
 const relocation = @import("relocation.zig");
 const trace = @import("../../tracy.zig").trace;
+const target_util = @import("../../target.zig");
 const std = @import("std");
 const Air = @import("../../Air.zig");
diff --git a/src/target.zig b/src/target.zig
index 6af301e001..dccc91382b 100644
--- a/src/target.zig
+++ b/src/target.zig
@@ -431,6 +431,23 @@ pub fn defaultFunctionAlignment(target: std.Target) Alignment {
     };
 }
 
+pub fn minFunctionAlignment(target: std.Target) Alignment {
+    return switch (target.cpu.arch) {
+        .arm,
+        .armeb,
+        .aarch64,
+        .aarch64_32,
+        .aarch64_be,
+        .riscv32,
+        .riscv64,
+        .sparc,
+        .sparcel,
+        .sparc64,
+        => .@"2",
+        else => .@"1",
+    };
+}
+
 pub fn supportsFunctionAlignment(target: std.Target) bool {
     return switch (target.cpu.arch) {
         .wasm32, .wasm64 => false,
diff --git a/test/behavior/abs.zig b/test/behavior/abs.zig
index 21f02b2a3d..88d01de5c6 100644
--- a/test/behavior/abs.zig
+++ b/test/behavior/abs.zig
@@ -6,7 +6,6 @@ test "@abs integers" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try comptime testAbsIntegers();
     try testAbsIntegers();
@@ -93,18 +92,17 @@ test "@abs floats" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try comptime testAbsFloats(f16);
-    try testAbsFloats(f16);
+    if (builtin.zig_backend != .stage2_riscv64) try testAbsFloats(f16);
     try comptime testAbsFloats(f32);
     try testAbsFloats(f32);
     try comptime testAbsFloats(f64);
     try testAbsFloats(f64);
     try comptime testAbsFloats(f80);
-    if (builtin.zig_backend != .stage2_wasm and builtin.zig_backend != .stage2_spirv64) try testAbsFloats(f80);
+    if (builtin.zig_backend != .stage2_wasm and builtin.zig_backend != .stage2_spirv64 and builtin.zig_backend != .stage2_riscv64) try testAbsFloats(f80);
     try comptime testAbsFloats(f128);
-    if (builtin.zig_backend != .stage2_wasm and builtin.zig_backend != .stage2_spirv64) try testAbsFloats(f128);
+    if (builtin.zig_backend != .stage2_wasm and builtin.zig_backend != .stage2_spirv64 and builtin.zig_backend != .stage2_riscv64) try testAbsFloats(f128);
 }
 
 fn testAbsFloats(comptime T: type) !void {
diff --git a/test/behavior/align.zig b/test/behavior/align.zig
index 659733962b..1ede6ad433 100644
--- a/test/behavior/align.zig
+++ b/test/behavior/align.zig
@@ -54,7 +54,6 @@ fn addUnaligned(a: *align(1) const u32, b: *align(1) const u32) u32 {
 }
 
 test "@alignCast pointers" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
     var x: u32 align(4) = 1;
     expectsOnly1(&x);
     try expect(x == 2);
@@ -238,7 +237,6 @@ fn addUnalignedSlice(a: []align(1) const u32, b: []align(1) const u32) u32 {
 }
 
 test "specifying alignment allows pointer cast" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -251,7 +249,6 @@ fn testBytesAlign(b: u8) !void {
 }
 
 test "@alignCast slices" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -312,7 +309,6 @@ test "function alignment" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     // function alignment is a compile error on wasm32/wasm64
     if (native_arch == .wasm32 or native_arch == .wasm64) return error.SkipZigTest;
@@ -426,7 +422,6 @@ test "function callconv expression depends on generic parameter" {
 }
 
 test "runtime-known array index has best alignment possible" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
 
     // take full advantage of over-alignment
@@ -562,7 +557,6 @@ test "align(@alignOf(T)) T does not force resolution of T" {
 }
 
 test "align(N) on functions" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -608,7 +602,6 @@ test "comptime alloc alignment" {
 }
 
 test "@alignCast null" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -624,7 +617,6 @@ test "alignment of slice element" {
 }
 
 test "sub-aligned pointer field access" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
diff --git a/test/behavior/alignof.zig b/test/behavior/alignof.zig
index a3e71a254f..e08a42cf19 100644
--- a/test/behavior/alignof.zig
+++ b/test/behavior/alignof.zig
@@ -29,8 +29,6 @@ test "comparison of @alignOf(T) against zero" {
 }
 
 test "correct alignment for elements and slices of aligned array" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     var buf: [1024]u8 align(64) = undefined;
     var start: usize = 1;
     var end: usize = undefined;
diff --git a/test/behavior/array.zig b/test/behavior/array.zig
index 49a03c05e2..f5fa95c770 100644
--- a/test/behavior/array.zig
+++ b/test/behavior/array.zig
@@ -50,7 +50,6 @@ fn getArrayLen(a: []const u32) usize {
 }
 
 test "array concat with undefined" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -89,7 +88,6 @@ test "array concat with tuple" {
 
 test "array init with concat" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const a = 'a';
     var i: [4]u8 = [2]u8{ a, 'b' } ++ [2]u8{ 'c', 'd' };
@@ -99,7 +97,6 @@ test "array init with mult" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const a = 'a';
     var i: [8]u8 = [2]u8{ a, 'b' } ** 4;
@@ -231,7 +228,6 @@ test "nested arrays of integers" {
 
 test "implicit comptime in array type size" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var arr: [plusOne(10)]bool = undefined;
     _ = &arr;
@@ -245,7 +241,6 @@ fn plusOne(x: u32) u32 {
 
 test "single-item pointer to array indexing and slicing" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try testSingleItemPtrArrayIndexSlice();
     try comptime testSingleItemPtrArrayIndexSlice();
@@ -271,7 +266,6 @@ fn doSomeMangling(array: *[4]u8) void {
 
 test "implicit cast zero sized array ptr to slice" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     {
         var b = "".*;
@@ -310,7 +304,6 @@ const Str = struct { a: []Sub };
 test "set global var array via slice embedded in struct" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var s = Str{ .a = s_array[0..] };
@@ -347,7 +340,6 @@ test "read/write through global variable array of struct fields initialized via
 
 test "implicit cast single-item pointer" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try testImplicitCastSingleItemPtr();
     try comptime testImplicitCastSingleItemPtr();
@@ -378,7 +370,6 @@ test "comptime evaluating function that takes array by value" {
 
 test "runtime initialize array elem and then implicit cast to slice" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var two: i32 = 2;
     _ = &two;
@@ -389,7 +380,6 @@ test "array literal as argument to function" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn entry(two: i32) !void {
@@ -418,7 +408,6 @@ test "double nested array to const slice cast in array literal" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn entry(two: i32) !void {
@@ -505,7 +494,6 @@ test "anonymous literal in array" {
 
 test "access the null element of a null terminated array" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -525,7 +513,6 @@ test "type deduction for array subscript expression" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -716,7 +703,6 @@ test "array of array agregate init" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var a = [1]u32{11} ** 10;
     var b = [1][10]u32{a} ** 2;
@@ -767,7 +753,6 @@ test "slicing array of zero-sized values" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
 
     var arr: [32]u0 = undefined;
@@ -778,8 +763,6 @@
 }
 
 test "array init with no result pointer sets field result types" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     const S = struct {
         // A function parameter has a result type, but no result pointer.
         fn f(arr: [1]u32) u32 {
@@ -964,7 +947,6 @@ test "array initialized with string literal" {
 
 test "array initialized with array with sentinel" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         a: u32,
@@ -1051,7 +1033,6 @@ test "union that needs padding bytes inside an array" {
 
 test "runtime index of array of zero-bit values" {
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var runtime: struct { array: [1]void, index: usize } = undefined;
     runtime = .{ .array = .{{}}, .index = 0 };
diff --git a/test/behavior/basic.zig b/test/behavior/basic.zig
index eabac35787..fb61247b11 100644
--- a/test/behavior/basic.zig
+++ b/test/behavior/basic.zig
@@ -483,7 +483,6 @@ fn testStructInFn() !void {
 
 test "fn call returning scalar optional in equality expression" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try expect(getNull() == null);
 }
@@ -494,7 +493,6 @@ fn getNull() ?*i32 {
 
 test "global variable assignment with optional unwrapping with var initialized to undefined" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         var data: i32 = 1234;
@@ -513,7 +511,6 @@ var global_foo: *i32 = undefined;
 
 test "peer result location with typed parent, runtime condition, comptime prongs" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest(arg: i32) i32 {
@@ -593,7 +590,6 @@ test "equality compare fn ptrs" {
 
 test "self reference through fn ptr field" {
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         const A = struct {
@@ -690,7 +686,6 @@ test "string concatenation" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const a = "OK" ++ " IT " ++ "WORKED";
     const b = "OK IT WORKED";
@@ -714,7 +709,6 @@ test "result location is optional inside error union" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const x = maybe(true) catch unreachable;
     try expect(x.? == 42);
@@ -730,7 +724,6 @@ fn maybe(x: bool) anyerror!?u32 {
 
 test "auto created variables have correct alignment" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn foo(str: [*]const u8) u32 {
@@ -838,7 +831,6 @@ test "labeled block implicitly ends in a break" {
 
 test "catch in block has correct result location" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn open() error{A}!@This() {
@@ -870,7 +862,6 @@ test "labeled block with runtime branch forwards its result location type to bre
 
 test "try in labeled block doesn't cast to wrong type" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         a: u32,
@@ -897,7 +888,6 @@ test "weird array and tuple initializations" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const E = enum { a, b };
     const S = struct { e: E };
@@ -1016,7 +1006,6 @@ comptime {
 
 test "switch inside @as gets correct type" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var a: u32 = 0;
     _ = &a;
@@ -1101,8 +1090,6 @@ test "orelse coercion as function argument" {
 }
 
 test "runtime-known globals initialized with undefined" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     const S = struct {
         var array: [10]u32 = [_]u32{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
         var vp: [*]u32 = undefined;
@@ -1246,8 +1233,6 @@ test "pointer to tuple field can be dereferenced at comptime" {
 }
 
 test "proper value is returned from labeled block" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     const S = struct {
         fn hash(v: *u32, key: anytype) void {
             const Key = @TypeOf(key);
@@ -1385,8 +1370,6 @@ test "allocation and looping over 3-byte integer" {
 }
 
 test "loading array from struct is not optimized away" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     const S = struct {
         arr: [1]u32 = .{0},
         fn doTheTest(self: *@This()) !void {
diff --git a/test/behavior/bitcast.zig b/test/behavior/bitcast.zig
index 28c797cef3..6d513a4ac7 100644
--- a/test/behavior/bitcast.zig
+++ b/test/behavior/bitcast.zig
@@ -250,7 +250,6 @@ test "bitcast packed struct to integer and back" {
 
 test "implicit cast to error union by returning" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn entry() !void {
@@ -280,8 +279,6 @@ test "comptime bitcast used in expression has the correct type" {
 }
 
 test "bitcast passed as tuple element" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     const S = struct {
         fn foo(args: anytype) !void {
             comptime assert(@TypeOf(args[0]) == f32);
@@ -292,8 +289,6 @@ test "bitcast passed as tuple element" {
 }
 
 test "triple level result location with bitcast sandwich passed as tuple element" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     const S = struct {
         fn foo(args: anytype) !void {
             comptime assert(@TypeOf(args[0]) == f64);
diff --git a/test/behavior/byteswap.zig b/test/behavior/byteswap.zig
index fd7e2af850..0c6e655b25 100644
--- a/test/behavior/byteswap.zig
+++ b/test/behavior/byteswap.zig
@@ -100,6 +100,7 @@ test "@byteSwap vectors u8" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try comptime vector8();
     try vector8();
diff --git a/test/behavior/byval_arg_var.zig b/test/behavior/byval_arg_var.zig
index 6b48769500..3a82ca86ad 100644
--- a/test/behavior/byval_arg_var.zig
+++ b/test/behavior/byval_arg_var.zig
@@ -5,7 +5,6 @@ var result: []const u8 = "wrong";
 
 test "pass string literal byvalue to a generic var param" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
 
     start();
diff --git a/test/behavior/call.zig b/test/behavior/call.zig
index 2f737f098c..8636955215 100644
--- a/test/behavior/call.zig
+++ b/test/behavior/call.zig
@@ -60,7 +60,6 @@ test "tuple parameters" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const add = struct {
         fn add(a: i32, b: i32) i32 {
@@ -94,7 +93,6 @@ test "result location of function call argument through runtime condition and st
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const E = enum { a, b };
     const S = struct {
@@ -411,7 +409,6 @@ test "recursive inline call with comptime known argument" {
 
 test "inline while with @call" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn inc(a: *u32) void {
@@ -427,8 +424,6 @@
 }
 
 test "method call as parameter type" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     const S = struct {
         fn foo(x: anytype, y: @TypeOf(x).Inner()) @TypeOf(y) {
             return y;
@@ -477,7 +472,6 @@ test "argument to generic function has correct result type" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn foo(_: anytype, e: enum { a, b }) bool {
diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig
index a3ffb7cb3a..6cc881b64d 100644
--- a/test/behavior/cast.zig
+++ b/test/behavior/cast.zig
@@ -57,8 +57,6 @@ test "@intCast to comptime_int" {
 }
 
 test "implicit cast comptime numbers to any type when the value fits" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     const a: u64 = 255;
     var b: u8 = a;
     _ = &b;
@@ -188,7 +186,6 @@ fn expectIntFromFloat(comptime F: type, f: F, comptime I: type, i: I) !void {
 
 test "implicitly cast indirect pointer to maybe-indirect pointer" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         const Self = @This();
@@ -249,7 +246,6 @@ test "coerce undefined to optional" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try expect(MakeType(void).getNull() == null);
     try expect(MakeType(void).getNonNull() != null);
@@ -270,7 +266,6 @@ fn MakeType(comptime T: type) type {
 test "implicit cast from *[N]T to [*c]T" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var x: [4]u16 = [4]u16{ 0, 1, 2, 3 };
     var y: [*c]u16 = &x;
@@ -347,7 +342,6 @@ test "array coercion to undefined at runtime" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     @setRuntimeSafety(true);
@@ -413,7 +407,6 @@ test "peer type unsigned int to signed" {
 
 test "expected [*c]const u8, found [*:0]const u8" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var a: [*:0]const u8 = "hello";
     _ = &a;
@@ -483,7 +476,6 @@ fn castToOptionalTypeError(z: i32) !void {
 
 test "implicitly cast from [0]T to anyerror![]T" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try testCastZeroArrayToErrSliceMut();
     try comptime testCastZeroArrayToErrSliceMut();
@@ -501,7 +493,6 @@ test "peer type resolution: [0]u8, []const u8, and anyerror![]u8" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() anyerror!void {
@@ -558,7 +549,6 @@ fn testCastConstArrayRefToConstSlice() !void {
 
 test "peer type resolution: error and [N]T" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try expect(mem.eql(u8, try testPeerErrorAndArray(0), "OK"));
     comptime assert(mem.eql(u8, try testPeerErrorAndArray(0), "OK"));
@@ -583,7 +573,6 @@ fn testPeerErrorAndArray2(x: u8) anyerror![]const u8 {
 
 test "single-item pointer of array to slice to unknown length pointer" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try testCastPtrOfArrayToSliceAndPtr();
     try comptime testCastPtrOfArrayToSliceAndPtr();
@@ -841,7 +830,6 @@ test "peer cast *[0]T to E![]const T" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var buffer: [5]u8 = "abcde".*;
     const buf: anyerror![]const u8 = buffer[0..];
@@ -857,7 +845,6 @@ test "peer cast *[0]T to []const T" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var buffer: [5]u8 = "abcde".*;
     const buf: []const u8 = buffer[0..];
@@ -881,7 +868,6 @@ test "peer resolution of string literals" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         const E = enum { a, b, c, d };
@@ -903,7 +889,6 @@ test "peer cast [:x]T to []T" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -920,7 +905,6 @@ test "peer cast [N:x]T to [N]T" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -937,7 +921,6 @@ test "peer cast *[N:x]T to *[N]T" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -995,7 +978,6 @@ test "peer cast [:x]T to [*:x]T" {
 
 test "peer type resolution implicit cast to return type" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -1016,7 +998,6 @@ test "peer type resolution implicit cast to variable type" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -1060,7 +1041,6 @@ test "cast between C pointer with different but compatible types" {
 
 test "peer type resolve string lit with sentinel-terminated mutable slice" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var array: [4:0]u8 = undefined;
     array[4] = 0; // TODO remove this when #4372 is solved
@@ -1127,7 +1107,6 @@ test "implicit cast from [*]T to ?*anyopaque" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var a = [_]u8{ 3, 2, 1 };
     var runtime_zero: usize = 0;
@@ -1158,7 +1137,6 @@ fn foobar(func: PFN_void) !void {
 
 test "cast function with an opaque parameter" {
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     if (builtin.zig_backend == .stage2_c) {
         // https://github.com/ziglang/zig/issues/16845
@@ -1191,7 +1169,6 @@ test "implicit ptr to *anyopaque" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var a: u32 = 1;
     const ptr: *align(@alignOf(u32)) anyopaque = &a;
@@ -1205,7 +1182,6 @@ test "implicit ptr to *anyopaque" {
 
 test "return null from fn () anyerror!?&T" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const a = returnNullFromOptionalTypeErrorRef();
     const b = returnNullLitFromOptionalTypeErrorRef();
@@ -1296,7 +1272,6 @@ test "implicit cast from *T to ?*anyopaque" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var a: u8 = 1;
     incrementVoidPtrValue(&a);
@@ -1310,7 +1285,6 @@ fn incrementVoidPtrValue(value: ?*anyopaque) void {
 
 test "implicit cast *[0]T to E![]const u8" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var x = @as(anyerror![]const u8, &[0]u8{});
     _ = &x;
@@ -1332,7 +1306,6 @@ test "*const [N]null u8 to ?[]const u8" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -1369,7 +1342,6 @@ test "assignment to optional pointer result loc" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var foo: struct { ptr: ?*anyopaque } = .{ .ptr = &global_struct };
     _ = &foo;
@@ -1377,7 +1349,6 @@
 }
 
 test "cast between *[N]void and []void" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
 
     var a: [4]void = undefined;
@@ -1445,7 +1416,6 @@ test "peer type resolution: unreachable, null, slice" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest(num: usize, word: []const u8) !void {
@@ -1486,7 +1456,6 @@ test "cast compatible optional types" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var a: ?[:0]const u8 = null;
     _ = &a;
@@ -1497,7 +1466,6 @@ test "coerce undefined single-item pointer of array to error union of slice" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const a = @as([*]u8, undefined)[0..0];
     var b: error{a}![]const u8 = a;
@@ -1600,7 +1568,6 @@ test "bitcast packed struct with u0" {
 
 test "optional pointer coerced to optional allowzero pointer" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var p: ?*u32 = undefined;
     var q: ?*allowzero u32 = undefined;
@@ -1617,8 +1584,6 @@ test "optional slice coerced to allowzero many pointer" {
 }
 
 test "optional slice passed as parameter coerced to allowzero many pointer" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     const ns = struct {
         const Color = struct {
             r: u8,
@@ -1638,8 +1603,6 @@ test "optional slice passed as parameter coerced to allowzero many pointer" {
 }
 
 test "single item pointer to pointer to array to slice" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     var x: i32 = 1234;
     try expect(@as([]const i32, @as(*[1]i32, &x))[0] == 1234);
     const z1 = @as([]const i32, @as(*[1]i32, &x));
@@ -1682,8 +1645,6 @@ test "@volatileCast without a result location" {
 }
 
 test "coercion from single-item pointer to @as to slice" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     var x: u32 = 1;
 
     // Why the following line gets a compile error?
@@ -1696,7 +1657,6 @@ test "peer type resolution: const sentinel slice and mutable non-sentinel slice"
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest(comptime T: type, comptime s: T) !void {
@@ -1727,7 +1687,6 @@ test "peer type resolution: float and comptime-known fixed-width integer" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const i: u8 = 100;
     var f: f32 = 1.234;
@@ -1750,7 +1709,6 @@ test "peer type resolution: same array type with sentinel" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var a: [2:0]u32 = .{ 0, 1 };
     var b: [2:0]u32 = .{ 2, 3 };
@@ -1773,7 +1731,6 @@ test "peer type resolution: array with sentinel and array without sentinel" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var a: [2:0]u32 = .{ 0, 1 };
     var b: [2]u32 = .{ 2, 3 };
@@ -1843,7 +1800,6 @@ test "peer type resolution: error union and optional of same type" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const E = error{Foo};
     var a: E!*u8 = error.Foo;
@@ -1867,7 +1823,6 @@ test "peer type resolution: C pointer and @TypeOf(null)" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var a: [*c]c_int = 0x1000;
     _ = &a;
@@ -1981,7 +1936,6 @@ test "peer type resolution: array and tuple" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var arr: [3]i32 = .{ 1, 2, 3 };
     _ = &arr;
@@ -2116,7 +2070,6 @@ test "peer type resolution: tuple pointer and optional slice" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     // Miscompilation on Intel's OpenCL CPU runtime.
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // flaky
@@ -2209,7 +2162,6 @@ test "peer type resolution: tuples with comptime fields" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const a = .{ 1, 2 };
     const b = .{ @as(u32, 3), @as(i16, 4) };
@@ -2241,7 +2193,6 @@ test "peer type resolution: C pointer and many pointer" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var buf = "hello".*;
@@ -2309,7 +2260,6 @@ test "peer type resolution: arrays of compatible types" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var e0: u8 = 3;
     var e1: u8 = 2;
@@ -2365,7 +2315,6 @@ test "cast builtins can wrap result in error union" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         const MyEnum = enum(u32) { _ };
@@ -2404,7 +2353,6 @@ test "cast builtins can wrap result in error union and optional" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         const MyEnum = enum(u32) { _ };
@@ -2600,7 +2548,6 @@ test "@intFromBool on vector" {
 
 test "numeric coercions with undefined" {
     if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const from: i32 = undefined;
     var to: f32 = from;
@@ -2624,7 +2571,6 @@ test "@as does not corrupt values with incompatible representations" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const x: f32 = @as(f16, blk: {
         if (false) {
diff --git a/test/behavior/comptime_memory.zig b/test/behavior/comptime_memory.zig
index 597ba62dd4..968b7be79d 100644
--- a/test/behavior/comptime_memory.zig
+++ b/test/behavior/comptime_memory.zig
@@ -408,8 +408,6 @@ test "mutate entire slice at comptime" {
 }
 
 test "dereference undefined pointer to zero-bit type" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     const p0: *void = undefined;
     try testing.expectEqual({}, p0.*);
@@ -515,7 +513,5 @@ fn fieldPtrTest() u32 {
     return a.value;
 }
 
 test "pointer in aggregate field can mutate comptime state" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     try comptime std.testing.expect(fieldPtrTest() == 2);
 }
diff --git a/test/behavior/defer.zig b/test/behavior/defer.zig
index ba0d949a7d..fc764f55e3 100644
--- a/test/behavior/defer.zig
+++ b/test/behavior/defer.zig
@@ -116,7 +116,6 @@ test "errdefer with payload" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn foo() !i32 {
@@ -139,7 +138,6 @@ test "reference to errdefer payload" {
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn foo() !i32 {
@@ -162,7 +160,6 @@ test "reference to errdefer payload" {
 
 test "simple else prong doesn't emit an error for unreachable else prong" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn foo() error{Foo}!void {
diff --git a/test/behavior/empty_union.zig b/test/behavior/empty_union.zig
index a42dfda7e1..f05feacfaf 100644
--- a/test/behavior/empty_union.zig
+++ b/test/behavior/empty_union.zig
@@ -48,8 +48,6 @@ test "empty extern union" {
 }
 
 test "empty union passed as argument" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     const U = union(enum) {
         fn f(u: @This()) void {
             switch (u) {}
@@ -59,8 +57,6 @@
 }
 
 test "empty enum passed as argument" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     const E = enum {
         fn f(e: @This()) void {
             switch (e) {}
diff --git a/test/behavior/enum.zig b/test/behavior/enum.zig
index 5f59f61355..8b55ff26bc 100644
--- a/test/behavior/enum.zig
+++ b/test/behavior/enum.zig
@@ -610,7 +610,6 @@ fn testEnumWithSpecifiedTagValues(x: MultipleChoice) !void {
 
 test "enum with specified tag values" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try testEnumWithSpecifiedTagValues(MultipleChoice.C);
     try comptime testEnumWithSpecifiedTagValues(MultipleChoice.C);
@@ -684,7 +683,6 @@ test "empty non-exhaustive enum" {
 
 test "single field non-exhaustive enum" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         const E = enum(u8) { a, _ };
@@ -749,7 +747,6 @@ test "cast integer literal to enum" {
 
 test "enum with specified and unspecified tag values" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try testEnumWithSpecifiedAndUnspecifiedTagValues(MultipleChoice2.D);
     try comptime testEnumWithSpecifiedAndUnspecifiedTagValues(MultipleChoice2.D);
@@ -858,8 +855,6 @@ fn doALoopThing(id: EnumWithOneMember) void {
 }
 
 test "comparison operator on enum with one member is comptime-known" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     doALoopThing(EnumWithOneMember.Eof);
 }
diff --git a/test/behavior/error.zig b/test/behavior/error.zig
index 8db9703f51..e0f0b224c1 100644
--- a/test/behavior/error.zig
+++ b/test/behavior/error.zig
@@ -31,7 +31,6 @@ fn shouldBeNotEqual(a: anyerror, b: anyerror) void {
 
 test "error binary operator" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const a = errBinaryOperatorG(true) catch 3;
     const b = errBinaryOperatorG(false) catch 3;
@@ -63,14 +62,12 @@ pub fn baz() anyerror!i32 {
 
 test "error wrapping" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try expect((baz() catch unreachable) == 15);
 }
 
 test "unwrap simple value from error" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const i = unwrapSimpleValueFromErrorDo() catch unreachable;
     try expect(i == 13);
@@ -81,7 +78,6 @@ fn unwrapSimpleValueFromErrorDo() anyerror!isize {
 
 test "error return in assignment" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     doErrReturnInAssignment() catch unreachable;
 }
@@ -104,7 +100,6 @@ test "syntax: optional operator in front of error union operator" {
 
 test "widen cast integer payload of error union function call" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn errorable() !u64 {
@@ -129,7 +124,6 @@ test "debug info for optional error set" {
 
 test "implicit cast to optional to error union to return result loc" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn entry() !void {
@@ -241,8 +235,6 @@ fn testExplicitErrorSetCast(set1: Set1) !void {
 }
 
 test "@errorCast on error unions" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     const S = struct {
         fn doTheTest() !void {
             {
@@ -270,7 +262,6 @@ test "@errorCast on error unions" {
 
 test "comptime test error for empty error set" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try testComptimeTestErrorEmptySet(1234);
     try comptime testComptimeTestErrorEmptySet(1234);
@@ -306,8 +297,6 @@ test "inferred empty error set comptime catch" {
 }
 
 test "error inference with an empty set" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     const S = struct {
         const Struct = struct {
             pub fn func() (error{})!usize {
@@ -362,7 +351,6 @@ fn quux_1() !i32 {
 
 test "error: Zero sized error set returned with value payload crash" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     _ = try foo3(0);
     _ = try comptime foo3(0);
@@ -376,7 +364,6 @@ fn foo3(b: usize) Error!usize {
 
 test "error: Infer error set from literals" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     _ = nullLiteral("n") catch |err| handleErrors(err);
     _ = floatLiteral("n") catch |err| handleErrors(err);
@@ -498,7 +485,6 @@ test "optional error set is the same size as error set" {
 
 test "nested catch" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn entry() !void {
@@ -524,7 +510,6 @@ test "nested catch" {
 
 test "function pointer with return type that is error union with payload which is pointer of parent struct" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         const Foo = struct {
@@ -582,7 +567,6 @@ test "error payload type is correctly resolved" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const MyIntWrapper = struct {
         const Self = @This();
@@ -755,7 +739,6 @@ test "ret_ptr doesn't cause own inferred error set to be resolved" {
 
 test "simple else prong allowed even when all errors handled" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn foo() !u8 {
@@ -873,7 +856,6 @@ test "alignment of wrapping an error union payload" {
 
 test "compare error union and error set" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var a: anyerror = error.Foo;
     var b: anyerror!u32 = error.Bar;
@@ -1039,7 +1021,6 @@ test "function called at runtime is properly analyzed for inferred error set" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn foo() !void {
@@ -1063,7 +1044,6 @@ test "generic type constructed from inferred error set of unresolved function" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn write(_: void, bytes: []const u8) !usize {
@@ -1079,8 +1059,6 @@
 }
 
 test "errorCast to adhoc inferred error set" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     const S = struct {
         inline fn baz() !i32 {
             return @errorCast(err());
+1071,6 @@ test "errorCast to adhoc inferred error set" { } test "errorCast from error sets to error unions" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - const err_union: Set1!void = @errorCast(error.A); try expectError(error.A, err_union); } @@ -1103,8 +1079,8 @@ test "result location initialization of error union with OPV payload" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { x: u0, diff --git a/test/behavior/eval.zig b/test/behavior/eval.zig index 4147338b9c..2be355e064 100644 --- a/test/behavior/eval.zig +++ b/test/behavior/eval.zig @@ -73,7 +73,6 @@ fn constExprEvalOnSingleExprBlocksFn(x: i32, b: bool) i32 { test "constant expressions" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var array: [array_size]u8 = undefined; _ = &array; @@ -506,7 +505,6 @@ test "comptime shlWithOverflow" { test "const ptr to variable data changes at runtime" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect(foo_ref.name[0] == 'a'); foo_ref.name = "b"; @@ -549,7 +547,6 @@ test "static eval list init" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect(static_vec3.data[2] == 1.0); try expect(vec3(0.0, 0.0, 3.0).data[2] == 3.0); @@ -721,8 +718,6 @@ fn loopNTimes(comptime n: usize) void { } test "variable inside inline loop that has different types on different iterations" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - try testVarInsideInlineLoop(.{ true, @as(u32, 42) }); } @@ -746,7 +741,6 @@ test "array concatenation of function calls" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var a = oneItem(3) ++ oneItem(4); try expect(std.mem.eql(i32, &a, &[_]i32{ 3, 4 })); @@ -756,7 +750,6 @@ test "array multiplication of function calls" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var a = oneItem(3) ** scalar(2); try expect(std.mem.eql(i32, &a, &[_]i32{ 3, 3 })); @@ -774,7 +767,6 @@ test "array concatenation peer resolves element types - value" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == 
.stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var a = [2]u3{ 1, 7 }; var b = [3]u8{ 200, 225, 255 }; @@ -1088,7 +1080,6 @@ test "comptime break operand passing through runtime condition converted to runt test "comptime break operand passing through runtime switch converted to runtime break" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest(runtime: u8) !void { @@ -1556,7 +1547,6 @@ test "non-optional and optional array elements concatenated" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const array = [1]u8{'A'} ++ [1]?u8{null}; var index: usize = 0; @@ -1631,8 +1621,6 @@ test "struct in comptime false branch is not evaluated" { } test "result of nested switch assigned to variable" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - var zds: u32 = 0; zds = switch (zds) { 0 => switch (zds) { @@ -1647,8 +1635,6 @@ test "result of nested switch assigned to variable" { } test "inline for loop of functions returning error unions" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - const T1 = struct { fn v() error{}!usize { return 1; @@ -1667,8 +1653,6 @@ test "inline for loop of functions returning error unions" { } test "if inside a switch" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - var condition = true; var wave_type: u32 = 0; _ = .{ &condition, &wave_type }; diff --git a/test/behavior/extern.zig b/test/behavior/extern.zig index 135f5e5648..9469b4dc21 100644 --- a/test/behavior/extern.zig +++ b/test/behavior/extern.zig @@ -20,7 +20,6 @@ test "function extern symbol" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const a = @extern(*const fn () callconv(.C) i32, .{ .name = "a_mystery_function" }); try expect(a() == 4567); @@ -34,7 +33,6 @@ test "function extern symbol matches extern decl" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { extern fn another_mystery_function() u32; diff --git a/test/behavior/floatop.zig b/test/behavior/floatop.zig index d32319c644..65d889776a 100644 --- a/test/behavior/floatop.zig +++ b/test/behavior/floatop.zig @@ -22,8 +22,6 @@ test "add f16" { } test "add f32/f64" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - try testAdd(f32); try comptime testAdd(f32); try testAdd(f64); @@ -60,8 +58,6 @@ test "sub f16" { } test "sub f32/f64" { - if 
(builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - try testSub(f32); try comptime testSub(f32); try testSub(f64); @@ -98,8 +94,6 @@ test "mul f16" { } test "mul f32/f64" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - try testMul(f32); try comptime testMul(f32); try testMul(f64); @@ -1005,7 +999,6 @@ test "@abs f32/f64" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testFabs(f32); try comptime testFabs(f32); @@ -1622,7 +1615,6 @@ test "comptime inf >= runtime 1" { test "comptime isNan(nan * 1)" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const nan_times_one = comptime std.math.nan(f64) * 1; try std.testing.expect(std.math.isNan(nan_times_one)); @@ -1630,7 +1622,6 @@ test "comptime isNan(nan * 1)" { test "runtime isNan(nan * 1)" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const nan_times_one = std.math.nan(f64) * 1; try std.testing.expect(std.math.isNan(nan_times_one)); @@ -1638,7 +1629,6 @@ test "runtime isNan(nan * 1)" { test "comptime isNan(nan * 0)" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const nan_times_zero = comptime std.math.nan(f64) * 0; try std.testing.expect(std.math.isNan(nan_times_zero)); @@ -1648,7 +1638,6 @@ test "comptime isNan(nan * 0)" { test "runtime isNan(nan * 0)" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const nan_times_zero = std.math.nan(f64) * 0; try std.testing.expect(std.math.isNan(nan_times_zero)); @@ -1658,7 +1647,6 @@ test "runtime isNan(nan * 0)" { test "comptime isNan(inf * 0)" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const inf_times_zero = comptime std.math.inf(f64) * 0; try std.testing.expect(std.math.isNan(inf_times_zero)); @@ -1668,7 +1656,6 @@ test "comptime isNan(inf * 0)" { test "runtime isNan(inf * 0)" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const inf_times_zero = std.math.inf(f64) * 0; try std.testing.expect(std.math.isNan(inf_times_zero)); diff --git a/test/behavior/fn.zig b/test/behavior/fn.zig index b6eafeefc1..73ef9bdbfe 100644 --- a/test/behavior/fn.zig +++ b/test/behavior/fn.zig @@ -71,7 +71,6 @@ fn outer(y: u32) *const fn (u32) u32 { test "return inner function which references comptime variable of outer function" { if (builtin.zig_backend == .stage2_spirv64) 
return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const func = outer(10); try expect(func(3) == 7); @@ -81,7 +80,6 @@ test "discard the result of a function that returns a struct" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn entry() void { @@ -106,7 +104,6 @@ test "inline function call that calls optional function pointer, return pointer if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { field: u32, @@ -191,7 +188,6 @@ test "function with complex callconv and return type expressions" { test "pass by non-copying value" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect(addPointCoords(Point{ .x = 1, .y = 2 }) == 3); } @@ -207,7 +203,6 @@ fn addPointCoords(pt: Point) i32 { test "pass by non-copying value through var arg" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect((try addPointCoordsVar(Point{ .x = 1, .y = 2 })) == 3); } @@ -219,7 +214,6 @@ fn addPointCoordsVar(pt: anytype) !i32 { test "pass by non-copying value as method" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var pt = Point2{ .x = 1, .y = 2 }; try expect(pt.addPointCoords() == 3); @@ -236,7 +230,6 @@ const Point2 = struct { test "pass by non-copying value as method, which is generic" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var pt = Point3{ .x = 1, .y = 2 }; try expect(pt.addPointCoords(i32) == 3); @@ -265,7 +258,6 @@ test "implicit cast fn call result to optional in field result" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn entry() !void { @@ -292,7 +284,6 @@ test "implicit cast fn call result to optional in field result" { test "void parameters" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try voidFun(1, void{}, 2, {}); } @@ -356,7 +347,6 @@ test "function call with anon list literal" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -377,7 +367,6 @@ test "function call with anon list literal - 2D" { if 
(builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -414,8 +403,6 @@ test "ability to give comptime types and non comptime types to same parameter" { } test "function with inferred error set but returning no error" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - const S = struct { fn foo() !void {} }; @@ -426,7 +413,6 @@ test "function with inferred error set but returning no error" { test "import passed byref to function in return type" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn get() @import("std").ArrayListUnmanaged(i32) { @@ -485,7 +471,6 @@ test "method call with optional and error union first param" { test "method call with optional pointer first param" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { x: i32 = 1234, @@ -505,7 +490,6 @@ test "using @ptrCast on function pointers" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const A = struct { data: [4]u8 }; @@ -543,7 +527,6 @@ test "function returns function returning type" { test "peer type resolution of inferred error set with non-void payload" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn openDataFile(mode: enum { read, write }) !u32 { @@ -586,8 +569,6 @@ test "lazy values passed to anytype parameter" { } test "pass and return comptime-only types" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - const S = struct { fn returnNull(comptime x: @Type(.Null)) @Type(.Null) { return x; diff --git a/test/behavior/fn_delegation.zig b/test/behavior/fn_delegation.zig index 6a3d46c15d..95dbfeb4b2 100644 --- a/test/behavior/fn_delegation.zig +++ b/test/behavior/fn_delegation.zig @@ -34,7 +34,6 @@ fn custom(comptime T: type, comptime num: u64) fn (T) u64 { test "fn delegation" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const foo = Foo{}; try expect(foo.one() == 11); diff --git a/test/behavior/for.zig b/test/behavior/for.zig index 1eac03ec79..4f873bbbe4 100644 --- a/test/behavior/for.zig +++ b/test/behavior/for.zig @@ -69,7 +69,6 @@ test "basic for loop" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const expected_result = [_]u8{ 9, 8, 7, 6, 0, 1, 2, 3 } ** 3; @@ -134,7 +133,6 
@@ test "for with null and T peer types and inferred result location type" { test "2 break statements and an else" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn entry(t: bool, f: bool) !void { @@ -183,7 +181,6 @@ fn mangleString(s: []u8) void { test "for copies its payload" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -203,7 +200,6 @@ test "for on slice with allowzero ptr" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest(slice: []const u8) !void { @@ -219,7 +215,6 @@ test "for on slice with allowzero ptr" { test "else continue outer for" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var i: usize = 6; var buf: [5]u8 = undefined; @@ -283,7 +278,6 @@ test "two counters" { test "1-based counter and ptr to array" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var ok: usize = 0; @@ -317,7 +311,6 @@ test "slice and two counters, one is offset and one is runtime" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const slice: []const u8 = "blah"; var start: usize = 0; @@ -347,7 +340,6 @@ test "two slices, one captured by-ref" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var buf: [10]u8 = undefined; const slice1: []const u8 = "blah"; @@ -367,7 +359,6 @@ test "raw pointer and slice" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var buf: [10]u8 = undefined; const slice: []const u8 = "blah"; @@ -387,7 +378,6 @@ test "raw pointer and counter" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var buf: [10]u8 = undefined; const ptr: [*]u8 = 
&buf; @@ -406,7 +396,6 @@ test "inline for with slice as the comptime-known" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const comptime_slice = "hello"; var runtime_i: usize = 3; @@ -438,7 +427,6 @@ test "inline for with counter as the comptime-known" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var runtime_slice = "hello"; var runtime_i: usize = 3; @@ -471,7 +459,6 @@ test "inline for on tuple pointer" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { u32, u32, u32 }; var s: S = .{ 100, 200, 300 }; @@ -487,7 +474,6 @@ test "ref counter that starts at zero" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; for ([_]usize{ 0, 1, 2 }, 0..) |i, j| { try expectEqual(i, j); diff --git a/test/behavior/generics.zig b/test/behavior/generics.zig index 7ed75f0ead..6bd627dfe3 100644 --- a/test/behavior/generics.zig +++ b/test/behavior/generics.zig @@ -117,7 +117,6 @@ test "function with return type type" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var list: List(i32) = undefined; var list2: List(i32) = undefined; @@ -159,7 +158,6 @@ test "generic fn with implicit cast" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect(getFirstByte(u8, &[_]u8{13}) == 13); try expect(getFirstByte(u16, &[_]u16{ @@ -287,7 +285,6 @@ test "generic function instantiation turns into comptime call" { test "generic function with void and comptime parameter" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { x: i32 }; const namespace = struct { @@ -304,7 +301,6 @@ test "generic function with void and comptime parameter" { test "anonymous struct return type referencing comptime parameter" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { pub fn extraData(comptime T: type, index: usize) struct { data: T, end: usize } 
{ @@ -323,7 +319,6 @@ test "generic function instantiation non-duplicates" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.os.tag == .wasi) return error.SkipZigTest; const S = struct { @@ -395,7 +390,6 @@ test "extern function used as generic parameter" { test "generic struct as parameter type" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest(comptime Int: type, thing: struct { int: Int }) !void { @@ -436,7 +430,6 @@ test "null sentinel pointer passed as generic argument" { test "generic function passed as comptime argument" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doMath(comptime f: fn (type, i32, i32) error{Overflow}!i32, a: i32, b: i32) !void { @@ -449,7 +442,6 @@ test "generic function passed as comptime argument" { test "return type of generic function is function pointer" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn b(comptime T: type) ?*const fn () error{}!T { @@ -462,7 +454,6 @@ test "return type of generic function is function pointer" { test "coerced function body has inequal value with its uncoerced body" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const A = B(i32, c); @@ -547,7 +538,6 @@ test "call generic function with from function called by the generic function" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64 and builtin.os.tag == .windows) return error.SkipZigTest; diff --git a/test/behavior/globals.zig b/test/behavior/globals.zig index 0c988450c0..f7a23b725f 100644 --- a/test/behavior/globals.zig +++ b/test/behavior/globals.zig @@ -7,7 +7,6 @@ test "store to global array" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect(pos[1] == 0.0); pos = [2]f32{ 0.0, 1.0 }; @@ -30,7 +29,6 @@ test "slices pointing at the same address as global array." 
{ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const a = [_]u8{ 1, 2, 3 }; diff --git a/test/behavior/if.zig b/test/behavior/if.zig index 8cb923dd43..a82d9a5c61 100644 --- a/test/behavior/if.zig +++ b/test/behavior/if.zig @@ -45,7 +45,6 @@ var global_with_err: anyerror!u32 = error.SomeError; test "unwrap mutable global var" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (global_with_val) |v| { try expect(v == 0); @@ -139,7 +138,6 @@ test "if-else expression with runtime condition result location is inferred opti if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const A = struct { b: u64, c: u64 }; var d: bool = true; diff --git a/test/behavior/incomplete_struct_param_tld.zig b/test/behavior/incomplete_struct_param_tld.zig index 485156de04..4edf974dab 100644 --- a/test/behavior/incomplete_struct_param_tld.zig +++ b/test/behavior/incomplete_struct_param_tld.zig @@ -23,7 +23,6 @@ fn foo(a: A) i32 { test "incomplete struct param top level declaration" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const a = A{ .b = B{ diff --git a/test/behavior/inline_switch.zig b/test/behavior/inline_switch.zig index 444697b091..d0621ad198 100644 --- a/test/behavior/inline_switch.zig +++ b/test/behavior/inline_switch.zig @@ -5,7 +5,6 @@ const builtin = @import("builtin"); test "inline scalar prongs" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var x: usize = 0; switch (x) { @@ -21,7 +20,6 @@ test "inline scalar prongs" { test "inline prong ranges" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var x: usize = 0; _ = &x; @@ -37,7 +35,6 @@ const E = enum { a, b, c, d }; test "inline switch enums" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var x: E = .a; _ = &x; @@ -79,7 +76,6 @@ test "inline switch unions" { test "inline else bool" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var a = true; _ = &a; @@ -92,7 +88,6 @@ test "inline else bool" { test "inline else error" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; 
     const Err = error{ a, b, c };
     var a = Err.a;
@@ -106,7 +101,6 @@ test "inline else error" {
 test "inline else enum" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     const E2 = enum(u8) { a = 2, b = 3, c = 4, d = 5 };
     var a: E2 = .a;
@@ -120,7 +114,6 @@ test "inline else int with gaps" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     var a: u8 = 0;
     _ = &a;
@@ -139,7 +132,6 @@ test "inline else int all values" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     var a: u2 = 0;
     _ = &a;
diff --git a/test/behavior/ir_block_deps.zig b/test/behavior/ir_block_deps.zig
index e3bb57cf89..a46ad2d8a8 100644
--- a/test/behavior/ir_block_deps.zig
+++ b/test/behavior/ir_block_deps.zig
@@ -21,7 +21,6 @@ test "ir block deps" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     try expect((foo(1) catch unreachable) == 0);
     try expect((foo(2) catch unreachable) == 0);
diff --git a/test/behavior/lower_strlit_to_vector.zig b/test/behavior/lower_strlit_to_vector.zig
index 79315e7a53..948d708aa7 100644
--- a/test/behavior/lower_strlit_to_vector.zig
+++ b/test/behavior/lower_strlit_to_vector.zig
@@ -6,7 +6,6 @@ test "strlit to vector" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     const strlit = "0123456789abcdef0123456789ABCDEF";
     const vec_from_strlit: @Vector(32, u8) = strlit.*;
diff --git a/test/behavior/math.zig b/test/behavior/math.zig
index 66f86ede89..fefcf4b0e8 100644
--- a/test/behavior/math.zig
+++ b/test/behavior/math.zig
@@ -236,7 +236,6 @@ test "float equality" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     const x: f64 = 0.012;
     const y: f64 = x + 1.0;
@@ -593,8 +592,6 @@ fn testSignedWrappingEval(x: i32) !void {
 }

 test "signed negation wrapping" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     try testSignedNegationWrappingEval(minInt(i16));
     try comptime testSignedNegationWrappingEval(minInt(i16));
 }
@@ -605,8 +602,6 @@ fn testSignedNegationWrappingEval(x: i16) !void {
 }

 test "unsigned negation wrapping" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     try testUnsignedNegationWrappingEval(1);
     try comptime testUnsignedNegationWrappingEval(1);
 }
@@ -667,8 +662,6 @@ test "bit shift a u1" {
 }

 test "truncating shift right" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     try testShrTrunc(maxInt(u16));
     try comptime testShrTrunc(maxInt(u16));
 }
@@ -1436,8 +1429,6 @@ test "quad hex float literal parsing accurate" {
 }

 test "truncating shift left" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     try testShlTrunc(maxInt(u16));
     try comptime testShlTrunc(maxInt(u16));
 }
@@ -1460,8 +1451,6 @@ fn testShlExact(x: u8) !void {
 }

 test "exact shift right" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     try testShrExact(0b10110100);
     try comptime testShrExact(0b10110100);
 }
@@ -1471,8 +1460,6 @@ fn testShrExact(x: u8) !void {
 }

 test "shift left/right on u0 operand" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     const S = struct {
         fn doTheTest() !void {
             var x: u0 = 0;
@@ -1821,7 +1808,6 @@ test "absFloat" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     try testAbsFloat();
     try comptime testAbsFloat();
diff --git a/test/behavior/member_func.zig b/test/behavior/member_func.zig
index 1563ad7a4a..bb1e1e1769 100644
--- a/test/behavior/member_func.zig
+++ b/test/behavior/member_func.zig
@@ -31,7 +31,6 @@ test "standard field calls" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     try expect(HasFuncs.one(0) == 1);
     try expect(HasFuncs.two(0) == 2);
@@ -76,7 +75,6 @@ test "@field field calls" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     try expect(@field(HasFuncs, "one")(0) == 1);
     try expect(@field(HasFuncs, "two")(0) == 2);
diff --git a/test/behavior/memset.zig b/test/behavior/memset.zig
index 185c6fafe1..2def7e6ee2 100644
--- a/test/behavior/memset.zig
+++ b/test/behavior/memset.zig
@@ -7,7 +7,6 @@ test "@memset on array pointers" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     try testMemsetArray();
     try comptime testMemsetArray();
@@ -73,7 +72,6 @@ test "memset with bool element" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     var buf: [5]bool = undefined;
     @memset(&buf, true);
@@ -86,7 +84,6 @@ test "memset with 1-byte struct element" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     const S = struct { x: bool };
     var buf: [5]S = undefined;
@@ -100,7 +97,6 @@ test "memset with 1-byte array element" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     const A = [1]bool;
     var buf: [5]A = undefined;
@@ -170,7 +166,6 @@ test "zero keys with @memset" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     const Keys = struct {
         up: bool,
diff --git a/test/behavior/merge_error_sets.zig b/test/behavior/merge_error_sets.zig
index b1f7f69d56..492cb27699 100644
--- a/test/behavior/merge_error_sets.zig
+++ b/test/behavior/merge_error_sets.zig
@@ -13,7 +13,6 @@ fn foo() C!void {

 test "merge error sets" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     if (foo()) {
         @panic("unexpected");
diff --git a/test/behavior/nan.zig b/test/behavior/nan.zig
index e177afa9d0..fc5ce4d0f9 100644
--- a/test/behavior/nan.zig
+++ b/test/behavior/nan.zig
@@ -26,7 +26,6 @@ test "nan memory equality" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     // signaled
     try testing.expect(mem.eql(u8, mem.asBytes(&snan_u16), mem.asBytes(&snan_f16)));
diff --git a/test/behavior/null.zig b/test/behavior/null.zig
index 323f47c896..ebc390c36a 100644
--- a/test/behavior/null.zig
+++ b/test/behavior/null.zig
@@ -85,7 +85,6 @@ fn testTestNullRuntime(x: ?i32) !void {
 test "optional void" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     try optionalVoidImpl();
     try comptime optionalVoidImpl();
@@ -109,7 +108,6 @@ const Empty = struct {};
 test "optional struct{}" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     _ = try optionalEmptyStructImpl();
     _ = try comptime optionalEmptyStructImpl();
@@ -135,7 +133,6 @@ test "null with default unwrap" {

 test "optional pointer to 0 bit type null value at runtime" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     const EmptyStruct = struct {};
     var x: ?*EmptyStruct = null;
diff --git a/test/behavior/optional.zig b/test/behavior/optional.zig
index 7884fec6cd..f9c71d3bea 100644
--- a/test/behavior/optional.zig
+++ b/test/behavior/optional.zig
@@ -29,7 +29,6 @@ pub const EmptyStruct = struct {};

 test "optional pointer to size zero struct" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     var e = EmptyStruct{};
     const o: ?*EmptyStruct = &e;
@@ -60,7 +59,6 @@ fn testNullPtrsEql() !void {
 test "optional with zero-bit type" {
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;

     const S = struct {
@@ -241,7 +239,6 @@ test "compare optionals with modified payloads" {
 test "unwrap function call with optional pointer return value" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     const S = struct {
         fn entry() !void {
@@ -373,7 +370,6 @@ test "0-bit child type coerced to optional return ptr result location" {
 test "0-bit child type coerced to optional" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     const S = struct {
         fn doTheTest() !void {
@@ -492,7 +488,6 @@ const NoReturn = struct {

 test "optional of noreturn used with if" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     NoReturn.a = 64;
     if (NoReturn.loop()) |_| {
@@ -504,7 +499,6 @@ test "optional of noreturn used with if" {

 test "optional of noreturn used with orelse" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     NoReturn.a = 64;
     const val = NoReturn.testOrelse();
@@ -601,7 +595,6 @@ test "cast slice to const slice nested in error union and optional" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     const S = struct {
         fn inner() !?[]u8 {
@@ -615,8 +608,6 @@ test "cast slice to const slice nested in error union and optional" {
 }

 test "variable of optional of noreturn" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     var null_opv: ?noreturn = null;
     _ = &null_opv;
     try std.testing.expectEqual(@as(?noreturn, null), null_opv);
@@ -641,7 +632,6 @@ test "result location initialization of optional with OPV payload" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;

     const S = struct {
diff --git a/test/behavior/packed-struct.zig b/test/behavior/packed-struct.zig
index 89289d6063..88e5457627 100644
--- a/test/behavior/packed-struct.zig
+++ b/test/behavior/packed-struct.zig
@@ -238,7 +238,6 @@ test "regular in irregular packed struct" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     const Irregular = packed struct {
         bar: Regular = Regular{},
@@ -494,7 +493,6 @@ test "@intFromPtr on a packed struct field" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
     if (native_endian != .little) return error.SkipZigTest;

     const S = struct {
@@ -518,7 +516,6 @@ test "@intFromPtr on a packed struct field unaligned and nested" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
     if (native_endian != .little) return error.SkipZigTest; // Byte aligned packed struct field pointers have not been implemented yet

     const S1 = packed struct {
@@ -1191,7 +1188,6 @@ test "packed struct field pointer aligned properly" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     const Foo = packed struct {
         a: i32,
diff --git a/test/behavior/pointers.zig b/test/behavior/pointers.zig
index ffeeca3986..c574f487b3 100644
--- a/test/behavior/pointers.zig
+++ b/test/behavior/pointers.zig
@@ -174,7 +174,6 @@ test "implicit cast error unions with non-optional to optional pointer" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     const S = struct {
         fn doTheTest() !void {
@@ -202,7 +201,6 @@ test "allowzero pointer and slice" {
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     var ptr: [*]allowzero i32 = @ptrFromInt(0);
     const opt_ptr: ?[*]allowzero i32 = ptr;
@@ -222,7 +220,6 @@ test "assign null directly to C pointer and test null equality" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     var x: [*c]i32 = null;
     _ = &x;
@@ -442,7 +439,6 @@ test "indexing array with sentinel returns correct type" {
 test "element pointer to slice" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     const S = struct {
         fn doTheTest() !void {
@@ -490,7 +486,6 @@ test "element pointer arithmetic to slice" {

 test "array slicing to slice" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     const S = struct {
         fn doTheTest() !void {
@@ -541,7 +536,6 @@ test "pointer alignment and element type include call expression" {

 test "pointer to array has explicit alignment" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     const S = struct {
         const Base = extern struct { a: u8 };
diff --git a/test/behavior/prefetch.zig b/test/behavior/prefetch.zig
index 1f21d23001..e98e848393 100644
--- a/test/behavior/prefetch.zig
+++ b/test/behavior/prefetch.zig
@@ -3,7 +3,6 @@ const std = @import("std");

 test "@prefetch()" {
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     var a: [2]u32 = .{ 42, 42 };
     var a_len = a.len;
diff --git a/test/behavior/ptrcast.zig b/test/behavior/ptrcast.zig
index fc8a8b7482..11afc9474a 100644
--- a/test/behavior/ptrcast.zig
+++ b/test/behavior/ptrcast.zig
@@ -58,7 +58,6 @@ fn testReinterpretStructWrappedBytesAsInteger() !void {
 test "reinterpret bytes of an array into an extern struct" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     try testReinterpretBytesAsExternStruct();
     try comptime testReinterpretBytesAsExternStruct();
@@ -233,7 +232,6 @@ test "implicit optional pointer to optional anyopaque pointer" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     var buf: [4]u8 = "aoeu".*;
     const x: ?[*]u8 = &buf;
@@ -246,7 +244,6 @@ test "@ptrCast slice to slice" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     const S = struct {
         fn foo(slice: []u32) []i32 {
diff --git a/test/behavior/ptrfromint.zig b/test/behavior/ptrfromint.zig
index 5e4c6175c3..89706be891 100644
--- a/test/behavior/ptrfromint.zig
+++ b/test/behavior/ptrfromint.zig
@@ -3,8 +3,6 @@ const builtin = @import("builtin");
 const expectEqual = std.testing.expectEqual;

 test "casting integer address to function pointer" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     addressToFunction();
     comptime addressToFunction();
 }
@@ -19,7 +17,6 @@ test "mutate through ptr initialized with constant ptrFromInt value" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     forceCompilerAnalyzeBranchHardCodedPtrDereference(false);
 }
@@ -37,7 +34,6 @@ test "@ptrFromInt creates null pointer" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     const ptr = @as(?*u32, @ptrFromInt(0));
     try expectEqual(@as(?*u32, null), ptr);
@@ -47,7 +43,6 @@ test "@ptrFromInt creates allowzero zero pointer" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     const ptr = @as(*allowzero u32, @ptrFromInt(0));
     try expectEqual(@as(usize, 0), @intFromPtr(ptr));
diff --git a/test/behavior/ref_var_in_if_after_if_2nd_switch_prong.zig b/test/behavior/ref_var_in_if_after_if_2nd_switch_prong.zig
index 366730424a..bb6d5b1359 100644
--- a/test/behavior/ref_var_in_if_after_if_2nd_switch_prong.zig
+++ b/test/behavior/ref_var_in_if_after_if_2nd_switch_prong.zig
@@ -8,7 +8,6 @@ test "reference a variable in an if after an if in the 2nd switch prong" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     try foo(true, Num.Two, false, "aoeu");
     try expect(!ok);
diff --git a/test/behavior/return_address.zig b/test/behavior/return_address.zig
index 675e0e6191..3e8c18c04a 100644
--- a/test/behavior/return_address.zig
+++ b/test/behavior/return_address.zig
@@ -10,7 +10,6 @@ test "return address" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     _ = retAddr();
     // TODO: #14938
diff --git a/test/behavior/sizeof_and_typeof.zig b/test/behavior/sizeof_and_typeof.zig
index 506baa2666..b6206df491 100644
--- a/test/behavior/sizeof_and_typeof.zig
+++ b/test/behavior/sizeof_and_typeof.zig
@@ -81,7 +81,6 @@ const P = packed struct {
 test "@offsetOf" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     // Packed structs have fixed memory layout
     try expect(@offsetOf(P, "a") == 0);
@@ -158,7 +157,6 @@ test "@TypeOf() has no runtime side effects" {

 test "branching logic inside @TypeOf" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     const S = struct {
         var data: i32 = 0;
@@ -273,7 +271,6 @@ test "runtime instructions inside typeof in comptime only scope" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     {
         var y: i8 = 2;
@@ -330,7 +327,6 @@ test "peer type resolution with @TypeOf doesn't trigger dependency loop check" {
     if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     const T = struct {
         next: @TypeOf(null, @as(*const @This(), undefined)),
@@ -412,7 +408,6 @@ test "Extern function calls, dereferences and field access in @TypeOf" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     const Test = struct {
         fn test_fn_1(a: c_long) @TypeOf(c_fopen("test", "r").*) {
diff --git a/test/behavior/slice.zig b/test/behavior/slice.zig
index a1f38b1dfe..e1576ca302 100644
--- a/test/behavior/slice.zig
+++ b/test/behavior/slice.zig
@@ -67,7 +67,6 @@ test "comptime slice of undefined pointer of length 0" {

 test "implicitly cast array of size 0 to slice" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     var msg = [_]u8{};
     try assertLenIsZero(&msg);
@@ -124,7 +123,6 @@ test "slice of type" {

 test "generic malloc free" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     const a = memAlloc(u8, 10) catch unreachable;
     memFree(u8, a);
@@ -186,8 +184,6 @@ test "slicing zero length array" {
 }

 test "slicing pointer by length" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     const array = [_]u8{ 1, 2, 3, 4, 5, 6, 7, 8 };
     const ptr: [*]const u8 = @as([*]const u8, @ptrCast(&array));
     const slice = ptr[1..][0..5];
@@ -236,7 +232,6 @@ test "runtime safety lets us slice from len..len" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     var an_array = [_]u8{ 1, 2, 3 };
     try expect(mem.eql(u8, sliceFromLenToLen(an_array[0..], 3, 3), ""));
@@ -249,7 +244,6 @@ fn sliceFromLenToLen(a_slice: []u8, start: usize, end: usize) []u8 {
 test "C pointer" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     var buf: [*c]const u8 = "kjdhfkjdhfdkjhfkfjhdfkjdhfkdjhfdkjhf";
     var len: u32 = 10;
@@ -293,7 +287,6 @@ fn sliceSum(comptime q: []const u8) i32 {
 test "slice type with custom alignment" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     const LazilyResolvedType = struct {
         anything: i32,
@@ -307,7 +300,6 @@ test "slice type with custom alignment" {

 test "obtaining a null terminated slice" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     // here we have a normal array
     var buf: [50]u8 = undefined;
@@ -352,7 +344,6 @@ test "empty array to slice" {
 test "@ptrCast slice to pointer" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     const S = struct {
         fn doTheTest() !void {
@@ -407,7 +398,6 @@ test "slice syntax resulting in pointer-to-array" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;

     const S = struct {
@@ -627,7 +617,6 @@ test "slice syntax resulting in pointer-to-array" {
 test "slice pointer-to-array null terminated" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     comptime {
         var array = [5:0]u8{ 1, 2, 3, 4, 5 };
@@ -646,7 +635,6 @@ test "slice pointer-to-array null terminated" {

 test "slice pointer-to-array zero length" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     comptime {
         {
@@ -681,7 +669,6 @@ test "type coercion of pointer to anon struct literal to pointer to slice" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     const S = struct {
         const U = union {
@@ -773,7 +760,6 @@ test "slice sentinel access at comptime" {
 test "slicing array with sentinel as end index" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     const S = struct {
         fn do() !void {
@@ -792,7 +778,6 @@ test "slicing array with sentinel as end index" {
 test "slicing slice with sentinel as end index" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     const S = struct {
         fn do() !void {
@@ -863,7 +848,6 @@ test "global slice field access" {
 }

 test "slice of void" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;

     var n: usize = 10;
@@ -874,8 +858,6 @@ test "slice of void" {
 }

 test "slice with dereferenced value" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     var a: usize = 0;
     const idx: *usize = &a;
     _ = blk: {
@@ -989,7 +971,6 @@ test "get address of element of zero-sized slice" {
     if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;

     const S = struct {
@@ -1004,7 +985,6 @@ test "sentinel-terminated 0-length slices" {
     if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     const u32s: [4]u32 = [_]u32{ 0, 1, 2, 3 };
diff --git a/test/behavior/struct.zig b/test/behavior/struct.zig
index 602be7e95e..520c3ff409 100644
--- a/test/behavior/struct.zig
+++ b/test/behavior/struct.zig
@@ -92,7 +92,6 @@ test "structs" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     var foo: StructFoo = undefined;
     @memset(@as([*]u8, @ptrCast(&foo))[0..@sizeOf(StructFoo)], 0);
@@ -111,7 +110,6 @@ fn testMutation(foo: *StructFoo) void {

 test "struct byval assign" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     var foo1: StructFoo = undefined;
     var foo2: StructFoo = undefined;
@@ -176,7 +174,6 @@ const MemberFnTestFoo = struct {

 test "call member function directly" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     const instance = MemberFnTestFoo{ .x = 1234 };
     const result = MemberFnTestFoo.member(instance);
@@ -185,7 +182,6 @@ test "call member function directly" {

 test "store member function in variable" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     const instance = MemberFnTestFoo{ .x = 1234 };
     const memberFn = MemberFnTestFoo.member;
@@ -207,7 +203,6 @@ const MemberFnRand = struct {
 test "return struct byval from function" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     const Bar = struct {
         x: i32,
@@ -256,7 +251,6 @@ test "usingnamespace within struct scope" {
 test "struct field init with catch" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     const S = struct {
         fn doTheTest() !void {
@@ -300,7 +294,6 @@ const Val = struct {
 test "struct point to self" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     var root: Node = undefined;
     root.val.x = 1;
@@ -355,7 +348,6 @@ test "self-referencing struct via array member" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     const T = struct {
         children: [1]*@This(),
@@ -403,7 +395,6 @@ test "packed struct" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     var foo = APackedStruct{
         .x = 1,
@@ -633,7 +624,6 @@ fn getC(data: *const BitField1) u2 {
 test "default struct initialization fields" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     const S = struct {
         a: i32 = 1234,
@@ -809,7 +799,6 @@
test "fn with C calling convention returns struct by value" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn entry() !void { @@ -909,8 +898,6 @@ test "anonymous struct literal syntax" { } test "fully anonymous struct" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - const S = struct { fn doTheTest() !void { try dump(.{ @@ -933,8 +920,6 @@ test "fully anonymous struct" { } test "fully anonymous list literal" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - const S = struct { fn doTheTest() !void { try dump(.{ @as(u32, 1234), @as(f64, 12.34), true, "hi" }); @@ -982,7 +967,6 @@ test "tuple element initialized with fn call" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -1023,7 +1007,6 @@ test "struct with 0-length union array field" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const U = union { a: u32, @@ -1044,7 +1027,6 @@ test "type coercion of anon struct literal to struct" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const S2 = struct { @@ -1084,7 +1066,6 @@ test "type coercion of pointer to anon struct literal to pointer to struct" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const S2 = struct { @@ -1299,7 +1280,6 @@ test "initialize struct with empty literal" { test "loading a struct pointer perfoms a copy" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { a: i32, @@ -1561,7 +1541,6 @@ test "discarded struct initialization works as expected" { test "function pointer in struct returns the struct" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const A = struct { const A = @This(); @@ -1732,7 +1711,6 @@ test "extern struct field pointer has correct alignment" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO 
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -1770,8 +1748,6 @@ test "extern struct field pointer has correct alignment" {
 }
 
 test "packed struct field in anonymous struct" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     const T = packed struct {
         f1: bool = false,
     };
@@ -1783,8 +1759,6 @@ fn countFields(v: anytype) usize {
 }
 
 test "struct init with no result pointer sets field result types" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     const S = struct {
         // A function parameter has a result type, but no result pointer.
         fn f(s: struct { x: u32 }) u32 {
@@ -1863,8 +1837,6 @@ test "comptimeness of optional and error union payload is analyzed properly" {
 }
 
 test "initializer uses own alignment" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     const S = struct {
         x: u32 = @alignOf(@This()) + 1,
     };
@@ -1876,8 +1848,6 @@ test "initializer uses own size" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     const S = struct {
         x: u32 = @sizeOf(@This()) + 1,
     };
@@ -1889,8 +1859,6 @@ test "initializer takes a pointer to a variable inside its struct" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     const namespace = struct {
         const S = struct {
             s: *S = &S.instance,
@@ -1909,8 +1877,6 @@ test "circular dependency through pointer field of a struct" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     const S = struct {
         const StructInner = extern struct {
             outer: StructOuter = std.mem.zeroes(StructOuter),
@@ -1932,8 +1898,6 @@ test "field calls do not force struct field init resolution" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     const S = struct {
         x: u32 = blk: {
             _ = @TypeOf(make().dummyFn()); // runtime field call - S not fully resolved - dummyFn call should not force field init resolution
@@ -2067,7 +2031,6 @@ test "runtime value in nested initializer passed as pointer to function" {
     if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const Bar = struct {
         b: u32,
@@ -2142,7 +2105,6 @@ test "assignment of field with padding" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const Mesh = extern struct {
         id: u32,
@@ -2173,7 +2135,6 @@ test "initiate global variable with runtime value" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         field: i32,
@@ -2192,7 +2153,6 @@ test "initiate global variable with runtime value" {
 test "struct containing optional pointer to array of @This()" {
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         x: ?*const [1]@This(),
diff --git a/test/behavior/struct_contains_null_ptr_itself.zig b/test/behavior/struct_contains_null_ptr_itself.zig
index d3dacc50cd..d0cb3ef443 100644
--- a/test/behavior/struct_contains_null_ptr_itself.zig
+++ b/test/behavior/struct_contains_null_ptr_itself.zig
@@ -5,7 +5,6 @@ const builtin = @import("builtin");
 test "struct contains null pointer which contains original struct" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var x: ?*NodeLineComment = null;
     _ = &x;
diff --git a/test/behavior/struct_contains_slice_of_itself.zig b/test/behavior/struct_contains_slice_of_itself.zig
index 6f6d829567..adb1c31047 100644
--- a/test/behavior/struct_contains_slice_of_itself.zig
+++ b/test/behavior/struct_contains_slice_of_itself.zig
@@ -13,7 +13,6 @@ const NodeAligned = struct {
 
 test "struct contains slice of itself" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var other_nodes = [_]Node{
         Node{
@@ -54,7 +53,6 @@ test "struct contains slice of itself" {
 test "struct contains aligned slice of itself" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var other_nodes = [_]NodeAligned{
         NodeAligned{
diff --git a/test/behavior/switch.zig b/test/behavior/switch.zig
index 78365e8763..8c5fcda8c2 100644
--- a/test/behavior/switch.zig
+++ b/test/behavior/switch.zig
@@ -7,7 +7,6 @@ const expectEqual = std.testing.expectEqual;
 
 test "switch with numbers" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try testSwitchWithNumbers(13);
 }
@@ -23,7 +22,6 @@ fn testSwitchWithNumbers(x: u32) !void {
 
 test "switch with all ranges" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try expect(testSwitchWithAllRanges(50, 3) == 1);
     try expect(testSwitchWithAllRanges(101, 0) == 2);
@@ -57,27 +55,25 @@ test "implicit comptime switch" {
 
 test "switch on enum" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const fruit = Fruit.Orange;
-    nonConstSwitchOnEnum(fruit);
+    try expect(nonConstSwitchOnEnum(fruit));
 }
 const Fruit = enum {
     Apple,
     Orange,
     Banana,
 };
-fn nonConstSwitchOnEnum(fruit: Fruit) void {
-    switch (fruit) {
-        Fruit.Apple => unreachable,
-        Fruit.Orange => {},
-        Fruit.Banana => unreachable,
-    }
+fn nonConstSwitchOnEnum(fruit: Fruit) bool {
+    return switch (fruit) {
+        Fruit.Apple => false,
+        Fruit.Orange => true,
+        Fruit.Banana => false,
+    };
 }
 
 test "switch statement" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try nonConstSwitch(SwitchStatementFoo.C);
 }
@@ -94,7 +90,6 @@ const SwitchStatementFoo = enum { A, B, C, D };
 
 test "switch with multiple expressions" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const x = switch (returnsFive()) {
         1, 2, 3 => 1,
@@ -123,7 +118,6 @@ fn trueIfBoolFalseOtherwise(comptime T: type) bool {
 
 test "switching on booleans" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try testSwitchOnBools();
     try comptime testSwitchOnBools();
@@ -179,7 +173,6 @@ test "undefined.u0" {
 
 test "switch with disjoint range" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var q: u8 = 0;
     _ = &q;
@@ -191,8 +184,6 @@ test "switch with disjoint range" {
 }
 
 test "switch variable for range and multiple prongs" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     const S = struct {
         fn doTheTest() !void {
             try doTheSwitch(16);
@@ -224,7 +215,6 @@ fn poll() void {
 
 test "switch on global mutable var isn't constant-folded" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     while (state < 2) {
         poll();
@@ -286,7 +276,6 @@ fn testSwitchEnumPtrCapture() !void {
 
 test "switch handles all cases of number" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try testSwitchHandleAllCases();
     try comptime testSwitchHandleAllCases();
@@ -382,7 +371,6 @@ test "anon enum literal used in switch on union enum" {
 
 test "switch all prongs unreachable" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try testAllProngsUnreachable();
     try comptime testAllProngsUnreachable();
@@ -406,7 +394,6 @@ fn switchWithUnreachable(x: i32) i32 {
 
 test "capture value of switch with all unreachable prongs" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const x = return_a_number() catch |err| switch (err) {
         else => unreachable,
@@ -420,7 +407,6 @@ fn return_a_number() anyerror!i32 {
 
 test "switch on integer with else capturing expr" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -442,7 +428,6 @@ test "else prong of switch on error set excludes other cases" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -478,7 +463,6 @@ test "switch prongs with error set cases make a new error set type for capture v
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -513,7 +497,6 @@ test "switch prongs with error set cases make a new error set type for capture v
 
 test "return result loc and then switch with range implicit casted to error union" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -662,7 +645,6 @@ test "switch prong pointer capture alignment" {
 test "switch on pointer type" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         const X = struct {
@@ -735,7 +717,6 @@ test "switch capture copies its payload" {
 
 test "capture of integer forwards the switch condition directly" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn foo(x: u8) !void {
@@ -757,7 +738,6 @@ test "capture of integer forwards the switch condition directly" {
 
 test "enum value without tag name used as switch item" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const E = enum(u32) {
         a = 1,
@@ -775,8 +755,6 @@ test "enum value without tag name used as switch item" {
 }
 
 test "switch item sizeof" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     const S = struct {
         fn doTheTest() !void {
             var a: usize = 0;
@@ -873,8 +851,6 @@ test "switch pointer capture peer type resolution" {
 }
 
 test "inline switch range that includes the maximum value of the switched type" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     const inputs: [3]u8 = .{ 0, 254, 255 };
     for (inputs) |input| {
         switch (input) {
@@ -933,7 +909,6 @@ test "peer type resolution on switch captures ignores unused payload bits" {
 test "switch prong captures range" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn a(b: []u3, c: u3) void {
@@ -970,8 +945,6 @@ test "prong with inline call to unreachable" {
 }
 
 test "block error return trace index is reset between prongs" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     const S = struct {
         fn returnError() error{TestFailed} {
             return error.TestFailed;
diff --git a/test/behavior/this.zig b/test/behavior/this.zig
index 330f9a714d..3638168a4f 100644
--- a/test/behavior/this.zig
+++ b/test/behavior/this.zig
@@ -51,7 +51,6 @@ test "this used as optional function parameter" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var global: State = undefined;
     global.enter = prev;
diff --git a/test/behavior/try.zig b/test/behavior/try.zig
index e8ab96e5c9..cc76658e93 100644
--- a/test/behavior/try.zig
+++ b/test/behavior/try.zig
@@ -4,7 +4,6 @@ const expect = std.testing.expect;
 
 test "try on error union" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try tryOnErrorUnionImpl();
     try comptime tryOnErrorUnionImpl();
@@ -52,7 +51,6 @@ test "`try`ing an if/else expression" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn getError() !void {
diff --git a/test/behavior/tuple.zig b/test/behavior/tuple.zig
index 736bbad806..142768454d 100644
--- a/test/behavior/tuple.zig
+++ b/test/behavior/tuple.zig
@@ -10,7 +10,6 @@ test "tuple concatenation" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -56,7 +55,6 @@ test "more tuple concatenation" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const T = struct {
         fn consume_tuple(tuple: anytype, len: usize) !void {
@@ -326,8 +324,6 @@ test "tuple type with void field" {
 }
 
 test "zero sized struct in tuple handled correctly" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     const State = struct {
         const Self = @This();
         data: @Type(.{
@@ -369,7 +365,6 @@ test "branching inside tuple literal" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn foo(a: anytype) !void {
@@ -474,7 +469,6 @@ test "coerce anon tuple to tuple" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var x: u8 = 1;
     var y: u16 = 2;
@@ -579,8 +573,6 @@ test "comptime fields in tuple can be initialized" {
 }
 
 test "tuple default values" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     const T = struct {
         usize,
         usize = 123,
diff --git a/test/behavior/type.zig b/test/behavior/type.zig
index 6150a490cf..1a36f576f1 100644
--- a/test/behavior/type.zig
+++ b/test/behavior/type.zig
@@ -203,7 +203,6 @@ test "Type.Opaque" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const Opaque = @Type(.{
         .Opaque = .{
@@ -261,7 +260,6 @@ test "Type.Struct" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const A = @Type(@typeInfo(struct { x: u8, y: u32 }));
     const infoA = @typeInfo(A).Struct;
diff --git a/test/behavior/undefined.zig b/test/behavior/undefined.zig
index bc613585d3..8d31c95ea5 100644
--- a/test/behavior/undefined.zig
+++ b/test/behavior/undefined.zig
@@ -91,7 +91,6 @@ test "reslice of undefined global var slice" {
     if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var stack_buf: [100]u8 = [_]u8{0} ** 100;
     buf = &stack_buf;
diff --git a/test/behavior/underscore.zig b/test/behavior/underscore.zig
index a53fec489b..66b49e52d5 100644
--- a/test/behavior/underscore.zig
+++ b/test/behavior/underscore.zig
@@ -8,7 +8,6 @@ test "ignore lval with underscore" {
 
 test "ignore lval with underscore (while loop)" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     while (optionalReturnError()) |_| {
         while (optionalReturnError()) |_| {
diff --git a/test/behavior/union.zig b/test/behavior/union.zig
index 004774bd17..d2009f57df 100644
--- a/test/behavior/union.zig
+++ b/test/behavior/union.zig
@@ -418,7 +418,6 @@ test "tagged union initialization with runtime void" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try expect(testTaggedUnionInit({}));
 }
@@ -1744,7 +1743,6 @@ test "union with 128 bit integer" {
 
 test "memset extern union" {
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const U = extern union {
         foo: u8,
@@ -1766,7 +1764,6 @@ test "memset extern union" {
 
 test "memset packed union" {
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const U = packed union {
         a: u32,
@@ -1977,8 +1974,6 @@ test "reinterpret packed union inside packed struct" {
 }
 
 test "inner struct initializer uses union layout" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     const namespace = struct {
         const U = union {
             a: struct {
@@ -2004,7 +1999,6 @@ test "inner struct initializer uses union layout" {
 
 test "inner struct initializer uses packed union layout" {
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const namespace = struct {
         const U = packed union {
@@ -2031,7 +2025,6 @@ test "inner struct initializer uses packed union layout" {
 
 test "extern union initialized via reintepreted struct field initializer" {
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const bytes = [_]u8{ 0xaa, 0xbb, 0xcc, 0xdd };
@@ -2155,7 +2148,6 @@ test "pass register-sized field as non-register-sized union" {
 
 test "circular dependency through pointer field of a union" {
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         const UnionInner = extern struct {
diff --git a/test/behavior/var_args.zig b/test/behavior/var_args.zig
index a3d4f09d2e..8445df14d0 100644
--- a/test/behavior/var_args.zig
+++ b/test/behavior/var_args.zig
@@ -14,8 +14,6 @@ fn add(args: anytype) i32 {
 }
 
 test "add arbitrary args" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     try expect(add(.{ @as(i32, 1), @as(i32, 2), @as(i32, 3), @as(i32, 4) }) == 10);
     try expect(add(.{@as(i32, 1234)}) == 1234);
     try expect(add(.{}) == 0);
@@ -26,15 +24,12 @@ fn readFirstVarArg(args: anytype) void {
 }
 
 test "send void arg to var args" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     readFirstVarArg(.{{}});
 }
 
 test "pass args directly" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try expect(addSomeStuff(.{ @as(i32, 1), @as(i32, 2), @as(i32, 3), @as(i32, 4) }) == 10);
     try expect(addSomeStuff(.{@as(i32, 1234)}) == 1234);
@@ -48,7 +43,6 @@ fn addSomeStuff(args: anytype) i32 {
 test "runtime parameter before var args" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try expect((try extraFn(10, .{})) == 0);
     try expect((try extraFn(10, .{false})) == 1);
@@ -87,15 +81,11 @@ fn foo2(args: anytype) bool {
 }
 
 test "array of var args functions" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     try expect(foos[0](.{}));
     try expect(!foos[1](.{}));
 }
 
 test "pass zero length array to var args param" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     doNothingWithFirstArg(.{""});
 }
diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig
index 2e860e1001..5a4da799c2 100644
--- a/test/behavior/vector.zig
+++ b/test/behavior/vector.zig
@@ -166,7 +166,6 @@ test "array to vector" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -434,6 +433,7 @@ test "load vector elements via runtime index" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
diff --git a/test/behavior/void.zig b/test/behavior/void.zig
index 5c4215b870..26d7a4e4c7 100644
--- a/test/behavior/void.zig
+++ b/test/behavior/void.zig
@@ -37,7 +37,6 @@ test "void optional" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var x: ?void = {};
     _ = &x;
diff --git a/test/behavior/while.zig b/test/behavior/while.zig
index e1e5ebbfb3..71641ea265 100644
--- a/test/behavior/while.zig
+++ b/test/behavior/while.zig
@@ -258,7 +258,6 @@ fn returnWithImplicitCastFromWhileLoopTest() anyerror!void {
 
 test "while on error union with else result follow else prong" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const result = while (returnError()) |value| {
         break value;
@@ -268,7 +267,6 @@ test "while on error union with else result follow else prong" {
 
 test "while on error union with else result follow break prong" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const result = while (returnSuccess(10)) |value| {
         break value;
@@ -315,7 +313,6 @@ test "while error 2 break statements and an else" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn entry(opt_t: anyerror!bool, f: bool) !void {
@@ -382,7 +379,6 @@ test "while loop with comptime true condition needs no else block to return valu
 test "int returned from switch in while" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var x: u32 = 3;
     const val: usize = while (true) switch (x) {
diff --git a/test/behavior/widening.zig b/test/behavior/widening.zig
index 16f97550b5..5033d76313 100644
--- a/test/behavior/widening.zig
+++ b/test/behavior/widening.zig
@@ -32,7 +32,6 @@ test "implicit unsigned integer to signed integer" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var a: u8 = 250;
     var b: i16 = a;
@@ -80,7 +79,6 @@ test "cast small unsigned to larger signed" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try expect(castSmallUnsignedToLargerSigned1(200) == @as(i16, 200));
     try expect(castSmallUnsignedToLargerSigned2(9999) == @as(i64, 9999));