From dceff2592f6a6305770916499c688071563ddf0d Mon Sep 17 00:00:00 2001 From: David Rubin Date: Sun, 10 Mar 2024 19:06:37 -0700 Subject: [PATCH 01/44] riscv: initial cleanup and work --- lib/std/builtin.zig | 8 +- src/arch/riscv64/CodeGen.zig | 734 ++++++++++++++++++++++++----------- src/arch/riscv64/Emit.zig | 193 ++++++++- src/arch/riscv64/Mir.zig | 111 +++++- 4 files changed, 784 insertions(+), 262 deletions(-) diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig index 6354cb261a..e0e0edf906 100644 --- a/lib/std/builtin.zig +++ b/lib/std/builtin.zig @@ -759,6 +759,13 @@ else pub fn default_panic(msg: []const u8, error_return_trace: ?*StackTrace, ret_addr: ?usize) noreturn { @setCold(true); + // stage2_riscv64 backend doesn't support loops yet. + if (builtin.zig_backend == .stage2_riscv64 or + builtin.cpu.arch == .riscv64) + { + unreachable; + } + // For backends that cannot handle the language features depended on by the // default panic handler, we have a simpler panic handler: if (builtin.zig_backend == .stage2_wasm or @@ -766,7 +773,6 @@ pub fn default_panic(msg: []const u8, error_return_trace: ?*StackTrace, ret_addr builtin.zig_backend == .stage2_aarch64 or builtin.zig_backend == .stage2_x86 or (builtin.zig_backend == .stage2_x86_64 and (builtin.target.ofmt != .elf and builtin.target.ofmt != .macho)) or - builtin.zig_backend == .stage2_riscv64 or builtin.zig_backend == .stage2_sparc64 or builtin.zig_backend == .stage2_spirv64) { diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 5abe3afcfd..de78dd75dc 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -33,7 +33,6 @@ const abi = @import("abi.zig"); const Register = bits.Register; const RegisterManager = abi.RegisterManager; const RegisterLock = RegisterManager.RegisterLock; -const Instruction = abi.Instruction; const callee_preserved_regs = abi.callee_preserved_regs; const gp = abi.RegisterClass.gp; @@ -96,6 +95,8 @@ air_bookkeeping: @TypeOf(air_bookkeeping_init) = air_bookkeeping_init, const air_bookkeeping_init = if (std.debug.runtime_safety) @as(usize, 0) else {}; +const SymbolOffset = struct { sym: u32, off: i32 = 0 }; + const MCValue = union(enum) { /// No runtime bits. `void` types, empty structs, u0, enums with 1 tag, etc. /// TODO Look into deleting this tag and using `dead` instead, since every use @@ -110,6 +111,9 @@ const MCValue = union(enum) { /// A pointer-sized integer that fits in a register. /// If the type is a pointer, this is the pointer address in virtual address space. immediate: u64, + /// The value is in memory at an address not-yet-allocated by the linker. + /// This traditionally corresponds to a relocation emitted in a relocatable object file. + load_symbol: SymbolOffset, /// The value is in a target-specific register. register: Register, /// The value is in memory at a hard-coded address. @@ -145,6 +149,7 @@ const MCValue = union(enum) { .memory, .ptr_stack_offset, .undef, + .load_symbol, => false, .register, @@ -165,12 +170,12 @@ const Branch = struct { const StackAllocation = struct { inst: Air.Inst.Index, - /// TODO do we need size? should be determined by inst.ty.abiSize() + /// TODO: make the size inferred from the bits of the inst size: u32, }; const BlockData = struct { - relocs: std.ArrayListUnmanaged(Reloc), + relocs: std.ArrayListUnmanaged(Mir.Inst.Index), /// The first break instruction encounters `null` here and chooses a /// machine code value for the block result, populating this field. 
/// Following break instructions encounter that value and use it for @@ -178,18 +183,6 @@ const BlockData = struct { mcv: MCValue, }; -const Reloc = union(enum) { - /// The value is an offset into the `Function` `code` from the beginning. - /// To perform the reloc, write 32-bit signed little-endian integer - /// which is a relative jump, based on the address following the reloc. - rel32: usize, - /// A branch in the ARM instruction set - arm_branch: struct { - pos: usize, - cond: @import("../arm/bits.zig").Condition, - }, -}; - const BigTomb = struct { function: *Self, inst: Air.Inst.Index, @@ -272,6 +265,7 @@ pub fn generate( }, else => |e| return e, }; + defer call_info.deinit(&function); function.args = call_info.args; @@ -328,6 +322,13 @@ fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index { return result_index; } +fn addNop(self: *Self) error{OutOfMemory}!Mir.Inst.Index { + return try self.addInst(.{ + .tag = .nop, + .data = .{ .nop = {} }, + }); +} + pub fn addExtra(self: *Self, extra: anytype) Allocator.Error!u32 { const fields = std.meta.fields(@TypeOf(extra)); try self.mir_extra.ensureUnusedCapacity(self.gpa, fields.len); @@ -350,115 +351,45 @@ pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 { fn gen(self: *Self) !void { const mod = self.bin_file.comp.module.?; const cc = self.fn_type.fnCallingConvention(mod); - if (cc != .Naked) { - // TODO Finish function prologue and epilogue for riscv64. - // TODO Backpatch stack offset - // addi sp, sp, -16 - _ = try self.addInst(.{ - .tag = .addi, - .data = .{ .i_type = .{ - .rd = .sp, - .rs1 = .sp, - .imm12 = -16, - } }, - }); + if (cc == .Naked) return self.fail("TODO: gen support callconv(.{s})", .{@tagName(cc)}); - // sd ra, 8(sp) - _ = try self.addInst(.{ - .tag = .sd, - .data = .{ .i_type = .{ - .rd = .ra, - .rs1 = .sp, - .imm12 = 8, - } }, - }); + _ = try self.addInst(.{ + .tag = .psuedo_prologue, + .data = .{ .imm12 = 0 }, // Backpatched later. 
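+        // The 0 is a placeholder: once `genBody` has run and `max_end_stack`
+        // is known, the real frame size is written back into this
+        // instruction (see "Backpatch prologue stack size" below).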
+ }); - // sd s0, 0(sp) - _ = try self.addInst(.{ - .tag = .sd, - .data = .{ .i_type = .{ - .rd = .s0, - .rs1 = .sp, - .imm12 = 0, - } }, - }); + _ = try self.addInst(.{ + .tag = .dbg_prologue_end, + .data = .{ .nop = {} }, + }); - _ = try self.addInst(.{ - .tag = .dbg_prologue_end, - .data = .{ .nop = {} }, - }); + try self.genBody(self.air.getMainBody()); - try self.genBody(self.air.getMainBody()); + // Backpatch prologue stack size + if (math.cast(i12, self.max_end_stack)) |casted_stack_size| { + self.mir_instructions.items(.data)[0].imm12 = casted_stack_size; + } else return self.fail("TODO support larger stack sizes, got {}", .{self.max_end_stack}); - _ = try self.addInst(.{ - .tag = .dbg_epilogue_begin, - .data = .{ .nop = {} }, - }); + _ = try self.addInst(.{ + .tag = .dbg_epilogue_begin, + .data = .{ .nop = {} }, + }); - // exitlude jumps - if (self.exitlude_jump_relocs.items.len > 0 and - self.exitlude_jump_relocs.items[self.exitlude_jump_relocs.items.len - 1] == self.mir_instructions.len - 2) - { - // If the last Mir instruction (apart from the - // dbg_epilogue_begin) is the last exitlude jump - // relocation (which would just jump one instruction - // further), it can be safely removed - self.mir_instructions.orderedRemove(self.exitlude_jump_relocs.pop()); - } + // exitlude jumps + if (self.exitlude_jump_relocs.items.len > 0 and + self.exitlude_jump_relocs.items[self.exitlude_jump_relocs.items.len - 1] == self.mir_instructions.len - 2) + { + // If the last Mir instruction (apart from the + // dbg_epilogue_begin) is the last exitlude jump + // relocation (which would just jump one instruction + // further), it can be safely removed + self.mir_instructions.orderedRemove(self.exitlude_jump_relocs.pop()); + } - for (self.exitlude_jump_relocs.items) |jmp_reloc| { - _ = jmp_reloc; - return self.fail("TODO add branches in RISCV64", .{}); - } - - // ld ra, 8(sp) - _ = try self.addInst(.{ - .tag = .ld, - .data = .{ .i_type = .{ - .rd = .ra, - .rs1 = .sp, - .imm12 = 8, - } }, - }); - - // ld s0, 0(sp) - _ = try self.addInst(.{ - .tag = .ld, - .data = .{ .i_type = .{ - .rd = .s0, - .rs1 = .sp, - .imm12 = 0, - } }, - }); - - // addi sp, sp, 16 - _ = try self.addInst(.{ - .tag = .addi, - .data = .{ .i_type = .{ - .rd = .sp, - .rs1 = .sp, - .imm12 = 16, - } }, - }); - - // ret - _ = try self.addInst(.{ - .tag = .ret, - .data = .{ .nop = {} }, - }); - } else { - _ = try self.addInst(.{ - .tag = .dbg_prologue_end, - .data = .{ .nop = {} }, - }); - - try self.genBody(self.air.getMainBody()); - - _ = try self.addInst(.{ - .tag = .dbg_epilogue_begin, - .data = .{ .nop = {} }, - }); + for (self.exitlude_jump_relocs.items) |jmp_reloc| { + _ = jmp_reloc; + return self.fail("TODO add branches in RISCV64", .{}); } // Drop them off at the rbrace. 
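At emit time, `mirPsuedo` in Emit.zig (below) expands the pseudo-prologue
emitted at the top of `gen` into the standard frame setup, roughly the
following, with `stack_size` being the backpatched i12 clamped to at least 32:

    addi sp, sp, -stack_size     # reserve the frame
    sd   ra, stack_size-8(sp)    # spill the return address
    sd   s0, stack_size-16(sp)   # spill the frame pointer
    addi s0, sp, stack_size      # point s0 at the caller's sp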
@@ -535,12 +466,12 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {

             .div_float, .div_trunc, .div_floor, .div_exact => try self.airDiv(inst),

-            .cmp_lt => try self.airCmp(inst, .lt),
-            .cmp_lte => try self.airCmp(inst, .lte),
-            .cmp_eq => try self.airCmp(inst, .eq),
-            .cmp_gte => try self.airCmp(inst, .gte),
-            .cmp_gt => try self.airCmp(inst, .gt),
-            .cmp_neq => try self.airCmp(inst, .neq),
+            .cmp_lt => try self.airCmp(inst),
+            .cmp_lte => try self.airCmp(inst),
+            .cmp_eq => try self.airCmp(inst),
+            .cmp_gte => try self.airCmp(inst),
+            .cmp_gt => try self.airCmp(inst),
+            .cmp_neq => try self.airCmp(inst),

             .cmp_vector => try self.airCmpVector(inst),
             .cmp_lt_errors_len => try self.airCmpLtErrorsLen(inst),
@@ -565,6 +496,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
             .frame_addr => try self.airFrameAddress(inst),
             .fence => try self.airFence(),
             .cond_br => try self.airCondBr(inst),
+            .dbg_stmt => try self.airDbgStmt(inst),
             .fptrunc => try self.airFptrunc(inst),
             .fpext => try self.airFpext(inst),
             .intcast => try self.airIntCast(inst),
@@ -617,17 +549,17 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
             .union_init => try self.airUnionInit(inst),
             .prefetch => try self.airPrefetch(inst),
             .mul_add => try self.airMulAdd(inst),
-            .addrspace_cast => @panic("TODO"),
+            .addrspace_cast => return self.fail("TODO: addrspace_cast", .{}),

-            .@"try" => @panic("TODO"),
-            .try_ptr => @panic("TODO"),
+            .@"try" => return self.fail("TODO: try", .{}),
+            .try_ptr => return self.fail("TODO: try_ptr", .{}),

-            .dbg_stmt => try self.airDbgStmt(inst),
-            .dbg_inline_block => try self.airDbgInlineBlock(inst),
             .dbg_var_ptr,
             .dbg_var_val,
             => try self.airDbgVar(inst),

+            .dbg_inline_block => try self.airDbgInlineBlock(inst),
+
             .call => try self.airCall(inst, .auto),
             .call_always_tail => try self.airCall(inst, .always_tail),
             .call_never_tail => try self.airCall(inst, .never_tail),
@@ -1019,17 +951,20 @@ fn binOpRegister(
     const mir_tag: Mir.Inst.Tag = switch (tag) {
         .add => .add,
         .sub => .sub,
-        else => unreachable,
+        .cmp_eq => .cmp_eq,
+        .cmp_gt => .cmp_gt,
+        else => return self.fail("TODO: binOpRegister {s}", .{@tagName(tag)}),
     };
     const mir_data: Mir.Inst.Data = switch (tag) {
         .add,
         .sub,
+        .cmp_eq,
         => .{ .r_type = .{
             .rd = dest_reg,
             .rs1 = lhs_reg,
             .rs2 = rhs_reg,
         } },
-        else => unreachable,
+        else => return self.fail("TODO: binOpRegister {s}", .{@tagName(tag)}),
     };

     _ = try self.addInst(.{
@@ -1052,6 +987,8 @@
 /// looks at the lhs and rhs and determines which kind of lowering
 /// would be most suitable and then delegates the lowering to other
 /// functions.
+///
+/// `maybe_inst`, when provided, must refer to a bin_op instruction.
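+///
+/// A representative call, mirroring the call sites below (sketch):
+/// `try self.binOp(.add, null, lhs, rhs, lhs_ty, rhs_ty)`.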
fn binOp( self: *Self, tag: Air.Inst.Tag, @@ -1066,6 +1003,12 @@ fn binOp( // Arithmetic operations on integers and floats .add, .sub, + .cmp_eq, + .cmp_neq, + .cmp_gt, + .cmp_gte, + .cmp_lt, + .cmp_lte, => { switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO binary operations on floats", .{}), @@ -1180,8 +1123,19 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void { } fn airAddWithOverflow(self: *Self, inst: Air.Inst.Index) !void { - _ = inst; - return self.fail("TODO implement airAddWithOverflow for {}", .{self.target.cpu.arch}); + const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; + const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; + + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const lhs = try self.resolveInst(extra.lhs); + const rhs = try self.resolveInst(extra.rhs); + const lhs_ty = self.typeOf(extra.lhs); + const rhs_ty = self.typeOf(extra.rhs); + + break :result try self.binOp(.add, null, lhs, rhs, lhs_ty, rhs_ty); + }; + + return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none }); } fn airSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { @@ -1352,13 +1306,30 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { fn airSlicePtr(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement slice_ptr for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const mcv = try self.resolveInst(ty_op.operand); + break :result try self.slicePtr(mcv); + }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } +fn slicePtr(self: *Self, mcv: MCValue) !MCValue { + switch (mcv) { + .dead, .unreach, .none => unreachable, + .register => unreachable, // a slice doesn't fit in one register + .stack_offset => |off| { + return MCValue{ .stack_offset = off }; + }, + .memory => |addr| { + return MCValue{ .memory = addr }; + }, + else => return self.fail("TODO slicePtr {s}", .{@tagName(mcv)}), + } +} + fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement slice_len for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airSliceLen for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } @@ -1500,6 +1471,7 @@ fn reuseOperand(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, op_ind fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void { const mod = self.bin_file.comp.module.?; const elem_ty = ptr_ty.childType(mod); + switch (ptr) { .none => unreachable, .undef => unreachable, @@ -1507,9 +1479,7 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo .dead => unreachable, .immediate => |imm| try self.setRegOrMem(elem_ty, dst_mcv, .{ .memory = imm }), .ptr_stack_offset => |off| try self.setRegOrMem(elem_ty, dst_mcv, .{ .stack_offset = off }), - .register => { - return self.fail("TODO implement loading from MCValue.register", .{}); - }, + .register => |src_reg| try self.setRegOrMem(elem_ty, dst_mcv, .{ .register = src_reg }), .memory, .stack_offset, => { @@ -1520,6 
+1490,10 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo try self.genSetReg(ptr_ty, reg, ptr); try self.load(dst_mcv, .{ .register = reg }, ptr_ty); }, + .load_symbol => { + const reg = try self.copyToTmpRegister(ptr_ty, ptr); + try self.load(dst_mcv, .{ .register = reg }, ptr_ty); + }, } } @@ -1553,6 +1527,8 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) !void { _ = ptr_ty; + log.debug("storing {s}", .{@tagName(ptr)}); + switch (ptr) { .none => unreachable, .undef => unreachable, @@ -1573,6 +1549,9 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type .stack_offset => { return self.fail("TODO implement storing to MCValue.stack_offset", .{}); }, + .load_symbol => { + return self.fail("TODO implement storing to MCValue.load_symbol", .{}); + }, } } @@ -1596,27 +1575,32 @@ fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void { fn airStructFieldPtr(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.StructField, ty_pl.payload).data; - return self.structFieldPtr(extra.struct_operand, ty_pl.ty, extra.field_index); + const result = try self.structFieldPtr(inst, extra.struct_operand, ty_pl.ty, extra.field_index); + return self.finishAir(inst, result, .{ extra.struct_operand, .none, .none }); } fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - return self.structFieldPtr(ty_op.operand, ty_op.ty, index); + const result = try self.structFieldPtr(inst, ty_op.operand, ty_op.ty, index); + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn structFieldPtr(self: *Self, operand: Air.Inst.Ref, ty: Air.Inst.Ref, index: u32) !void { + +fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, ty: Air.Inst.Ref, index: u32) !MCValue { + _ = inst; _ = operand; _ = ty; _ = index; - return self.fail("TODO implement codegen struct_field_ptr", .{}); - //return self.finishAir(inst, result, .{ extra.struct_ptr, .none, .none }); + + return self.fail("TODO: structFieldPtr", .{}); } fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; - const extra = self.air.extraData(Air.StructField, ty_pl.payload).data; - _ = extra; - return self.fail("TODO implement codegen struct_field_val", .{}); - //return self.finishAir(inst, result, .{ extra.struct_ptr, .none, .none }); + _ = ty_pl; + + return self.fail("TODO: airStructFieldVal", .{}); + + // return self.finishAir(inst, result, .{ extra.struct_operand, .none, .none }); } fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void { @@ -1732,12 +1716,13 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier try self.register_manager.getReg(reg, null); try self.genSetReg(arg_ty, reg, arg_mcv); }, - .stack_offset => { - return self.fail("TODO implement calling with parameters in memory", .{}); - }, + .stack_offset => |off| try self.genSetStack(arg_ty, off, arg_mcv), .ptr_stack_offset => { return self.fail("TODO implement calling with MCValue.ptr_stack_offset arg", .{}); }, + .load_symbol => { + return self.fail("TODO implement calling with MCValue.load_symbol", .{}); + }, } } @@ -1747,7 +1732,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, 
modifier: std.builtin.CallModifier const sym_index = try elf_file.zigObjectPtr().?.getOrCreateMetadataForDecl(elf_file, func.owner_decl); const sym = elf_file.symbol(sym_index); _ = try sym.getOrCreateZigGotEntry(sym_index, elf_file); - const got_addr: u32 = @intCast(sym.zigGotAddress(elf_file)); + const got_addr = sym.zigGotAddress(elf_file); try self.genSetReg(Type.usize, .ra, .{ .memory = got_addr }); _ = try self.addInst(.{ .tag = .jalr, @@ -1830,7 +1815,8 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { //return self.finishAir(inst, .dead, .{ un_op, .none, .none }); } -fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { +fn airCmp(self: *Self, inst: Air.Inst.Index) !void { + const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)]; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none }); @@ -1842,12 +1828,12 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - _ = op; - _ = lhs; - _ = rhs; + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); - return self.fail("TODO implement cmp for {}", .{self.target.cpu.arch}); - // return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); + const result = try self.binOp(tag, null, lhs, rhs, lhs_ty, rhs_ty); + + return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } fn airCmpVector(self: *Self, inst: Air.Inst.Index) !void { @@ -1878,13 +1864,11 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void { } fn airDbgInlineBlock(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.DbgInlineBlock, ty_pl.payload); - const func = mod.funcInfo(extra.data.func); - // TODO emit debug info for function change - _ = func; - try self.lowerBlock(inst, @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len])); + _ = extra; + // TODO: emit debug info for this block + return self.finishAir(inst, .dead, .{ .none, .none, .none }); } fn airDbgVar(self: *Self, inst: Air.Inst.Index) !void { @@ -1897,10 +1881,165 @@ fn airDbgVar(self: *Self, inst: Air.Inst.Index) !void { } fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { - _ = inst; + const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; + const cond = try self.resolveInst(pl_op.operand); + const cond_ty = self.typeOf(pl_op.operand); + const extra = self.air.extraData(Air.CondBr, pl_op.payload); + const then_body: []const Air.Inst.Index = @ptrCast(self.air.extra[extra.end..][0..extra.data.then_body_len]); + const else_body: []const Air.Inst.Index = @ptrCast(self.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]); + const liveness_condbr = self.liveness.getCondBr(inst); - return self.fail("TODO implement condbr {}", .{self.target.cpu.arch}); - // return self.finishAir(inst, .unreach, .{ pl_op.operand, .none, .none }); + // A branch to the false section. 
Uses `beq`.
+    const reloc = try self.condBr(cond_ty, cond);
+
+    // If the condition dies here in this condbr instruction, process
+    // that death now instead of later, as this affects whether it
+    // needs to be spilled in the branches.
+    if (self.liveness.operandDies(inst, 0)) {
+        if (pl_op.operand.toIndex()) |op_index| {
+            self.processDeath(op_index);
+        }
+    }
+
+    // Save state
+    const parent_next_stack_offset = self.next_stack_offset;
+    const parent_free_registers = self.register_manager.free_registers;
+    var parent_stack = try self.stack.clone(self.gpa);
+    defer parent_stack.deinit(self.gpa);
+    const parent_registers = self.register_manager.registers;
+
+    try self.branch_stack.append(.{});
+    errdefer {
+        _ = self.branch_stack.pop();
+    }
+
+    try self.ensureProcessDeathCapacity(liveness_condbr.then_deaths.len);
+    for (liveness_condbr.then_deaths) |operand| {
+        self.processDeath(operand);
+    }
+    try self.genBody(then_body);
+
+    // Revert to the previous register and stack allocation state.
+
+    var saved_then_branch = self.branch_stack.pop();
+    defer saved_then_branch.deinit(self.gpa);
+
+    self.register_manager.registers = parent_registers;
+
+    self.stack.deinit(self.gpa);
+    self.stack = parent_stack;
+    parent_stack = .{};
+
+    self.next_stack_offset = parent_next_stack_offset;
+    self.register_manager.free_registers = parent_free_registers;
+
+    try self.performReloc(reloc);
+    const else_branch = self.branch_stack.addOneAssumeCapacity();
+    else_branch.* = .{};
+
+    try self.ensureProcessDeathCapacity(liveness_condbr.else_deaths.len);
+    for (liveness_condbr.else_deaths) |operand| {
+        self.processDeath(operand);
+    }
+    try self.genBody(else_body);
+
+    // At this point, each branch will possibly have conflicting values for where
+    // each instruction is stored. They agree, however, on which instructions are alive/dead.
+    // We use the first ("then") branch as canonical, and here emit
+    // instructions into the second ("else") branch to make it conform.
+    // We continue to respect the data structure semantic guarantees of the else_branch so
+    // that we can use all the code emitting abstractions. This is why at the bottom we
+    // assert that parent_branch.free_registers equals the saved_then_branch.free_registers
+    // rather than assigning it.
+    const parent_branch = &self.branch_stack.items[self.branch_stack.items.len - 2];
+    try parent_branch.inst_table.ensureUnusedCapacity(self.gpa, else_branch.inst_table.count());
+    const else_slice = else_branch.inst_table.entries.slice();
+    const else_keys = else_slice.items(.key);
+    const else_values = else_slice.items(.value);
+    for (else_keys, 0..) |else_key, else_idx| {
+        const else_value = else_values[else_idx];
+        const canon_mcv = if (saved_then_branch.inst_table.fetchSwapRemove(else_key)) |then_entry| blk: {
+            // The instruction's MCValue is overridden in both branches.
+            log.debug("condBr put branch table (key = %{d}, value = {})", .{ else_key, then_entry.value });
+            parent_branch.inst_table.putAssumeCapacity(else_key, then_entry.value);
+            if (else_value == .dead) {
+                assert(then_entry.value == .dead);
+                continue;
+            }
+            break :blk then_entry.value;
+        } else blk: {
+            if (else_value == .dead)
+                continue;
+            // The instruction is only overridden in the else branch.
+            var i: usize = self.branch_stack.items.len - 2;
+            while (true) {
+                i -= 1; // If this overflows, the question is: why wasn't the instruction marked dead?
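+                // Walk outward through the enclosing branches until the
+                // instruction's last recorded MCValue is found.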
+ if (self.branch_stack.items[i].inst_table.get(else_key)) |mcv| { + assert(mcv != .dead); + break :blk mcv; + } + } + }; + log.debug("consolidating else_entry {d} {}=>{}", .{ else_key, else_value, canon_mcv }); + // TODO make sure the destination stack offset / register does not already have something + // going on there. + try self.setRegOrMem(self.typeOfIndex(else_key), canon_mcv, else_value); + // TODO track the new register / stack allocation + } + try parent_branch.inst_table.ensureUnusedCapacity(self.gpa, saved_then_branch.inst_table.count()); + const then_slice = saved_then_branch.inst_table.entries.slice(); + const then_keys = then_slice.items(.key); + const then_values = then_slice.items(.value); + for (then_keys, 0..) |then_key, then_idx| { + const then_value = then_values[then_idx]; + // We already deleted the items from this table that matched the else_branch. + // So these are all instructions that are only overridden in the then branch. + parent_branch.inst_table.putAssumeCapacity(then_key, then_value); + if (then_value == .dead) + continue; + const parent_mcv = blk: { + var i: usize = self.branch_stack.items.len - 2; + while (true) { + i -= 1; + if (self.branch_stack.items[i].inst_table.get(then_key)) |mcv| { + assert(mcv != .dead); + break :blk mcv; + } + } + }; + log.debug("consolidating then_entry {d} {}=>{}", .{ then_key, parent_mcv, then_value }); + // TODO make sure the destination stack offset / register does not already have something + // going on there. + try self.setRegOrMem(self.typeOfIndex(then_key), parent_mcv, then_value); + // TODO track the new register / stack allocation + } + + { + var item = self.branch_stack.pop(); + item.deinit(self.gpa); + } + + return self.finishAir(inst, .unreach, .{ .none, .none, .none }); +} + +fn condBr(self: *Self, cond_ty: Type, condition: MCValue) !Mir.Inst.Index { + _ = cond_ty; + + const reg = switch (condition) { + .register => |r| r, + else => try self.copyToTmpRegister(Type.bool, condition), + }; + + return try self.addInst(.{ + .tag = .beq, + .data = .{ + .b_type = .{ + .rs1 = reg, + .rs2 = .zero, + .imm12 = 0, // patched later. + }, + }, + }); } fn isNull(self: *Self, operand: MCValue) !MCValue { @@ -2044,25 +2183,26 @@ fn airLoop(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const loop = self.air.extraData(Air.Block, ty_pl.payload); const body: []const Air.Inst.Index = @ptrCast(self.air.extra[loop.end..][0..loop.data.body_len]); - const start_index = self.code.items.len; + + const start_index: Mir.Inst.Index = @intCast(self.code.items.len); + try self.genBody(body); try self.jump(start_index); + return self.finishAirBookkeeping(); } /// Send control flow to the `index` of `self.code`. -fn jump(self: *Self, index: usize) !void { - _ = index; - return self.fail("TODO implement jump for {}", .{self.target.cpu.arch}); +fn jump(self: *Self, index: Mir.Inst.Index) !void { + _ = try self.addInst(.{ + .tag = .psuedo_jump, + .data = .{ + .inst = index, + }, + }); } fn airBlock(self: *Self, inst: Air.Inst.Index) !void { - const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; - const extra = self.air.extraData(Air.Block, ty_pl.payload); - try self.lowerBlock(inst, @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len])); -} - -fn lowerBlock(self: *Self, inst: Air.Inst.Index, body: []const Air.Inst.Index) !void { try self.blocks.putNoClobber(self.gpa, inst, .{ // A block is a setup to be able to jump to the end. 
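        // Each `br` targeting this block appends a `jal` with an undefined
        // imm21 to `relocs` (see `brVoid`); once the body has been generated,
        // `performReloc` patches those jumps to land here.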
        .relocs = .{},
@@ -2074,10 +2214,16 @@ fn lowerBlock(self: *Self, inst: Air.Inst.Index, body: []const Air.Inst.Index) !
         .mcv = MCValue{ .none = {} },
     });
     defer self.blocks.getPtr(inst).?.relocs.deinit(self.gpa);
+
+    const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+    const extra = self.air.extraData(Air.Block, ty_pl.payload);
+    const body: []const Air.Inst.Index = @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]);
     // TODO emit debug info lexical block
     try self.genBody(body);

-    for (self.blocks.getPtr(inst).?.relocs.items) |reloc| try self.performReloc(reloc);
+    for (self.blocks.getPtr(inst).?.relocs.items) |reloc| {
+        try self.performReloc(reloc);
+    }

     const result = self.blocks.getPtr(inst).?.mcv;
     return self.finishAir(inst, result, .{ .none, .none, .none });
@@ -2091,11 +2237,12 @@ fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {
     // return self.finishAir(inst, .dead, .{ condition, .none, .none });
 }

-fn performReloc(self: *Self, reloc: Reloc) !void {
-    _ = self;
-    switch (reloc) {
-        .rel32 => unreachable,
-        .arm_branch => unreachable,
+fn performReloc(self: *Self, inst: Mir.Inst.Index) !void {
+    const tag = self.mir_instructions.items(.tag)[inst];
+
+    switch (tag) {
+        // Patch the branch to target the next MIR instruction to be added;
+        // Emit later turns this index into a byte offset when it computes
+        // branch offsets.
+        .beq => self.mir_instructions.items(.data)[inst].b_type.imm12 = @intCast(self.mir_instructions.len),
+        .jal => self.mir_instructions.items(.data)[inst].j_type.imm21 = @intCast(self.mir_instructions.len),
+        else => return self.fail("TODO: performReloc {s}", .{@tagName(tag)}),
     }
 }
@@ -2135,7 +2282,15 @@ fn brVoid(self: *Self, block: Air.Inst.Index) !void {

     // Emit a jump with a relocation. It will be patched up after the block ends.
     try block_data.relocs.ensureUnusedCapacity(self.gpa, 1);
-    return self.fail("TODO implement brvoid for {}", .{self.target.cpu.arch});
+    block_data.relocs.appendAssumeCapacity(try self.addInst(.{
+        .tag = .jal,
+        .data = .{
+            .j_type = .{
+                .rd = .ra,
+                .imm21 = undefined, // populated later through performReloc
+            },
+        },
+    }));
 }

 fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
@@ -2261,28 +2416,138 @@ fn iterateBigTomb(self: *Self, inst: Air.Inst.Index, operand_count: usize) !BigT
 /// Sets the value without any modifications to register allocation metadata or stack allocation metadata.
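/// A `.register` destination dispatches to `genSetReg` and a `.stack_offset`
/// destination to `genSetStack`, e.g.
/// `try self.setRegOrMem(elem_ty, .{ .register = reg }, src_mcv)`.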
fn setRegOrMem(self: *Self, ty: Type, loc: MCValue, val: MCValue) !void { + if (!loc.isMutable()) { + return std.debug.panic("tried to setRegOrMem immutable: {s}", .{@tagName(loc)}); + } + switch (loc) { .none => return, .register => |reg| return self.genSetReg(ty, reg, val), .stack_offset => |off| return self.genSetStack(ty, off, val), - .memory => { - return self.fail("TODO implement setRegOrMem for memory", .{}); - }, - else => unreachable, + else => return self.fail("TODO: setRegOrMem {s}", .{@tagName(loc)}), } } fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void { - _ = ty; - _ = stack_offset; - _ = mcv; - return self.fail("TODO implement getSetStack for {}", .{self.target.cpu.arch}); + const mod = self.bin_file.comp.module.?; + const abi_size: u32 = @intCast(ty.abiSize(mod)); + + switch (mcv) { + .none => return, + .dead => unreachable, + .immediate => { + const reg = try self.copyToTmpRegister(ty, mcv); + return self.genSetStack(ty, stack_offset, .{ .register = reg }); + }, + .register => |reg| { + switch (abi_size) { + 1, 2, 4, 8 => { + assert(std.mem.isAlignedGeneric(u32, stack_offset, abi_size)); + + const tag: Mir.Inst.Tag = switch (abi_size) { + 1 => .sb, + 2 => .sh, + 4 => .sw, + 8 => .sd, + else => unreachable, + }; + + _ = try self.addInst(.{ + .tag = tag, + .data = .{ .i_type = .{ + .rd = reg, + .rs1 = .sp, + .imm12 = @intCast(stack_offset), + } }, + }); + }, + else => return self.fail("TODO: genSetStack for size={d}", .{abi_size}), + } + }, + .stack_offset, .load_symbol => { + if (abi_size <= 8) { + const reg = try self.copyToTmpRegister(ty, mcv); + return self.genSetStack(ty, stack_offset, MCValue{ .register = reg }); + } + + const ptr_ty = try mod.singleMutPtrType(ty); + + // TODO call extern memcpy + const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null }, gp); + const regs_locks = self.register_manager.lockRegsAssumeUnused(5, regs); + defer for (regs_locks) |reg| { + self.register_manager.unlockReg(reg); + }; + + const src_reg = regs[0]; + const dst_reg = regs[1]; + const len_reg = regs[2]; + const count_reg = regs[3]; + const tmp_reg = regs[4]; + + switch (mcv) { + .stack_offset => |offset| { + if (offset == stack_offset) return; + try self.genSetReg(ptr_ty, src_reg, .{ .ptr_stack_offset = offset }); + }, + .load_symbol => |sym_off| { + const atom_index = atom: { + const decl_index = mod.funcOwnerDeclIndex(self.func_index); + + if (self.bin_file.cast(link.File.Elf)) |elf_file| { + const atom_index = try elf_file.zigObjectPtr().?.getOrCreateMetadataForDecl(elf_file, decl_index); + break :atom atom_index; + } else return self.fail("TODO genSetStack for {s}", .{@tagName(self.bin_file.tag)}); + }; + + _ = try self.addInst(.{ + .tag = .load_symbol, + .data = .{ + .payload = try self.addExtra(Mir.LoadSymbolPayload{ + .register = @intFromEnum(src_reg), + .atom_index = atom_index, + .sym_index = sym_off.sym, + }), + }, + }); + }, + else => return self.fail("TODO: genSetStack unreachable {s}", .{@tagName(mcv)}), + } + + try self.genSetReg(ptr_ty, dst_reg, .{ .ptr_stack_offset = stack_offset }); + try self.genSetReg(Type.usize, len_reg, .{ .immediate = abi_size }); + + // memcpy(src, dst, len) + try self.genInlineMemcpy(src_reg, dst_reg, len_reg, count_reg, tmp_reg); + }, + else => return self.fail("TODO: genSetStack {s}", .{@tagName(mcv)}), + } +} + +fn genInlineMemcpy( + self: *Self, + src: Register, + dst: Register, + len: Register, + count: Register, + tmp: Register, +) !void { + _ = src; + _ = dst; + _ = 
len; + _ = count; + _ = tmp; + + return self.fail("TODO: genInlineMemcpy", .{}); } fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void { + const mod = self.bin_file.comp.module.?; + const abi_size: u32 = @intCast(ty.abiSize(mod)); + switch (mcv) { .dead => unreachable, - .ptr_stack_offset => unreachable, + .ptr_stack_offset => return self.fail("TODO genSetReg ptr_stack_offset", .{}), .unreach, .none => return, // Nothing to do. .undef => { if (!self.wantSafety()) @@ -2343,8 +2608,6 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void }); }, .memory => |addr| { - // The value is in memory at a hard-coded address. - // If the type is a pointer, it means the pointer address is at this memory location. try self.genSetReg(ty, reg, .{ .immediate = addr }); _ = try self.addInst(.{ @@ -2355,11 +2618,51 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void .imm12 = 0, } }, }); - // LOAD imm=[i12 offset = 0], rs1 = - // return self.fail("TODO implement genSetReg memory for riscv64"); + // LOAD imm=[i12 offset = 0], rs1 + }, + .stack_offset => |off| { + const tag: Mir.Inst.Tag = switch (abi_size) { + 1 => .lb, + 2 => .lh, + 4 => .lw, + 8 => .ld, + else => return self.fail("TODO: genSetReg for size {d}", .{abi_size}), + }; + + _ = try self.addInst(.{ + .tag = tag, + .data = .{ .i_type = .{ + .rd = reg, + .rs1 = .sp, + .imm12 = @intCast(off), + } }, + }); + }, + .load_symbol => |sym_off| { + assert(sym_off.off == 0); + + const decl_index = mod.funcOwnerDeclIndex(self.func_index); + + const atom_index = switch (self.bin_file.tag) { + .elf => blk: { + const elf_file = self.bin_file.cast(link.File.Elf).?; + const atom_index = try elf_file.zigObjectPtr().?.getOrCreateMetadataForDecl(elf_file, decl_index); + break :blk atom_index; + }, + else => return self.fail("TODO genSetReg load_symbol for {s}", .{@tagName(self.bin_file.tag)}), + }; + _ = try self.addInst(.{ + .tag = .load_symbol, + .data = .{ + .payload = try self.addExtra(Mir.LoadSymbolPayload{ + .register = @intFromEnum(reg), + .atom_index = atom_index, + .sym_index = sym_off.sym, + }), + }, + }); }, - else => return self.fail("TODO implement getSetReg for riscv64 {}", .{mcv}), } } @@ -2579,9 +2882,12 @@ fn genTypedValue(self: *Self, val: Value) InnerError!MCValue { .mcv => |mcv| switch (mcv) { .none => .none, .undef => .undef, - .load_got, .load_symbol, .load_direct, .load_tlv => unreachable, // TODO + .load_symbol => |sym_index| .{ .load_symbol = .{ .sym = sym_index } }, .immediate => |imm| .{ .immediate = imm }, .memory => |addr| .{ .memory = addr }, + .load_got, .load_direct, .load_tlv => { + return self.fail("TODO: genTypedValue {s}", .{@tagName(mcv)}); + }, }, .fail => |msg| { self.err_msg = msg; @@ -2634,41 +2940,17 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { // TODO make this generic with other ABIs, in particular // with different hardware floating-point calling // conventions - var next_register: usize = 0; - var next_stack_offset: u32 = 0; - // TODO: this is never assigned, which is a bug, but I don't know how this code works - // well enough to try and fix it. I *think* `next_register += next_stack_offset` is - // supposed to be `next_stack_offset += param_size` in every case where it appears. 
- _ = &next_stack_offset; - - const argument_registers = [_]Register{ .a0, .a1, .a2, .a3, .a4, .a5, .a6, .a7 }; + var stack_offset: u32 = 0; for (fn_info.param_types.get(ip), result.args) |ty, *result_arg| { - const param_size: u32 = @intCast(Type.fromInterned(ty).abiSize(mod)); - if (param_size <= 8) { - if (next_register < argument_registers.len) { - result_arg.* = .{ .register = argument_registers[next_register] }; - next_register += 1; - } else { - result_arg.* = .{ .stack_offset = next_stack_offset }; - next_register += next_stack_offset; - } - } else if (param_size <= 16) { - if (next_register < argument_registers.len - 1) { - return self.fail("TODO MCValues with 2 registers", .{}); - } else if (next_register < argument_registers.len) { - return self.fail("TODO MCValues split register + stack", .{}); - } else { - result_arg.* = .{ .stack_offset = next_stack_offset }; - next_register += next_stack_offset; - } - } else { - result_arg.* = .{ .stack_offset = next_stack_offset }; - next_register += next_stack_offset; - } + const param_type = Type.fromInterned(ty); + const param_size: u32 = @intCast(param_type.abiSize(mod)); + + result_arg.* = .{ .stack_offset = stack_offset }; + stack_offset += param_size; } - result.stack_byte_count = next_stack_offset; + result.stack_byte_count = stack_offset; result.stack_align = .@"16"; }, else => return self.fail("TODO implement function parameters for {} on riscv64", .{cc}), diff --git a/src/arch/riscv64/Emit.zig b/src/arch/riscv64/Emit.zig index f382f6f9eb..6e2b638b76 100644 --- a/src/arch/riscv64/Emit.zig +++ b/src/arch/riscv64/Emit.zig @@ -27,6 +27,8 @@ prev_di_column: u32, /// Relative to the beginning of `code`. prev_di_pc: usize, +const log = std.log.scoped(.emit); + const InnerError = error{ OutOfMemory, EmitFail, @@ -37,33 +39,57 @@ pub fn emitMir( ) InnerError!void { const mir_tags = emit.mir.instructions.items(.tag); + // TODO: compute branch offsets + // try emit.lowerMir(); + // Emit machine code for (mir_tags, 0..) 
|tag, index| { const inst = @as(u32, @intCast(index)); + log.debug("emitMir: {s}", .{@tagName(tag)}); switch (tag) { .add => try emit.mirRType(inst), .sub => try emit.mirRType(inst), + .cmp_eq => try emit.mirRType(inst), + .cmp_gt => try emit.mirRType(inst), + + .beq => try emit.mirBType(inst), + .bne => try emit.mirBType(inst), + .addi => try emit.mirIType(inst), .jalr => try emit.mirIType(inst), - .ld => try emit.mirIType(inst), - .sd => try emit.mirIType(inst), + + .jal => try emit.mirJType(inst), .ebreak => try emit.mirSystem(inst), .ecall => try emit.mirSystem(inst), .unimp => try emit.mirSystem(inst), .dbg_line => try emit.mirDbgLine(inst), - .dbg_prologue_end => try emit.mirDebugPrologueEnd(), .dbg_epilogue_begin => try emit.mirDebugEpilogueBegin(), + .psuedo_prologue => try emit.mirPsuedo(inst), + .psuedo_jump => try emit.mirPsuedo(inst), + .mv => try emit.mirRR(inst), .nop => try emit.mirNop(inst), .ret => try emit.mirNop(inst), .lui => try emit.mirUType(inst), + + .ld => try emit.mirIType(inst), + .sd => try emit.mirIType(inst), + .lw => try emit.mirIType(inst), + .sw => try emit.mirIType(inst), + .lh => try emit.mirIType(inst), + .sh => try emit.mirIType(inst), + .lb => try emit.mirIType(inst), + .sb => try emit.mirIType(inst), + .ldr_ptr_stack => try emit.mirIType(inst), + + .load_symbol => try emit.mirLoadSymbol(inst), } } } @@ -86,15 +112,19 @@ fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError { return error.EmitFail; } -fn dbgAdvancePCAndLine(self: *Emit, line: u32, column: u32) !void { - const delta_line = @as(i32, @intCast(line)) - @as(i32, @intCast(self.prev_di_line)); - const delta_pc: usize = self.code.items.len - self.prev_di_pc; - switch (self.debug_output) { +fn dbgAdvancePCAndLine(emit: *Emit, line: u32, column: u32) !void { + log.debug("Line: {} {}\n", .{ line, emit.prev_di_line }); + const delta_line = @as(i32, @intCast(line)) - @as(i32, @intCast(emit.prev_di_line)); + const delta_pc: usize = emit.code.items.len - emit.prev_di_pc; + log.debug("(advance pc={d} and line={d})", .{ delta_pc, delta_line }); + switch (emit.debug_output) { .dwarf => |dw| { + if (column != emit.prev_di_column) try dw.setColumn(column); + if (delta_line == 0) return; // TODO: remove this try dw.advancePCAndLine(delta_line, delta_pc); - self.prev_di_line = line; - self.prev_di_column = column; - self.prev_di_pc = self.code.items.len; + emit.prev_di_line = line; + emit.prev_di_column = column; + emit.prev_di_pc = emit.code.items.len; }, .plan9 => |dbg_out| { if (delta_pc <= 0) return; // only do this when the pc changes @@ -113,12 +143,12 @@ fn dbgAdvancePCAndLine(self: *Emit, line: u32, column: u32) !void { // we don't need to do anything, because adding the pc quanta does it for us } else unreachable; if (dbg_out.start_line == null) - dbg_out.start_line = self.prev_di_line; + dbg_out.start_line = emit.prev_di_line; dbg_out.end_line = line; // only do this if the pc changed - self.prev_di_line = line; - self.prev_di_column = column; - self.prev_di_pc = self.code.items.len; + emit.prev_di_line = line; + emit.prev_di_column = column; + emit.prev_di_pc = emit.code.items.len; }, .none => {}, } @@ -131,6 +161,19 @@ fn mirRType(emit: *Emit, inst: Mir.Inst.Index) !void { switch (tag) { .add => try emit.writeInstruction(Instruction.add(r_type.rd, r_type.rs1, r_type.rs2)), .sub => try emit.writeInstruction(Instruction.sub(r_type.rd, r_type.rs1, r_type.rs2)), + .cmp_eq => try emit.writeInstruction(Instruction.slt(r_type.rd, r_type.rs1, r_type.rs2)), + else => unreachable, + 
} +} + +fn mirBType(emit: *Emit, inst: Mir.Inst.Index) !void { + const tag = emit.mir.instructions.items(.tag)[inst]; + const b_type = emit.mir.instructions.items(.data)[inst].b_type; + + // const inst = b_type.imm12; + + switch (tag) { + .beq => try emit.writeInstruction(Instruction.beq(b_type.rs1, b_type.rs2, b_type.imm12)), else => unreachable, } } @@ -142,8 +185,30 @@ fn mirIType(emit: *Emit, inst: Mir.Inst.Index) !void { switch (tag) { .addi => try emit.writeInstruction(Instruction.addi(i_type.rd, i_type.rs1, i_type.imm12)), .jalr => try emit.writeInstruction(Instruction.jalr(i_type.rd, i_type.imm12, i_type.rs1)), + .ld => try emit.writeInstruction(Instruction.ld(i_type.rd, i_type.imm12, i_type.rs1)), .sd => try emit.writeInstruction(Instruction.sd(i_type.rd, i_type.imm12, i_type.rs1)), + .lw => try emit.writeInstruction(Instruction.lw(i_type.rd, i_type.imm12, i_type.rs1)), + .sw => try emit.writeInstruction(Instruction.sw(i_type.rd, i_type.imm12, i_type.rs1)), + .lh => try emit.writeInstruction(Instruction.lh(i_type.rd, i_type.imm12, i_type.rs1)), + .sh => try emit.writeInstruction(Instruction.sh(i_type.rd, i_type.imm12, i_type.rs1)), + .lb => try emit.writeInstruction(Instruction.lb(i_type.rd, i_type.imm12, i_type.rs1)), + .sb => try emit.writeInstruction(Instruction.sb(i_type.rd, i_type.imm12, i_type.rs1)), + + .ldr_ptr_stack => try emit.writeInstruction(Instruction.add(i_type.rd, i_type.rs1, .sp)), + + else => unreachable, + } +} + +fn mirJType(emit: *Emit, inst: Mir.Inst.Index) !void { + const tag = emit.mir.instructions.items(.tag)[inst]; + const j_type = emit.mir.instructions.items(.data)[inst].j_type; + + switch (tag) { + .jal => { + try emit.writeInstruction(Instruction.jal(j_type.rd, j_type.imm21)); + }, else => unreachable, } } @@ -169,28 +234,55 @@ fn mirDbgLine(emit: *Emit, inst: Mir.Inst.Index) !void { } } -fn mirDebugPrologueEnd(self: *Emit) !void { - switch (self.debug_output) { +fn mirDebugPrologueEnd(emit: *Emit) !void { + switch (emit.debug_output) { .dwarf => |dw| { try dw.setPrologueEnd(); - try self.dbgAdvancePCAndLine(self.prev_di_line, self.prev_di_column); + try emit.dbgAdvancePCAndLine(emit.prev_di_line, emit.prev_di_column); }, .plan9 => {}, .none => {}, } } -fn mirDebugEpilogueBegin(self: *Emit) !void { - switch (self.debug_output) { +fn mirDebugEpilogueBegin(emit: *Emit) !void { + switch (emit.debug_output) { .dwarf => |dw| { try dw.setEpilogueBegin(); - try self.dbgAdvancePCAndLine(self.prev_di_line, self.prev_di_column); + try emit.dbgAdvancePCAndLine(emit.prev_di_line, emit.prev_di_column); }, .plan9 => {}, .none => {}, } } +fn mirPsuedo(emit: *Emit, inst: Mir.Inst.Index) !void { + const tag = emit.mir.instructions.items(.tag)[inst]; + const data = emit.mir.instructions.items(.data)[inst]; + + switch (tag) { + .psuedo_prologue => { + const imm12 = data.imm12; + const stack_size: i12 = @max(32, imm12); + + try emit.writeInstruction(Instruction.addi(.sp, .sp, -stack_size)); + try emit.writeInstruction(Instruction.sd(.ra, stack_size - 8, .sp)); + try emit.writeInstruction(Instruction.sd(.s0, stack_size - 16, .sp)); + try emit.writeInstruction(Instruction.addi(.s0, .sp, stack_size)); + }, + + .psuedo_jump => { + const target = data.inst; + const offset: i12 = @intCast(emit.code.items.len); + _ = target; + + try emit.writeInstruction(Instruction.jal(.s0, offset)); + }, + + else => unreachable, + } +} + fn mirRR(emit: *Emit, inst: Mir.Inst.Index) !void { const tag = emit.mir.instructions.items(.tag)[inst]; const rr = 
emit.mir.instructions.items(.data)[inst].rr;
@@ -200,6 +292,7 @@ fn mirRR(emit: *Emit, inst: Mir.Inst.Index) !void {
         else => unreachable,
     }
 }
+
 fn mirUType(emit: *Emit, inst: Mir.Inst.Index) !void {
     const tag = emit.mir.instructions.items(.tag)[inst];
     const u_type = emit.mir.instructions.items(.data)[inst].u_type;
@@ -219,3 +312,63 @@ fn mirNop(emit: *Emit, inst: Mir.Inst.Index) !void {
         else => unreachable,
     }
 }
+
+fn mirLoadSymbol(emit: *Emit, inst: Mir.Inst.Index) !void {
+    // const tag = emit.mir.instructions.items(.tag)[inst];
+    const payload = emit.mir.instructions.items(.data)[inst].payload;
+    const data = emit.mir.extraData(Mir.LoadSymbolPayload, payload).data;
+    const reg = @as(Register, @enumFromInt(data.register));
+
+    // Offset of the `lui` within `code`; the relocations below target the
+    // `lui` and the `lw` that follows it.
+    const start_offset = @as(u32, @intCast(emit.code.items.len));
+    try emit.writeInstruction(Instruction.lui(reg, 0));
+    try emit.writeInstruction(Instruction.lw(reg, 0, reg));
+
+    switch (emit.bin_file.tag) {
+        .elf => {
+            const elf_file = emit.bin_file.cast(link.File.Elf).?;
+            const atom_ptr = elf_file.symbol(data.atom_index).atom(elf_file).?;
+
+            const hi_r_type = @intFromEnum(std.elf.R_RISCV.HI20);
+
+            try atom_ptr.addReloc(elf_file, .{
+                .r_offset = start_offset,
+                .r_info = (@as(u64, @intCast(data.sym_index)) << 32) | hi_r_type,
+                .r_addend = 0,
+            });
+
+            const lo_r_type = @intFromEnum(std.elf.R_RISCV.LO12_I);
+
+            try atom_ptr.addReloc(elf_file, .{
+                .r_offset = start_offset + 4,
+                .r_info = (@as(u64, @intCast(data.sym_index)) << 32) | lo_r_type,
+                .r_addend = 0,
+            });
+        },
+        else => unreachable,
+    }
+}
+
+fn isBranch(tag: Mir.Inst.Tag) bool {
+    return switch (tag) {
+        .psuedo_jump => true,
+        else => false,
+    };
+}
+
+fn lowerMir(emit: *Emit) !void {
+    const comp = emit.bin_file.comp;
+    const gpa = comp.gpa;
+    const mir_tags = emit.mir.instructions.items(.tag);
+
+    _ = gpa;
+
+    for (mir_tags, 0..) |tag, index| {
+        const inst: u32 = @intCast(index);
+
+        if (isBranch(tag)) {
+            const target_inst = emit.mir.instructions.items(.data)[inst].inst;
+
+            _ = target_inst;
+        }
+    }
+}
diff --git a/src/arch/riscv64/Mir.zig b/src/arch/riscv64/Mir.zig
index 9fe29a7ecd..738012022c 100644
--- a/src/arch/riscv64/Mir.zig
+++ b/src/arch/riscv64/Mir.zig
@@ -24,25 +24,72 @@ pub const Inst = struct {
     data: Data,

     pub const Tag = enum(u16) {
-        add,
         addi,
+        jalr,
+        lui,
+        mv,
+
+        unimp,
+        ebreak,
+        ecall,
+
+        /// Addition
+        add,
+        /// Subtraction
+        sub,
+
+        jal,
+
+        // TODO: Maybe create a special data for compares that includes the ops
+        /// Compare equal, uses r_type
+        cmp_eq,
+        /// Compare greater than, uses r_type
+        cmp_gt,
+
+        /// Branch if equal. Uses b_type
+        beq,
+        /// Branch if not equal. Uses b_type
+        bne,
+
+        nop,
+        ret,
+
+        /// Load double (64 bits)
+        ld,
+        /// Store double (64 bits)
+        sd,
+        /// Load word (32 bits)
+        lw,
+        /// Store word (32 bits)
+        sw,
+        /// Load half (16 bits)
+        lh,
+        /// Store half (16 bits)
+        sh,
+        /// Load byte (8 bits)
+        lb,
+        /// Store byte (8 bits)
+        sb,
+
         /// Pseudo-instruction: End of prologue
         dbg_prologue_end,
         /// Pseudo-instruction: Beginning of epilogue
         dbg_epilogue_begin,
         /// Pseudo-instruction: Update debug line
         dbg_line,
-        unimp,
-        ebreak,
-        ecall,
-        jalr,
-        ld,
-        lui,
-        mv,
-        nop,
-        ret,
-        sd,
-        sub,
+
+        /// Pseudo-instruction that will generate a backpatched
+        /// function prologue.
+        psuedo_prologue,
+        /// Jumps. Uses `inst` payload.
+        psuedo_jump,
+
+        /// Loads the value at a symbol's address into a register.
+        /// Uses the `payload` field, pointing at a `LoadSymbolPayload`.
+        load_symbol,
+
+        // TODO: add description
+        // this is bad, remove this
+        ldr_ptr_stack,
     };

     /// The position of an MIR instruction within the `Mir` instructions array.
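For context, the `lui`+`lw` pair emitted by `mirLoadSymbol` above starts out
with zero immediates; the paired relocations let the linker split the symbol's
absolute address across the two instructions. A sketch, assuming a
sign-extended 32-bit address:

    lui rd, %hi(sym)      # R_RISCV_HI20: upper 20 bits, +1 when bit 11 of sym is set
    lw  rd, %lo(sym)(rd)  # R_RISCV_LO12_I: low 12 bits, sign-extended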
@@ -63,7 +110,11 @@
         /// A 16-bit immediate value.
         ///
         /// Used by e.g. svc
-        imm16: u16,
+        imm16: i16,
+        /// A 12-bit immediate value.
+        ///
+        /// Used by e.g. psuedo_prologue
+        imm12: i12,
         /// Index into `extra`. Meaning of what can be found there is context-dependent.
         ///
         /// Used by e.g. load_memory
@@ -95,6 +146,21 @@ pub const Inst = struct {
             rs1: Register,
             rs2: Register,
         },
+        /// B-Type
+        ///
+        /// Used by e.g. beq
+        b_type: struct {
+            rs1: Register,
+            rs2: Register,
+            imm12: i13,
+        },
+        /// J-Type
+        ///
+        /// Used by e.g. jal
+        j_type: struct {
+            rd: Register,
+            imm21: i21,
+        },
         /// U-Type
         ///
         /// Used by e.g. lui
@@ -111,10 +177,19 @@ pub const Inst = struct {
         },
     };

+    const CompareOp = enum {
+        eq,
+        neq,
+        gt,
+        gte,
+        lt,
+        lte,
+    };
+
     // Make sure we don't accidentally make instructions bigger than expected.
-    // Note that in safety builds, Zig is allowed to insert a secret field for safety checks.
+    // Note that in Debug builds, Zig is allowed to insert a secret field for safety checks.
     // comptime {
-    //     if (!std.debug.runtime_safety) {
+    //     if (builtin.mode != .Debug) {
     //         assert(@sizeOf(Inst) == 8);
     //     }
     // }
@@ -145,3 +220,9 @@ pub fn extraData(mir: Mir, comptime T: type, index: usize) struct { data: T, end
         .end = i,
     };
 }
+
+pub const LoadSymbolPayload = struct {
+    register: u32,
+    atom_index: u32,
+    sym_index: u32,
+};

From 5e770407cf50ae8cd103644c8ca297da52adb5b8 Mon Sep 17 00:00:00 2001
From: David Rubin
Date: Wed, 13 Mar 2024 18:40:35 -0700
Subject: [PATCH 02/44] riscv: basic function arguments

- rename setRegOrMem -> setValue
- a naive method of passing arguments by register
- gather the prologue and epilogue and generate them in Emit.zig. This is
  cleaner because we have the final stack size in the emit step.
- define the "fa" register set, which contains the function-argument
  registers defined by the RISC-V calling convention
---
 src/arch/riscv64/CodeGen.zig | 275 ++++++++++++++++-------------------
 src/arch/riscv64/Emit.zig    |  96 ++++++++----
 src/arch/riscv64/Mir.zig     |   7 +-
 src/arch/riscv64/abi.zig     |  18 ++-
 4 files changed, 217 insertions(+), 179 deletions(-)

diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index de78dd75dc..4e91c0852a 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -34,7 +34,10 @@ const Register = bits.Register;
 const RegisterManager = abi.RegisterManager;
 const RegisterLock = RegisterManager.RegisterLock;
 const callee_preserved_regs = abi.callee_preserved_regs;
+/// General Purpose
 const gp = abi.RegisterClass.gp;
+/// Function Args
+const fa = abi.RegisterClass.fa;

 const InnerError = CodeGenError || error{OutOfRegisters};
@@ -297,6 +300,7 @@ pub fn generate(
         .prev_di_pc = 0,
         .prev_di_line = func.lbrace_line,
         .prev_di_column = func.lbrace_column,
+        .stack_size = @max(32, function.max_end_stack),
     };
     defer emit.deinit();
@@ -349,14 +353,9 @@ pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 {
 }

 fn gen(self: *Self) !void {
-    const mod = self.bin_file.comp.module.?;
-    const cc = self.fn_type.fnCallingConvention(mod);
-
-    if (cc == .Naked) return self.fail("TODO: gen support callconv(.{s})", .{@tagName(cc)});
-
     _ = try self.addInst(.{
         .tag = .psuedo_prologue,
-        .data = .{ .imm12 = 0 }, // Backpatched later.
+        .data = .{ .nop = {} }, // Expanded in Emit, which knows the final stack size.
}); _ = try self.addInst(.{ @@ -366,32 +365,6 @@ fn gen(self: *Self) !void { try self.genBody(self.air.getMainBody()); - // Backpatch prologue stack size - if (math.cast(i12, self.max_end_stack)) |casted_stack_size| { - self.mir_instructions.items(.data)[0].imm12 = casted_stack_size; - } else return self.fail("TODO support larger stack sizes, got {}", .{self.max_end_stack}); - - _ = try self.addInst(.{ - .tag = .dbg_epilogue_begin, - .data = .{ .nop = {} }, - }); - - // exitlude jumps - if (self.exitlude_jump_relocs.items.len > 0 and - self.exitlude_jump_relocs.items[self.exitlude_jump_relocs.items.len - 1] == self.mir_instructions.len - 2) - { - // If the last Mir instruction (apart from the - // dbg_epilogue_begin) is the last exitlude jump - // relocation (which would just jump one instruction - // further), it can be safely removed - self.mir_instructions.orderedRemove(self.exitlude_jump_relocs.pop()); - } - - for (self.exitlude_jump_relocs.items) |jmp_reloc| { - _ = jmp_reloc; - return self.fail("TODO add branches in RISCV64", .{}); - } - // Drop them off at the rbrace. _ = try self.addInst(.{ .tag = .dbg_line, @@ -501,7 +474,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .fpext => try self.airFpext(inst), .intcast => try self.airIntCast(inst), .trunc => try self.airTrunc(inst), - .int_from_bool => try self.airIntFromBool(inst), + .int_from_bool => try self.airIntFromBool(inst), .is_non_null => try self.airIsNonNull(inst), .is_non_null_ptr => try self.airIsNonNullPtr(inst), .is_null => try self.airIsNull(inst), @@ -513,17 +486,17 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .load => try self.airLoad(inst), .loop => try self.airLoop(inst), .not => try self.airNot(inst), - .int_from_ptr => try self.airIntFromPtr(inst), - .ret => try self.airRet(inst), - .ret_safe => try self.airRet(inst), // TODO + .int_from_ptr => try self.airIntFromPtr(inst), + .ret => try self.airRet(inst, false), + .ret_safe => try self.airRet(inst, true), .ret_load => try self.airRetLoad(inst), .store => try self.airStore(inst, false), .store_safe => try self.airStore(inst, true), .struct_field_ptr=> try self.airStructFieldPtr(inst), .struct_field_val=> try self.airStructFieldVal(inst), .array_to_slice => try self.airArrayToSlice(inst), - .float_from_int => try self.airFloatFromInt(inst), - .int_from_float => try self.airIntFromFloat(inst), + .float_from_int => try self.airFloatFromInt(inst), + .int_from_float => try self.airIntFromFloat(inst), .cmpxchg_strong => try self.airCmpxchg(inst), .cmpxchg_weak => try self.airCmpxchg(inst), .atomic_rmw => try self.airAtomicRmw(inst), @@ -792,6 +765,7 @@ fn copyToNewRegister(self: *Self, reg_owner: Air.Inst.Index, mcv: MCValue) !MCVa fn airAlloc(self: *Self, inst: Air.Inst.Index) !void { const stack_offset = try self.allocMemPtr(inst); + log.debug("airAlloc offset: {}", .{stack_offset}); return self.finishAir(inst, .{ .ptr_stack_offset = stack_offset }, .{ .none, .none, .none }); } @@ -1468,30 +1442,30 @@ fn reuseOperand(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, op_ind return true; } -fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void { +fn load(self: *Self, dst_mcv: MCValue, src_ptr: MCValue, ptr_ty: Type) InnerError!void { const mod = self.bin_file.comp.module.?; const elem_ty = ptr_ty.childType(mod); - switch (ptr) { + switch (src_ptr) { .none => unreachable, .undef => unreachable, .unreach => unreachable, .dead => unreachable, - .immediate => |imm| try 
self.setRegOrMem(elem_ty, dst_mcv, .{ .memory = imm }), - .ptr_stack_offset => |off| try self.setRegOrMem(elem_ty, dst_mcv, .{ .stack_offset = off }), - .register => |src_reg| try self.setRegOrMem(elem_ty, dst_mcv, .{ .register = src_reg }), + .immediate => |imm| try self.setValue(elem_ty, dst_mcv, .{ .memory = imm }), + .ptr_stack_offset => |off| try self.setValue(elem_ty, dst_mcv, .{ .stack_offset = off }), + .register => try self.setValue(elem_ty, dst_mcv, src_ptr), .memory, .stack_offset, => { const reg = try self.register_manager.allocReg(null, gp); const reg_lock = self.register_manager.lockRegAssumeUnused(reg); - defer self.register_manager.unlockReg(reg_lock); + errdefer self.register_manager.unlockReg(reg_lock); - try self.genSetReg(ptr_ty, reg, ptr); + try self.genSetReg(ptr_ty, reg, src_ptr); try self.load(dst_mcv, .{ .register = reg }, ptr_ty); }, .load_symbol => { - const reg = try self.copyToTmpRegister(ptr_ty, ptr); + const reg = try self.copyToTmpRegister(ptr_ty, src_ptr); try self.load(dst_mcv, .{ .register = reg }, ptr_ty); }, } @@ -1524,34 +1498,18 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) !void { +fn store(self: *Self, dst_ptr: MCValue, src_val: MCValue, ptr_ty: Type, value_ty: Type) !void { _ = ptr_ty; - log.debug("storing {s}", .{@tagName(ptr)}); + log.debug("storing {s}", .{@tagName(dst_ptr)}); - switch (ptr) { + switch (dst_ptr) { .none => unreachable, .undef => unreachable, .unreach => unreachable, .dead => unreachable, - .immediate => |imm| { - try self.setRegOrMem(value_ty, .{ .memory = imm }, value); - }, - .ptr_stack_offset => |off| { - try self.genSetStack(value_ty, off, value); - }, - .register => { - return self.fail("TODO implement storing to MCValue.register", .{}); - }, - .memory => { - return self.fail("TODO implement storing to MCValue.memory", .{}); - }, - .stack_offset => { - return self.fail("TODO implement storing to MCValue.stack_offset", .{}); - }, - .load_symbol => { - return self.fail("TODO implement storing to MCValue.load_symbol", .{}); - }, + .ptr_stack_offset => |off| try self.genSetStack(value_ty, off, src_val), + else => return self.fail("TODO implement storing to MCValue.{s}", .{@tagName(dst_ptr)}), } } @@ -1629,29 +1587,32 @@ fn genArgDbgInfo(self: Self, inst: Air.Inst.Index, mcv: MCValue) !void { } fn airArg(self: *Self, inst: Air.Inst.Index) !void { - const arg_index = self.arg_index; - self.arg_index += 1; + var arg_index = self.arg_index; - const ty = self.typeOfIndex(inst); - _ = ty; + // we skip over args that have no bits + while (self.args[arg_index] == .none) arg_index += 1; + self.arg_index = arg_index + 1; - const result = self.args[arg_index]; - // TODO support stack-only arguments - // TODO Copy registers to the stack - const mcv = result; - try self.genArgDbgInfo(inst, mcv); + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { + const arg_ty = self.typeOfIndex(inst); + _ = arg_ty; + const src_mcv = self.args[arg_index]; - if (self.liveness.isUnused(inst)) - return self.finishAirBookkeeping(); + const dst_mcv = switch (src_mcv) { + .register => |src_reg| dst: { + self.register_manager.getRegAssumeFree(src_reg, inst); + break :dst src_mcv; + }, + // don't need to allocate anything, can just be used immediately. 
+ .stack_offset => src_mcv, + else => return self.fail("TODO: airArg {s}", .{@tagName(src_mcv)}), + }; - switch (mcv) { - .register => |reg| { - self.register_manager.getRegAssumeFree(reg, inst); - }, - else => {}, - } + try self.genArgDbgInfo(inst, src_mcv); + break :result dst_mcv; + }; - return self.finishAir(inst, mcv, .{ .none, .none, .none }); + return self.finishAir(inst, result, .{ .none, .none, .none }); } fn airTrap(self: *Self) !void { @@ -1704,26 +1665,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const arg = args[arg_i]; const arg_ty = self.typeOf(arg); const arg_mcv = try self.resolveInst(args[arg_i]); - - switch (mc_arg) { - .none => continue, - .undef => unreachable, - .immediate => unreachable, - .unreach => unreachable, - .dead => unreachable, - .memory => unreachable, - .register => |reg| { - try self.register_manager.getReg(reg, null); - try self.genSetReg(arg_ty, reg, arg_mcv); - }, - .stack_offset => |off| try self.genSetStack(arg_ty, off, arg_mcv), - .ptr_stack_offset => { - return self.fail("TODO implement calling with MCValue.ptr_stack_offset arg", .{}); - }, - .load_symbol => { - return self.fail("TODO implement calling with MCValue.load_symbol", .{}); - }, - } + try self.setValue(arg_ty, mc_arg, arg_mcv); } if (try self.air.value(callee, mod)) |func_value| { @@ -1791,19 +1733,39 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier fn ret(self: *Self, mcv: MCValue) !void { const mod = self.bin_file.comp.module.?; const ret_ty = self.fn_type.fnReturnType(mod); - try self.setRegOrMem(ret_ty, self.ret_mcv, mcv); + try self.setValue(ret_ty, self.ret_mcv, mcv); + // Just add space for an instruction, patch this later const index = try self.addInst(.{ - .tag = .nop, + .tag = .ret, .data = .{ .nop = {} }, }); + try self.exitlude_jump_relocs.append(self.gpa, index); } -fn airRet(self: *Self, inst: Air.Inst.Index) !void { +fn airRet(self: *Self, inst: Air.Inst.Index, safety: bool) !void { + if (safety) { + // safe + } else { + // not safe + } + const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const operand = try self.resolveInst(un_op); + + _ = try self.addInst(.{ + .tag = .dbg_epilogue_begin, + .data = .{ .nop = {} }, + }); + + _ = try self.addInst(.{ + .tag = .psuedo_epilogue, + .data = .{ .nop = {} }, + }); + try self.ret(operand); + return self.finishAir(inst, .dead, .{ un_op, .none, .none }); } @@ -1983,7 +1945,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { log.debug("consolidating else_entry {d} {}=>{}", .{ else_key, else_value, canon_mcv }); // TODO make sure the destination stack offset / register does not already have something // going on there. - try self.setRegOrMem(self.typeOfIndex(else_key), canon_mcv, else_value); + try self.setValue(self.typeOfIndex(else_key), canon_mcv, else_value); // TODO track the new register / stack allocation } try parent_branch.inst_table.ensureUnusedCapacity(self.gpa, saved_then_branch.inst_table.count()); @@ -2010,7 +1972,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { log.debug("consolidating then_entry {d} {}=>{}", .{ then_key, parent_mcv, then_value }); // TODO make sure the destination stack offset / register does not already have something // going on there. 
- try self.setRegOrMem(self.typeOfIndex(then_key), parent_mcv, then_value); + try self.setValue(self.typeOfIndex(then_key), parent_mcv, then_value); // TODO track the new register / stack allocation } @@ -2195,7 +2157,7 @@ fn airLoop(self: *Self, inst: Air.Inst.Index) !void { /// Send control flow to the `index` of `self.code`. fn jump(self: *Self, index: Mir.Inst.Index) !void { _ = try self.addInst(.{ - .tag = .psuedo_jump, + .tag = .j, .data = .{ .inst = index, }, @@ -2270,7 +2232,7 @@ fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void { if (block_mcv == .none) { block_data.mcv = operand_mcv; } else { - try self.setRegOrMem(self.typeOfIndex(block), block_mcv, operand_mcv); + try self.setValue(self.typeOfIndex(block), block_mcv, operand_mcv); } } return self.brVoid(block); @@ -2415,28 +2377,32 @@ fn iterateBigTomb(self: *Self, inst: Air.Inst.Index, operand_count: usize) !BigT } /// Sets the value without any modifications to register allocation metadata or stack allocation metadata. -fn setRegOrMem(self: *Self, ty: Type, loc: MCValue, val: MCValue) !void { - if (!loc.isMutable()) { - return std.debug.panic("tried to setRegOrMem immutable: {s}", .{@tagName(loc)}); +fn setValue(self: *Self, ty: Type, dst_val: MCValue, src_val: MCValue) !void { + // There isn't anything to store + if (dst_val == .none) return; + + if (!dst_val.isMutable()) { + return std.debug.panic("tried to setValue immutable: {s}", .{@tagName(dst_val)}); } - switch (loc) { - .none => return, - .register => |reg| return self.genSetReg(ty, reg, val), - .stack_offset => |off| return self.genSetStack(ty, off, val), - else => return self.fail("TODO: setRegOrMem {s}", .{@tagName(loc)}), + switch (dst_val) { + .register => |reg| return self.genSetReg(ty, reg, src_val), + .stack_offset => |off| return self.genSetStack(ty, off, src_val), + .memory => |addr| return self.genSetMem(ty, addr, src_val), + else => return self.fail("TODO: setValue {s}", .{@tagName(dst_val)}), } } -fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void { +/// Sets the value of `src_val` into stack memory at `stack_offset`. 
+fn genSetStack(self: *Self, ty: Type, stack_offset: u32, src_val: MCValue) InnerError!void { const mod = self.bin_file.comp.module.?; const abi_size: u32 = @intCast(ty.abiSize(mod)); - switch (mcv) { + switch (src_val) { .none => return, .dead => unreachable, .immediate => { - const reg = try self.copyToTmpRegister(ty, mcv); + const reg = try self.copyToTmpRegister(ty, src_val); return self.genSetStack(ty, stack_offset, .{ .register = reg }); }, .register => |reg| { @@ -2456,8 +2422,10 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro .tag = tag, .data = .{ .i_type = .{ .rd = reg, - .rs1 = .sp, - .imm12 = @intCast(stack_offset), + .rs1 = .s0, + .imm12 = math.cast(i12, stack_offset) orelse { + return self.fail("TODO: genSetStack bigger stack values", .{}); + }, } }, }); }, @@ -2466,7 +2434,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro }, .stack_offset, .load_symbol => { if (abi_size <= 8) { - const reg = try self.copyToTmpRegister(ty, mcv); + const reg = try self.copyToTmpRegister(ty, src_val); return self.genSetStack(ty, stack_offset, MCValue{ .register = reg }); } @@ -2485,7 +2453,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro const count_reg = regs[3]; const tmp_reg = regs[4]; - switch (mcv) { + switch (src_val) { .stack_offset => |offset| { if (offset == stack_offset) return; try self.genSetReg(ptr_ty, src_reg, .{ .ptr_stack_offset = offset }); @@ -2511,7 +2479,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro }, }); }, - else => return self.fail("TODO: genSetStack unreachable {s}", .{@tagName(mcv)}), + else => return self.fail("TODO: genSetStack unreachable {s}", .{@tagName(src_val)}), } try self.genSetReg(ptr_ty, dst_reg, .{ .ptr_stack_offset = stack_offset }); @@ -2520,10 +2488,20 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro // memcpy(src, dst, len) try self.genInlineMemcpy(src_reg, dst_reg, len_reg, count_reg, tmp_reg); }, - else => return self.fail("TODO: genSetStack {s}", .{@tagName(mcv)}), + else => return self.fail("TODO: genSetStack {s}", .{@tagName(src_val)}), } } +fn genSetMem(self: *Self, ty: Type, addr: u64, src_val: MCValue) InnerError!void { + const mod = self.bin_file.comp.module.?; + const abi_size: u32 = @intCast(ty.abiSize(mod)); + _ = abi_size; + _ = addr; + _ = src_val; + + return self.fail("TODO: genSetMem", .{}); +} + fn genInlineMemcpy( self: *Self, src: Register, @@ -2541,11 +2519,12 @@ fn genInlineMemcpy( return self.fail("TODO: genInlineMemcpy", .{}); } -fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void { +/// Sets the value of `src_val` into `reg`. Assumes you have a lock on it. +fn genSetReg(self: *Self, ty: Type, reg: Register, src_val: MCValue) InnerError!void { const mod = self.bin_file.comp.module.?; const abi_size: u32 = @intCast(ty.abiSize(mod)); - switch (mcv) { + switch (src_val) { .dead => unreachable, .ptr_stack_offset => return self.fail("TODO genSetReg ptr_stack_offset", .{}), .unreach, .none => return, // Nothing to do. 
@@ -2634,8 +2613,10 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void .tag = tag, .data = .{ .i_type = .{ .rd = reg, - .rs1 = .sp, - .imm12 = @intCast(off), + .rs1 = .s0, + .imm12 = math.cast(i12, off) orelse { + return self.fail("TODO: genSetReg support larger stack sizes", .{}); + }, } }, }); }, @@ -2685,7 +2666,7 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { defer if (operand_lock) |lock| self.register_manager.unlockReg(lock); const dest = try self.allocRegOrMem(inst, true); - try self.setRegOrMem(self.typeOfIndex(inst), dest, operand); + try self.setValue(self.typeOfIndex(inst), dest, operand); break :result dest; }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); @@ -2912,7 +2893,6 @@ const CallMCValues = struct { /// Caller must call `CallMCValues.deinit`. fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { const mod = self.bin_file.comp.module.?; - const ip = &mod.intern_pool; const fn_info = mod.typeToFunc(fn_ty).?; const cc = fn_info.cc; var result: CallMCValues = .{ @@ -2935,21 +2915,20 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { return result; }, .Unspecified, .C => { - // LP64D ABI - // - // TODO make this generic with other ABIs, in particular - // with different hardware floating-point calling - // conventions - var stack_offset: u32 = 0; - - for (fn_info.param_types.get(ip), result.args) |ty, *result_arg| { - const param_type = Type.fromInterned(ty); - const param_size: u32 = @intCast(param_type.abiSize(mod)); - - result_arg.* = .{ .stack_offset = stack_offset }; - stack_offset += param_size; + if (result.args.len > 8) { + return self.fail("TODO: support more than 8 function args", .{}); } + for (0..result.args.len) |i| { + const arg_reg = try self.register_manager.allocReg(null, fa); + result.args[i] = .{ .register = arg_reg }; + } + + // stack_offset = num s registers spilled + local var space + var stack_offset: u32 = 0; + _ = &stack_offset; + // TODO: spill used s registers here + result.stack_byte_count = stack_offset; result.stack_align = .@"16"; }, diff --git a/src/arch/riscv64/Emit.zig b/src/arch/riscv64/Emit.zig index 6e2b638b76..3cdf3ce48e 100644 --- a/src/arch/riscv64/Emit.zig +++ b/src/arch/riscv64/Emit.zig @@ -27,6 +27,8 @@ prev_di_column: u32, /// Relative to the beginning of `code`. prev_di_pc: usize, +stack_size: u32, + const log = std.log.scoped(.emit); const InnerError = error{ @@ -39,10 +41,8 @@ pub fn emitMir( ) InnerError!void { const mir_tags = emit.mir.instructions.items(.tag); - // TODO: compute branch offsets - // try emit.lowerMir(); + try emit.lowerMir(); - // Emit machine code for (mir_tags, 0..) 
|tag, index| { const inst = @as(u32, @intCast(index)); log.debug("emitMir: {s}", .{@tagName(tag)}); @@ -70,7 +70,9 @@ pub fn emitMir( .dbg_epilogue_begin => try emit.mirDebugEpilogueBegin(), .psuedo_prologue => try emit.mirPsuedo(inst), - .psuedo_jump => try emit.mirPsuedo(inst), + .psuedo_epilogue => try emit.mirPsuedo(inst), + + .j => try emit.mirPsuedo(inst), .mv => try emit.mirRR(inst), @@ -80,13 +82,15 @@ pub fn emitMir( .lui => try emit.mirUType(inst), .ld => try emit.mirIType(inst), - .sd => try emit.mirIType(inst), .lw => try emit.mirIType(inst), - .sw => try emit.mirIType(inst), .lh => try emit.mirIType(inst), - .sh => try emit.mirIType(inst), .lb => try emit.mirIType(inst), + + .sd => try emit.mirIType(inst), + .sw => try emit.mirIType(inst), + .sh => try emit.mirIType(inst), .sb => try emit.mirIType(inst), + .ldr_ptr_stack => try emit.mirIType(inst), .load_symbol => try emit.mirLoadSymbol(inst), @@ -170,8 +174,6 @@ fn mirBType(emit: *Emit, inst: Mir.Inst.Index) !void { const tag = emit.mir.instructions.items(.tag)[inst]; const b_type = emit.mir.instructions.items(.data)[inst].b_type; - // const inst = b_type.imm12; - switch (tag) { .beq => try emit.writeInstruction(Instruction.beq(b_type.rs1, b_type.rs2, b_type.imm12)), else => unreachable, @@ -187,12 +189,13 @@ fn mirIType(emit: *Emit, inst: Mir.Inst.Index) !void { .jalr => try emit.writeInstruction(Instruction.jalr(i_type.rd, i_type.imm12, i_type.rs1)), .ld => try emit.writeInstruction(Instruction.ld(i_type.rd, i_type.imm12, i_type.rs1)), - .sd => try emit.writeInstruction(Instruction.sd(i_type.rd, i_type.imm12, i_type.rs1)), .lw => try emit.writeInstruction(Instruction.lw(i_type.rd, i_type.imm12, i_type.rs1)), - .sw => try emit.writeInstruction(Instruction.sw(i_type.rd, i_type.imm12, i_type.rs1)), .lh => try emit.writeInstruction(Instruction.lh(i_type.rd, i_type.imm12, i_type.rs1)), - .sh => try emit.writeInstruction(Instruction.sh(i_type.rd, i_type.imm12, i_type.rs1)), .lb => try emit.writeInstruction(Instruction.lb(i_type.rd, i_type.imm12, i_type.rs1)), + + .sd => try emit.writeInstruction(Instruction.sd(i_type.rd, i_type.imm12, i_type.rs1)), + .sw => try emit.writeInstruction(Instruction.sw(i_type.rd, i_type.imm12, i_type.rs1)), + .sh => try emit.writeInstruction(Instruction.sh(i_type.rd, i_type.imm12, i_type.rs1)), .sb => try emit.writeInstruction(Instruction.sb(i_type.rd, i_type.imm12, i_type.rs1)), .ldr_ptr_stack => try emit.writeInstruction(Instruction.add(i_type.rd, i_type.rs1, .sp)), @@ -262,21 +265,44 @@ fn mirPsuedo(emit: *Emit, inst: Mir.Inst.Index) !void { switch (tag) { .psuedo_prologue => { - const imm12 = data.imm12; - const stack_size: i12 = @max(32, imm12); + const stack_size: i12 = math.cast(i12, emit.stack_size) orelse { + return emit.fail("TODO: mirPsuedo support larger stack sizes", .{}); + }; + // Decrement sp by num s registers + local var space try emit.writeInstruction(Instruction.addi(.sp, .sp, -stack_size)); + + // Spill ra try emit.writeInstruction(Instruction.sd(.ra, stack_size - 8, .sp)); + + // Spill s0 try emit.writeInstruction(Instruction.sd(.s0, stack_size - 16, .sp)); + + // Setup s0 try emit.writeInstruction(Instruction.addi(.s0, .sp, stack_size)); }, + .psuedo_epilogue => { + const stack_size: i12 = math.cast(i12, emit.stack_size) orelse { + return emit.fail("TODO: mirPsuedo support larger stack sizes", .{}); + }; - .psuedo_jump => { + // Restore ra + try emit.writeInstruction(Instruction.ld(.ra, stack_size - 16, .sp)); + + // Restore s0 + try emit.writeInstruction(Instruction.ld(.s0, 
stack_size - 16, .sp)); + + // Increment sp back to previous value + try emit.writeInstruction(Instruction.addi(.sp, .sp, stack_size)); + }, + + .j => { const target = data.inst; const offset: i12 = @intCast(emit.code.items.len); _ = target; try emit.writeInstruction(Instruction.jal(.s0, offset)); + unreachable; // TODO: mirPsuedo j }, else => unreachable, @@ -348,27 +374,43 @@ fn mirLoadSymbol(emit: *Emit, inst: Mir.Inst.Index) !void { } } -fn isBranch(tag: Mir.Inst.Tag) bool { - switch (tag) { - .psuedo_jump => true, +fn isStore(tag: Mir.Inst.Tag) bool { + return switch (tag) { + .sb => true, + .sh => true, + .sw => true, + .sd => true, else => false, - } + }; +} + +fn isLoad(tag: Mir.Inst.Tag) bool { + return switch (tag) { + .lb => true, + .lh => true, + .lw => true, + .ld => true, + else => false, + }; } fn lowerMir(emit: *Emit) !void { - const comp = emit.bin_file.comp; - const gpa = comp.gpa; const mir_tags = emit.mir.instructions.items(.tag); - - _ = gpa; + const mir_datas = emit.mir.instructions.items(.data); for (mir_tags, 0..) |tag, index| { const inst: u32 = @intCast(index); - if (isBranch(tag)) { - const target_inst = emit.mir.instructions.items(.data)[inst].inst; - - _ = target_inst; + if (isStore(tag) or isLoad(tag)) { + const data = mir_datas[inst].i_type; + // TODO: probably create a psuedo instruction for s0 loads/stores instead of this. + if (data.rs1 == .s0) { + const casted_size = math.cast(i12, emit.stack_size) orelse { + return emit.fail("TODO: support bigger stack sizes lowerMir", .{}); + }; + const offset = mir_datas[inst].i_type.imm12; + mir_datas[inst].i_type.imm12 = -(casted_size - 12 - offset); + } } } } diff --git a/src/arch/riscv64/Mir.zig b/src/arch/riscv64/Mir.zig index 738012022c..4154c129cd 100644 --- a/src/arch/riscv64/Mir.zig +++ b/src/arch/riscv64/Mir.zig @@ -39,6 +39,8 @@ pub const Inst = struct { sub, jal, + /// Jumps. Uses `inst` payload. + j, // TODO: Maybe create a special data for compares that includes the ops /// Compare equal, uses r_type @@ -81,8 +83,9 @@ pub const Inst = struct { /// Psuedo-instruction that will generate a backpatched /// function prologue. psuedo_prologue, - /// Jumps. Uses `inst` payload. - psuedo_jump, + /// Psuedo-instruction that will generate a backpatched + /// function epilogue + psuedo_epilogue, // TODO: add description load_symbol, diff --git a/src/arch/riscv64/abi.zig b/src/arch/riscv64/abi.zig index be3ac590a2..4d72219d8d 100644 --- a/src/arch/riscv64/abi.zig +++ b/src/arch/riscv64/abi.zig @@ -92,10 +92,15 @@ pub fn classifyType(ty: Type, mod: *Module) Class { } pub const callee_preserved_regs = [_]Register{ - .s0, .s1, .s2, .s3, .s4, .s5, .s6, .s7, .s8, .s9, .s10, .s11, + // NOTE: we use s0 as a psuedo stack pointer, so it's not included. 
+ .s1, .s2, .s3, .s4, .s5, .s6, .s7, .s8, .s9, .s10, .s11, }; -const allocatable_registers = callee_preserved_regs; +pub const function_arg_regs = [_]Register{ + .a0, .a1, .a2, .a3, .a4, .a5, .a6, .a7, +}; + +const allocatable_registers = callee_preserved_regs ++ function_arg_regs; pub const RegisterManager = RegisterManagerFn(@import("CodeGen.zig"), Register, &allocatable_registers); // Register classes @@ -109,4 +114,13 @@ pub const RegisterClass = struct { }, true); break :blk set; }; + + pub const fa: RegisterBitSet = blk: { + var set = RegisterBitSet.initEmpty(); + set.setRangeValue(.{ + .start = callee_preserved_regs.len, + .end = callee_preserved_regs.len + function_arg_regs.len, + }, true); + break :blk set; + }; }; From 060c475fcd358eb9d05d14ec9f1bb7bfc47e4423 Mon Sep 17 00:00:00 2001 From: David Rubin Date: Wed, 13 Mar 2024 19:26:10 -0700 Subject: [PATCH 03/44] riscv: update `start.zig` and restore ra from the proper stack offset --- lib/std/start.zig | 2 +- src/arch/riscv64/Emit.zig | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/std/start.zig b/lib/std/start.zig index 24674614e2..68ad3f67ac 100644 --- a/lib/std/start.zig +++ b/lib/std/start.zig @@ -155,7 +155,7 @@ fn exit2(code: usize) noreturn { asm volatile ("ecall" : : [number] "{a7}" (94), - [arg1] "{a0}" (0), + [arg1] "{a0}" (code), : "rcx", "r11", "memory" ); }, diff --git a/src/arch/riscv64/Emit.zig b/src/arch/riscv64/Emit.zig index 3cdf3ce48e..a82d5fb602 100644 --- a/src/arch/riscv64/Emit.zig +++ b/src/arch/riscv64/Emit.zig @@ -287,7 +287,7 @@ fn mirPsuedo(emit: *Emit, inst: Mir.Inst.Index) !void { }; // Restore ra - try emit.writeInstruction(Instruction.ld(.ra, stack_size - 16, .sp)); + try emit.writeInstruction(Instruction.ld(.ra, stack_size - 8, .sp)); // Restore s0 try emit.writeInstruction(Instruction.ld(.s0, stack_size - 16, .sp)); From 28df64cba45595a201f8c2312656922a8c28a67c Mon Sep 17 00:00:00 2001 From: David Rubin Date: Wed, 13 Mar 2024 21:02:15 -0700 Subject: [PATCH 04/44] riscv: implement `@abs` - add the `abs` MIR instruction - implement `@abs` by shifting to the right by `bits - 1`, and xoring. --- lib/std/builtin.zig | 4 +--- src/arch/riscv64/CodeGen.zig | 40 +++++++++++++++++++++++++++++++++--- src/arch/riscv64/Emit.zig | 7 +++++++ src/arch/riscv64/Mir.zig | 3 +++ src/arch/riscv64/bits.zig | 10 ++++----- src/target.zig | 2 +- 6 files changed, 54 insertions(+), 12 deletions(-) diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig index e0e0edf906..b28e17e38b 100644 --- a/lib/std/builtin.zig +++ b/lib/std/builtin.zig @@ -760,9 +760,7 @@ pub fn default_panic(msg: []const u8, error_return_trace: ?*StackTrace, ret_addr @setCold(true); // stage2_riscv64 backend doesn't support loops yet. 
- if (builtin.zig_backend == .stage2_riscv64 or - builtin.cpu.arch == .riscv64) - { + if (builtin.zig_backend == .stage2_riscv64) { unreachable; } diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 4e91c0852a..e7b2506a01 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -1385,8 +1385,44 @@ fn airPopcount(self: *Self, inst: Air.Inst.Index) !void { } fn airAbs(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.comp.module.?; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airAbs for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const ty = self.typeOf(ty_op.operand); + const scalar_ty = ty.scalarType(mod); + const operand = try self.resolveInst(ty_op.operand); + + switch (scalar_ty.zigTypeTag(mod)) { + .Int => if (ty.zigTypeTag(mod) == .Vector) { + return self.fail("TODO implement airAbs for {}", .{ty.fmt(mod)}); + } else { + const int_bits = ty.intInfo(mod).bits; + + if (int_bits > 32) { + return self.fail("TODO: airAbs for larger than 32 bits", .{}); + } + + // promote the src into a register + const src_mcv = try self.copyToNewRegister(inst, operand); + // temp register for shift + const temp_reg = try self.register_manager.allocReg(inst, gp); + + _ = try self.addInst(.{ + .tag = .abs, + .data = .{ + .i_type = .{ + .rs1 = src_mcv.register, + .rd = temp_reg, + .imm12 = @intCast(int_bits - 1), + }, + }, + }); + + break :result src_mcv; + }, + else => return self.fail("TODO: implement airAbs {}", .{scalar_ty.fmt(mod)}), + } + }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } @@ -1603,8 +1639,6 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void { self.register_manager.getRegAssumeFree(src_reg, inst); break :dst src_mcv; }, - // don't need to allocate anything, can just be used immediately. - .stack_offset => src_mcv, else => return self.fail("TODO: airArg {s}", .{@tagName(src_mcv)}), }; diff --git a/src/arch/riscv64/Emit.zig b/src/arch/riscv64/Emit.zig index a82d5fb602..cfd61f8189 100644 --- a/src/arch/riscv64/Emit.zig +++ b/src/arch/riscv64/Emit.zig @@ -58,6 +58,7 @@ pub fn emitMir( .addi => try emit.mirIType(inst), .jalr => try emit.mirIType(inst), + .abs => try emit.mirIType(inst), .jal => try emit.mirJType(inst), @@ -200,6 +201,12 @@ fn mirIType(emit: *Emit, inst: Mir.Inst.Index) !void { .ldr_ptr_stack => try emit.writeInstruction(Instruction.add(i_type.rd, i_type.rs1, .sp)), + .abs => { + try emit.writeInstruction(Instruction.sraiw(i_type.rd, i_type.rs1, @intCast(i_type.imm12))); + try emit.writeInstruction(Instruction.xor(i_type.rs1, i_type.rs1, i_type.rd)); + try emit.writeInstruction(Instruction.subw(i_type.rs1, i_type.rs1, i_type.rd)); + }, + else => unreachable, } } diff --git a/src/arch/riscv64/Mir.zig b/src/arch/riscv64/Mir.zig index 4154c129cd..38d7fe59f0 100644 --- a/src/arch/riscv64/Mir.zig +++ b/src/arch/riscv64/Mir.zig @@ -38,6 +38,9 @@ pub const Inst = struct { /// Subtraction sub, + /// Absolute Value, uses i_type payload. + abs, + jal, /// Jumps. Uses `inst` payload. 
    j,
diff --git a/src/arch/riscv64/bits.zig b/src/arch/riscv64/bits.zig
index 2239bd49f8..f987c7fc74 100644
--- a/src/arch/riscv64/bits.zig
+++ b/src/arch/riscv64/bits.zig
@@ -250,7 +250,7 @@ pub const Instruction = union(enum) {
     }
 
     pub fn srai(rd: Register, r1: Register, shamt: u6) Instruction {
-        return iType(0b0010011, 0b101, rd, r1, (1 << 10) + shamt);
+        return iType(0b0010011, 0b101, rd, r1, (@as(i12, 1) << 10) + shamt);
     }
 
     pub fn slti(rd: Register, r1: Register, imm: i12) Instruction {
@@ -267,16 +267,16 @@ pub const Instruction = union(enum) {
         return iType(0b0011011, 0b000, rd, r1, imm);
     }
 
-    pub fn slliw(rd: Register, r1: Register, shamt: u5) Instruction {
+    pub fn slliw(rd: Register, r1: Register, shamt: u6) Instruction {
         return iType(0b0011011, 0b001, rd, r1, shamt);
     }
 
-    pub fn srliw(rd: Register, r1: Register, shamt: u5) Instruction {
+    pub fn srliw(rd: Register, r1: Register, shamt: u6) Instruction {
         return iType(0b0011011, 0b101, rd, r1, shamt);
     }
 
-    pub fn sraiw(rd: Register, r1: Register, shamt: u5) Instruction {
-        return iType(0b0011011, 0b101, rd, r1, (1 << 10) + shamt);
+    pub fn sraiw(rd: Register, r1: Register, shamt: u6) Instruction {
+        return iType(0b0011011, 0b101, rd, r1, (@as(i12, 1) << 10) + shamt);
     }
 
     // Upper Immediate
diff --git a/src/target.zig b/src/target.zig
index 99b9abcab8..ea58111bc1 100644
--- a/src/target.zig
+++ b/src/target.zig
@@ -507,7 +507,7 @@ pub fn zigBackend(target: std.Target, use_llvm: bool) std.builtin.CompilerBacken
     if (use_llvm) return .stage2_llvm;
     if (target.ofmt == .c) return .stage2_c;
     return switch (target.cpu.arch) {
-        .wasm32, .wasm64 => std.builtin.CompilerBackend.stage2_wasm,
+        .wasm32, .wasm64 => .stage2_wasm,
         .arm, .armeb, .thumb, .thumbeb => .stage2_arm,
         .x86_64 => .stage2_x86_64,
         .x86 => .stage2_x86,

From 2be3033acda53389cac9f3e9a8ca0a3d41348eef Mon Sep 17 00:00:00 2001
From: David Rubin
Date: Thu, 14 Mar 2024 02:44:24 -0700
Subject: [PATCH 05/44] riscv: implement basic branching

we use a code offset map in Emit.zig to pre-compute what byte offset
each MIR instruction is at. this is important because they can be of
different sizes.
---
 lib/std/builtin.zig          |  8 +--
 src/arch/riscv64/CodeGen.zig | 51 +++++++++++--------
 src/arch/riscv64/Emit.zig    | 98 +++++++++++++++++++++++++++++++-----
 src/arch/riscv64/Mir.zig     |  4 +-
 4 files changed, 119 insertions(+), 42 deletions(-)

diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig
index b28e17e38b..3fb9494305 100644
--- a/lib/std/builtin.zig
+++ b/lib/std/builtin.zig
@@ -759,11 +759,6 @@ else
 pub fn default_panic(msg: []const u8, error_return_trace: ?*StackTrace, ret_addr: ?usize) noreturn {
     @setCold(true);
 
-    // stage2_riscv64 backend doesn't support loops yet.
- if (builtin.zig_backend == .stage2_riscv64) { - unreachable; - } - // For backends that cannot handle the language features depended on by the // default panic handler, we have a simpler panic handler: if (builtin.zig_backend == .stage2_wasm or @@ -772,7 +767,8 @@ pub fn default_panic(msg: []const u8, error_return_trace: ?*StackTrace, ret_addr builtin.zig_backend == .stage2_x86 or (builtin.zig_backend == .stage2_x86_64 and (builtin.target.ofmt != .elf and builtin.target.ofmt != .macho)) or builtin.zig_backend == .stage2_sparc64 or - builtin.zig_backend == .stage2_spirv64) + builtin.zig_backend == .stage2_spirv64 or + builtin.zig_backend == .stage2_riscv64) { while (true) { @breakpoint(); diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index e7b2506a01..7e6ad49c67 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -301,6 +301,7 @@ pub fn generate( .prev_di_line = func.lbrace_line, .prev_di_column = func.lbrace_column, .stack_size = @max(32, function.max_end_stack), + .code_offset_mapping = .{}, }; defer emit.deinit(); @@ -929,21 +930,16 @@ fn binOpRegister( .cmp_gt => .cmp_gt, else => return self.fail("TODO: binOpRegister {s}", .{@tagName(tag)}), }; - const mir_data: Mir.Inst.Data = switch (tag) { - .add, - .sub, - .cmp_eq, - => .{ .r_type = .{ - .rd = dest_reg, - .rs1 = lhs_reg, - .rs2 = rhs_reg, - } }, - else => return self.fail("TODO: binOpRegister {s}", .{@tagName(tag)}), - }; _ = try self.addInst(.{ .tag = mir_tag, - .data = mir_data, + .data = .{ + .r_type = .{ + .rd = dest_reg, + .rs1 = lhs_reg, + .rs2 = rhs_reg, + }, + }, }); return MCValue{ .register = dest_reg }; @@ -1636,7 +1632,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void { const dst_mcv = switch (src_mcv) { .register => |src_reg| dst: { - self.register_manager.getRegAssumeFree(src_reg, inst); + try self.register_manager.getReg(src_reg, inst); break :dst src_mcv; }, else => return self.fail("TODO: airArg {s}", .{@tagName(src_mcv)}), @@ -1914,6 +1910,8 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { self.processDeath(operand); } try self.genBody(then_body); + // point at the to-be-generated else case + try self.performReloc(reloc, @intCast(self.mir_instructions.len)); // Revert to the previous register and stack allocation state. @@ -1929,7 +1927,6 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { self.next_stack_offset = parent_next_stack_offset; self.register_manager.free_registers = parent_free_registers; - try self.performReloc(reloc); const else_branch = self.branch_stack.addOneAssumeCapacity(); else_branch.* = .{}; @@ -2014,8 +2011,6 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { var item = self.branch_stack.pop(); item.deinit(self.gpa); } - - return self.finishAir(inst, .unreach, .{ .none, .none, .none }); } fn condBr(self: *Self, cond_ty: Type, condition: MCValue) !Mir.Inst.Index { @@ -2027,12 +2022,12 @@ fn condBr(self: *Self, cond_ty: Type, condition: MCValue) !Mir.Inst.Index { }; return try self.addInst(.{ - .tag = .beq, + .tag = .bne, .data = .{ .b_type = .{ .rs1 = reg, .rs2 = .zero, - .imm12 = 0, // patched later. + .inst = undefined, }, }, }); @@ -2218,7 +2213,13 @@ fn airBlock(self: *Self, inst: Air.Inst.Index) !void { try self.genBody(body); for (self.blocks.getPtr(inst).?.relocs.items) |reloc| { - try self.performReloc(reloc); + // here we are relocing to point at the instruction after the block. 
+ // [then case] + // [jump to end] // this is reloced + // [else case] + // [jump to end] // this is reloced + // [this isn't generated yet] // point to here + try self.performReloc(reloc, @intCast(self.mir_instructions.len)); } const result = self.blocks.getPtr(inst).?.mcv; @@ -2233,11 +2234,14 @@ fn airSwitch(self: *Self, inst: Air.Inst.Index) !void { // return self.finishAir(inst, .dead, .{ condition, .none, .none }); } -fn performReloc(self: *Self, inst: Mir.Inst.Index) !void { +fn performReloc(self: *Self, inst: Mir.Inst.Index, target: Mir.Inst.Index) !void { const tag = self.mir_instructions.items(.tag)[inst]; switch (tag) { - .beq => self.mir_instructions.items(.data)[inst].b_type.imm12 = @intCast(inst), + .bne, + .beq, + => self.mir_instructions.items(.data)[inst].b_type.inst = target, + .jal => self.mir_instructions.items(.data)[inst].j_type.inst = target, else => return self.fail("TODO: performReloc {s}", .{@tagName(tag)}), } } @@ -2283,7 +2287,7 @@ fn brVoid(self: *Self, block: Air.Inst.Index) !void { .data = .{ .j_type = .{ .rd = .ra, - .imm21 = undefined, // populated later through performReloc + .inst = undefined, }, }, })); @@ -2467,6 +2471,9 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, src_val: MCValue) Inner } }, .stack_offset, .load_symbol => { + if (true) + return self.fail("TODO: genSetStack {s}", .{@tagName(src_val)}); + if (abi_size <= 8) { const reg = try self.copyToTmpRegister(ty, src_val); return self.genSetStack(ty, stack_offset, MCValue{ .register = reg }); diff --git a/src/arch/riscv64/Emit.zig b/src/arch/riscv64/Emit.zig index cfd61f8189..a40d7ba03d 100644 --- a/src/arch/riscv64/Emit.zig +++ b/src/arch/riscv64/Emit.zig @@ -26,8 +26,14 @@ prev_di_line: u32, prev_di_column: u32, /// Relative to the beginning of `code`. prev_di_pc: usize, - +/// Function's stack size. Used for backpatching. 
stack_size: u32, +/// For backward branches: stores the code offset of the target +/// instruction +/// +/// For forward branches: stores the code offset of the branch +/// instruction +code_offset_mapping: std.AutoHashMapUnmanaged(Mir.Inst.Index, usize) = .{}, const log = std.log.scoped(.emit); @@ -100,6 +106,10 @@ pub fn emitMir( } pub fn deinit(emit: *Emit) void { + const comp = emit.bin_file.comp; + const gpa = comp.gpa; + + emit.code_offset_mapping.deinit(gpa); emit.* = undefined; } @@ -118,10 +128,8 @@ fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError { } fn dbgAdvancePCAndLine(emit: *Emit, line: u32, column: u32) !void { - log.debug("Line: {} {}\n", .{ line, emit.prev_di_line }); const delta_line = @as(i32, @intCast(line)) - @as(i32, @intCast(emit.prev_di_line)); const delta_pc: usize = emit.code.items.len - emit.prev_di_pc; - log.debug("(advance pc={d} and line={d})", .{ delta_pc, delta_line }); switch (emit.debug_output) { .dwarf => |dw| { if (column != emit.prev_di_column) try dw.setColumn(column); @@ -166,7 +174,7 @@ fn mirRType(emit: *Emit, inst: Mir.Inst.Index) !void { switch (tag) { .add => try emit.writeInstruction(Instruction.add(r_type.rd, r_type.rs1, r_type.rs2)), .sub => try emit.writeInstruction(Instruction.sub(r_type.rd, r_type.rs1, r_type.rs2)), - .cmp_eq => try emit.writeInstruction(Instruction.slt(r_type.rd, r_type.rs1, r_type.rs2)), + .cmp_gt => try emit.writeInstruction(Instruction.slt(r_type.rd, r_type.rs1, r_type.rs2)), else => unreachable, } } @@ -175,8 +183,17 @@ fn mirBType(emit: *Emit, inst: Mir.Inst.Index) !void { const tag = emit.mir.instructions.items(.tag)[inst]; const b_type = emit.mir.instructions.items(.data)[inst].b_type; + const offset = @as(i64, @intCast(emit.code_offset_mapping.get(b_type.inst).?)) - @as(i64, @intCast(emit.code.items.len)); + switch (tag) { - .beq => try emit.writeInstruction(Instruction.beq(b_type.rs1, b_type.rs2, b_type.imm12)), + .beq => { + log.debug("beq: {} offset={}", .{ inst, offset }); + try emit.writeInstruction(Instruction.beq(b_type.rs1, b_type.rs2, @intCast(offset))); + }, + .bne => { + log.debug("bne: {} offset={}", .{ inst, offset }); + try emit.writeInstruction(Instruction.bne(b_type.rs1, b_type.rs2, @intCast(offset))); + }, else => unreachable, } } @@ -215,9 +232,12 @@ fn mirJType(emit: *Emit, inst: Mir.Inst.Index) !void { const tag = emit.mir.instructions.items(.tag)[inst]; const j_type = emit.mir.instructions.items(.data)[inst].j_type; + const offset = @as(i64, @intCast(emit.code_offset_mapping.get(j_type.inst).?)) - @as(i64, @intCast(emit.code.items.len)); + switch (tag) { .jal => { - try emit.writeInstruction(Instruction.jal(j_type.rd, j_type.imm21)); + log.debug("jal: {} offset={}", .{ inst, offset }); + try emit.writeInstruction(Instruction.jal(j_type.rd, @intCast(offset))); }, else => unreachable, } @@ -304,12 +324,8 @@ fn mirPsuedo(emit: *Emit, inst: Mir.Inst.Index) !void { }, .j => { - const target = data.inst; - const offset: i12 = @intCast(emit.code.items.len); - _ = target; - - try emit.writeInstruction(Instruction.jal(.s0, offset)); - unreachable; // TODO: mirPsuedo j + const offset = @as(i64, @intCast(emit.code_offset_mapping.get(data.inst).?)) - @as(i64, @intCast(emit.code.items.len)); + try emit.writeInstruction(Instruction.jal(.s0, @intCast(offset))); }, else => unreachable, @@ -401,7 +417,51 @@ fn isLoad(tag: Mir.Inst.Tag) bool { }; } +pub fn isBranch(tag: Mir.Inst.Tag) bool { + return switch (tag) { + .beq => true, + .bne => true, + .jal => true, + .j => true, + else => 
false, + }; +} + +pub fn branchTarget(emit: *Emit, inst: Mir.Inst.Index) Mir.Inst.Index { + const tag = emit.mir.instructions.items(.tag)[inst]; + const data = emit.mir.instructions.items(.data)[inst]; + + switch (tag) { + .bne, + .beq, + => return data.b_type.inst, + .jal => return data.j_type.inst, + .j => return data.inst, + else => std.debug.panic("branchTarget {s}", .{@tagName(tag)}), + } +} + +fn instructionSize(emit: *Emit, inst: Mir.Inst.Index) usize { + const tag = emit.mir.instructions.items(.tag)[inst]; + + return switch (tag) { + .dbg_line, + .dbg_epilogue_begin, + .dbg_prologue_end, + => 0, + + .psuedo_epilogue => 12, // 3 * 4 + .psuedo_prologue => 16, // 4 * 4 + + .abs => 12, // 3 * 4 + + else => 4, + }; +} + fn lowerMir(emit: *Emit) !void { + const comp = emit.bin_file.comp; + const gpa = comp.gpa; const mir_tags = emit.mir.instructions.items(.tag); const mir_datas = emit.mir.instructions.items(.data); @@ -419,5 +479,19 @@ fn lowerMir(emit: *Emit) !void { mir_datas[inst].i_type.imm12 = -(casted_size - 12 - offset); } } + + if (isBranch(tag)) { + const target_inst = emit.branchTarget(inst); + try emit.code_offset_mapping.put(gpa, target_inst, 0); + } + } + var current_code_offset: usize = 0; + + for (0..mir_tags.len) |index| { + const inst = @as(u32, @intCast(index)); + if (emit.code_offset_mapping.getPtr(inst)) |offset| { + offset.* = current_code_offset; + } + current_code_offset += emit.instructionSize(inst); } } diff --git a/src/arch/riscv64/Mir.zig b/src/arch/riscv64/Mir.zig index 38d7fe59f0..e64ba0c755 100644 --- a/src/arch/riscv64/Mir.zig +++ b/src/arch/riscv64/Mir.zig @@ -158,14 +158,14 @@ pub const Inst = struct { b_type: struct { rs1: Register, rs2: Register, - imm12: i13, + inst: Inst.Index, }, /// J-Type /// /// Used by e.g. jal j_type: struct { rd: Register, - imm21: i21, + inst: Inst.Index, }, /// U-Type /// From 3ccf0fd4c2d024d696bdb1e71a0b36af38ad6bed Mon Sep 17 00:00:00 2001 From: David Rubin Date: Thu, 14 Mar 2024 15:34:06 -0700 Subject: [PATCH 06/44] riscv: basic struct field access the current implementation only works when the struct is in a register. we use some shifting magic to get the field into the LSB, and from there, given the type provenance, the generated code should never reach into the bits beyond the bit size of the type and interact with the rest of the struct. 
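
as a rough illustration of the idea (a model in plain Zig, not code
from the compiler; the struct shape here is hypothetical), accessing a
field of a register-sized struct boils down to a single right shift:

    const S = packed struct { a: u8, b: u8 };

    fn getB(s: S) u8 {
        // `b` sits at bit offset 8, so the backend emits a single
        // `srli rd, rs, 8`; the leftover high bits are never read
        // because the result is typed as `u8`.
        const bits: u16 = @bitCast(s);
        return @truncate(bits >> 8);
    }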
--- lib/compiler/test_runner.zig | 4 +- src/arch/riscv64/CodeGen.zig | 92 ++++++++++++++++++++++++++++++------ src/arch/riscv64/Emit.zig | 4 ++ src/arch/riscv64/Mir.zig | 3 ++ 4 files changed, 88 insertions(+), 15 deletions(-) diff --git a/lib/compiler/test_runner.zig b/lib/compiler/test_runner.zig index 0b9a060fb5..08a2e5721b 100644 --- a/lib/compiler/test_runner.zig +++ b/lib/compiler/test_runner.zig @@ -12,7 +12,9 @@ var cmdline_buffer: [4096]u8 = undefined; var fba = std.heap.FixedBufferAllocator.init(&cmdline_buffer); pub fn main() void { - if (builtin.zig_backend == .stage2_aarch64) { + if (builtin.zig_backend == .stage2_aarch64 or + builtin.zig_backend == .stage2_riscv64) + { return mainSimple() catch @panic("test failure"); } diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 7e6ad49c67..ee860da8b5 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -1586,11 +1586,53 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, ty: fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; - _ = ty_pl; + const extra = self.air.extraData(Air.StructField, ty_pl.payload).data; + const operand = extra.struct_operand; + const index = extra.field_index; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const mod = self.bin_file.comp.module.?; + const src_mcv = try self.resolveInst(operand); + const struct_ty = self.typeOf(operand); + const field_ty = struct_ty.structFieldType(index, mod); + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none; - return self.fail("TODO: airStructFieldVal", .{}); + const field_off = @as(u32, @intCast(struct_ty.structFieldOffset(index, mod))); - // return self.finishAir(inst, result, .{ extra.struct_operand, .none, .none }); + switch (src_mcv) { + .dead, .unreach => unreachable, + .register => |src_reg| { + const src_reg_lock = self.register_manager.lockRegAssumeUnused(src_reg); + defer self.register_manager.unlockReg(src_reg_lock); + + const dst_reg = if (field_off == 0) + (try self.copyToNewRegister(inst, src_mcv)).register + else + try self.copyToTmpRegister(Type.usize, .{ .register = src_reg }); + + const dst_mcv: MCValue = .{ .register = dst_reg }; + const dst_lock = self.register_manager.lockReg(dst_reg); + defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); + + if (field_off > 0) { + _ = try self.addInst(.{ + .tag = .srli, + .data = .{ + .i_type = .{ + .imm12 = @intCast(field_off), + .rd = dst_reg, + .rs1 = dst_reg, + }, + }, + }); + } + + break :result if (field_off == 0) dst_mcv else try self.copyToNewRegister(inst, dst_mcv); + }, + else => return self.fail("TODO: airStructField {s}", .{@tagName(src_mcv)}), + } + }; + + return self.finishAir(inst, result, .{ extra.struct_operand, .none, .none }); } fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void { @@ -1626,8 +1668,6 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void { self.arg_index = arg_index + 1; const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { - const arg_ty = self.typeOfIndex(inst); - _ = arg_ty; const src_mcv = self.args[arg_index]; const dst_mcv = switch (src_mcv) { @@ -2471,12 +2511,14 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, src_val: MCValue) Inner } }, .stack_offset, .load_symbol => { - if (true) - return self.fail("TODO: genSetStack {s}", .{@tagName(src_val)}); + switch (src_val) { + .stack_offset => |off| 
if (off == stack_offset) return, + else => {}, + } if (abi_size <= 8) { const reg = try self.copyToTmpRegister(ty, src_val); - return self.genSetStack(ty, stack_offset, MCValue{ .register = reg }); + return self.genSetStack(ty, stack_offset, .{ .register = reg }); } const ptr_ty = try mod.singleMutPtrType(ty); @@ -2496,7 +2538,6 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, src_val: MCValue) Inner switch (src_val) { .stack_offset => |offset| { - if (offset == stack_offset) return; try self.genSetReg(ptr_ty, src_reg, .{ .ptr_stack_offset = offset }); }, .load_symbol => |sym_off| { @@ -2553,11 +2594,34 @@ fn genInlineMemcpy( ) !void { _ = src; _ = dst; - _ = len; - _ = count; - _ = tmp; - return self.fail("TODO: genInlineMemcpy", .{}); + // store 0 in the count + try self.genSetReg(Type.usize, count, .{ .immediate = 0 }); + + // compare count to length + const compare_inst = try self.addInst(.{ + .tag = .cmp_gt, + .data = .{ .r_type = .{ + .rd = tmp, + .rs1 = count, + .rs2 = len, + } }, + }); + + // end if true + _ = try self.addInst(.{ + .tag = .bne, + .data = .{ + .b_type = .{ + .inst = @intCast(self.mir_instructions.len + 0), // points after the last inst + .rs1 = .zero, + .rs2 = tmp, + }, + }, + }); + _ = compare_inst; + + return self.fail("TODO: finish genInlineMemcpy", .{}); } /// Sets the value of `src_val` into `reg`. Assumes you have a lock on it. @@ -2567,7 +2631,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_val: MCValue) InnerError! switch (src_val) { .dead => unreachable, - .ptr_stack_offset => return self.fail("TODO genSetReg ptr_stack_offset", .{}), + .ptr_stack_offset => |off| try self.genSetReg(ty, reg, .{ .stack_offset = off }), .unreach, .none => return, // Nothing to do. .undef => { if (!self.wantSafety()) diff --git a/src/arch/riscv64/Emit.zig b/src/arch/riscv64/Emit.zig index a40d7ba03d..036e5deea3 100644 --- a/src/arch/riscv64/Emit.zig +++ b/src/arch/riscv64/Emit.zig @@ -98,6 +98,8 @@ pub fn emitMir( .sh => try emit.mirIType(inst), .sb => try emit.mirIType(inst), + .srli => try emit.mirIType(inst), + .ldr_ptr_stack => try emit.mirIType(inst), .load_symbol => try emit.mirLoadSymbol(inst), @@ -224,6 +226,8 @@ fn mirIType(emit: *Emit, inst: Mir.Inst.Index) !void { try emit.writeInstruction(Instruction.subw(i_type.rs1, i_type.rs1, i_type.rd)); }, + .srli => try emit.writeInstruction(Instruction.srli(i_type.rd, i_type.rs1, @intCast(i_type.imm12))), + else => unreachable, } } diff --git a/src/arch/riscv64/Mir.zig b/src/arch/riscv64/Mir.zig index e64ba0c755..098e0c5655 100644 --- a/src/arch/riscv64/Mir.zig +++ b/src/arch/riscv64/Mir.zig @@ -41,6 +41,9 @@ pub const Inst = struct { /// Absolute Value, uses i_type payload. abs, + /// Logical Right Shift, uses i_type payload + srli, + jal, /// Jumps. Uses `inst` payload. j, From 664e3e16fa8dd49ff97f78dcdbf4579ff7f652aa Mon Sep 17 00:00:00 2001 From: David Rubin Date: Thu, 14 Mar 2024 18:29:24 -0700 Subject: [PATCH 07/44] riscv: add `cmp_eq` MIR instruction this opens up the door for addition! 
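
for reference, a sketch in plain Zig of the two-instruction sequence
`cmp_eq` expands to (rv64i has no register-register "set if equal", so
we synthesize it from `xor` and `sltiu`):

    fn cmpEq(rs1: u64, rs2: u64) u64 {
        // xor rd, rs1, rs2 -- result is zero iff rs1 == rs2
        const diff = rs1 ^ rs2;
        // sltiu rd, rd, 1 -- unsigned (rd < 1), so 1 when equal
        return @intFromBool(diff < 1);
    }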
--- src/arch/riscv64/CodeGen.zig | 2 ++ src/arch/riscv64/Emit.zig | 6 ++++++ 2 files changed, 8 insertions(+) diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index ee860da8b5..2c2c0bf583 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -942,6 +942,8 @@ fn binOpRegister( }, }); + // generate the struct for OF checks + return MCValue{ .register = dest_reg }; } diff --git a/src/arch/riscv64/Emit.zig b/src/arch/riscv64/Emit.zig index 036e5deea3..7a14a39e43 100644 --- a/src/arch/riscv64/Emit.zig +++ b/src/arch/riscv64/Emit.zig @@ -177,6 +177,10 @@ fn mirRType(emit: *Emit, inst: Mir.Inst.Index) !void { .add => try emit.writeInstruction(Instruction.add(r_type.rd, r_type.rs1, r_type.rs2)), .sub => try emit.writeInstruction(Instruction.sub(r_type.rd, r_type.rs1, r_type.rs2)), .cmp_gt => try emit.writeInstruction(Instruction.slt(r_type.rd, r_type.rs1, r_type.rs2)), + .cmp_eq => { + try emit.writeInstruction(Instruction.xor(r_type.rd, r_type.rs1, r_type.rs2)); + try emit.writeInstruction(Instruction.sltiu(r_type.rd, r_type.rd, 1)); + }, else => unreachable, } } @@ -459,6 +463,8 @@ fn instructionSize(emit: *Emit, inst: Mir.Inst.Index) usize { .abs => 12, // 3 * 4 + .cmp_eq => 8, + else => 4, }; } From b2150094badd3c14411a811ee0e508183b2142a2 Mon Sep 17 00:00:00 2001 From: David Rubin Date: Sun, 17 Mar 2024 15:07:32 -0700 Subject: [PATCH 08/44] riscv: implement basic logical shifting --- src/arch/riscv64/CodeGen.zig | 122 +++++++++++++++++++++++++++++++++-- src/arch/riscv64/Emit.zig | 21 ++++-- src/arch/riscv64/Mir.zig | 8 ++- 3 files changed, 141 insertions(+), 10 deletions(-) diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 2c2c0bf583..edd3c9bf57 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -928,6 +928,8 @@ fn binOpRegister( .sub => .sub, .cmp_eq => .cmp_eq, .cmp_gt => .cmp_gt, + .shl => .sllw, + .shr => .srlw, else => return self.fail("TODO: binOpRegister {s}", .{@tagName(tag)}), }; @@ -947,6 +949,84 @@ fn binOpRegister( return MCValue{ .register = dest_reg }; } +/// Don't call this function directly. Use binOp instead. +/// +/// Call this function if rhs is an immediate. Generates I version of binops. 
+///
+/// Asserts that rhs is an immediate MCValue.
+fn binOpImm(
+    self: *Self,
+    tag: Air.Inst.Tag,
+    maybe_inst: ?Air.Inst.Index,
+    lhs: MCValue,
+    rhs: MCValue,
+    lhs_ty: Type,
+    rhs_ty: Type,
+) !MCValue {
+    _ = rhs_ty;
+    assert(rhs == .immediate);
+
+    const lhs_is_register = lhs == .register;
+
+    const lhs_lock: ?RegisterLock = if (lhs_is_register)
+        self.register_manager.lockReg(lhs.register)
+    else
+        null;
+    defer if (lhs_lock) |reg| self.register_manager.unlockReg(reg);
+
+    const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
+
+    const lhs_reg = if (lhs_is_register) lhs.register else blk: {
+        const track_inst: ?Air.Inst.Index = if (maybe_inst) |inst| inst: {
+            const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+            break :inst bin_op.lhs.toIndex().?;
+        } else null;
+
+        const reg = try self.register_manager.allocReg(track_inst, gp);
+
+        if (track_inst) |inst| branch.inst_table.putAssumeCapacity(inst, .{ .register = reg });
+
+        break :blk reg;
+    };
+    const new_lhs_lock = self.register_manager.lockReg(lhs_reg);
+    defer if (new_lhs_lock) |reg| self.register_manager.unlockReg(reg);
+
+    const dest_reg = if (maybe_inst) |inst| blk: {
+        const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+
+        if (lhs_is_register and self.reuseOperand(inst, bin_op.lhs, 0, lhs)) {
+            break :blk lhs_reg;
+        } else {
+            break :blk try self.register_manager.allocReg(inst, gp);
+        }
+    } else try self.register_manager.allocReg(null, gp);
+
+    if (!lhs_is_register) try self.genSetReg(lhs_ty, lhs_reg, lhs);
+
+    const mir_tag: Mir.Inst.Tag = switch (tag) {
+        .shl => .slli,
+        .shr => .srli,
+        else => return self.fail("TODO: binOpImm {s}", .{@tagName(tag)}),
+    };
+
+    _ = try self.addInst(.{
+        .tag = mir_tag,
+        .data = .{
+            .i_type = .{
+                .rd = dest_reg,
+                .rs1 = lhs_reg,
+                .imm12 = math.cast(i12, rhs.immediate) orelse {
+                    return self.fail("TODO: binOpImm larger than i12 i_type payload", .{});
+                },
+            },
+        },
+    });
+
+    // generate the struct for OF checks
+
+    return MCValue{ .register = dest_reg };
+}
+
 /// For all your binary operation needs, this function will generate
 /// the corresponding Mir instruction(s). Returns the location of the
 /// result.
@@ -989,8 +1069,10 @@ fn binOp(
             assert(lhs_ty.eql(rhs_ty, mod));
             const int_info = lhs_ty.intInfo(mod);
             if (int_info.bits <= 64) {
-                // TODO immediate operands
-                return try self.binOpRegister(tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
+                if (rhs == .immediate) {
+                    return self.binOpImm(tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
+                }
+                return self.binOpRegister(tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
             } else {
                 return self.fail("TODO binary operations on int with bits > 64", .{});
             }
@@ -1025,6 +1107,28 @@ fn binOp(
                 else => unreachable,
             }
         },
+
+        // These instructions have asymmetric bit sizes.
+ .shr, + .shl, + => { + switch (lhs_ty.zigTypeTag(mod)) { + .Float => return self.fail("TODO binary operations on floats", .{}), + .Vector => return self.fail("TODO binary operations on vectors", .{}), + .Int => { + const int_info = lhs_ty.intInfo(mod); + if (int_info.bits <= 64) { + if (rhs == .immediate) { + return self.binOpImm(tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty); + } + return self.binOpRegister(tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty); + } else { + return self.fail("TODO binary operations on int with bits > 64", .{}); + } + }, + else => unreachable, + } + }, else => unreachable, } } @@ -1163,7 +1267,13 @@ fn airXor(self: *Self, inst: Air.Inst.Index) !void { fn airShl(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement shl for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const lhs = try self.resolveInst(bin_op.lhs); + const rhs = try self.resolveInst(bin_op.rhs); + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); + break :result try self.binOp(.shl, inst, lhs, rhs, lhs_ty, rhs_ty); + }; return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } @@ -1426,7 +1536,11 @@ fn airAbs(self: *Self, inst: Air.Inst.Index) !void { fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airByteSwap for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + if (true) + return self.fail("TODO: airByteSwap", .{}); + break :result undefined; + }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } diff --git a/src/arch/riscv64/Emit.zig b/src/arch/riscv64/Emit.zig index 7a14a39e43..e120e5ce23 100644 --- a/src/arch/riscv64/Emit.zig +++ b/src/arch/riscv64/Emit.zig @@ -98,7 +98,11 @@ pub fn emitMir( .sh => try emit.mirIType(inst), .sb => try emit.mirIType(inst), + .srlw => try emit.mirRType(inst), + .sllw => try emit.mirRType(inst), + .srli => try emit.mirIType(inst), + .slli => try emit.mirIType(inst), .ldr_ptr_stack => try emit.mirIType(inst), @@ -173,14 +177,20 @@ fn mirRType(emit: *Emit, inst: Mir.Inst.Index) !void { const tag = emit.mir.instructions.items(.tag)[inst]; const r_type = emit.mir.instructions.items(.data)[inst].r_type; + const rd = r_type.rd; + const rs1 = r_type.rs1; + const rs2 = r_type.rs2; + switch (tag) { - .add => try emit.writeInstruction(Instruction.add(r_type.rd, r_type.rs1, r_type.rs2)), - .sub => try emit.writeInstruction(Instruction.sub(r_type.rd, r_type.rs1, r_type.rs2)), - .cmp_gt => try emit.writeInstruction(Instruction.slt(r_type.rd, r_type.rs1, r_type.rs2)), + .add => try emit.writeInstruction(Instruction.add(rd, rs1, rs2)), + .sub => try emit.writeInstruction(Instruction.sub(rd, rs1, rs2)), + .cmp_gt => try emit.writeInstruction(Instruction.slt(rd, rs1, rs2)), .cmp_eq => { - try emit.writeInstruction(Instruction.xor(r_type.rd, r_type.rs1, r_type.rs2)); - try emit.writeInstruction(Instruction.sltiu(r_type.rd, r_type.rd, 1)); + try emit.writeInstruction(Instruction.xor(rd, rs1, rs2)); + try emit.writeInstruction(Instruction.sltiu(rd, rd, 1)); }, + .sllw => try emit.writeInstruction(Instruction.sllw(rd, rs1, rs2)), + .srlw 
=> try emit.writeInstruction(Instruction.srlw(rd, rs1, rs2)),
 
         else => unreachable,
     }
 }
@@ -231,6 +241,7 @@ fn mirIType(emit: *Emit, inst: Mir.Inst.Index) !void {
         },
 
         .srli => try emit.writeInstruction(Instruction.srli(i_type.rd, i_type.rs1, @intCast(i_type.imm12))),
+        .slli => try emit.writeInstruction(Instruction.slli(i_type.rd, i_type.rs1, @intCast(i_type.imm12))),
 
         else => unreachable,
     }
 }
diff --git a/src/arch/riscv64/Mir.zig b/src/arch/riscv64/Mir.zig
index 098e0c5655..7527f0b216 100644
--- a/src/arch/riscv64/Mir.zig
+++ b/src/arch/riscv64/Mir.zig
@@ -41,8 +41,14 @@ pub const Inst = struct {
     /// Absolute Value, uses i_type payload.
     abs,
 
-    /// Logical Right Shift, uses i_type payload
+    /// Immediate Logical Right Shift, uses i_type payload
     srli,
+    /// Immediate Logical Left Shift, uses i_type payload
+    slli,
+    /// Register Logical Left Shift, uses r_type payload
+    sllw,
+    /// Register Logical Right Shift, uses r_type payload
+    srlw,
 
     jal,
     /// Jumps. Uses `inst` payload.
     j,

From f67fa73fe8bc5cc38af826c396d912b7f72b3261 Mon Sep 17 00:00:00 2001
From: David Rubin
Date: Sun, 17 Mar 2024 15:55:46 -0700
Subject: [PATCH 09/44] riscv: 16 bit `@byteSwap`

---
 src/arch/riscv64/CodeGen.zig | 44 +++++++++++++++++++++++++++++++++---
 src/arch/riscv64/Emit.zig    |  2 ++
 src/arch/riscv64/Mir.zig     |  3 +++
 3 files changed, 46 insertions(+), 3 deletions(-)

diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index edd3c9bf57..22418d7833 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -1537,9 +1537,47 @@ fn airAbs(self: *Self, inst: Air.Inst.Index) !void {
 fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
-        if (true)
-            return self.fail("TODO: airByteSwap", .{});
-        break :result undefined;
+        const mod = self.bin_file.comp.module.?;
+        const ty = self.typeOf(ty_op.operand);
+        const operand = try self.resolveInst(ty_op.operand);
+
+        const int_bits = ty.intInfo(mod).bits;
+
+        // bytes are no-op
+        if (int_bits == 8 and self.reuseOperand(inst, ty_op.operand, 0, operand)) {
+            return self.finishAir(inst, operand, .{ ty_op.operand, .none, .none });
+        }
+
+        const dest_reg = try self.register_manager.allocReg(null, gp);
+        try self.genSetReg(ty, dest_reg, operand);
+
+        const dest_mcv: MCValue = .{ .register = dest_reg };
+
+        switch (int_bits) {
+            16 => {
+                const temp = try self.binOp(.shr, null, dest_mcv, .{ .immediate = 8 }, ty, Type.u8);
+                assert(temp == .register);
+                _ = try self.addInst(.{
+                    .tag = .slli,
+                    .data = .{ .i_type = .{
+                        .imm12 = 8,
+                        .rd = dest_reg,
+                        .rs1 = dest_reg,
+                    } },
+                });
+                _ = try self.addInst(.{
+                    .tag = .@"or",
+                    .data = .{ .r_type = .{
+                        .rd = dest_reg,
+                        .rs1 = dest_reg,
+                        .rs2 = temp.register,
+                    } },
+                });
+            },
+            else => return self.fail("TODO: {d} bits for airByteSwap", .{int_bits}),
+        }
+
+        break :result dest_mcv;
     };
     return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
 }
diff --git a/src/arch/riscv64/Emit.zig b/src/arch/riscv64/Emit.zig
index e120e5ce23..55e1ff07e2 100644
--- a/src/arch/riscv64/Emit.zig
+++ b/src/arch/riscv64/Emit.zig
@@ -55,6 +55,7 @@ pub fn emitMir(
         switch (tag) {
             .add => try emit.mirRType(inst),
             .sub => try emit.mirRType(inst),
+            .@"or" => try emit.mirRType(inst),
 
             .cmp_eq => try emit.mirRType(inst),
             .cmp_gt => try emit.mirRType(inst),
@@ -191,6 +192,7 @@ fn mirRType(emit: *Emit, inst: Mir.Inst.Index) !void {
         },
         .sllw => try emit.writeInstruction(Instruction.sllw(rd, rs1, rs2)),
         .srlw => try emit.writeInstruction(Instruction.srlw(rd, rs1, rs2)),
+        .@"or" => try emit.writeInstruction(Instruction.@"or"(rd, rs1, rs2)),
         else => unreachable,
     }
 }
diff --git a/src/arch/riscv64/Mir.zig b/src/arch/riscv64/Mir.zig
index 7527f0b216..8a0ad039df 100644
--- a/src/arch/riscv64/Mir.zig
+++ b/src/arch/riscv64/Mir.zig
@@ -33,6 +33,9 @@ pub const Inst = struct {
     ebreak,
     ecall,
 
+    /// OR instruction. Uses r_type payload.
+    @"or",
+
     /// Addition
     add,
     /// Subtraction

From 9b2a4582c983a4171de9ab9843d0af1d807ddbff Mon Sep 17 00:00:00 2001
From: David Rubin
Date: Sun, 17 Mar 2024 17:45:20 -0700
Subject: [PATCH 10/44] riscv: implement 64 bit immediate into register
 loading

LLVM has a better myriad sequence for this, which avoids allocating a
temporary register, but for now this will do.
---
 src/arch/riscv64/CodeGen.zig | 43 ++++++++++++++++++++++++++++++++----
 1 file changed, 39 insertions(+), 4 deletions(-)

diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index 22418d7833..ba8eca1e0d 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -2633,8 +2633,17 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, src_val: MCValue) Inner
     switch (src_val) {
         .none => return,
         .dead => unreachable,
+        .undef => {
+            if (!self.wantSafety()) return;
+            try self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaaaaaaaaaa });
+        },
         .immediate => {
-            const reg = try self.copyToTmpRegister(ty, src_val);
+            const reg = try self.register_manager.allocReg(null, gp);
+            const reg_lock = self.register_manager.lockReg(reg);
+            defer if (reg_lock) |lock| self.register_manager.unlockReg(lock);
+
+            try self.genSetReg(ty, reg, src_val);
+
             return self.genSetStack(ty, stack_offset, .{ .register = reg });
         },
         .register => |reg| {
@@ -2724,6 +2733,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, src_val: MCValue) Inner
             // memcpy(src, dst, len)
             try self.genInlineMemcpy(src_reg, dst_reg, len_reg, count_reg, tmp_reg);
         },
+        else => return self.fail("TODO: genSetStack {s}", .{@tagName(src_val)}),
     }
 }
 
@@ -2826,9 +2836,34 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_val: MCValue) InnerError!
                 } },
             });
         } else {
-            // li rd, immediate
-            // "Myriad sequences"
-            return self.fail("TODO genSetReg 33-64 bit immediates for riscv64", .{}); // glhf
+            const temp = try self.register_manager.allocReg(null, gp);
+            const maybe_temp_lock = self.register_manager.lockReg(temp);
+            defer if (maybe_temp_lock) |temp_lock| self.register_manager.unlockReg(temp_lock);
+
+            const lo32: i32 = @truncate(x);
+            const carry: i32 = if (lo32 < 0) 1 else 0;
+            const hi32: i32 = @truncate((x >> 32) +% carry);
+
+            try self.genSetReg(Type.i32, temp, .{ .immediate = @bitCast(@as(i64, lo32)) });
+            try self.genSetReg(Type.i32, reg, .{ .immediate = @bitCast(@as(i64, hi32)) });
+
+            _ = try self.addInst(.{
+                .tag = .slli,
+                .data = .{ .i_type = .{
+                    .imm12 = 32,
+                    .rd = reg,
+                    .rs1 = reg,
+                } },
+            });
+
+            _ = try self.addInst(.{
+                .tag = .add,
+                .data = .{ .r_type = .{
+                    .rd = reg,
+                    .rs1 = reg,
+                    .rs2 = temp,
+                } },
+            });
         }
     },
     .register => |src_reg| {

From 2cbd8e1deb88e3e23ec3ca34393b824c0d76e5b0 Mon Sep 17 00:00:00 2001
From: David Rubin
Date: Sun, 17 Mar 2024 23:18:48 -0700
Subject: [PATCH 11/44] riscv: progress toward arrays

- implement `airArrayElemVal` for arrays on the stack. This is really
  easy: we just advance the stack offset by `index * elem_size` into
  the array.
This only works when the index is comptime-known, though; it won't work for runtime access. --- src/arch/riscv64/CodeGen.zig | 59 +++++++++++++++++++++++++----------- 1 file changed, 42 insertions(+), 17 deletions(-) diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index ba8eca1e0d..3711a53baa 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -1442,8 +1442,29 @@ fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) !void { } fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.comp.module.?; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement array_elem_val for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const array_ty = self.typeOf(bin_op.lhs); + const array_mcv = try self.resolveInst(bin_op.lhs); + + const index_mcv = try self.resolveInst(bin_op.rhs); + + const elem_ty = array_ty.childType(mod); + const elem_abi_size = elem_ty.abiSize(mod); + + switch (array_mcv) { + // all we need to do is calculate the offset that the elem exists at. + .stack_offset => |off| { + if (index_mcv == .immediate) { + const true_offset: u32 = @intCast(index_mcv.immediate * elem_abi_size); + break :result MCValue{ .stack_offset = off + true_offset }; + } + return self.fail("TODO: airArrayElemVal with runtime index", .{}); + }, + else => return self.fail("TODO: airArrayElemVal {s}", .{@tagName(array_mcv)}), + } + }; return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } @@ -1639,17 +1660,10 @@ fn load(self: *Self, dst_mcv: MCValue, src_ptr: MCValue, ptr_ty: Type) InnerErro .dead => unreachable, .immediate => |imm| try self.setValue(elem_ty, dst_mcv, .{ .memory = imm }), .ptr_stack_offset => |off| try self.setValue(elem_ty, dst_mcv, .{ .stack_offset = off }), - .register => try self.setValue(elem_ty, dst_mcv, src_ptr), - .memory, .stack_offset, - => { - const reg = try self.register_manager.allocReg(null, gp); - const reg_lock = self.register_manager.lockRegAssumeUnused(reg); - errdefer self.register_manager.unlockReg(reg_lock); - - try self.genSetReg(ptr_ty, reg, src_ptr); - try self.load(dst_mcv, .{ .register = reg }, ptr_ty); - }, + .register, + => try self.setValue(elem_ty, dst_mcv, src_ptr), + .memory => return self.fail("TODO: load memory", .{}), .load_symbol => { const reg = try self.copyToTmpRegister(ptr_ty, src_ptr); try self.load(dst_mcv, .{ .register = reg }, ptr_ty); @@ -1675,7 +1689,8 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { // The MCValue that holds the pointer can be re-used as the value.
break :blk ptr; } else { - break :blk try self.allocRegOrMem(inst, true); + // TODO: set this to true, will need to implement register version of arrays and structs + break :blk try self.allocRegOrMem(inst, false); } }; try self.load(dst_mcv, ptr, self.typeOf(ty_op.operand)); @@ -1750,7 +1765,13 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const field_ty = struct_ty.structFieldType(index, mod); if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none; - const field_off = @as(u32, @intCast(struct_ty.structFieldOffset(index, mod))); + const field_off: u32 = switch (struct_ty.containerLayout(mod)) { + .Auto, .Extern => @intCast(struct_ty.structFieldOffset(index, mod) * 8), + .Packed => if (mod.typeToStruct(struct_ty)) |struct_type| + mod.structPackedFieldBitOffset(struct_type, index) + else + 0, + }; switch (src_mcv) { .dead, .unreach => unreachable, @@ -1778,10 +1799,15 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { }, }, }); + + return self.fail("TODO: airStructFieldVal register with field_off > 0", .{}); } break :result if (field_off == 0) dst_mcv else try self.copyToNewRegister(inst, dst_mcv); }, + .stack_offset => |off| { + break :result MCValue{ .stack_offset = off + field_off }; + }, else => return self.fail("TODO: airStructField {s}", .{@tagName(src_mcv)}), } }; @@ -2435,7 +2461,8 @@ fn performReloc(self: *Self, inst: Mir.Inst.Index, target: Mir.Inst.Index) !void .bne, .beq, => self.mir_instructions.items(.data)[inst].b_type.inst = target, - .jal => self.mir_instructions.items(.data)[inst].j_type.inst = target, + .jal, + => self.mir_instructions.items(.data)[inst].j_type.inst = target, else => return self.fail("TODO: performReloc {s}", .{@tagName(tag)}), } } @@ -2614,6 +2641,7 @@ fn setValue(self: *Self, ty: Type, dst_val: MCValue, src_val: MCValue) !void { if (dst_val == .none) return; if (!dst_val.isMutable()) { + // panic so we can see the trace return std.debug.panic("tried to setValue immutable: {s}", .{@tagName(dst_val)}); } @@ -2649,8 +2677,6 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, src_val: MCValue) Inner .register => |reg| { switch (abi_size) { 1, 2, 4, 8 => { - assert(std.mem.isAlignedGeneric(u32, stack_offset, abi_size)); - const tag: Mir.Inst.Tag = switch (abi_size) { 1 => .sb, 2 => .sh, @@ -2733,7 +2759,6 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, src_val: MCValue) Inner // memcpy(src, dst, len) try self.genInlineMemcpy(src_reg, dst_reg, len_reg, count_reg, tmp_reg); }, - else => return self.fail("TODO: genSetStack {s}", .{@tagName(src_val)}), } } From 190e7d02397d7d3a467433f14492c67a77b0a2e1 Mon Sep 17 00:00:00 2001 From: David Rubin Date: Thu, 21 Mar 2024 00:18:08 -0700 Subject: [PATCH 12/44] riscv: update builtin names --- src/arch/riscv64/CodeGen.zig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 3711a53baa..4529ab6eec 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -1766,8 +1766,8 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none; const field_off: u32 = switch (struct_ty.containerLayout(mod)) { - .Auto, .Extern => @intCast(struct_ty.structFieldOffset(index, mod) * 8), - .Packed => if (mod.typeToStruct(struct_ty)) |struct_type| + .auto, .@"extern" => @intCast(struct_ty.structFieldOffset(index, mod) * 8), + .@"packed" => if (mod.typeToStruct(struct_ty)) |struct_type| 
mod.structPackedFieldBitOffset(struct_type, index) else 0, From 63bbf665538d927bd56646e063821e31577f83f5 Mon Sep 17 00:00:00 2001 From: David Rubin Date: Fri, 22 Mar 2024 19:13:50 -0700 Subject: [PATCH 13/44] riscv: remove an allocation from `dwarf.zig` --- lib/std/dwarf.zig | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/lib/std/dwarf.zig b/lib/std/dwarf.zig index a0f5a7f0a8..242408f6b2 100644 --- a/lib/std/dwarf.zig +++ b/lib/std/dwarf.zig @@ -1236,15 +1236,7 @@ pub const DwarfInfo = struct { const opcode_base = try fbr.readByte(); - const standard_opcode_lengths = try allocator.alloc(u8, opcode_base - 1); - defer allocator.free(standard_opcode_lengths); - - { - var i: usize = 0; - while (i < opcode_base - 1) : (i += 1) { - standard_opcode_lengths[i] = try fbr.readByte(); - } - } + const standard_opcode_lengths = try fbr.readBytes(opcode_base - 1); var include_directories = std.ArrayList(FileEntry).init(allocator); defer include_directories.deinit(); From 5e010b6deac7ad34f0cd06d507fc468fd98f9abc Mon Sep 17 00:00:00 2001 From: David Rubin Date: Fri, 22 Mar 2024 20:14:10 -0700 Subject: [PATCH 14/44] riscv: reorganize `binOp` and implement `cmp_imm_gte` MIR This was an annoying one to do, as there is no (to my knowledge) myriad sequence that will allow us to do `gte` compares with an immediate without allocating a register. RISC-V provides a single instruction to do compares, that being `lt`, and so you need to use more than one for the other variants; in this case, I believe you need to allocate a register. --- src/arch/riscv64/CodeGen.zig | 298 +++++++++++++++++++---------------- src/arch/riscv64/Emit.zig | 59 ++++--- src/arch/riscv64/Mir.zig | 15 +- 3 files changed, 210 insertions(+), 162 deletions(-) diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 4529ab6eec..c9a71ee79e 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -850,6 +850,122 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } +fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { + const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; + const lhs = try self.resolveInst(bin_op.lhs); + const rhs = try self.resolveInst(bin_op.rhs); + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); + + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else try self.binOp(tag, inst, lhs, rhs, lhs_ty, rhs_ty); + return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); +} + +/// For all your binary operation needs, this function will generate +/// the corresponding Mir instruction(s). Returns the location of the +/// result. +/// +/// If the binary operation itself happens to be an Air instruction, +/// pass the corresponding index in the inst parameter. That helps +/// this function do stuff like reusing operands. +/// +/// This function does not do any lowering to Mir itself, but instead +/// looks at the lhs and rhs and determines which kind of lowering +/// would be best suited and then delegates the lowering to other +/// functions. +/// +/// `maybe_inst` **needs** to be a bin_op, make sure of that.
+fn binOp( + self: *Self, + tag: Air.Inst.Tag, + maybe_inst: ?Air.Inst.Index, + lhs: MCValue, + rhs: MCValue, + lhs_ty: Type, + rhs_ty: Type, +) InnerError!MCValue { + const mod = self.bin_file.comp.module.?; + switch (tag) { + // Arithmetic operations on integers and floats + .add, + .sub, + .cmp_eq, + .cmp_neq, + .cmp_gt, + .cmp_gte, + .cmp_lt, + .cmp_lte, + => { + switch (lhs_ty.zigTypeTag(mod)) { + .Float => return self.fail("TODO binary operations on floats", .{}), + .Vector => return self.fail("TODO binary operations on vectors", .{}), + .Int => { + assert(lhs_ty.eql(rhs_ty, mod)); + const int_info = lhs_ty.intInfo(mod); + if (int_info.bits <= 64) { + if (rhs == .immediate) { + return self.binOpImm(tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty); + } + return self.binOpRegister(tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty); + } else { + return self.fail("TODO binary operations on int with bits > 64", .{}); + } + }, + else => unreachable, + } + }, + .ptr_add, + .ptr_sub, + => { + switch (lhs_ty.zigTypeTag(mod)) { + .Pointer => { + const ptr_ty = lhs_ty; + const elem_ty = switch (ptr_ty.ptrSize(mod)) { + .One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type + else => ptr_ty.childType(mod), + }; + const elem_size = elem_ty.abiSize(mod); + + if (elem_size == 1) { + const base_tag: Air.Inst.Tag = switch (tag) { + .ptr_add => .add, + .ptr_sub => .sub, + else => unreachable, + }; + + return try self.binOpRegister(base_tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty); + } else { + return self.fail("TODO ptr_add with elem_size > 1", .{}); + } + }, + else => unreachable, + } + }, + + // These instructions have asymmetric bit sizes on RHS and LHS. + .shr, + .shl, + => { + switch (lhs_ty.zigTypeTag(mod)) { + .Float => return self.fail("TODO binary operations on floats", .{}), + .Vector => return self.fail("TODO binary operations on vectors", .{}), + .Int => { + const int_info = lhs_ty.intInfo(mod); + if (int_info.bits <= 64) { + if (rhs == .immediate) { + return self.binOpImm(tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty); + } + return self.binOpRegister(tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty); + } else { + return self.fail("TODO binary operations on int with bits > 64", .{}); + } + }, + else => unreachable, + } + }, + else => unreachable, + } +} /// Don't call this function directly. Use binOp instead. /// /// Calling this function signals an intention to generate a Mir @@ -963,7 +1079,6 @@ fn binOpImm( lhs_ty: Type, rhs_ty: Type, ) !MCValue { - _ = rhs_ty; assert(rhs == .immediate); const lhs_is_register = lhs == .register; @@ -1006,142 +1121,44 @@ fn binOpImm( const mir_tag: Mir.Inst.Tag = switch (tag) { .shl => .slli, .shr => .srli, + .cmp_gte => .cmp_imm_gte, else => return self.fail("TODO: binOpImm {s}", .{@tagName(tag)}), }; - _ = try self.addInst(.{ - .tag = mir_tag, - .data = .{ - .i_type = .{ - .rd = dest_reg, - .rs1 = lhs_reg, - .imm12 = math.cast(i12, rhs.immediate) orelse { - return self.fail("TODO: binOpImm larger than i12 i_type payload", .{}); - }, - }, - }, - }); - - // generate the struct for OF checks - - return MCValue{ .register = dest_reg }; -} - -/// For all your binary operation needs, this function will generate -/// the corresponding Mir instruction(s). Returns the location of the -/// result. -/// -/// If the binary operation itself happens to be an Air instruction, -/// pass the corresponding index in the inst parameter. That helps -/// this function do stuff like reusing operands.
-/// -/// This function does not do any lowering to Mir itself, but instead -/// looks at the lhs and rhs and determines which kind of lowering -/// would be best suitable and then delegates the lowering to other -/// functions. -/// -/// `maybe_inst` **needs** to be a bin_op, make sure of that. -fn binOp( - self: *Self, - tag: Air.Inst.Tag, - maybe_inst: ?Air.Inst.Index, - lhs: MCValue, - rhs: MCValue, - lhs_ty: Type, - rhs_ty: Type, -) InnerError!MCValue { - const mod = self.bin_file.comp.module.?; - switch (tag) { - // Arithmetic operations on integers and floats - .add, - .sub, - .cmp_eq, - .cmp_neq, - .cmp_gt, - .cmp_gte, - .cmp_lt, - .cmp_lte, + // apply some special operations needed + switch (mir_tag) { + .slli, + .srli, => { - switch (lhs_ty.zigTypeTag(mod)) { - .Float => return self.fail("TODO binary operations on floats", .{}), - .Vector => return self.fail("TODO binary operations on vectors", .{}), - .Int => { - assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(mod); - if (int_info.bits <= 64) { - if (rhs == .immediate) { - return self.binOpImm(tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty); - } - return self.binOpRegister(tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty); - } else { - return self.fail("TODO binary operations on int with bits > 64", .{}); - } - }, - else => unreachable, - } + _ = try self.addInst(.{ + .tag = mir_tag, + .data = .{ .i_type = .{ + .rd = dest_reg, + .rs1 = lhs_reg, + .imm12 = math.cast(i12, rhs.immediate) orelse { + return self.fail("TODO: binOpImm larger than i12 i_type payload", .{}); + }, + } }, + }); }, - .ptr_add, - .ptr_sub, - => { - switch (lhs_ty.zigTypeTag(mod)) { - .Pointer => { - const ptr_ty = lhs_ty; - const elem_ty = switch (ptr_ty.ptrSize(mod)) { - .One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type - else => ptr_ty.childType(mod), - }; - const elem_size = elem_ty.abiSize(mod); + .cmp_imm_gte => { + const imm_reg = try self.copyToTmpRegister(rhs_ty, .{ .immediate = rhs.immediate - 1 }); - if (elem_size == 1) { - const base_tag: Air.Inst.Tag = switch (tag) { - .ptr_add => .add, - .ptr_sub => .sub, - else => unreachable, - }; - - return try self.binOpRegister(base_tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty); - } else { - return self.fail("TODO ptr_add with elem_size > 1", .{}); - } - }, - else => unreachable, - } - }, - - // These instructions have unsymteric bit sizes. 
- .shr, - .shl, - => { - switch (lhs_ty.zigTypeTag(mod)) { - .Float => return self.fail("TODO binary operations on floats", .{}), - .Vector => return self.fail("TODO binary operations on vectors", .{}), - .Int => { - const int_info = lhs_ty.intInfo(mod); - if (int_info.bits <= 64) { - if (rhs == .immediate) { - return self.binOpImm(tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty); - } - return self.binOpRegister(tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty); - } else { - return self.fail("TODO binary operations on int with bits > 64", .{}); - } - }, - else => unreachable, - } + _ = try self.addInst(.{ + .tag = mir_tag, + .data = .{ .r_type = .{ + .rd = dest_reg, + .rs1 = imm_reg, + .rs2 = lhs_reg, + } }, + }); }, else => unreachable, } -} -fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { - const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const lhs = try self.resolveInst(bin_op.lhs); - const rhs = try self.resolveInst(bin_op.rhs); - const lhs_ty = self.typeOf(bin_op.lhs); - const rhs_ty = self.typeOf(bin_op.rhs); + // generate the struct for overflow checks - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else try self.binOp(tag, inst, lhs, rhs, lhs_ty, rhs_ty); - return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); + return MCValue{ .register = dest_reg }; } fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { @@ -2101,8 +2118,12 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { const else_body: []const Air.Inst.Index = @ptrCast(self.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]); const liveness_condbr = self.liveness.getCondBr(inst); - // A branch to the false section. Uses beq - const reloc = try self.condBr(cond_ty, cond); + const cond_reg = try self.register_manager.allocReg(inst, gp); + const cond_reg_lock = self.register_manager.lockRegAssumeUnused(cond_reg); + defer self.register_manager.unlockReg(cond_reg_lock); + + // A branch to the false section. Uses bne + const reloc = try self.condBr(cond_ty, cond, cond_reg); // If the condition dies here in this condbr instruction, process // that death now instead of later as this has an effect on @@ -2233,19 +2254,14 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { } } -fn condBr(self: *Self, cond_ty: Type, condition: MCValue) !Mir.Inst.Index { - _ = cond_ty; - - const reg = switch (condition) { - .register => |r| r, - else => try self.copyToTmpRegister(Type.bool, condition), - }; +fn condBr(self: *Self, cond_ty: Type, condition: MCValue, cond_reg: Register) !Mir.Inst.Index { + try self.genSetReg(cond_ty, cond_reg, condition); return try self.addInst(.{ .tag = .bne, .data = .{ .b_type = .{ - .rs1 = reg, + .rs1 = cond_reg, .rs2 = .zero, .inst = undefined, }, @@ -2739,6 +2755,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, src_val: MCValue) Inner } else return self.fail("TODO genSetStack for {s}", .{@tagName(self.bin_file.tag)}); }; + // setup the src pointer _ = try self.addInst(.{ .tag = .load_symbol, .data = .{ @@ -2789,7 +2806,7 @@ fn genInlineMemcpy( // compare count to length const compare_inst = try self.addInst(.{ - .tag = .cmp_gt, + .tag = .cmp_eq, .data = .{ .r_type = .{ .rd = tmp, .rs1 = count, @@ -2861,9 +2878,12 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_val: MCValue) InnerError! } }, }); } else { + // TODO: use a more advanced myriad seq to do this without a reg. 
+ // see: https://github.com/llvm/llvm-project/blob/081a66ffacfe85a37ff775addafcf3371e967328/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp#L224 + const temp = try self.register_manager.allocReg(null, gp); - const maybe_temp_lock = self.register_manager.lockReg(temp); - defer if (maybe_temp_lock) |temp_lock| self.register_manager.unlockReg(temp_lock); + const temp_lock = self.register_manager.lockRegAssumeUnused(temp); + defer self.register_manager.unlockReg(temp_lock); const lo32: i32 = @truncate(x); const carry: i32 = if (lo32 < 0) 1 else 0; diff --git a/src/arch/riscv64/Emit.zig b/src/arch/riscv64/Emit.zig index 55e1ff07e2..6c98fca7b9 100644 --- a/src/arch/riscv64/Emit.zig +++ b/src/arch/riscv64/Emit.zig @@ -59,6 +59,7 @@ pub fn emitMir( .cmp_eq => try emit.mirRType(inst), .cmp_gt => try emit.mirRType(inst), + .cmp_imm_gte => try emit.mirRType(inst), .beq => try emit.mirBType(inst), .bne => try emit.mirBType(inst), @@ -185,14 +186,27 @@ fn mirRType(emit: *Emit, inst: Mir.Inst.Index) !void { switch (tag) { .add => try emit.writeInstruction(Instruction.add(rd, rs1, rs2)), .sub => try emit.writeInstruction(Instruction.sub(rd, rs1, rs2)), - .cmp_gt => try emit.writeInstruction(Instruction.slt(rd, rs1, rs2)), + .cmp_gt => { + // rs1 > rs2 + try emit.writeInstruction(Instruction.slt(rd, rs1, rs2)); + }, .cmp_eq => { + // rs1 == rs2 + + // if equal, write 0 to rd try emit.writeInstruction(Instruction.xor(rd, rs1, rs2)); + // if rd == 0, set rd to 1 try emit.writeInstruction(Instruction.sltiu(rd, rd, 1)); }, .sllw => try emit.writeInstruction(Instruction.sllw(rd, rs1, rs2)), .srlw => try emit.writeInstruction(Instruction.srlw(rd, rs1, rs2)), .@"or" => try emit.writeInstruction(Instruction.@"or"(rd, rs1, rs2)), + .cmp_imm_gte => { + // rd = rs1 >= imm12 + // see the docstring for cmp_imm_gte to see why we use r_type here + try emit.writeInstruction(Instruction.slt(rd, rs1, rs2)); + try emit.writeInstruction(Instruction.xori(rd, rd, 1)); + }, else => unreachable, } } @@ -220,30 +234,34 @@ fn mirIType(emit: *Emit, inst: Mir.Inst.Index) !void { const tag = emit.mir.instructions.items(.tag)[inst]; const i_type = emit.mir.instructions.items(.data)[inst].i_type; + const rd = i_type.rd; + const rs1 = i_type.rs1; + const imm12 = i_type.imm12; + switch (tag) { - .addi => try emit.writeInstruction(Instruction.addi(i_type.rd, i_type.rs1, i_type.imm12)), - .jalr => try emit.writeInstruction(Instruction.jalr(i_type.rd, i_type.imm12, i_type.rs1)), + .addi => try emit.writeInstruction(Instruction.addi(rd, rs1, imm12)), + .jalr => try emit.writeInstruction(Instruction.jalr(rd, imm12, rs1)), - .ld => try emit.writeInstruction(Instruction.ld(i_type.rd, i_type.imm12, i_type.rs1)), - .lw => try emit.writeInstruction(Instruction.lw(i_type.rd, i_type.imm12, i_type.rs1)), - .lh => try emit.writeInstruction(Instruction.lh(i_type.rd, i_type.imm12, i_type.rs1)), - .lb => try emit.writeInstruction(Instruction.lb(i_type.rd, i_type.imm12, i_type.rs1)), + .ld => try emit.writeInstruction(Instruction.ld(rd, imm12, rs1)), + .lw => try emit.writeInstruction(Instruction.lw(rd, imm12, rs1)), + .lh => try emit.writeInstruction(Instruction.lh(rd, imm12, rs1)), + .lb => try emit.writeInstruction(Instruction.lb(rd, imm12, rs1)), - .sd => try emit.writeInstruction(Instruction.sd(i_type.rd, i_type.imm12, i_type.rs1)), - .sw => try emit.writeInstruction(Instruction.sw(i_type.rd, i_type.imm12, i_type.rs1)), - .sh => try emit.writeInstruction(Instruction.sh(i_type.rd, i_type.imm12, i_type.rs1)), - .sb => try 
emit.writeInstruction(Instruction.sb(i_type.rd, i_type.imm12, i_type.rs1)), + .sd => try emit.writeInstruction(Instruction.sd(rd, imm12, rs1)), + .sw => try emit.writeInstruction(Instruction.sw(rd, imm12, rs1)), + .sh => try emit.writeInstruction(Instruction.sh(rd, imm12, rs1)), + .sb => try emit.writeInstruction(Instruction.sb(rd, imm12, rs1)), - .ldr_ptr_stack => try emit.writeInstruction(Instruction.add(i_type.rd, i_type.rs1, .sp)), + .ldr_ptr_stack => try emit.writeInstruction(Instruction.add(rd, rs1, .sp)), .abs => { - try emit.writeInstruction(Instruction.sraiw(i_type.rd, i_type.rs1, @intCast(i_type.imm12))); - try emit.writeInstruction(Instruction.xor(i_type.rs1, i_type.rs1, i_type.rd)); - try emit.writeInstruction(Instruction.subw(i_type.rs1, i_type.rs1, i_type.rd)); + try emit.writeInstruction(Instruction.sraiw(rd, rs1, @intCast(imm12))); + try emit.writeInstruction(Instruction.xor(rs1, rs1, rd)); + try emit.writeInstruction(Instruction.subw(rs1, rs1, rd)); }, - .srli => try emit.writeInstruction(Instruction.srli(i_type.rd, i_type.rs1, @intCast(i_type.imm12))), - .slli => try emit.writeInstruction(Instruction.slli(i_type.rd, i_type.rs1, @intCast(i_type.imm12))), + .srli => try emit.writeInstruction(Instruction.srli(rd, rs1, @intCast(imm12))), + .slli => try emit.writeInstruction(Instruction.slli(rd, rs1, @intCast(imm12))), else => unreachable, } @@ -471,12 +489,13 @@ fn instructionSize(emit: *Emit, inst: Mir.Inst.Index) usize { .dbg_prologue_end, => 0, - .psuedo_epilogue => 12, // 3 * 4 - .psuedo_prologue => 16, // 4 * 4 + .psuedo_epilogue => 12, + .psuedo_prologue => 16, - .abs => 12, // 3 * 4 + .abs => 12, .cmp_eq => 8, + .cmp_imm_gte => 8, else => 4, }; diff --git a/src/arch/riscv64/Mir.zig b/src/arch/riscv64/Mir.zig index 8a0ad039df..68b314bd12 100644 --- a/src/arch/riscv64/Mir.zig +++ b/src/arch/riscv64/Mir.zig @@ -57,12 +57,21 @@ pub const Inst = struct { /// Jumps. Uses `inst` payload. j, - // TODO: Maybe create a special data for compares that includes the ops - /// Compare equal, uses r_type + // NOTE: Maybe create a special data for compares that includes the ops + /// Register `==`, uses r_type cmp_eq, - /// Compare greater than, uses r_type + /// Register `>`, uses r_type cmp_gt, + /// Immediate `>=`, uses r_type + /// + /// Note: this uses r_type because RISC-V does not provide a good way + /// to do `>=` comparisons on immediates. Usually we would just subtract + /// 1 from the immediate and do a `>` comparison; however, there is no `>` + /// register to immediate comparison in RISC-V. This leads us to need to + /// allocate a register for temporary use. + cmp_imm_gte, + /// Branch if equal Uses b_type beq, /// Branch if not eql Uses b_type From 92293214009cbf5d8aede56a5f54f533173324d5 Mon Sep 17 00:00:00 2001 From: David Rubin Date: Sat, 23 Mar 2024 01:13:56 -0700 Subject: [PATCH 15/44] riscv: change up how we do args - Before, we were storing each arg in its own function arg register. With this commit, we store the args in the fa registers before calling, as per the RISC-V calling convention; however, as soon as we enter the callee, aka in airArg, we spill the argument to the stack. This allows us to spend less effort worrying about whether we're going to clobber the function arguments when another function is called inside of the callee. - We were actually clobbering the fa regs inside of resolveCallingConvention, because of the null argument to allocReg.
Now each lock is stored in an array, which is then iterated over and unlocked; this actually aids the first point of this commit. --- src/arch/riscv64/CodeGen.zig | 31 ++++++++++++++++++++++++++----- 1 file changed, 26 insertions(+), 5 deletions(-) diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index c9a71ee79e..bc844f1b5f 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -1858,6 +1858,7 @@ fn genArgDbgInfo(self: Self, inst: Air.Inst.Index, mcv: MCValue) !void { } fn airArg(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.comp.module.?; var arg_index = self.arg_index; // we skip over args that have no bits @@ -1867,10 +1868,21 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { const src_mcv = self.args[arg_index]; + // we want to move every arg onto the stack. + // while it might not be the best solution right now, it simplifies + // the spilling of args with multiple arg levels. const dst_mcv = switch (src_mcv) { .register => |src_reg| dst: { - try self.register_manager.getReg(src_reg, inst); - break :dst src_mcv; + // TODO: get the true type of the arg, and fit the spill to size. + const arg_size = Type.usize.abiSize(mod); + const arg_align = Type.usize.abiAlignment(mod); + const offset = try self.allocMem(inst, @intCast(arg_size), arg_align); + try self.genSetStack(Type.usize, offset, .{ .register = src_reg }); + + // can go on to be reused in next function call + self.register_manager.freeReg(src_reg); + + break :dst .{ .stack_offset = offset }; }, else => return self.fail("TODO: airArg {s}", .{@tagName(src_mcv)}), }; @@ -3258,17 +3270,26 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { return self.fail("TODO: support more than 8 function args", .{}); } + const locks = try self.gpa.alloc(RegisterLock, result.args.len); + defer self.gpa.free(locks); + for (0..result.args.len) |i| { const arg_reg = try self.register_manager.allocReg(null, fa); + const lock = self.register_manager.lockRegAssumeUnused(arg_reg); + locks[i] = lock; result.args[i] = .{ .register = arg_reg }; } + // we can just free the locks now, as this should be the only place where the fa + // arg set is used. + for (locks) |lock| { + self.register_manager.unlockReg(lock); + } + // stack_offset = num s registers spilled + local var space - var stack_offset: u32 = 0; - _ = &stack_offset; // TODO: spill used s registers here - result.stack_byte_count = stack_offset; + result.stack_byte_count = 0; result.stack_align = .@"16"; }, else => return self.fail("TODO implement function parameters for {} on riscv64", .{cc}), From f1fe5c937e5587064dde4ab357e9efe277a5ea49 Mon Sep 17 00:00:00 2001 From: David Rubin Date: Sat, 23 Mar 2024 14:55:33 -0700 Subject: [PATCH 16/44] riscv: pointer work Lots of thinking later, I've begun to wrap my head around how the pointers should work. This commit allows basic pointer loading and storing to happen; a short sketch of the store lowering follows.
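To make the new lowering concrete: when the pointer lives in a register, a store through it picks a RISC-V store instruction by the value's ABI size, with the pointer as the base register and a zero immediate offset. A small illustrative sketch in plain Zig (`storeMnemonic` is a hypothetical helper, not the backend's real types):

    const std = @import("std");

    /// Illustrative only: maps a value's ABI size in bytes to the RISC-V
    /// store mnemonic used when storing through a pointer held in a register.
    fn storeMnemonic(abi_size: u64) ?[]const u8 {
        return switch (abi_size) {
            1 => "sb", // store byte
            2 => "sh", // store halfword
            4 => "sw", // store word
            8 => "sd", // store doubleword
            else => null, // larger values need a memcpy-style loop
        };
    }

    test "storeMnemonic picks the right width" {
        try std.testing.expectEqualStrings("sw", storeMnemonic(4).?);
        try std.testing.expect(storeMnemonic(3) == null);
    }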
--- src/arch/riscv64/CodeGen.zig | 122 ++++++++++++++++++++++++----------- src/arch/riscv64/Emit.zig | 1 + src/arch/riscv64/Mir.zig | 4 +- 3 files changed, 88 insertions(+), 39 deletions(-) diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index bc844f1b5f..bdf42e10c7 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -1666,28 +1666,6 @@ fn reuseOperand(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, op_ind return true; } -fn load(self: *Self, dst_mcv: MCValue, src_ptr: MCValue, ptr_ty: Type) InnerError!void { - const mod = self.bin_file.comp.module.?; - const elem_ty = ptr_ty.childType(mod); - - switch (src_ptr) { - .none => unreachable, - .undef => unreachable, - .unreach => unreachable, - .dead => unreachable, - .immediate => |imm| try self.setValue(elem_ty, dst_mcv, .{ .memory = imm }), - .ptr_stack_offset => |off| try self.setValue(elem_ty, dst_mcv, .{ .stack_offset = off }), - .stack_offset, - .register, - => try self.setValue(elem_ty, dst_mcv, src_ptr), - .memory => return self.fail("TODO: load memory", .{}), - .load_symbol => { - const reg = try self.copyToTmpRegister(ptr_ty, src_ptr); - try self.load(dst_mcv, .{ .register = reg }, ptr_ty); - }, - } -} - fn airLoad(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.comp.module.?; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; @@ -1706,8 +1684,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { // The MCValue that holds the pointer can be re-used as the value. break :blk ptr; } else { - // TODO: set this to true, will need to implement register version of arrays and structs - break :blk try self.allocRegOrMem(inst, false); + break :blk try self.allocRegOrMem(inst, true); } }; try self.load(dst_mcv, ptr, self.typeOf(ty_op.operand)); @@ -1716,18 +1693,27 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn store(self: *Self, dst_ptr: MCValue, src_val: MCValue, ptr_ty: Type, value_ty: Type) !void { - _ = ptr_ty; +fn load(self: *Self, dst_mcv: MCValue, src_ptr: MCValue, ptr_ty: Type) InnerError!void { + const mod = self.bin_file.comp.module.?; + const elem_ty = ptr_ty.childType(mod); - log.debug("storing {s}", .{@tagName(dst_ptr)}); - - switch (dst_ptr) { + switch (src_ptr) { .none => unreachable, .undef => unreachable, .unreach => unreachable, .dead => unreachable, - .ptr_stack_offset => |off| try self.genSetStack(value_ty, off, src_val), - else => return self.fail("TODO implement storing to MCValue.{s}", .{@tagName(dst_ptr)}), + .immediate => |imm| try self.setValue(elem_ty, dst_mcv, .{ .memory = imm }), + .ptr_stack_offset => |off| try self.setValue(elem_ty, dst_mcv, .{ .stack_offset = off }), + + .stack_offset, + .register, + .memory, + => try self.setValue(elem_ty, dst_mcv, src_ptr), + + .load_symbol => { + const reg = try self.copyToTmpRegister(ptr_ty, src_ptr); + try self.load(dst_mcv, .{ .register = reg }, ptr_ty); + }, } } @@ -1748,6 +1734,50 @@ fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void { return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none }); } +/// Stores `value` into the memory that `pointer` points at.
+fn store(self: *Self, pointer: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) !void { + _ = ptr_ty; + const mod = self.bin_file.comp.module.?; + const value_size = value_ty.abiSize(mod); + + log.debug("storing {s}", .{@tagName(pointer)}); + + switch (pointer) { + .none => unreachable, + .undef => unreachable, + .unreach => unreachable, + .dead => unreachable, + .ptr_stack_offset => |off| try self.genSetStack(value_ty, off, value), + + .register => |reg| { + const value_reg = try self.copyToTmpRegister(value_ty, value); + + switch (value_size) { + 1, 2, 4, 8 => { + const tag: Mir.Inst.Tag = switch (value_size) { + 1 => .sb, + 2 => .sh, + 4 => .sw, + 8 => .sd, + else => unreachable, + }; + + _ = try self.addInst(.{ + .tag = tag, + .data = .{ .i_type = .{ + .rd = value_reg, + .rs1 = reg, + .imm12 = 0, + } }, + }); + }, + else => return self.fail("TODO: genSetStack for size={d}", .{value_size}), + } + }, + else => return self.fail("TODO implement storing to MCValue.{s}", .{@tagName(pointer)}), + } +} + fn airStructFieldPtr(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.StructField, ty_pl.payload).data; @@ -2693,10 +2723,14 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, src_val: MCValue) Inner if (!self.wantSafety()) return; try self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaaaaaaaaaa }); }, - .immediate => { + .immediate, + .ptr_stack_offset, + => { + // TODO: remove this lock in favor of a copyToTmpRegister when we load 64 bit immediates with + // a register allocation. const reg = try self.register_manager.allocReg(null, gp); - const reg_lock = self.register_manager.lockReg(reg); - defer if (reg_lock) |lock| self.register_manager.unlockReg(lock); + const reg_lock = self.register_manager.lockRegAssumeUnused(reg); + defer self.register_manager.unlockReg(reg_lock); try self.genSetReg(ty, reg, src_val); @@ -2849,7 +2883,18 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_val: MCValue) InnerError! switch (src_val) { .dead => unreachable, - .ptr_stack_offset => |off| try self.genSetReg(ty, reg, .{ .stack_offset = off }), + .ptr_stack_offset => |off| { + _ = try self.addInst(.{ + .tag = .addi, + .data = .{ .i_type = .{ + .rd = reg, + .rs1 = .s0, + .imm12 = math.cast(i12, off) orelse { + return self.fail("TODO: bigger stack sizes", .{}); + }, + } }, + }); + }, .unreach, .none => return, // Nothing to do. .undef => { if (!self.wantSafety()) @@ -3166,6 +3211,8 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void { fn airPrefetch(self: *Self, inst: Air.Inst.Index) !void { const prefetch = self.air.instructions.items(.data)[@intFromEnum(inst)].prefetch; + // TODO: RISC-V does have prefetch instruction variants. 
+ // see here: https://raw.githubusercontent.com/riscv/riscv-CMOs/master/specifications/cmobase-v1.0.1.pdf return self.finishAir(inst, MCValue.dead, .{ prefetch.ptr, .none, .none }); } @@ -3205,12 +3252,13 @@ fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue { fn genTypedValue(self: *Self, val: Value) InnerError!MCValue { const mod = self.bin_file.comp.module.?; - const mcv: MCValue = switch (try codegen.genTypedValue( + const result = try codegen.genTypedValue( self.bin_file, self.src_loc, val, mod.funcOwnerDeclIndex(self.func_index), - )) { + ); + const mcv: MCValue = switch (result) { .mcv => |mcv| switch (mcv) { .none => .none, .undef => .undef, diff --git a/src/arch/riscv64/Emit.zig b/src/arch/riscv64/Emit.zig index 6c98fca7b9..f6205cd4ab 100644 --- a/src/arch/riscv64/Emit.zig +++ b/src/arch/riscv64/Emit.zig @@ -442,6 +442,7 @@ fn isStore(tag: Mir.Inst.Tag) bool { .sh => true, .sw => true, .sd => true, + .addi => true, // needed for ptr_stack_offset stores else => false, }; } diff --git a/src/arch/riscv64/Mir.zig b/src/arch/riscv64/Mir.zig index 68b314bd12..26f3b355c9 100644 --- a/src/arch/riscv64/Mir.zig +++ b/src/arch/riscv64/Mir.zig @@ -72,9 +72,9 @@ pub const Inst = struct { /// allocate a register for temporary use. cmp_imm_gte, - /// Branch if equal Uses b_type + /// Branch if equal, Uses b_type beq, - /// Branch if not eql Uses b_type + /// Branch if not equal, Uses b_type bne, nop, From 08452b1adde34f5ef20738970141f643709b6eb9 Mon Sep 17 00:00:00 2001 From: David Rubin Date: Sat, 23 Mar 2024 19:09:29 -0700 Subject: [PATCH 17/44] riscv: correct the order of the return epilogue --- src/arch/riscv64/CodeGen.zig | 35 ++++++++++++++++++----------------- 1 file changed, 18 insertions(+), 17 deletions(-) diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index bdf42e10c7..7ba1d192d9 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -2039,20 +2039,6 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier return bt.finishAir(result); } -fn ret(self: *Self, mcv: MCValue) !void { - const mod = self.bin_file.comp.module.?; - const ret_ty = self.fn_type.fnReturnType(mod); - try self.setValue(ret_ty, self.ret_mcv, mcv); - - // Just add space for an instruction, patch this later - const index = try self.addInst(.{ - .tag = .ret, - .data = .{ .nop = {} }, - }); - - try self.exitlude_jump_relocs.append(self.gpa, index); -} - fn airRet(self: *Self, inst: Air.Inst.Index, safety: bool) !void { if (safety) { // safe @@ -2068,14 +2054,29 @@ fn airRet(self: *Self, inst: Air.Inst.Index, safety: bool) !void { .data = .{ .nop = {} }, }); + try self.ret(operand); + + return self.finishAir(inst, .dead, .{ un_op, .none, .none }); +} + +fn ret(self: *Self, mcv: MCValue) !void { + const mod = self.bin_file.comp.module.?; + + const ret_ty = self.fn_type.fnReturnType(mod); + try self.setValue(ret_ty, self.ret_mcv, mcv); + _ = try self.addInst(.{ .tag = .psuedo_epilogue, .data = .{ .nop = {} }, }); - try self.ret(operand); + // Just add space for an instruction, patch this later + const index = try self.addInst(.{ + .tag = .ret, + .data = .{ .nop = {} }, + }); - return self.finishAir(inst, .dead, .{ un_op, .none, .none }); + try self.exitlude_jump_relocs.append(self.gpa, index); } fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { @@ -3354,7 +3355,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { if (ret_ty_size <= 8) { result.return_value = .{ .register = .a0 }; } else if 
(ret_ty_size <= 16) { - return self.fail("TODO support MCValue 2 registers", .{}); + return self.fail("TODO support returning with a0 + a1", .{}); } else { return self.fail("TODO support return by reference", .{}); } From 09b7aabe094c11d7e4772c2e0c67ec7c28672266 Mon Sep 17 00:00:00 2001 From: David Rubin Date: Sun, 24 Mar 2024 16:58:42 -0700 Subject: [PATCH 18/44] riscv: add `allocReg` helper, and clean up some comparison logic - Added the basic framework for panicking with an overflow in `airAddWithOverflow`, but there is no check done yet. - Added the `cmp_lt`, `cmp_gte`, and `cmp_imm_eq` MIR instructions, and their respective functionality. --- src/arch/riscv64/CodeGen.zig | 165 ++++++++++++++++------------------- src/arch/riscv64/Emit.zig | 41 ++++++--- src/arch/riscv64/Mir.zig | 7 ++ 3 files changed, 114 insertions(+), 99 deletions(-) diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 7ba1d192d9..f2e43f52e3 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -736,6 +736,15 @@ fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue { return MCValue{ .stack_offset = stack_offset }; } +/// Allocates a register from the general purpose set and returns the Register and the Lock. +/// +/// It is up to the caller to unlock the register later. +fn allocReg(self: *Self) !struct { Register, RegisterLock } { + const reg = try self.register_manager.allocReg(null, gp); + const lock = self.register_manager.lockRegAssumeUnused(reg); + return .{ reg, lock }; +} + pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void { const stack_mcv = try self.allocRegOrMem(inst, false); log.debug("spilling {d} to stack mcv {any}", .{ inst, stack_mcv }); @@ -983,67 +992,36 @@ fn binOpRegister( lhs_ty: Type, rhs_ty: Type, ) !MCValue { - const lhs_is_register = lhs == .register; - const rhs_is_register = rhs == .register; + _ = maybe_inst; - const lhs_lock: ?RegisterLock = if (lhs_is_register) - self.register_manager.lockReg(lhs.register) - else - null; - defer if (lhs_lock) |reg| self.register_manager.unlockReg(reg); + const lhs_reg, const lhs_lock = blk: { + if (lhs == .register) break :blk .{ lhs.register, null }; - const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; - - const lhs_reg = if (lhs_is_register) lhs.register else blk: { - const track_inst: ?Air.Inst.Index = if (maybe_inst) |inst| inst: { - const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - break :inst bin_op.lhs.toIndex().?; - } else null; - - const reg = try self.register_manager.allocReg(track_inst, gp); - - if (track_inst) |inst| branch.inst_table.putAssumeCapacity(inst, .{ .register = reg }); - - break :blk reg; + const lhs_reg, const lhs_lock = try self.allocReg(); + try self.genSetReg(lhs_ty, lhs_reg, lhs); + break :blk .{ lhs_reg, lhs_lock }; }; - const new_lhs_lock = self.register_manager.lockReg(lhs_reg); - defer if (new_lhs_lock) |reg| self.register_manager.unlockReg(reg); + defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock); - const rhs_reg = if (rhs_is_register) rhs.register else blk: { - const track_inst: ?Air.Inst.Index = if (maybe_inst) |inst| inst: { - const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - break :inst bin_op.rhs.toIndex().?; - } else null; - - const reg = try self.register_manager.allocReg(track_inst, gp); - - if (track_inst) |inst|
branch.inst_table.putAssumeCapacity(inst, .{ .register = reg }); - - break :blk reg; + const rhs_reg, const rhs_lock = try self.allocReg(); + try self.genSetReg(rhs_ty, rhs_reg, rhs); + break :blk .{ rhs_reg, rhs_lock }; }; - const new_rhs_lock = self.register_manager.lockReg(rhs_reg); - defer if (new_rhs_lock) |reg| self.register_manager.unlockReg(reg); + defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock); - const dest_reg = if (maybe_inst) |inst| blk: { - const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - - if (lhs_is_register and self.reuseOperand(inst, bin_op.lhs, 0, lhs)) { - break :blk lhs_reg; - } else if (rhs_is_register and self.reuseOperand(inst, bin_op.rhs, 1, rhs)) { - break :blk rhs_reg; - } else { - break :blk try self.register_manager.allocReg(inst, gp); - } - } else try self.register_manager.allocReg(null, gp); - - if (!lhs_is_register) try self.genSetReg(lhs_ty, lhs_reg, lhs); - if (!rhs_is_register) try self.genSetReg(rhs_ty, rhs_reg, rhs); + const dest_reg, const dest_lock = try self.allocReg(); + defer self.register_manager.unlockReg(dest_lock); const mir_tag: Mir.Inst.Tag = switch (tag) { .add => .add, .sub => .sub, .cmp_eq => .cmp_eq, .cmp_gt => .cmp_gt, + .cmp_gte => .cmp_gte, + .cmp_lt => .cmp_lt, .shl => .sllw, .shr => .srlw, else => return self.fail("TODO: binOpRegister {s}", .{@tagName(tag)}), @@ -1080,48 +1058,28 @@ fn binOpImm( rhs_ty: Type, ) !MCValue { assert(rhs == .immediate); + _ = maybe_inst; - const lhs_is_register = lhs == .register; + // TODO: use `maybe_inst` to track instead of forcing a lock. - const lhs_lock: ?RegisterLock = if (lhs_is_register) - self.register_manager.lockReg(lhs.register) - else - null; - defer if (lhs_lock) |reg| self.register_manager.unlockReg(reg); + const lhs_reg, const lhs_lock = blk: { + if (lhs == .register) break :blk .{ lhs.register, null }; - const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; - - const lhs_reg = if (lhs_is_register) lhs.register else blk: { - const track_inst: ?Air.Inst.Index = if (maybe_inst) |inst| inst: { - const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - break :inst bin_op.lhs.toIndex().?; - } else null; - - const reg = try self.register_manager.allocReg(track_inst, gp); - - if (track_inst) |inst| branch.inst_table.putAssumeCapacity(inst, .{ .register = reg }); - - break :blk reg; + const lhs_reg, const lhs_lock = try self.allocReg(); + try self.genSetReg(lhs_ty, lhs_reg, lhs); + break :blk .{ lhs_reg, lhs_lock }; }; - const new_lhs_lock = self.register_manager.lockReg(lhs_reg); - defer if (new_lhs_lock) |reg| self.register_manager.unlockReg(reg); + defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock); - const dest_reg = if (maybe_inst) |inst| blk: { - const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - - if (lhs_is_register and self.reuseOperand(inst, bin_op.lhs, 0, lhs)) { - break :blk lhs_reg; - } else { - break :blk try self.register_manager.allocReg(inst, gp); - } - } else try self.register_manager.allocReg(null, gp); - - if (!lhs_is_register) try self.genSetReg(lhs_ty, lhs_reg, lhs); + const dest_reg, const dest_lock = try self.allocReg(); + defer self.register_manager.unlockReg(dest_lock); const mir_tag: Mir.Inst.Tag = switch (tag) { .shl => .slli, .shr => .srli, .cmp_gte => .cmp_imm_gte, + .cmp_eq => .cmp_imm_eq, + .add => .addi, else => return self.fail("TODO: binOpImm {s}", .{@tagName(tag)}), }; @@ -1129,6 +1087,8 @@ fn binOpImm( switch (mir_tag) { .slli, 
.srli, + .addi, + .cmp_imm_eq, => { _ = try self.addInst(.{ .tag = mir_tag, @@ -1156,8 +1116,6 @@ fn binOpImm( else => unreachable, } - // generate the struct for overflow checks - return MCValue{ .register = dest_reg }; } @@ -1216,6 +1174,7 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void { } fn airAddWithOverflow(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.comp.module.?; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; @@ -1225,7 +1184,28 @@ fn airAddWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const lhs_ty = self.typeOf(extra.lhs); const rhs_ty = self.typeOf(extra.rhs); - break :result try self.binOp(.add, null, lhs, rhs, lhs_ty, rhs_ty); + const partial_mcv = try self.binOp(.add, null, lhs, rhs, lhs_ty, rhs_ty); + + const tuple_ty = self.typeOfIndex(inst); + + // TODO: optimization, set this to true. needs the other struct access stuff to support + // accessing registers. + const result_mcv = try self.allocRegOrMem(inst, false); + const offset = result_mcv.stack_offset; + + const overflow_offset = tuple_ty.structFieldOffset(1, mod) + offset; + const result_offset = tuple_ty.structFieldOffset(0, mod) + offset; + + const overflow_mcv = try self.binOp(.cmp_lt, null, partial_mcv, lhs, lhs_ty, lhs_ty); + + const overflow_reg, const overflow_lock = try self.allocReg(); + defer self.register_manager.unlockReg(overflow_lock); + + try self.genSetReg(lhs_ty, overflow_reg, overflow_mcv); + + try self.genSetStack(Type.u1, @intCast(overflow_offset), overflow_mcv); + try self.genSetStack(lhs_ty, @intCast(result_offset), partial_mcv); + break :result result_mcv; }; return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none }); @@ -1749,6 +1729,15 @@ fn store(self: *Self, pointer: MCValue, value: MCValue, ptr_ty: Type, value_ty: .dead => unreachable, .ptr_stack_offset => |off| try self.genSetStack(value_ty, off, value), + .stack_offset => { + const pointer_reg, const lock = try self.allocReg(); + defer self.register_manager.unlockReg(lock); + + try self.genSetReg(ptr_ty, pointer_reg, pointer); + + return self.store(.{ .register = pointer_reg }, value, ptr_ty, value_ty); + }, + .register => |reg| { const value_reg = try self.copyToTmpRegister(value_ty, value); @@ -2165,7 +2154,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { const cond_reg_lock = self.register_manager.lockRegAssumeUnused(cond_reg); defer self.register_manager.unlockReg(cond_reg_lock); - // A branch to the false section. Uses bne + // A branch to the false section. Uses beq. 1 is the default "true" state. const reloc = try self.condBr(cond_ty, cond, cond_reg); // If the condition dies here in this condbr instruction, process @@ -2301,7 +2290,7 @@ fn condBr(self: *Self, cond_ty: Type, condition: MCValue, cond_reg: Register) !M try self.genSetReg(cond_ty, cond_reg, condition); return try self.addInst(.{ - .tag = .bne, + .tag = .beq, .data = .{ .b_type = .{ .rs1 = cond_reg, @@ -2729,8 +2718,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, src_val: MCValue) Inner => { // TODO: remove this lock in favor of a copyToTmpRegister when we load 64 bit immediates with // a register allocation. 
- const reg = try self.register_manager.allocReg(null, gp); - const reg_lock = self.register_manager.lockRegAssumeUnused(reg); + const reg, const reg_lock = try self.allocReg(); defer self.register_manager.unlockReg(reg_lock); try self.genSetReg(ty, reg, src_val); @@ -2939,8 +2927,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_val: MCValue) InnerError! // TODO: use a more advanced myriad seq to do this without a reg. // see: https://github.com/llvm/llvm-project/blob/081a66ffacfe85a37ff775addafcf3371e967328/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp#L224 - const temp = try self.register_manager.allocReg(null, gp); - const temp_lock = self.register_manager.lockRegAssumeUnused(temp); + const temp, const temp_lock = try self.allocReg(); defer self.register_manager.unlockReg(temp_lock); const lo32: i32 = @truncate(x); diff --git a/src/arch/riscv64/Emit.zig b/src/arch/riscv64/Emit.zig index f6205cd4ab..c371f14ec1 100644 --- a/src/arch/riscv64/Emit.zig +++ b/src/arch/riscv64/Emit.zig @@ -59,7 +59,10 @@ pub fn emitMir( .cmp_eq => try emit.mirRType(inst), .cmp_gt => try emit.mirRType(inst), + .cmp_gte => try emit.mirRType(inst), + .cmp_lt => try emit.mirRType(inst), .cmp_imm_gte => try emit.mirRType(inst), + .cmp_imm_eq => try emit.mirIType(inst), .beq => try emit.mirBType(inst), .bne => try emit.mirBType(inst), @@ -188,7 +191,12 @@ fn mirRType(emit: *Emit, inst: Mir.Inst.Index) !void { .sub => try emit.writeInstruction(Instruction.sub(rd, rs1, rs2)), .cmp_gt => { // rs1 > rs2 - try emit.writeInstruction(Instruction.slt(rd, rs1, rs2)); + try emit.writeInstruction(Instruction.sltu(rd, rs2, rs1)); + }, + .cmp_gte => { + // rs1 >= rs2 + try emit.writeInstruction(Instruction.sltu(rd, rs1, rs2)); + try emit.writeInstruction(Instruction.xori(rd, rd, 1)); }, .cmp_eq => { // rs1 == rs2 @@ -198,14 +206,19 @@ fn mirRType(emit: *Emit, inst: Mir.Inst.Index) !void { // if rd == 0, set rd to 1 try emit.writeInstruction(Instruction.sltiu(rd, rd, 1)); }, + .cmp_lt => { + // rd = 1 if rs1 < rs2 + try emit.writeInstruction(Instruction.slt(rd, rs1, rs2)); + }, .sllw => try emit.writeInstruction(Instruction.sllw(rd, rs1, rs2)), .srlw => try emit.writeInstruction(Instruction.srlw(rd, rs1, rs2)), .@"or" => try emit.writeInstruction(Instruction.@"or"(rd, rs1, rs2)), .cmp_imm_gte => { - // rd = rs1 >= imm12 - // see the docstring for cmp_imm_gte to see why we use r_type here - try emit.writeInstruction(Instruction.slt(rd, rs1, rs2)); - try emit.writeInstruction(Instruction.xori(rd, rd, 1)); + // rd = 1 if rs1 >= imm12 + // see the docstring of cmp_imm_gte to see why we use r_type here + + // (rs1 >= imm12) == !(imm12 > rs1) + try emit.writeInstruction(Instruction.sltu(rd, rs1, rs2)); }, else => unreachable, } @@ -263,6 +276,10 @@ fn mirIType(emit: *Emit, inst: Mir.Inst.Index) !void { .srli => try emit.writeInstruction(Instruction.srli(rd, rs1, @intCast(imm12))), .slli => try emit.writeInstruction(Instruction.slli(rd, rs1, @intCast(imm12))), + .cmp_imm_eq => { + try emit.writeInstruction(Instruction.xori(rd, rs1, imm12)); + try emit.writeInstruction(Instruction.sltiu(rd, rd, 1)); + }, else => unreachable, } } @@ -490,13 +507,17 @@ fn instructionSize(emit: *Emit, inst: Mir.Inst.Index) usize { .dbg_prologue_end, => 0, - .psuedo_epilogue => 12, - .psuedo_prologue => 16, + .psuedo_prologue, + => 16, - .abs => 12, + .psuedo_epilogue, + .abs, + => 12, - .cmp_eq => 8, - .cmp_imm_gte => 8, + .cmp_eq, + .cmp_imm_eq, + .cmp_gte, + => 8, else => 4, }; diff --git a/src/arch/riscv64/Mir.zig 
b/src/arch/riscv64/Mir.zig index 26f3b355c9..284358650c 100644 --- a/src/arch/riscv64/Mir.zig +++ b/src/arch/riscv64/Mir.zig @@ -62,6 +62,10 @@ pub const Inst = struct { cmp_eq, /// Register `>`, uses r_type cmp_gt, + /// Register `<`, uses r_type + cmp_lt, + /// Register `>=`, uses r_type + cmp_gte, /// Immediate `>=`, uses r_type /// @@ -72,6 +76,9 @@ pub const Inst = struct { /// allocate a register for temporary use. cmp_imm_gte, + /// Immediate `==`, uses i_type + cmp_imm_eq, + /// Branch if equal, Uses b_type beq, /// Branch if not equal, Uses b_type From c96989aa4b283c5d386c5f19e2b12a6b13dd521a Mon Sep 17 00:00:00 2001 From: David Rubin Date: Sun, 24 Mar 2024 19:39:37 -0700 Subject: [PATCH 19/44] riscv: correctly index struct field access When the struct is in stack memory, we access it using a byte offset, because that's how the stack works. On the other hand, when the struct is in a register, we are working with bits, and the field offset should be a bit offset. --- src/arch/riscv64/CodeGen.zig | 39 ++++++++++++++++++++++-------------- 1 file changed, 24 insertions(+), 15 deletions(-) diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index f2e43f52e3..fd56644a48 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -1184,28 +1184,36 @@ fn airAddWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const lhs_ty = self.typeOf(extra.lhs); const rhs_ty = self.typeOf(extra.rhs); - const partial_mcv = try self.binOp(.add, null, lhs, rhs, lhs_ty, rhs_ty); + const add_result_mcv = try self.binOp(.add, null, lhs, rhs, lhs_ty, rhs_ty); const tuple_ty = self.typeOfIndex(inst); + const int_info = lhs_ty.intInfo(mod); // TODO: optimization, set this to true. needs the other struct access stuff to support // accessing registers. const result_mcv = try self.allocRegOrMem(inst, false); const offset = result_mcv.stack_offset; - const overflow_offset = tuple_ty.structFieldOffset(1, mod) + offset; const result_offset = tuple_ty.structFieldOffset(0, mod) + offset; - const overflow_mcv = try self.binOp(.cmp_lt, null, partial_mcv, lhs, lhs_ty, lhs_ty); + // set the result first as we don't have a lock on the add_result_mcv register and it will + // get clobbered in the next binOp. + try self.genSetStack(lhs_ty, @intCast(result_offset), add_result_mcv); - const overflow_reg, const overflow_lock = try self.allocReg(); - defer self.register_manager.unlockReg(overflow_lock); + if (int_info.bits >= 8 and math.isPowerOfTwo(int_info.bits)) { + if (int_info.signedness == .unsigned) { + const overflow_offset = tuple_ty.structFieldOffset(1, mod) + offset; - try self.genSetReg(lhs_ty, overflow_reg, overflow_mcv); + const overflow_mcv = try self.binOp(.cmp_lt, null, add_result_mcv, lhs, lhs_ty, lhs_ty); + try self.genSetStack(Type.u1, @intCast(overflow_offset), overflow_mcv); - try self.genSetStack(Type.u1, @intCast(overflow_offset), overflow_mcv); - try self.genSetStack(lhs_ty, @intCast(result_offset), partial_mcv); - break :result result_mcv; + break :result result_mcv; + } else { + return self.fail("TODO: airAddWithOverFlow calculate carry for signed addition", .{}); + } + } else { + return self.fail("TODO: airAddWithOverflow with < 8 bits or non-pow of 2", .{}); + } }; return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none }); } @@ -1716,9 +1724,8 @@ fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void { /// Stores `value` into the memory that `pointer` points at.
fn store(self: *Self, pointer: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) !void { - _ = ptr_ty; const mod = self.bin_file.comp.module.?; - const value_size = value_ty.abiSize(mod); + const value_abi_size = value_ty.abiSize(mod); log.debug("storing {s}", .{@tagName(pointer)}); @@ -1741,9 +1748,9 @@ fn store(self: *Self, pointer: MCValue, value: MCValue, ptr_ty: Type, value_ty: .register => |reg| { const value_reg = try self.copyToTmpRegister(value_ty, value); - switch (value_size) { + switch (value_abi_size) { 1, 2, 4, 8 => { - const tag: Mir.Inst.Tag = switch (value_size) { + const tag: Mir.Inst.Tag = switch (value_abi_size) { 1 => .sb, 2 => .sh, 4 => .sw, 8 => .sd, else => unreachable, }; @@ -1760,7 +1767,7 @@ fn store(self: *Self, pointer: MCValue, value: MCValue, ptr_ty: Type, value_ty: } }, }); }, - else => return self.fail("TODO: genSetStack for size={d}", .{value_size}), + else => return self.fail("TODO: genSetStack for size={d}", .{value_abi_size}), } }, else => return self.fail("TODO implement storing to MCValue.{s}", .{@tagName(pointer)}), @@ -1842,7 +1849,9 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { break :result if (field_off == 0) dst_mcv else try self.copyToNewRegister(inst, dst_mcv); }, .stack_offset => |off| { - break :result MCValue{ .stack_offset = off + field_off }; + log.debug("airStructFieldVal off: {}", .{field_off}); + const field_byte_off: u32 = @divExact(field_off, 8); + break :result MCValue{ .stack_offset = off + field_byte_off }; }, else => return self.fail("TODO: airStructField {s}", .{@tagName(src_mcv)}), } From 06089fc89a47e6ae84d09e6e21db23b7a57f885e Mon Sep 17 00:00:00 2001 From: David Rubin Date: Mon, 25 Mar 2024 03:28:11 -0700 Subject: [PATCH 20/44] riscv: fix how we calculate stack offsets. Allows for pass-by-reference arguments. --- src/arch/riscv64/Emit.zig | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/src/arch/riscv64/Emit.zig b/src/arch/riscv64/Emit.zig index c371f14ec1..b2e97041cb 100644 --- a/src/arch/riscv64/Emit.zig +++ b/src/arch/riscv64/Emit.zig @@ -536,11 +536,18 @@ fn lowerMir(emit: *Emit) !void { const data = mir_datas[inst].i_type; // TODO: probably create a pseudo instruction for s0 loads/stores instead of this. if (data.rs1 == .s0) { - const casted_size = math.cast(i12, emit.stack_size) orelse { - return emit.fail("TODO: support bigger stack sizes lowerMir", .{}); - }; const offset = mir_datas[inst].i_type.imm12; - mir_datas[inst].i_type.imm12 = -(casted_size - 12 - offset); + + // sp + 32 (aka s0) + // ra -- previous ra spilled + // s0 -- previous s0 spilled + // --- this is -16(s0) + + // TODO: this "+ 8" is completely arbitrary, as the largest possible store. + // We don't want to actually use it; instead we need to calculate the difference + // between the first and second stack store and use that instead.
+
+                    mir_datas[inst].i_type.imm12 = -(16 + offset + 8);
                 }
             }

From e70584e2f87ae8daab18d9e28f72dac020d7702e Mon Sep 17 00:00:00 2001
From: David Rubin
Date: Mon, 25 Mar 2024 03:52:13 -0700
Subject: [PATCH 21/44] riscv: change `load_symbol` pseudo instruction size to 8
---
 src/arch/riscv64/Emit.zig | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/arch/riscv64/Emit.zig b/src/arch/riscv64/Emit.zig
index b2e97041cb..d85629ad4b 100644
--- a/src/arch/riscv64/Emit.zig
+++ b/src/arch/riscv64/Emit.zig
@@ -517,6 +517,7 @@ fn instructionSize(emit: *Emit, inst: Mir.Inst.Index) usize {
         .cmp_eq,
         .cmp_imm_eq,
         .cmp_gte,
+        .load_symbol,
         => 8,

         else => 4,

From b28c966e332623dc43b1481b34016d18ce3262fa Mon Sep 17 00:00:00 2001
From: David Rubin
Date: Mon, 25 Mar 2024 05:15:02 -0700
Subject: [PATCH 22/44] riscv: fix overflow checks in addition.
---
 src/arch/riscv64/CodeGen.zig | 48 +++++++++++++++++++++++++++++++++---
 src/arch/riscv64/Emit.zig    | 15 ++++++++---
 src/arch/riscv64/Mir.zig     |  5 ++++
 3 files changed, 61 insertions(+), 7 deletions(-)

diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index fd56644a48..e375ced93a 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -1019,6 +1019,7 @@ fn binOpRegister(
         .add => .add,
         .sub => .sub,
         .cmp_eq => .cmp_eq,
+        .cmp_neq => .cmp_neq,
         .cmp_gt => .cmp_gt,
         .cmp_gte => .cmp_gte,
         .cmp_lt => .cmp_lt,
@@ -1185,6 +1186,8 @@ fn airAddWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
         const rhs_ty = self.typeOf(extra.rhs);

         const add_result_mcv = try self.binOp(.add, null, lhs, rhs, lhs_ty, rhs_ty);
+        const add_result_lock = self.register_manager.lockRegAssumeUnused(add_result_mcv.register);
+        defer self.register_manager.unlockReg(add_result_lock);

         const tuple_ty = self.typeOfIndex(inst);
         const int_info = lhs_ty.intInfo(mod);
@@ -1196,15 +1199,44 @@ fn airAddWithOverflow(self: *Self, inst: Air.Inst.Index) !void {

         const result_offset = tuple_ty.structFieldOffset(0, mod) + offset;

-        // set the result first as we don't have a lock on the add_result_mcv register and it will
-        // get clobbered in the next binOp.
         try self.genSetStack(lhs_ty, @intCast(result_offset), add_result_mcv);

         if (int_info.bits >= 8 and math.isPowerOfTwo(int_info.bits)) {
             if (int_info.signedness == .unsigned) {
                 const overflow_offset = tuple_ty.structFieldOffset(1, mod) + offset;

-                const overflow_mcv = try self.binOp(.cmp_lt, null, add_result_mcv, lhs, lhs_ty, lhs_ty);
+                const max_val = std.math.pow(u16, 2, int_info.bits) - 1;
+
+                const overflow_reg, const overflow_lock = try self.allocReg();
+                defer self.register_manager.unlockReg(overflow_lock);
+
+                const add_reg, const add_lock = blk: {
+                    if (add_result_mcv == .register) break :blk .{ add_result_mcv.register, null };
+
+                    const add_reg, const add_lock = try self.allocReg();
+                    try self.genSetReg(lhs_ty, add_reg, add_result_mcv);
+                    break :blk .{ add_reg, add_lock };
+                };
+                defer if (add_lock) |lock| self.register_manager.unlockReg(lock);
+
+                _ = try self.addInst(.{
+                    .tag = .andi,
+                    .data = .{ .i_type = .{
+                        .rd = overflow_reg,
+                        .rs1 = add_reg,
+                        .imm12 = @intCast(max_val),
+                    } },
+                });
+
+                const overflow_mcv = try self.binOp(
+                    .cmp_neq,
+                    null,
+                    .{ .register = overflow_reg },
+                    .{ .register = add_reg },
+                    lhs_ty,
+                    lhs_ty,
+                );
+
                 try self.genSetStack(Type.u1, @intCast(overflow_offset), overflow_mcv);

                 break :result result_mcv;
@@ -3042,7 +3074,15 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_val: MCValue) InnerError!
 fn airIntFromPtr(self: *Self, inst: Air.Inst.Index) !void {
     const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
-    const result = try self.resolveInst(un_op);
+    const result = result: {
+        const src_mcv = try self.resolveInst(un_op);
+        if (self.reuseOperand(inst, un_op, 0, src_mcv)) break :result src_mcv;
+
+        const dst_mcv = try self.allocRegOrMem(inst, true);
+        const dst_ty = self.typeOfIndex(inst);
+        try self.setValue(dst_ty, dst_mcv, src_mcv);
+        break :result dst_mcv;
+    };
     return self.finishAir(inst, result, .{ un_op, .none, .none });
 }
diff --git a/src/arch/riscv64/Emit.zig b/src/arch/riscv64/Emit.zig
index d85629ad4b..a07a0fb03e 100644
--- a/src/arch/riscv64/Emit.zig
+++ b/src/arch/riscv64/Emit.zig
@@ -58,6 +58,7 @@ pub fn emitMir(
         .@"or" => try emit.mirRType(inst),

         .cmp_eq => try emit.mirRType(inst),
+        .cmp_neq => try emit.mirRType(inst),
         .cmp_gt => try emit.mirRType(inst),
         .cmp_gte => try emit.mirRType(inst),
         .cmp_lt => try emit.mirRType(inst),
@@ -68,6 +69,7 @@ pub fn emitMir(
         .bne => try emit.mirBType(inst),

         .addi => try emit.mirIType(inst),
+        .andi => try emit.mirIType(inst),
         .jalr => try emit.mirIType(inst),
         .abs => try emit.mirIType(inst),

@@ -201,10 +203,14 @@ fn mirRType(emit: *Emit, inst: Mir.Inst.Index) !void {
         .cmp_eq => {
             // rs1 == rs2
-            // if equal, write 0 to rd
             try emit.writeInstruction(Instruction.xor(rd, rs1, rs2));
-            // if rd == 0, set rd to 1
-            try emit.writeInstruction(Instruction.sltiu(rd, rd, 1));
+            try emit.writeInstruction(Instruction.sltiu(rd, rd, 1)); // seqz
+        },
+        .cmp_neq => {
+            // rs1 != rs2
+
+            try emit.writeInstruction(Instruction.xor(rd, rs1, rs2));
+            try emit.writeInstruction(Instruction.sltu(rd, .x0, rd)); // snez
         },
         .cmp_lt => {
             // rd = 1 if rs1 < rs2
@@ -255,6 +261,8 @@ fn mirIType(emit: *Emit, inst: Mir.Inst.Index) !void {
         .addi => try emit.writeInstruction(Instruction.addi(rd, rs1, imm12)),
         .jalr => try emit.writeInstruction(Instruction.jalr(rd, imm12, rs1)),

+        .andi => try emit.writeInstruction(Instruction.andi(rd, rs1, imm12)),
+
         .ld => try emit.writeInstruction(Instruction.ld(rd, imm12, rs1)),
         .lw => try emit.writeInstruction(Instruction.lw(rd, imm12, rs1)),
         .lh => try emit.writeInstruction(Instruction.lh(rd, imm12, rs1)),
@@ -515,6 +523,7 @@ fn instructionSize(emit: *Emit, inst: Mir.Inst.Index) usize {
         => 12,

         .cmp_eq,
+        .cmp_neq,
         .cmp_imm_eq,
         .cmp_gte,
         .load_symbol,
         => 8,

         else => 4,
diff --git a/src/arch/riscv64/Mir.zig b/src/arch/riscv64/Mir.zig
index 284358650c..46c55def3f 100644
--- a/src/arch/riscv64/Mir.zig
+++ b/src/arch/riscv64/Mir.zig
@@ -57,9 +57,14 @@ pub const Inst = struct {
         /// Jumps. Uses `inst` payload.
         j,

+        /// Immediate and, uses i_type payload
+        andi,
+
         // NOTE: Maybe create a special data for compares that includes the ops
         /// Register `==`, uses r_type
         cmp_eq,
+        /// Register `!=`, uses r_type
+        cmp_neq,
         /// Register `>`, uses r_type
         cmp_gt,
         /// Register `<`, uses r_type

From 685f8282180016ac8d82ecf2fe7facc1a2d6b9f7 Mon Sep 17 00:00:00 2001
From: David Rubin
Date: Mon, 25 Mar 2024 05:30:12 -0700
Subject: [PATCH 23/44] riscv: add a custom panic function

this provides a much better indication of whether we are having a controlled
panic with an error message or actually segfaulting; before this change, the
`trap` was causing a segfault.
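
for reference, here is a minimal standalone sketch (illustrative, not code
from this commit) of the raw syscall sequence this relies on, assuming the
Linux riscv64 syscall ABI: syscall number in a7, arguments in a0..a2, and
`ecall` to trap into the kernel. `rawWrite` and `rawExit` are hypothetical
names used only for this sketch:

    fn rawWrite(fd: usize, msg: []const u8) void {
        // SYS_write is 64 on riscv64-linux.
        asm volatile ("ecall"
            :
            : [number] "{a7}" (64),
              [arg1] "{a0}" (fd),
              [arg2] "{a1}" (@intFromPtr(msg.ptr)),
              [arg3] "{a2}" (msg.len),
            : "memory"
        );
    }

    fn rawExit(code: usize) noreturn {
        // SYS_exit is 94 on riscv64-linux.
        asm volatile ("ecall"
            :
            : [number] "{a7}" (94),
              [arg1] "{a0}" (code),
            : "memory"
        );
        unreachable;
    }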
---
 lib/std/builtin.zig | 22 ++++++++++++++++++++--
 src/target.zig      |  2 +-
 2 files changed, 21 insertions(+), 3 deletions(-)

diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig
index 3fb9494305..1d26c3e0c1 100644
--- a/lib/std/builtin.zig
+++ b/lib/std/builtin.zig
@@ -767,13 +767,31 @@ pub fn default_panic(msg: []const u8, error_return_trace: ?*StackTrace, ret_addr
         builtin.zig_backend == .stage2_x86 or
         (builtin.zig_backend == .stage2_x86_64 and (builtin.target.ofmt != .elf and builtin.target.ofmt != .macho)) or
         builtin.zig_backend == .stage2_sparc64 or
-        builtin.zig_backend == .stage2_spirv64 or
-        builtin.zig_backend == .stage2_riscv64)
+        builtin.zig_backend == .stage2_spirv64)
     {
         while (true) {
             @breakpoint();
         }
     }
+
+    if (builtin.zig_backend == .stage2_riscv64) {
+        asm volatile ("ecall"
+            :
+            : [number] "{a7}" (64),
+              [arg1] "{a0}" (1),
+              [arg2] "{a1}" (@intFromPtr("panicking!\n")),
+              [arg3] "{a2}" ("panicking!\n".len),
+            : "memory"
+        );
+        asm volatile ("ecall"
+            :
+            : [number] "{a7}" (94),
+              [arg1] "{a0}" (127),
+            : "memory"
+        );
+        unreachable;
+    }
+
     switch (builtin.os.tag) {
         .freestanding => {
             while (true) {
diff --git a/src/target.zig b/src/target.zig
index ea58111bc1..8f61b2ba03 100644
@@ -526,7 +526,7 @@ pub fn backendSupportsFeature(
     feature: Feature,
 ) bool {
     return switch (feature) {
-        .panic_fn => ofmt == .c or use_llvm or cpu_arch == .x86_64,
+        .panic_fn => ofmt == .c or use_llvm or cpu_arch == .x86_64 or cpu_arch == .riscv64,
         .panic_unwrap_error => ofmt == .c or use_llvm,
         .safety_check_formatted => ofmt == .c or use_llvm,
         .error_return_trace => use_llvm,

From 3c0015c82889eff557cb937b655eccaaa7ecd01b Mon Sep 17 00:00:00 2001
From: David Rubin
Date: Mon, 25 Mar 2024 10:33:34 -0700
Subject: [PATCH 24/44] riscv: implement a basic `@intCast`

the truncation panic logic is generated in Sema, so I don't need to roll my
own. I added all of the boilerplate for detecting the truncation, and it
works in basic test cases!
---
 src/Sema.zig                 |  7 +--
 src/arch/riscv64/CodeGen.zig | 92 ++++++++++++++++++++++++++++++------
 src/arch/riscv64/Emit.zig    |  8 ++++
 src/arch/riscv64/Mir.zig     |  3 ++
 4 files changed, 92 insertions(+), 18 deletions(-)

diff --git a/src/Sema.zig b/src/Sema.zig
index 86d3378aee..37fdf0adb5 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -10499,9 +10499,10 @@ fn intCast(
     const dest_max_val_scalar = try dest_scalar_ty.maxIntScalar(mod, operand_scalar_ty);
     const dest_max_val = try sema.splat(operand_ty, dest_max_val_scalar);
     const dest_max = Air.internedToRef(dest_max_val.toIntern());
-    const diff = try block.addBinOp(.sub_wrap, dest_max, operand);

     if (actual_info.signedness == .signed) {
+        const diff = try block.addBinOp(.sub_wrap, dest_max, operand);
+
         // Reinterpret the sign-bit as part of the value. This will make
         // negative differences (`operand` > `dest_max`) appear too big.
const unsigned_scalar_operand_ty = try mod.intType(.unsigned, actual_bits); @@ -10542,7 +10543,7 @@ fn intCast( try sema.addSafetyCheck(block, src, ok, .cast_truncated_data); } else { const ok = if (is_vector) ok: { - const is_in_range = try block.addCmpVector(diff, dest_max, .lte); + const is_in_range = try block.addCmpVector(operand, dest_max, .lte); const all_in_range = try block.addInst(.{ .tag = if (block.float_mode == .optimized) .reduce_optimized else .reduce, .data = .{ .reduce = .{ @@ -10552,7 +10553,7 @@ fn intCast( }); break :ok all_in_range; } else ok: { - const is_in_range = try block.addBinOp(.cmp_lte, diff, dest_max); + const is_in_range = try block.addBinOp(.cmp_lte, operand, dest_max); break :ok is_in_range; }; try sema.addSafetyCheck(block, src, ok, .cast_truncated_data); diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index e375ced93a..eebb1c6cb0 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -797,23 +797,57 @@ fn airFpext(self: *Self, inst: Air.Inst.Index) !void { } fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - if (self.liveness.isUnused(inst)) - return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none }); - const mod = self.bin_file.comp.module.?; - const operand_ty = self.typeOf(ty_op.operand); - const operand = try self.resolveInst(ty_op.operand); - const info_a = operand_ty.intInfo(mod); - const info_b = self.typeOfIndex(inst).intInfo(mod); - if (info_a.signedness != info_b.signedness) - return self.fail("TODO gen intcast sign safety in semantic analysis", .{}); + const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const src_ty = self.typeOf(ty_op.operand); + const dst_ty = self.typeOfIndex(inst); - if (info_a.bits == info_b.bits) - return self.finishAir(inst, operand, .{ ty_op.operand, .none, .none }); + const result: MCValue = result: { + const dst_abi_size: u32 = @intCast(dst_ty.abiSize(mod)); - return self.fail("TODO implement intCast for {}", .{self.target.cpu.arch}); - // return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); + const src_int_info = src_ty.intInfo(mod); + const dst_int_info = dst_ty.intInfo(mod); + const extend = switch (src_int_info.signedness) { + .signed => dst_int_info, + .unsigned => src_int_info, + }.signedness; + + _ = dst_abi_size; + _ = extend; + + const min_ty = if (dst_int_info.bits < src_int_info.bits) dst_ty else src_ty; + + const src_mcv = try self.resolveInst(ty_op.operand); + + const src_storage_bits: u16 = switch (src_mcv) { + .register => 64, + .stack_offset => src_int_info.bits, + else => return self.fail("airIntCast from {s}", .{@tagName(src_mcv)}), + }; + + const dst_mcv = if (dst_int_info.bits <= src_storage_bits and + math.divCeil(u16, dst_int_info.bits, 64) catch unreachable == + math.divCeil(u32, src_storage_bits, 64) catch unreachable and + self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) src_mcv else dst: { + const dst_mcv = try self.allocRegOrMem(inst, true); + try self.setValue(min_ty, dst_mcv, src_mcv); + break :dst dst_mcv; + }; + + if (dst_int_info.bits <= src_int_info.bits) { + break :result dst_mcv; + } + + if (dst_int_info.bits > 64 or src_int_info.bits > 64) { + break :result null; // TODO + } + + break :result dst_mcv; + } orelse return self.fail("TODO implement airIntCast from {} to {}", .{ + src_ty.fmt(mod), dst_ty.fmt(mod), + }); + + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } 
 fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
@@ -1080,7 +1114,9 @@ fn binOpImm(
         .shr => .srli,
         .cmp_gte => .cmp_imm_gte,
         .cmp_eq => .cmp_imm_eq,
+        .cmp_lte => .cmp_imm_lte,
         .add => .addi,
+        .sub => .addiw,
         else => return self.fail("TODO: binOpImm {s}", .{@tagName(tag)}),
     };
@@ -1090,6 +1126,7 @@ fn binOpImm(
         .srli,
         .addi,
         .cmp_imm_eq,
+        .cmp_imm_lte,
         => {
             _ = try self.addInst(.{
                 .tag = mir_tag,
@@ -1102,6 +1139,18 @@ fn binOpImm(
                 } },
             });
         },
+        .addiw => {
+            _ = try self.addInst(.{
+                .tag = mir_tag,
+                .data = .{ .i_type = .{
+                    .rd = dest_reg,
+                    .rs1 = lhs_reg,
+                    .imm12 = -(math.cast(i12, rhs.immediate) orelse {
+                        return self.fail("TODO: binOpImm larger than i12 i_type payload", .{});
+                    }),
+                } },
+            });
+        },
         .cmp_imm_gte => {
             const imm_reg = try self.copyToTmpRegister(rhs_ty, .{ .immediate = rhs.immediate - 1 });
@@ -1146,7 +1195,16 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void {
 fn airSubWrap(self: *Self, inst: Air.Inst.Index) !void {
     const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
-    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement subwrap for {}", .{self.target.cpu.arch});
+    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+        // RISC-V arithmetic instructions already wrap, so this is simply a sub binOp with
+        // no overflow checks.
+        const lhs = try self.resolveInst(bin_op.lhs);
+        const rhs = try self.resolveInst(bin_op.rhs);
+        const lhs_ty = self.typeOf(bin_op.lhs);
+        const rhs_ty = self.typeOf(bin_op.rhs);
+
+        break :result try self.binOp(.sub, inst, lhs, rhs, lhs_ty, rhs_ty);
+    };
     return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
 }
@@ -3441,3 +3499,7 @@ fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type {
     const mod = self.bin_file.comp.module.?;
     return self.air.typeOfIndex(inst, &mod.intern_pool);
 }
+
+fn hasFeature(self: *Self, feature: Target.riscv.Feature) bool {
+    return Target.riscv.featureSetHas(self.target.cpu.features, feature);
+}
diff --git a/src/arch/riscv64/Emit.zig b/src/arch/riscv64/Emit.zig
index a07a0fb03e..0c1e5d643b 100644
--- a/src/arch/riscv64/Emit.zig
+++ b/src/arch/riscv64/Emit.zig
@@ -64,11 +64,13 @@ pub fn emitMir(
         .cmp_lt => try emit.mirRType(inst),
         .cmp_imm_gte => try emit.mirRType(inst),
         .cmp_imm_eq => try emit.mirIType(inst),
+        .cmp_imm_lte => try emit.mirIType(inst),

         .beq => try emit.mirBType(inst),
         .bne => try emit.mirBType(inst),

         .addi => try emit.mirIType(inst),
+        .addiw => try emit.mirIType(inst),
         .andi => try emit.mirIType(inst),
         .jalr => try emit.mirIType(inst),
         .abs => try emit.mirIType(inst),
@@ -259,6 +261,7 @@ fn mirIType(emit: *Emit, inst: Mir.Inst.Index) !void {
     switch (tag) {
         .addi => try emit.writeInstruction(Instruction.addi(rd, rs1, imm12)),
+        .addiw => try emit.writeInstruction(Instruction.addiw(rd, rs1, imm12)),
         .jalr => try emit.writeInstruction(Instruction.jalr(rd, imm12, rs1)),
         .andi => try emit.writeInstruction(Instruction.andi(rd, rs1, imm12)),
@@ -288,6 +291,11 @@ fn mirIType(emit: *Emit, inst: Mir.Inst.Index) !void {
             try emit.writeInstruction(Instruction.xori(rd, rs1, imm12));
             try emit.writeInstruction(Instruction.sltiu(rd, rd, 1));
         },
+
+        .cmp_imm_lte => {
+            try emit.writeInstruction(Instruction.sltiu(rd, rs1, @bitCast(imm12)));
+        },
+
         else => unreachable,
     }
 }
diff --git a/src/arch/riscv64/Mir.zig b/src/arch/riscv64/Mir.zig
index 46c55def3f..192d3a8eac 100644
--- a/src/arch/riscv64/Mir.zig
+++ b/src/arch/riscv64/Mir.zig
@@ -25,6 +25,7 @@ pub const Inst = struct {
     pub const Tag
= enum(u16) {
         addi,
+        addiw,
         jalr,
         lui,
         mv,
@@ -83,6 +84,8 @@ pub const Inst = struct {
         /// Immediate `==`, uses i_type
         cmp_imm_eq,
+        /// Immediate `<=`, uses i_type
+        cmp_imm_lte,

         /// Branch if equal, Uses b_type
         beq,

From cbf62bd6dc1f020df1177b3c6bcf11ed945ac83b Mon Sep 17 00:00:00 2001
From: David Rubin
Date: Mon, 25 Mar 2024 15:58:39 -0700
Subject: [PATCH 25/44] riscv: switch `default_panic` to use the message
---
 lib/std/builtin.zig | 12 +++---------
 1 file changed, 3 insertions(+), 9 deletions(-)

diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig
index 1d26c3e0c1..15d405eed9 100644
--- a/lib/std/builtin.zig
+++ b/lib/std/builtin.zig
@@ -779,17 +779,11 @@ pub fn default_panic(msg: []const u8, error_return_trace: ?*StackTrace, ret_addr
             :
             : [number] "{a7}" (64),
               [arg1] "{a0}" (1),
-              [arg2] "{a1}" (@intFromPtr("panicking!\n")),
-              [arg3] "{a2}" ("panicking!\n".len),
+              [arg2] "{a1}" (@intFromPtr(msg.ptr)),
+              [arg3] "{a2}" (msg.len),
             : "memory"
         );
-        asm volatile ("ecall"
-            :
-            : [number] "{a7}" (94),
-              [arg1] "{a0}" (127),
-            : "memory"
-        );
-        unreachable;
+        std.posix.exit(127);
     }

     switch (builtin.os.tag) {

From 350ad90ceec37cb3f152b666377f7f619981a60e Mon Sep 17 00:00:00 2001
From: David Rubin
Date: Thu, 28 Mar 2024 15:59:28 -0700
Subject: [PATCH 26/44] riscv: totally rewrite how we do loads and stores

this commit is a little too large to document fully; the main gist of it is this:

- finish the `genInlineMemcpy` implementation
- rename `setValue` to `genCopy` as I agree with jacob that it's a better name
- add in `genVarDbgInfo` for a better gdb experience
- follow x86_64's method for genCall, as the procedure is very similar for us
- add `airSliceLen` as it's trivial
- change up the `airAddWithOverflow` implementation a bit
- make sure to not spill if the elem_ty is 0 size
- correctly follow the RISC-V calling convention and spill the used
  callee-saved registers in the prologue and restore them in the epilogue
- add `address`, `deref`, and `offset` helper functions for MCValue. I must
  say I love these, they make the code very readable and far less verbose :)
- fix a `register_manager.zig` issue where, when using the last register in
  the set, the value would overflow at comptime. this was happening because
  we were adding to `max_id` before subtracting from it.
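
to make the new MCValue helpers concrete, here is a rough composition sketch
(illustrative values, not code from this commit): taking the address of a
stack slot yields a pointer-flavored value, offsetting it moves the pointer,
and dereferencing lands back on a stack-relative value:

    const base: MCValue = .{ .stack_offset = 16 };
    const ptr = base.address(); // .{ .ptr_stack_offset = 16 }
    const field_ptr = ptr.offset(8); // .{ .ptr_stack_offset = 24 }
    const field = field_ptr.deref(); // .{ .stack_offset = 24 }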
---
 lib/std/builtin.zig          |  16 +-
 src/arch/riscv64/CodeGen.zig | 854 +++++++++++++++++++++++++----------
 src/arch/riscv64/Emit.zig    | 100 ++--
 src/arch/riscv64/Mir.zig     |  65 ++-
 src/arch/riscv64/abi.zig     |  18 +-
 src/arch/riscv64/bits.zig    |   4 +-
 src/register_manager.zig     |   2 +-
 7 files changed, 748 insertions(+), 311 deletions(-)

diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig
index 15d405eed9..8c57083312 100644
--- a/lib/std/builtin.zig
+++ b/lib/std/builtin.zig
@@ -775,14 +775,14 @@ pub fn default_panic(msg: []const u8, error_return_trace: ?*StackTrace, ret_addr
     }

     if (builtin.zig_backend == .stage2_riscv64) {
-        asm volatile ("ecall"
-            :
-            : [number] "{a7}" (64),
-              [arg1] "{a0}" (1),
-              [arg2] "{a1}" (@intFromPtr(msg.ptr)),
-              [arg3] "{a2}" (msg.len),
-            : "memory"
-        );
+        // asm volatile ("ecall"
+        //     :
+        //     : [number] "{a7}" (64),
+        //     [arg1] "{a0}" (1),
+        //     [arg2] "{a1}" (@intFromPtr(msg.ptr)),
+        //     [arg3] "{a2}" (msg.len),
+        //     : "memory"
+        // );
         std.posix.exit(127);
     }

diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index eebb1c6cb0..3a7ae9dbfb 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -38,9 +38,16 @@ const callee_preserved_regs = abi.callee_preserved_regs;
 const gp = abi.RegisterClass.gp;
 /// Function Args
 const fa = abi.RegisterClass.fa;
+/// Temporary Use
+const tp = abi.RegisterClass.tp;

 const InnerError = CodeGenError || error{OutOfRegisters};

+const RegisterView = enum(u1) {
+    caller,
+    callee,
+};
+
 gpa: Allocator,
 air: Air,
 liveness: Liveness,
@@ -82,8 +89,8 @@ branch_stack: *std.ArrayList(Branch),

 // Key is the block instruction
 blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .{},
-
 register_manager: RegisterManager = .{},
+
 /// Maps offset to what is stored there.
 stack: std.AutoHashMapUnmanaged(u32, StackAllocation) = .{},

@@ -99,6 +106,7 @@ air_bookkeeping: @TypeOf(air_bookkeeping_init) = air_bookkeeping_init,
 const air_bookkeeping_init = if (std.debug.runtime_safety) @as(usize, 0) else {};

 const SymbolOffset = struct { sym: u32, off: i32 = 0 };
+const RegisterOffset = struct { reg: Register, off: i32 = 0 };

 const MCValue = union(enum) {
     /// No runtime bits. `void` types, empty structs, u0, enums with 1 tag, etc.
     /// TODO Look into deleting this tag and using `dead` instead, since every use
@@ -119,6 +127,8 @@ const MCValue = union(enum) {
     load_symbol: SymbolOffset,
     /// The value is in a target-specific register.
     register: Register,
+    /// The value is split across two registers.
+    register_pair: [2]Register,
     /// The value is in memory at a hard-coded address.
     /// If the type is a pointer, it means the pointer address is at this memory location.
     memory: u64,
@@ -127,10 +137,15 @@ const MCValue = union(enum) {
     stack_offset: u32,
     /// The value is a pointer to one of the stack variables (payload is stack offset).
     ptr_stack_offset: u32,
+    air_ref: Air.Inst.Ref,
+    /// The value is in memory at a constant offset from the address in a register.
+    indirect: RegisterOffset,
+    /// The value is a constant offset from the value in a register.
+    register_offset: RegisterOffset,

     fn isMemory(mcv: MCValue) bool {
         return switch (mcv) {
-            .memory, .stack_offset => true,
+            .memory, .indirect, .stack_offset => true,
             else => false,
         };
     }
@@ -151,15 +166,85 @@ const MCValue = union(enum) {
             .immediate,
             .memory,
             .ptr_stack_offset,
+            .indirect,
             .undef,
             .load_symbol,
+            .air_ref,
             => false,

             .register,
+            .register_pair,
+            .register_offset,
             .stack_offset,
             => true,
         };
     }
+
+    fn address(mcv: MCValue) MCValue {
+        return switch (mcv) {
+            .none,
+            .unreach,
+            .dead,
+            .immediate,
+            .ptr_stack_offset,
+            .register_offset,
+            .undef,
+            .air_ref,
+            => unreachable, // not in memory
+
+            .memory => |addr| .{ .immediate = addr },
+            .stack_offset => |off| .{ .ptr_stack_offset = off },
+            .indirect => |reg_off| switch (reg_off.off) {
+                0 => .{ .register = reg_off.reg },
+                else => .{ .register_offset = reg_off },
+            },
+        };
+    }
+
+    fn deref(mcv: MCValue) MCValue {
+        return switch (mcv) {
+            .none,
+            .unreach,
+            .dead,
+            .memory,
+            .indirect,
+            .undef,
+            .air_ref,
+            .stack_offset,
+            .register_pair,
+            .load_symbol,
+            => unreachable, // not a pointer
+
+            .immediate => |addr| .{ .memory = addr },
+            .ptr_stack_offset => |off| .{ .stack_offset = off },
+            .register => |reg| .{ .indirect = .{ .reg = reg } },
+            .register_offset => |reg_off| .{ .indirect = reg_off },
+        };
+    }
+
+    fn offset(mcv: MCValue, off: i32) MCValue {
+        return switch (mcv) {
+            .none,
+            .unreach,
+            .dead,
+            .undef,
+            .air_ref,
+            => unreachable, // not valid
+            .register_pair,
+            .memory,
+            .indirect,
+            .stack_offset,
+            .load_symbol,
+            => switch (off) {
+                0 => mcv,
+                else => unreachable, // not offsettable
+            },
+            .immediate => |imm| .{ .immediate = @bitCast(@as(i64, @bitCast(imm)) +% off) },
+            .register => |reg| .{ .register_offset = .{ .reg = reg, .off = off } },
+            .register_offset => |reg_off| .{ .register_offset = .{ .reg = reg_off.reg, .off = reg_off.off + off } },
+            .ptr_stack_offset => |stack_off| .{ .ptr_stack_offset = @intCast(@as(i64, @intCast(stack_off)) +% off) },
+        };
+    }
 };

 const Branch = struct {
@@ -211,6 +296,11 @@ const BigTomb = struct {

 const Self = @This();

+const CallView = enum(u1) {
+    callee,
+    caller,
+};
+
 pub fn generate(
     lf: *link.File,
     src_loc: Module.SrcLoc,
@@ -261,7 +351,7 @@ pub fn generate(
     defer function.blocks.deinit(gpa);
     defer function.exitlude_jump_relocs.deinit(gpa);

-    var call_info = function.resolveCallingConventionValues(fn_type) catch |err| switch (err) {
+    var call_info = function.resolveCallingConventionValues(fn_type, .callee) catch |err| switch (err) {
         error.CodegenFail => return Result{ .fail = function.err_msg.? },
         error.OutOfRegisters => return Result{
             .fail = try ErrorMsg.create(gpa, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
@@ -284,6 +374,14 @@ pub fn generate(
         else => |e| return e,
     };

+    // Create list of registers to save in the prologue.
+    var save_reg_list = Mir.RegisterList{};
+    for (callee_preserved_regs) |reg| {
+        if (function.register_manager.isRegAllocated(reg)) {
+            save_reg_list.push(&callee_preserved_regs, reg);
+        }
+    }
+
     var mir = Mir{
         .instructions = function.mir_instructions.toOwnedSlice(),
         .extra = try function.mir_extra.toOwnedSlice(gpa),
@@ -300,8 +398,10 @@ pub fn generate(
         .prev_di_pc = 0,
         .prev_di_line = func.lbrace_line,
         .prev_di_column = func.lbrace_column,
-        .stack_size = @max(32, function.max_end_stack),
         .code_offset_mapping = .{},
+        // need to decrease the sp by at least 8
+        .stack_size = @max(8, mem.alignForward(u32, function.max_end_stack, 16)),
+        .save_reg_list = save_reg_list,
     };
     defer emit.deinit();
@@ -629,6 +729,10 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
     }
 }

+fn feed(self: *Self, bt: *Liveness.BigTomb, operand: Air.Inst.Ref) !void {
+    if (bt.feed()) if (operand.toIndex()) |inst| self.processDeath(inst);
+}
+
 /// Asserts there is already capacity to insert into top branch inst_table.
 fn processDeath(self: *Self, inst: Air.Inst.Index) void {
     // When editing this function, note that the logic must synchronize with `reuseOperand`.
@@ -639,7 +743,7 @@ fn processDeath(self: *Self, inst: Air.Inst.Index) void {
         .register => |reg| {
             self.register_manager.freeReg(reg);
         },
-        else => {}, // TODO process stack allocation death
+        else => {}, // TODO process stack allocation death by freeing it to be reused later
     }
 }

@@ -650,17 +754,11 @@ fn finishAirBookkeeping(self: *Self) void {
     }
 }

-fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Liveness.bpi - 1]Air.Inst.Ref) void {
-    var tomb_bits = self.liveness.getTombBits(inst);
-    for (operands) |op| {
-        const dies = @as(u1, @truncate(tomb_bits)) != 0;
-        tomb_bits >>= 1;
-        if (!dies) continue;
-        const op_index = op.toIndex() orelse continue;
-        self.processDeath(op_index);
-    }
-    const is_used = @as(u1, @truncate(tomb_bits)) == 0;
-    if (is_used) {
+fn finishAirResult(self: *Self, inst: Air.Inst.Index, result: MCValue) void {
+    if (self.liveness.isUnused(inst)) switch (result) {
+        .none, .dead, .unreach => {},
+        else => unreachable, // Why didn't the result die?
+ } else { log.debug("%{d} => {}", .{ inst, result }); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; branch.inst_table.putAssumeCapacityNoClobber(inst, result); @@ -682,6 +780,22 @@ fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Live self.finishAirBookkeeping(); } +fn finishAir( + self: *Self, + inst: Air.Inst.Index, + result: MCValue, + operands: [Liveness.bpi - 1]Air.Inst.Ref, +) !void { + var tomb_bits = self.liveness.getTombBits(inst); + for (operands) |op| { + const dies = @as(u1, @truncate(tomb_bits)) != 0; + tomb_bits >>= 1; + if (!dies) continue; + self.processDeath(op.toIndexAllowNone() orelse continue); + } + self.finishAirResult(inst, result); +} + fn ensureProcessDeathCapacity(self: *Self, additional_count: usize) !void { const table = &self.branch_stack.items[self.branch_stack.items.len - 1].inst_table; try table.ensureUnusedCapacity(self.gpa, additional_count); @@ -716,6 +830,7 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue { const mod = self.bin_file.comp.module.?; const elem_ty = self.typeOfIndex(inst); + const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse { return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)}); }; @@ -728,12 +843,12 @@ fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue { const ptr_bytes: u64 = @divExact(ptr_bits, 8); if (abi_size <= ptr_bytes) { if (self.register_manager.tryAllocReg(inst, gp)) |reg| { - return MCValue{ .register = reg }; + return .{ .register = reg }; } } } const stack_offset = try self.allocMem(inst, abi_size, abi_align); - return MCValue{ .stack_offset = stack_offset }; + return .{ .stack_offset = stack_offset }; } /// Allocates a register from the general purpose set and returns the Register and the Lock. @@ -746,6 +861,12 @@ fn allocReg(self: *Self) !struct { Register, RegisterLock } { } pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void { + const mod = self.bin_file.comp.module.?; + const elem_ty = self.typeOfIndex(inst); + + // there isn't anything to spill + if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return; + const stack_mcv = try self.allocRegOrMem(inst, false); log.debug("spilling {d} to stack mcv {any}", .{ inst, stack_mcv }); const reg_mcv = self.getResolvedInstValue(inst); @@ -759,7 +880,7 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void /// allocated. A second call to `copyToTmpRegister` may return the same register. /// This can have a side effect of spilling instructions to the stack to free up a register. 
fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register { - const reg = try self.register_manager.allocReg(null, gp); + const reg = try self.register_manager.allocReg(null, tp); try self.genSetReg(ty, reg, mcv); return reg; } @@ -830,7 +951,7 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { math.divCeil(u32, src_storage_bits, 64) catch unreachable and self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) src_mcv else dst: { const dst_mcv = try self.allocRegOrMem(inst, true); - try self.setValue(min_ty, dst_mcv, src_mcv); + try self.genCopy(min_ty, dst_mcv, src_mcv); break :dst dst_mcv; }; @@ -1261,43 +1382,48 @@ fn airAddWithOverflow(self: *Self, inst: Air.Inst.Index) !void { if (int_info.bits >= 8 and math.isPowerOfTwo(int_info.bits)) { if (int_info.signedness == .unsigned) { - const overflow_offset = tuple_ty.structFieldOffset(1, mod) + offset; + switch (int_info.bits) { + 1...8 => { + const max_val = std.math.pow(u16, 2, int_info.bits) - 1; - const max_val = std.math.pow(u16, 2, int_info.bits) - 1; + const overflow_reg, const overflow_lock = try self.allocReg(); + defer self.register_manager.unlockReg(overflow_lock); - const overflow_reg, const overflow_lock = try self.allocReg(); - defer self.register_manager.unlockReg(overflow_lock); + const add_reg, const add_lock = blk: { + if (add_result_mcv == .register) break :blk .{ add_result_mcv.register, null }; - const add_reg, const add_lock = blk: { - if (add_result_mcv == .register) break :blk .{ add_result_mcv.register, null }; + const add_reg, const add_lock = try self.allocReg(); + try self.genSetReg(lhs_ty, add_reg, add_result_mcv); + break :blk .{ add_reg, add_lock }; + }; + defer if (add_lock) |lock| self.register_manager.unlockReg(lock); - const add_reg, const add_lock = try self.allocReg(); - try self.genSetReg(lhs_ty, add_reg, add_result_mcv); - break :blk .{ add_reg, add_lock }; - }; - defer if (add_lock) |lock| self.register_manager.unlockReg(lock); + _ = try self.addInst(.{ + .tag = .andi, + .data = .{ .i_type = .{ + .rd = overflow_reg, + .rs1 = add_reg, + .imm12 = @intCast(max_val), + } }, + }); - _ = try self.addInst(.{ - .tag = .andi, - .data = .{ .i_type = .{ - .rd = overflow_reg, - .rs1 = add_reg, - .imm12 = @intCast(max_val), - } }, - }); + const overflow_mcv = try self.binOp( + .cmp_neq, + null, + .{ .register = overflow_reg }, + .{ .register = add_reg }, + lhs_ty, + lhs_ty, + ); - const overflow_mcv = try self.binOp( - .cmp_neq, - null, - .{ .register = overflow_reg }, - .{ .register = add_reg }, - lhs_ty, - lhs_ty, - ); + const overflow_offset = tuple_ty.structFieldOffset(1, mod) + offset; + try self.genSetStack(Type.u1, @intCast(overflow_offset), overflow_mcv); - try self.genSetStack(Type.u1, @intCast(overflow_offset), overflow_mcv); + break :result result_mcv; + }, - break :result result_mcv; + else => return self.fail("TODO: addWithOverflow check for size {d}", .{int_info.bits}), + } } else { return self.fail("TODO: airAddWithOverFlow calculate carry for signed addition", .{}); } @@ -1367,6 +1493,7 @@ fn airShl(self: *Self, inst: Air.Inst.Index) !void { const rhs = try self.resolveInst(bin_op.rhs); const lhs_ty = self.typeOf(bin_op.lhs); const rhs_ty = self.typeOf(bin_op.rhs); + break :result try self.binOp(.shl, inst, lhs, rhs, lhs_ty, rhs_ty); }; return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); @@ -1506,7 +1633,19 @@ fn slicePtr(self: *Self, mcv: MCValue) !MCValue { fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void { const ty_op = 
self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airSliceLen for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const ptr_bits = 64; + const ptr_bytes = @divExact(ptr_bits, 8); + const mcv = try self.resolveInst(ty_op.operand); + switch (mcv) { + .dead, .unreach, .none => unreachable, + .register => unreachable, // a slice doesn't fit in one register + .stack_offset => |off| { + break :result MCValue{ .stack_offset = off + ptr_bytes }; + }, + else => return self.fail("TODO airSliceLen for {}", .{mcv}), + } + }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } @@ -1598,10 +1737,60 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void { fn airCtz(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airCtz for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const operand = try self.resolveInst(ty_op.operand); + const operand_ty = self.typeOf(ty_op.operand); + + const dest_reg = try self.register_manager.allocReg(inst, gp); + + const source_reg, const source_lock = blk: { + if (operand == .register) break :blk .{ operand.register, null }; + + const source_reg, const source_lock = try self.allocReg(); + try self.genSetReg(operand_ty, source_reg, operand); + break :blk .{ source_reg, source_lock }; + }; + defer if (source_lock) |lock| self.register_manager.unlockReg(lock); + + // TODO: the B extension for RISCV should have the ctz instruction, and we should use it. 
+ + try self.ctz(source_reg, dest_reg, operand_ty); + + break :result .{ .register = dest_reg }; + }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } +fn ctz(self: *Self, src: Register, dst: Register, ty: Type) !void { + const mod = self.bin_file.comp.module.?; + const length = (ty.abiSize(mod) * 8) - 1; + + const count_reg, const count_lock = try self.allocReg(); + defer self.register_manager.unlockReg(count_lock); + + const len_reg, const len_lock = try self.allocReg(); + defer self.register_manager.unlockReg(len_lock); + + try self.genSetReg(Type.usize, count_reg, .{ .immediate = 0 }); + try self.genSetReg(Type.usize, len_reg, .{ .immediate = length }); + + _ = try self.addInst(.{ + .tag = .beq, + .data = .{ + .b_type = .{ + .rs1 = count_reg, + .rs2 = len_reg, + .inst = @intCast(self.mir_instructions.len + 0), + }, + }, + }); + + _ = src; + _ = dst; + + return self.fail("TODO: finish ctz", .{}); +} + fn airPopcount(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airPopcount for {}", .{self.target.cpu.arch}); @@ -1750,12 +1939,12 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { const elem_ty = self.typeOfIndex(inst); const result: MCValue = result: { if (!elem_ty.hasRuntimeBits(mod)) - break :result MCValue.none; + break :result .none; const ptr = try self.resolveInst(ty_op.operand); const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(mod); if (self.liveness.isUnused(inst) and !is_volatile) - break :result MCValue.dead; + break :result .dead; const dst_mcv: MCValue = blk: { if (self.reuseOperand(inst, ty_op.operand, 0, ptr)) { @@ -1771,27 +1960,38 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn load(self: *Self, dst_mcv: MCValue, src_ptr: MCValue, ptr_ty: Type) InnerError!void { +fn load(self: *Self, dst_mcv: MCValue, ptr_mcv: MCValue, ptr_ty: Type) InnerError!void { const mod = self.bin_file.comp.module.?; - const elem_ty = ptr_ty.childType(mod); + const dst_ty = ptr_ty.childType(mod); - switch (src_ptr) { - .none => unreachable, - .undef => unreachable, - .unreach => unreachable, - .dead => unreachable, - .immediate => |imm| try self.setValue(elem_ty, dst_mcv, .{ .memory = imm }), - .ptr_stack_offset => |off| try self.setValue(elem_ty, dst_mcv, .{ .stack_offset = off }), + log.debug("loading {}:{} into {}", .{ ptr_mcv, ptr_ty.fmt(mod), dst_mcv }); - .stack_offset, + switch (ptr_mcv) { + .none, + .undef, + .unreach, + .dead, + .register_pair, + => unreachable, // not a valid pointer + + .immediate, .register, - .memory, - => try self.setValue(elem_ty, dst_mcv, src_ptr), + .register_offset, + .ptr_stack_offset, + => try self.genCopy(dst_ty, dst_mcv, ptr_mcv.deref()), - .load_symbol => { - const reg = try self.copyToTmpRegister(ptr_ty, src_ptr); - try self.load(dst_mcv, .{ .register = reg }, ptr_ty); + .memory, + .indirect, + .load_symbol, + .stack_offset, + => { + const addr_reg = try self.copyToTmpRegister(ptr_ty, ptr_mcv); + const addr_lock = self.register_manager.lockRegAssumeUnused(addr_reg); + defer self.register_manager.unlockReg(addr_lock); + + try self.genCopy(dst_ty, dst_mcv, .{ .indirect = .{ .reg = addr_reg } }); }, + .air_ref => |ptr_ref| try self.load(dst_mcv, try self.resolveInst(ptr_ref), ptr_ty), } } @@ -1817,7 +2017,12 @@ fn store(self: *Self, pointer: MCValue, value: 
MCValue, ptr_ty: Type, value_ty: const mod = self.bin_file.comp.module.?; const value_abi_size = value_ty.abiSize(mod); - log.debug("storing {s}", .{@tagName(pointer)}); + log.debug("storing {}:{} in {}:{}", .{ value, value_ty.fmt(mod), pointer, ptr_ty.fmt(mod) }); + + if (value_ty.isSlice(mod)) { + // cheat a bit by loading in two parts + + } switch (pointer) { .none => unreachable, @@ -1976,7 +2181,6 @@ fn genArgDbgInfo(self: Self, inst: Air.Inst.Index, mcv: MCValue) !void { } fn airArg(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; var arg_index = self.arg_index; // we skip over args that have no bits @@ -1986,21 +2190,10 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { const src_mcv = self.args[arg_index]; - // we want to move every arg onto the stack. - // while it might no tbe the best solution right now, it simplifies - // the spilling of args with multiple arg levels. const dst_mcv = switch (src_mcv) { .register => |src_reg| dst: { - // TODO: get the true type of the arg, and fit the spill to size. - const arg_size = Type.usize.abiSize(mod); - const arg_align = Type.usize.abiAlignment(mod); - const offset = try self.allocMem(inst, @intCast(arg_size), arg_align); - try self.genSetStack(Type.usize, offset, .{ .register = src_reg }); - - // can go on to be reused in next function call - self.register_manager.freeReg(src_reg); - - break :dst .{ .stack_offset = offset }; + try self.register_manager.getReg(src_reg, null); + break :dst src_mcv; }, else => return self.fail("TODO: airArg {s}", .{@tagName(src_mcv)}), }; @@ -2044,87 +2237,122 @@ fn airFence(self: *Self) !void { } fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) !void { - const mod = self.bin_file.comp.module.?; if (modifier == .always_tail) return self.fail("TODO implement tail calls for riscv64", .{}); const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; - const fn_ty = self.typeOf(pl_op.operand); const callee = pl_op.operand; const extra = self.air.extraData(Air.Call, pl_op.payload); - const args: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]); + const arg_refs: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]); - var info = try self.resolveCallingConventionValues(fn_ty); - defer info.deinit(self); + const expected_num_args = 8; + const ExpectedContents = extern struct { + vals: [expected_num_args][@sizeOf(MCValue)]u8 align(@alignOf(MCValue)), + }; + var stack align(@max(@alignOf(ExpectedContents), @alignOf(std.heap.StackFallbackAllocator(0)))) = + std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa); + const allocator = stack.get(); + + const arg_tys = try allocator.alloc(Type, arg_refs.len); + defer allocator.free(arg_tys); + for (arg_tys, arg_refs) |*arg_ty, arg_ref| arg_ty.* = self.typeOf(arg_ref); + + const arg_vals = try allocator.alloc(MCValue, arg_refs.len); + defer allocator.free(arg_vals); + for (arg_vals, arg_refs) |*arg_val, arg_ref| arg_val.* = .{ .air_ref = arg_ref }; + + const call_ret = try self.genCall(.{ .air = callee }, arg_tys, arg_vals); + + var bt = self.liveness.iterateBigTomb(inst); + try self.feed(&bt, pl_op.operand); + for (arg_refs) |arg_ref| try self.feed(&bt, arg_ref); + + const result = if (self.liveness.isUnused(inst)) .unreach else call_ret; + return self.finishAirResult(inst, result); +} + +fn genCall( + self: *Self, + info: 
union(enum) { + air: Air.Inst.Ref, + lib: struct { + return_type: InternPool.Index, + param_types: []const InternPool.Index, + lib: ?[]const u8 = null, + callee: []const u8, + }, + }, + arg_tys: []const Type, + args: []const MCValue, +) !MCValue { + const mod = self.bin_file.comp.module.?; + + const fn_ty = switch (info) { + .air => |callee| fn_info: { + const callee_ty = self.typeOf(callee); + break :fn_info switch (callee_ty.zigTypeTag(mod)) { + .Fn => callee_ty, + .Pointer => callee_ty.childType(mod), + else => unreachable, + }; + }, + .lib => |lib| try mod.funcType(.{ + .param_types = lib.param_types, + .return_type = lib.return_type, + .cc = .C, + }), + }; + + var call_info = try self.resolveCallingConventionValues(fn_ty, .caller); + defer call_info.deinit(self); + + for (call_info.args, 0..) |mc_arg, arg_i| try self.genCopy(arg_tys[arg_i], mc_arg, args[arg_i]); // Due to incremental compilation, how function calls are generated depends // on linking. - if (self.bin_file.cast(link.File.Elf)) |elf_file| { - for (info.args, 0..) |mc_arg, arg_i| { - const arg = args[arg_i]; - const arg_ty = self.typeOf(arg); - const arg_mcv = try self.resolveInst(args[arg_i]); - try self.setValue(arg_ty, mc_arg, arg_mcv); - } - - if (try self.air.value(callee, mod)) |func_value| { - switch (mod.intern_pool.indexToKey(func_value.ip_index)) { + switch (info) { + .air => |callee| if (try self.air.value(callee, mod)) |func_value| { + const func_key = mod.intern_pool.indexToKey(func_value.ip_index); + switch (switch (func_key) { + else => func_key, + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl| mod.intern_pool.indexToKey(mod.declPtr(decl).val.toIntern()), + else => func_key, + }, + }) { .func => |func| { - const sym_index = try elf_file.zigObjectPtr().?.getOrCreateMetadataForDecl(elf_file, func.owner_decl); - const sym = elf_file.symbol(sym_index); - _ = try sym.getOrCreateZigGotEntry(sym_index, elf_file); - const got_addr = sym.zigGotAddress(elf_file); - try self.genSetReg(Type.usize, .ra, .{ .memory = got_addr }); - _ = try self.addInst(.{ - .tag = .jalr, - .data = .{ .i_type = .{ - .rd = .ra, - .rs1 = .ra, - .imm12 = 0, - } }, - }); + if (self.bin_file.cast(link.File.Elf)) |elf_file| { + const sym_index = try elf_file.zigObjectPtr().?.getOrCreateMetadataForDecl(elf_file, func.owner_decl); + const sym = elf_file.symbol(sym_index); + _ = try sym.getOrCreateZigGotEntry(sym_index, elf_file); + const got_addr = sym.zigGotAddress(elf_file); + try self.genSetReg(Type.usize, .ra, .{ .memory = got_addr }); + _ = try self.addInst(.{ + .tag = .jalr, + .data = .{ .i_type = .{ + .rd = .ra, + .rs1 = .ra, + .imm12 = 0, + } }, + }); + } else if (self.bin_file.cast(link.File.Coff)) |_| { + return self.fail("TODO implement calling in COFF for {}", .{self.target.cpu.arch}); + } else if (self.bin_file.cast(link.File.MachO)) |_| { + unreachable; // unsupported architecture for MachO + } else if (self.bin_file.cast(link.File.Plan9)) |_| { + return self.fail("TODO implement call on plan9 for {}", .{self.target.cpu.arch}); + } else unreachable; }, .extern_func => { - return self.fail("TODO implement calling extern functions", .{}); - }, - else => { - return self.fail("TODO implement calling bitcasted functions", .{}); + return self.fail("TODO: extern func calls", .{}); }, + else => return self.fail("TODO implement calling bitcasted functions", .{}), } } else { - return self.fail("TODO implement calling runtime-known function pointer", .{}); - } - } else if (self.bin_file.cast(link.File.Coff)) |_| { - return self.fail("TODO 
implement calling in COFF for {}", .{self.target.cpu.arch}); - } else if (self.bin_file.cast(link.File.MachO)) |_| { - unreachable; // unsupported architecture for MachO - } else if (self.bin_file.cast(link.File.Plan9)) |_| { - return self.fail("TODO implement call on plan9 for {}", .{self.target.cpu.arch}); - } else unreachable; - - const result: MCValue = result: { - switch (info.return_value) { - .register => |reg| { - if (RegisterManager.indexOfReg(&callee_preserved_regs, reg) == null) { - // Save function return value in a callee saved register - break :result try self.copyToNewRegister(inst, info.return_value); - } - }, - else => {}, - } - break :result info.return_value; - }; - - if (args.len <= Liveness.bpi - 2) { - var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1); - buf[0] = callee; - @memcpy(buf[1..][0..args.len], args); - return self.finishAir(inst, result, buf); + return self.fail("TODO: call function pointers", .{}); + }, + .lib => return self.fail("TODO: lib func calls", .{}), } - var bt = try self.iterateBigTomb(inst, 1 + args.len); - bt.feed(callee); - for (args) |arg| { - bt.feed(arg); - } - return bt.finishAir(result); + + return call_info.return_value; } fn airRet(self: *Self, inst: Air.Inst.Index, safety: bool) !void { @@ -2151,7 +2379,7 @@ fn ret(self: *Self, mcv: MCValue) !void { const mod = self.bin_file.comp.module.?; const ret_ty = self.fn_type.fnReturnType(mod); - try self.setValue(ret_ty, self.ret_mcv, mcv); + try self.genCopy(ret_ty, self.ret_mcv, mcv); _ = try self.addInst(.{ .tag = .psuedo_epilogue, @@ -2183,6 +2411,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index) !void { const ty = self.typeOf(bin_op.lhs); const mod = self.bin_file.comp.module.?; assert(ty.eql(self.typeOf(bin_op.rhs), mod)); + if (ty.zigTypeTag(mod) == .ErrorSet) return self.fail("TODO implement cmp for errors", .{}); @@ -2233,11 +2462,54 @@ fn airDbgInlineBlock(self: *Self, inst: Air.Inst.Index) !void { fn airDbgVar(self: *Self, inst: Air.Inst.Index) !void { const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; - const name = self.air.nullTerminatedString(pl_op.payload); const operand = pl_op.operand; - // TODO emit debug info for this variable - _ = name; - return self.finishAir(inst, .dead, .{ operand, .none, .none }); + const ty = self.typeOf(operand); + const mcv = try self.resolveInst(operand); + + const name = self.air.nullTerminatedString(pl_op.payload); + + const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)]; + try self.genVarDbgInfo(tag, ty, mcv, name); + + return self.finishAir(inst, .unreach, .{ operand, .none, .none }); +} + +fn genVarDbgInfo( + self: Self, + tag: Air.Inst.Tag, + ty: Type, + mcv: MCValue, + name: [:0]const u8, +) !void { + const mod = self.bin_file.comp.module.?; + const is_ptr = switch (tag) { + .dbg_var_ptr => true, + .dbg_var_val => false, + else => unreachable, + }; + + switch (self.debug_output) { + .dwarf => |dw| { + const loc: link.File.Dwarf.DeclState.DbgInfoLoc = switch (mcv) { + .register => |reg| .{ .register = reg.dwarfLocOp() }, + .memory => |address| .{ .memory = address }, + .load_symbol => |sym_off| loc: { + assert(sym_off.off == 0); + break :loc .{ .linker_load = .{ .type = .direct, .sym_index = sym_off.sym } }; + }, + .immediate => |x| .{ .immediate = x }, + .undef => .undef, + .none => .none, + else => blk: { + log.debug("TODO generate debug info for {}", .{mcv}); + break :blk .nop; + }, + }; + try dw.genVarDbgInfo(name, ty, mod.funcOwnerDeclIndex(self.func_index), is_ptr, loc); + }, + .plan9 => {}, + 
.none => {}, + } } fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { @@ -2348,7 +2620,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { log.debug("consolidating else_entry {d} {}=>{}", .{ else_key, else_value, canon_mcv }); // TODO make sure the destination stack offset / register does not already have something // going on there. - try self.setValue(self.typeOfIndex(else_key), canon_mcv, else_value); + try self.genCopy(self.typeOfIndex(else_key), canon_mcv, else_value); // TODO track the new register / stack allocation } try parent_branch.inst_table.ensureUnusedCapacity(self.gpa, saved_then_branch.inst_table.count()); @@ -2375,7 +2647,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { log.debug("consolidating then_entry {d} {}=>{}", .{ then_key, parent_mcv, then_value }); // TODO make sure the destination stack offset / register does not already have something // going on there. - try self.setValue(self.typeOfIndex(then_key), parent_mcv, then_value); + try self.genCopy(self.typeOfIndex(then_key), parent_mcv, then_value); // TODO track the new register / stack allocation } @@ -2638,7 +2910,7 @@ fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void { if (block_mcv == .none) { block_data.mcv = operand_mcv; } else { - try self.setValue(self.typeOfIndex(block), block_mcv, operand_mcv); + try self.genCopy(self.typeOfIndex(block), block_mcv, operand_mcv); } } return self.brVoid(block); @@ -2783,29 +3055,45 @@ fn iterateBigTomb(self: *Self, inst: Air.Inst.Index, operand_count: usize) !BigT } /// Sets the value without any modifications to register allocation metadata or stack allocation metadata. -fn setValue(self: *Self, ty: Type, dst_val: MCValue, src_val: MCValue) !void { +fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) !void { // There isn't anything to store - if (dst_val == .none) return; + if (dst_mcv == .none) return; - if (!dst_val.isMutable()) { + if (!dst_mcv.isMutable()) { // panic so we can see the trace - return std.debug.panic("tried to setValue immutable: {s}", .{@tagName(dst_val)}); + return std.debug.panic("tried to genCopy immutable: {s}", .{@tagName(dst_mcv)}); } - switch (dst_val) { - .register => |reg| return self.genSetReg(ty, reg, src_val), - .stack_offset => |off| return self.genSetStack(ty, off, src_val), - .memory => |addr| return self.genSetMem(ty, addr, src_val), - else => return self.fail("TODO: setValue {s}", .{@tagName(dst_val)}), + switch (dst_mcv) { + .register => |reg| return self.genSetReg(ty, reg, src_mcv), + .register_pair => |pair| return self.genSetRegPair(ty, pair, src_mcv), + .register_offset => |dst_reg_off| try self.genSetReg(ty, dst_reg_off.reg, switch (src_mcv) { + .none, + .unreach, + .dead, + .undef, + => unreachable, + .immediate, + .register, + .register_offset, + => src_mcv.offset(-dst_reg_off.off), + else => .{ .register_offset = .{ + .reg = try self.copyToTmpRegister(ty, src_mcv), + .off = -dst_reg_off.off, + } }, + }), + .stack_offset => |off| return self.genSetStack(ty, off, src_mcv), + .memory => |addr| return self.genSetMem(ty, addr, src_mcv), + else => return self.fail("TODO: genCopy {s} with {s}", .{ @tagName(dst_mcv), @tagName(src_mcv) }), } } -/// Sets the value of `src_val` into stack memory at `stack_offset`. -fn genSetStack(self: *Self, ty: Type, stack_offset: u32, src_val: MCValue) InnerError!void { +/// Sets the value of `src_mcv` into stack memory at `stack_offset`. 
+fn genSetStack(self: *Self, ty: Type, stack_offset: u32, src_mcv: MCValue) InnerError!void { const mod = self.bin_file.comp.module.?; const abi_size: u32 = @intCast(ty.abiSize(mod)); - switch (src_val) { + switch (src_mcv) { .none => return, .dead => unreachable, .undef => { @@ -2820,7 +3108,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, src_val: MCValue) Inner const reg, const reg_lock = try self.allocReg(); defer self.register_manager.unlockReg(reg_lock); - try self.genSetReg(ty, reg, src_val); + try self.genSetReg(ty, reg, src_mcv); return self.genSetStack(ty, stack_offset, .{ .register = reg }); }, @@ -2839,24 +3127,24 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, src_val: MCValue) Inner .tag = tag, .data = .{ .i_type = .{ .rd = reg, - .rs1 = .s0, + .rs1 = .sp, .imm12 = math.cast(i12, stack_offset) orelse { return self.fail("TODO: genSetStack bigger stack values", .{}); }, } }, }); }, - else => return self.fail("TODO: genSetStack for size={d}", .{abi_size}), + else => unreachable, // register can hold a max of 8 bytes } }, .stack_offset, .load_symbol => { - switch (src_val) { + switch (src_mcv) { .stack_offset => |off| if (off == stack_offset) return, else => {}, } if (abi_size <= 8) { - const reg = try self.copyToTmpRegister(ty, src_val); + const reg = try self.copyToTmpRegister(ty, src_mcv); return self.genSetStack(ty, stack_offset, .{ .register = reg }); } @@ -2875,7 +3163,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, src_val: MCValue) Inner const count_reg = regs[3]; const tmp_reg = regs[4]; - switch (src_val) { + switch (src_mcv) { .stack_offset => |offset| { try self.genSetReg(ptr_ty, src_reg, .{ .ptr_stack_offset = offset }); }, @@ -2894,14 +3182,14 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, src_val: MCValue) Inner .tag = .load_symbol, .data = .{ .payload = try self.addExtra(Mir.LoadSymbolPayload{ - .register = @intFromEnum(src_reg), + .register = src_reg.id(), .atom_index = atom_index, .sym_index = sym_off.sym, }), }, }); }, - else => return self.fail("TODO: genSetStack unreachable {s}", .{@tagName(src_val)}), + else => return self.fail("TODO: genSetStack unreachable {s}", .{@tagName(src_mcv)}), } try self.genSetReg(ptr_ty, dst_reg, .{ .ptr_stack_offset = stack_offset }); @@ -2910,16 +3198,17 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, src_val: MCValue) Inner // memcpy(src, dst, len) try self.genInlineMemcpy(src_reg, dst_reg, len_reg, count_reg, tmp_reg); }, - else => return self.fail("TODO: genSetStack {s}", .{@tagName(src_val)}), + .air_ref => |ref| try self.genSetStack(ty, stack_offset, try self.resolveInst(ref)), + else => return self.fail("TODO: genSetStack {s}", .{@tagName(src_mcv)}), } } -fn genSetMem(self: *Self, ty: Type, addr: u64, src_val: MCValue) InnerError!void { +fn genSetMem(self: *Self, ty: Type, addr: u64, src_mcv: MCValue) InnerError!void { const mod = self.bin_file.comp.module.?; const abi_size: u32 = @intCast(ty.abiSize(mod)); _ = abi_size; _ = addr; - _ = src_val; + _ = src_mcv; return self.fail("TODO: genSetMem", .{}); } @@ -2932,51 +3221,101 @@ fn genInlineMemcpy( count: Register, tmp: Register, ) !void { - _ = src; - _ = dst; + try self.genSetReg(Type.usize, count, .{ .register = len }); - // store 0 in the count - try self.genSetReg(Type.usize, count, .{ .immediate = 0 }); - - // compare count to length - const compare_inst = try self.addInst(.{ - .tag = .cmp_eq, - .data = .{ .r_type = .{ - .rd = tmp, - .rs1 = count, - .rs2 = len, - } }, - }); - - // end if true - _ = try 
self.addInst(.{ - .tag = .bne, + // lb tmp, 0(src) + const first_inst = try self.addInst(.{ + .tag = .lb, .data = .{ - .b_type = .{ - .inst = @intCast(self.mir_instructions.len + 0), // points after the last inst - .rs1 = .zero, - .rs2 = tmp, + .i_type = .{ + .rd = tmp, + .rs1 = src, + .imm12 = 0, }, }, }); - _ = compare_inst; - return self.fail("TODO: finish genInlineMemcpy", .{}); + // sb tmp, 0(dst) + _ = try self.addInst(.{ + .tag = .sb, + .data = .{ + .i_type = .{ + .rd = tmp, + .rs1 = dst, + .imm12 = 0, + }, + }, + }); + + // dec count by 1 + _ = try self.addInst(.{ + .tag = .addi, + .data = .{ + .i_type = .{ + .rd = count, + .rs1 = count, + .imm12 = -1, + }, + }, + }); + + // branch if count is 0 + _ = try self.addInst(.{ + .tag = .beq, + .data = .{ + .b_type = .{ + .inst = @intCast(self.mir_instructions.len + 4), // points after the last inst + .rs1 = count, + .rs2 = .zero, + }, + }, + }); + + // increment the pointers + _ = try self.addInst(.{ + .tag = .addi, + .data = .{ + .i_type = .{ + .rd = src, + .rs1 = src, + .imm12 = 1, + }, + }, + }); + + _ = try self.addInst(.{ + .tag = .addi, + .data = .{ + .i_type = .{ + .rd = dst, + .rs1 = dst, + .imm12 = 1, + }, + }, + }); + + // jump back to start of loop + _ = try self.addInst(.{ + .tag = .j, + .data = .{ + .inst = first_inst, + }, + }); } -/// Sets the value of `src_val` into `reg`. Assumes you have a lock on it. -fn genSetReg(self: *Self, ty: Type, reg: Register, src_val: MCValue) InnerError!void { +/// Sets the value of `src_mcv` into `reg`. Assumes you have a lock on it. +fn genSetReg(self: *Self, ty: Type, reg: Register, src_mcv: MCValue) InnerError!void { const mod = self.bin_file.comp.module.?; const abi_size: u32 = @intCast(ty.abiSize(mod)); - switch (src_val) { + switch (src_mcv) { .dead => unreachable, .ptr_stack_offset => |off| { _ = try self.addInst(.{ .tag = .addi, .data = .{ .i_type = .{ .rd = reg, - .rs1 = .s0, + .rs1 = .sp, .imm12 = math.cast(i12, off) orelse { return self.fail("TODO: bigger stack sizes", .{}); }, @@ -3006,7 +3345,6 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_val: MCValue) InnerError! const carry: i32 = if (lo12 < 0) 1 else 0; const hi20: i20 = @truncate((x >> 12) +% carry); - // TODO: add test case for 32-bit immediate _ = try self.addInst(.{ .tag = .lui, .data = .{ .u_type = .{ @@ -3069,6 +3407,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_val: MCValue) InnerError! } }, }); }, + .register_pair => |pair| try self.genSetReg(ty, reg, .{ .register = pair[0] }), .memory => |addr| { try self.genSetReg(ty, reg, .{ .immediate = addr }); @@ -3080,8 +3419,6 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_val: MCValue) InnerError! .imm12 = 0, } }, }); - - // LOAD imm=[i12 offset = 0], rs1 }, .stack_offset => |off| { const tag: Mir.Inst.Tag = switch (abi_size) { @@ -3096,7 +3433,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_val: MCValue) InnerError! .tag = tag, .data = .{ .i_type = .{ .rd = reg, - .rs1 = .s0, + .rs1 = .sp, .imm12 = math.cast(i12, off) orelse { return self.fail("TODO: genSetReg support larger stack sizes", .{}); }, @@ -3120,13 +3457,55 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_val: MCValue) InnerError! 
.tag = .load_symbol, .data = .{ .payload = try self.addExtra(Mir.LoadSymbolPayload{ - .register = @intFromEnum(reg), + .register = reg.id(), .atom_index = atom_index, .sym_index = sym_off.sym, }), }, }); }, + .air_ref => |ref| try self.genSetReg(ty, reg, try self.resolveInst(ref)), + .indirect => |reg_off| { + const tag: Mir.Inst.Tag = switch (abi_size) { + 1 => .lb, + 2 => .lh, + 4 => .lw, + 8 => .ld, + else => return self.fail("TODO: genSetReg for size {d}", .{abi_size}), + }; + + _ = try self.addInst(.{ + .tag = tag, + .data = .{ + .i_type = .{ + .rd = reg, + .rs1 = reg_off.reg, + .imm12 = @intCast(reg_off.off), + }, + }, + }); + }, + else => return self.fail("TODO: genSetReg {s}", .{@tagName(src_mcv)}), + } +} + +fn genSetRegPair(self: *Self, ty: Type, pair: [2]Register, src_mcv: MCValue) InnerError!void { + const mod = self.bin_file.comp.module.?; + const abi_size: u32 = @intCast(ty.abiSize(mod)); + + assert(abi_size > 8 and abi_size <= 16); // must fit only fit into two registers + + switch (src_mcv) { + .air_ref => |ref| return self.genSetRegPair(ty, pair, try self.resolveInst(ref)), + .load_symbol => |sym_off| { + _ = sym_off; + // return self.fail("TODO: genSetRegPair load_symbol", .{}); + // commented out just for testing. + + // plan here is to load the address into a temporary register and + // copy into the pair. + }, + else => return self.fail("TODO: genSetRegPair {s}", .{@tagName(src_mcv)}), } } @@ -3138,7 +3517,7 @@ fn airIntFromPtr(self: *Self, inst: Air.Inst.Index) !void { const dst_mcv = try self.allocRegOrMem(inst, true); const dst_ty = self.typeOfIndex(inst); - try self.setValue(dst_ty, dst_mcv, src_mcv); + try self.genCopy(dst_ty, dst_mcv, src_mcv); break :result dst_mcv; }; return self.finishAir(inst, result, .{ un_op, .none, .none }); @@ -3157,7 +3536,7 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { defer if (operand_lock) |lock| self.register_manager.unlockReg(lock); const dest = try self.allocRegOrMem(inst, true); - try self.setValue(self.typeOfIndex(inst), dest, operand); + try self.genCopy(self.typeOfIndex(inst), dest, operand); break :result dest; }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); @@ -3385,8 +3764,12 @@ const CallMCValues = struct { }; /// Caller must call `CallMCValues.deinit`. 
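The argument assignment rewritten below follows the usual RV64 integer convention: a value of up to 8 bytes occupies one a-register, a value of 9 to 16 bytes occupies two consecutive a-registers. A minimal sketch of the size rule (the enum and function here are my illustration, not part of the patch):

const ArgClass = enum { one_reg, reg_pair, unsupported };

fn classifyBySize(abi_size: u64) ArgClass {
    return switch (abi_size) {
        1...8 => .one_reg, // one of a0...a7
        9...16 => .reg_pair, // two consecutive a-registers
        else => .unsupported, // still a TODO in this patch
    };
}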
-fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { +fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: CallView) !CallMCValues { const mod = self.bin_file.comp.module.?; + const ip = &mod.intern_pool; + + _ = role; + const fn_info = mod.typeToFunc(fn_ty).?; const cc = fn_info.cc; var result: CallMCValues = .{ @@ -3413,26 +3796,31 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { return self.fail("TODO: support more than 8 function args", .{}); } - const locks = try self.gpa.alloc(RegisterLock, result.args.len); - defer self.gpa.free(locks); + var fa_reg_i: u32 = 0; - for (0..result.args.len) |i| { - const arg_reg = try self.register_manager.allocReg(null, fa); - const lock = self.register_manager.lockRegAssumeUnused(arg_reg); - locks[i] = lock; - result.args[i] = .{ .register = arg_reg }; + // spill the needed argument registers + for (fn_info.param_types.get(ip), result.args) |ty, *result_arg| { + const param_ty = Type.fromInterned(ty); + const param_size = param_ty.abiSize(mod); + + switch (param_size) { + 1...8 => { + const arg_reg: Register = abi.function_arg_regs[fa_reg_i]; + fa_reg_i += 1; + try self.register_manager.getReg(arg_reg, null); + result_arg.* = .{ .register = arg_reg }; + }, + 9...16 => { + const arg_regs: [2]Register = abi.function_arg_regs[fa_reg_i..][0..2].*; + fa_reg_i += 2; + for (arg_regs) |reg| try self.register_manager.getReg(reg, null); + result_arg.* = .{ .register_pair = arg_regs }; + }, + else => return self.fail("TODO: support args of size {}", .{param_size}), + } } - // we can just free the locks now, as this should be the only place where the fa - // arg set is used. - for (locks) |lock| { - self.register_manager.unlockReg(lock); - } - - // stack_offset = num s registers spilled + local var space - // TODO: spill used s registers here - - result.stack_byte_count = 0; + result.stack_byte_count = self.max_end_stack; result.stack_align = .@"16"; }, else => return self.fail("TODO implement function parameters for {} on riscv64", .{cc}), diff --git a/src/arch/riscv64/Emit.zig b/src/arch/riscv64/Emit.zig index 0c1e5d643b..6a0b5a0559 100644 --- a/src/arch/riscv64/Emit.zig +++ b/src/arch/riscv64/Emit.zig @@ -1,19 +1,6 @@ //! This file contains the functionality for lowering RISCV64 MIR into //! machine code -const Emit = @This(); -const std = @import("std"); -const math = std.math; -const Mir = @import("Mir.zig"); -const bits = @import("bits.zig"); -const link = @import("../../link.zig"); -const Module = @import("../../Module.zig"); -const ErrorMsg = Module.ErrorMsg; -const assert = std.debug.assert; -const Instruction = bits.Instruction; -const Register = bits.Register; -const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput; - mir: Mir, bin_file: *link.File, debug_output: DebugInfoOutput, @@ -22,12 +9,17 @@ err_msg: ?*ErrorMsg = null, src_loc: Module.SrcLoc, code: *std.ArrayList(u8), +/// List of registers to save in the prologue. +save_reg_list: Mir.RegisterList, + prev_di_line: u32, prev_di_column: u32, /// Relative to the beginning of `code`. prev_di_pc: usize, + /// Function's stack size. Used for backpatching. 
stack_size: u32, + /// For backward branches: stores the code offset of the target /// instruction /// @@ -212,7 +204,7 @@ fn mirRType(emit: *Emit, inst: Mir.Inst.Index) !void { // rs1 != rs2 try emit.writeInstruction(Instruction.xor(rd, rs1, rs2)); - try emit.writeInstruction(Instruction.sltu(rd, .x0, rd)); // snez + try emit.writeInstruction(Instruction.sltu(rd, .zero, rd)); // snez }, .cmp_lt => { // rd = 1 if rs1 < rs2 @@ -368,17 +360,20 @@ fn mirPsuedo(emit: *Emit, inst: Mir.Inst.Index) !void { return emit.fail("TODO: mirPsuedo support larger stack sizes", .{}); }; - // Decrement sp by num s registers + local var space + // Decrement sp by (num s registers * 8) + local var space try emit.writeInstruction(Instruction.addi(.sp, .sp, -stack_size)); // Spill ra - try emit.writeInstruction(Instruction.sd(.ra, stack_size - 8, .sp)); + try emit.writeInstruction(Instruction.sd(.ra, 0, .sp)); - // Spill s0 - try emit.writeInstruction(Instruction.sd(.s0, stack_size - 16, .sp)); - - // Setup s0 - try emit.writeInstruction(Instruction.addi(.s0, .sp, stack_size)); + // Spill callee saved registers. + var s_reg_iter = emit.save_reg_list.iterator(.{}); + var i: i12 = 8; + while (s_reg_iter.next()) |reg_i| { + const reg = abi.callee_preserved_regs[reg_i]; + try emit.writeInstruction(Instruction.sd(reg, i, .sp)); + i += 8; + } }, .psuedo_epilogue => { const stack_size: i12 = math.cast(i12, emit.stack_size) orelse { @@ -386,10 +381,16 @@ fn mirPsuedo(emit: *Emit, inst: Mir.Inst.Index) !void { }; // Restore ra - try emit.writeInstruction(Instruction.ld(.ra, stack_size - 8, .sp)); + try emit.writeInstruction(Instruction.ld(.ra, 0, .sp)); - // Restore s0 - try emit.writeInstruction(Instruction.ld(.s0, stack_size - 16, .sp)); + // Restore spilled callee saved registers + var s_reg_iter = emit.save_reg_list.iterator(.{}); + var i: i12 = 8; + while (s_reg_iter.next()) |reg_i| { + const reg = abi.callee_preserved_regs[reg_i]; + try emit.writeInstruction(Instruction.ld(reg, i, .sp)); + i += 8; + } // Increment sp back to previous value try emit.writeInstruction(Instruction.addi(.sp, .sp, stack_size)); @@ -408,8 +409,11 @@ fn mirRR(emit: *Emit, inst: Mir.Inst.Index) !void { const tag = emit.mir.instructions.items(.tag)[inst]; const rr = emit.mir.instructions.items(.data)[inst].rr; + const rd = rr.rd; + const rs = rr.rs; + switch (tag) { - .mv => try emit.writeInstruction(Instruction.addi(rr.rd, rr.rs, 0)), + .mv => try emit.writeInstruction(Instruction.addi(rd, rs, 0)), else => unreachable, } } @@ -435,7 +439,6 @@ fn mirNop(emit: *Emit, inst: Mir.Inst.Index) !void { } fn mirLoadSymbol(emit: *Emit, inst: Mir.Inst.Index) !void { - // const tag = emit.mir.instructions.items(.tag)[inst]; const payload = emit.mir.instructions.items(.data)[inst].payload; const data = emit.mir.extraData(Mir.LoadSymbolPayload, payload).data; const reg = @as(Register, @enumFromInt(data.register)); @@ -523,20 +526,19 @@ fn instructionSize(emit: *Emit, inst: Mir.Inst.Index) usize { .dbg_prologue_end, => 0, - .psuedo_prologue, - => 16, - - .psuedo_epilogue, - .abs, - => 12, - .cmp_eq, .cmp_neq, .cmp_imm_eq, .cmp_gte, .load_symbol, + .abs, => 8, + .psuedo_epilogue, .psuedo_prologue => size: { + const count = emit.save_reg_list.count() * 4; + break :size count + 8; + }, + else => 4, }; } @@ -547,25 +549,17 @@ fn lowerMir(emit: *Emit) !void { const mir_tags = emit.mir.instructions.items(.tag); const mir_datas = emit.mir.instructions.items(.data); + const proglogue_size: u32 = @intCast(emit.save_reg_list.size()); + emit.stack_size += 
proglogue_size; + for (mir_tags, 0..) |tag, index| { const inst: u32 = @intCast(index); if (isStore(tag) or isLoad(tag)) { const data = mir_datas[inst].i_type; - // TODO: probably create a psuedo instruction for s0 loads/stores instead of this. - if (data.rs1 == .s0) { + if (data.rs1 == .sp) { const offset = mir_datas[inst].i_type.imm12; - - // sp + 32 (aka s0) - // ra -- previous ra spilled - // s0 -- previous s0 spilled - // --- this is -16(s0) - - // TODO: this "+ 8" is completely arbiratary as the largest possible store - // we don't want to actually use it. instead we need to calculate the difference - // between the first and second stack store and use it instead. - - mir_datas[inst].i_type.imm12 = -(16 + offset + 8); + mir_datas[inst].i_type.imm12 = offset + @as(i12, @intCast(proglogue_size)) + 8; } } @@ -584,3 +578,17 @@ fn lowerMir(emit: *Emit) !void { current_code_offset += emit.instructionSize(inst); } } + +const Emit = @This(); +const std = @import("std"); +const math = std.math; +const Mir = @import("Mir.zig"); +const bits = @import("bits.zig"); +const abi = @import("abi.zig"); +const link = @import("../../link.zig"); +const Module = @import("../../Module.zig"); +const ErrorMsg = Module.ErrorMsg; +const assert = std.debug.assert; +const Instruction = bits.Instruction; +const Register = bits.Register; +const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput; diff --git a/src/arch/riscv64/Mir.zig b/src/arch/riscv64/Mir.zig index 192d3a8eac..6ba0930232 100644 --- a/src/arch/riscv64/Mir.zig +++ b/src/arch/riscv64/Mir.zig @@ -6,14 +6,6 @@ //! The main purpose of MIR is to postpone the assignment of offsets until Isel, //! so that, for example, the smaller encodings of jump instructions can be used. -const Mir = @This(); -const std = @import("std"); -const builtin = @import("builtin"); -const assert = std.debug.assert; - -const bits = @import("bits.zig"); -const Register = bits.Register; - instructions: std.MultiArrayList(Inst).Slice, /// The meaning of this data is determined by `Inst.Tag` value. extra: []const u32, @@ -58,7 +50,7 @@ pub const Inst = struct { /// Jumps. Uses `inst` payload. j, - /// Immediate and, uses i_type payload + /// Immediate AND, uses i_type payload andi, // NOTE: Maybe create a special data for compares that includes the ops @@ -219,15 +211,6 @@ pub const Inst = struct { }, }; - const CompareOp = enum { - eq, - neq, - gt, - gte, - lt, - lte, - }; - // Make sure we don't accidentally make instructions bigger than expected. // Note that in Debug builds, Zig is allowed to insert a secret field for safety checks. // comptime { @@ -268,3 +251,49 @@ pub const LoadSymbolPayload = struct { atom_index: u32, sym_index: u32, }; + +/// Used in conjunction with payload to transfer a list of used registers in a compact manner. +pub const RegisterList = struct { + bitset: BitSet = BitSet.initEmpty(), + + const BitSet = IntegerBitSet(32); + const Self = @This(); + + fn getIndexForReg(registers: []const Register, reg: Register) BitSet.MaskInt { + for (registers, 0..) |cpreg, i| { + if (reg.id() == cpreg.id()) return @intCast(i); + } + unreachable; // register not in input register list! 
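Together with the methods that follow, RegisterList packs the set of callee-saved registers a function actually uses into a single 32-bit mask, which is what Emit walks when it lays out spill slots above the saved ra. A usage sketch, assuming the push/iterator API defined in this struct:

var saved: Mir.RegisterList = .{};
saved.push(&abi.callee_preserved_regs, .s1);
saved.push(&abi.callee_preserved_regs, .s5);

// Prologue emission iterates the set bits; slot 0 holds the spilled ra.
var it = saved.iterator(.{});
var off: i12 = 8;
while (it.next()) |reg_i| {
    const reg = abi.callee_preserved_regs[reg_i];
    // sd reg, off(sp)
    _ = reg;
    off += 8;
}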
+ } + + pub fn push(self: *Self, registers: []const Register, reg: Register) void { + const index = getIndexForReg(registers, reg); + self.bitset.set(index); + } + + pub fn isSet(self: Self, registers: []const Register, reg: Register) bool { + const index = getIndexForReg(registers, reg); + return self.bitset.isSet(index); + } + + pub fn iterator(self: Self, comptime options: std.bit_set.IteratorOptions) BitSet.Iterator(options) { + return self.bitset.iterator(options); + } + + pub fn count(self: Self) u32 { + return @intCast(self.bitset.count()); + } + + pub fn size(self: Self) u32 { + return @intCast(self.bitset.count() * 8); + } +}; + +const Mir = @This(); +const std = @import("std"); +const builtin = @import("builtin"); +const assert = std.debug.assert; + +const bits = @import("bits.zig"); +const Register = bits.Register; +const IntegerBitSet = std.bit_set.IntegerBitSet; diff --git a/src/arch/riscv64/abi.zig b/src/arch/riscv64/abi.zig index 4d72219d8d..198ff437f8 100644 --- a/src/arch/riscv64/abi.zig +++ b/src/arch/riscv64/abi.zig @@ -92,15 +92,18 @@ pub fn classifyType(ty: Type, mod: *Module) Class { } pub const callee_preserved_regs = [_]Register{ - // NOTE: we use s0 as a psuedo stack pointer, so it's not included. - .s1, .s2, .s3, .s4, .s5, .s6, .s7, .s8, .s9, .s10, .s11, + .s0, .s1, .s2, .s3, .s4, .s5, .s6, .s7, .s8, .s9, .s10, .s11, }; pub const function_arg_regs = [_]Register{ .a0, .a1, .a2, .a3, .a4, .a5, .a6, .a7, }; -const allocatable_registers = callee_preserved_regs ++ function_arg_regs; +pub const temporary_regs = [_]Register{ + .t0, .t1, .t2, .t3, .t4, .t5, .t6, +}; + +const allocatable_registers = callee_preserved_regs ++ function_arg_regs ++ temporary_regs; pub const RegisterManager = RegisterManagerFn(@import("CodeGen.zig"), Register, &allocatable_registers); // Register classes @@ -123,4 +126,13 @@ pub const RegisterClass = struct { }, true); break :blk set; }; + + pub const tp: RegisterBitSet = blk: { + var set = RegisterBitSet.initEmpty(); + set.setRangeValue(.{ + .start = callee_preserved_regs.len + function_arg_regs.len, + .end = callee_preserved_regs.len + function_arg_regs.len + temporary_regs.len, + }, true); + break :blk set; + }; }; diff --git a/src/arch/riscv64/bits.zig b/src/arch/riscv64/bits.zig index f987c7fc74..b7efdef765 100644 --- a/src/arch/riscv64/bits.zig +++ b/src/arch/riscv64/bits.zig @@ -404,14 +404,14 @@ pub const Register = enum(u6) { t3, t4, t5, t6, // caller saved // zig fmt: on - /// Returns the unique 4-bit ID of this register which is used in + /// Returns the unique 5-bit ID of this register which is used in /// the machine code pub fn id(self: Register) u5 { return @as(u5, @truncate(@intFromEnum(self))); } pub fn dwarfLocOp(reg: Register) u8 { - return @as(u8, reg.id()) + DW.OP.reg0; + return @as(u8, reg.id()); } }; diff --git a/src/register_manager.zig b/src/register_manager.zig index d1d773ed10..f2539e0dbe 100644 --- a/src/register_manager.zig +++ b/src/register_manager.zig @@ -102,7 +102,7 @@ pub fn RegisterManager( } const OptionalIndex = std.math.IntFittingRange(0, set.len); - comptime var map = [1]OptionalIndex{set.len} ** (max_id + 1 - min_id); + comptime var map = [1]OptionalIndex{set.len} ** (max_id - min_id + 1); inline for (set, 0..) 
|elem, elem_index| map[comptime elem.id() - min_id] = elem_index;

 const id_index = reg.id() -% min_id;

From 3bf008a3d07b2d9f91964141e1fb33d3fab82390 Mon Sep 17 00:00:00 2001
From: David Rubin
Date: Fri, 29 Mar 2024 06:48:41 -0700
Subject: [PATCH 27/44] riscv: implement slices

---
 lib/std/builtin.zig          |  16 +--
 src/arch/riscv64/CodeGen.zig | 208 +++++++++++++++++++++++------------
 src/arch/riscv64/Emit.zig    |  30 +++--
 src/arch/riscv64/Mir.zig     |   4 +-
 src/arch/riscv64/abi.zig     |  33 +++++-
 src/codegen/llvm.zig         |   2 +
 src/link/Elf.zig             |   2 +
 src/link/Elf/Atom.zig        |  28 ++++-
 8 files changed, 232 insertions(+), 91 deletions(-)

diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig
index 8c57083312..15d405eed9 100644
--- a/lib/std/builtin.zig
+++ b/lib/std/builtin.zig
@@ -775,14 +775,14 @@ pub fn default_panic(msg: []const u8, error_return_trace: ?*StackTrace, ret_addr
 }
 
 if (builtin.zig_backend == .stage2_riscv64) {
- // asm volatile ("ecall"
- // :
- // : [number] "{a7}" (64),
- // [arg1] "{a0}" (1),
- // [arg2] "{a1}" (@intFromPtr(msg.ptr)),
- // [arg3] "{a2}" (msg.len),
- // : "rcx", "r11", "memory"
- // );
+ asm volatile ("ecall"
+ :
+ : [number] "{a7}" (64),
+ [arg1] "{a0}" (1),
+ [arg2] "{a1}" (@intFromPtr(msg.ptr)),
+ [arg3] "{a2}" (msg.len),
+ : "memory"
+ );
 std.posix.exit(127);
 }
 
diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index 3a7ae9dbfb..cc5a731c28 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -122,9 +122,10 @@ const MCValue = union(enum) {
 /// A pointer-sized integer that fits in a register.
 /// If the type is a pointer, this is the pointer address in virtual address space.
 immediate: u64,
- /// The value is in memory at an address not-yet-allocated by the linker.
- /// This traditionally corresponds to a relocation emitted in a relocatable object file.
+ /// The value doesn't exist in memory yet.
 load_symbol: SymbolOffset,
+ /// The address of the memory location not-yet-allocated by the linker.
+ addr_symbol: SymbolOffset,
 /// The value is in a target-specific register.
 register: Register,
 /// The value is split across two registers
@@ -169,6 +170,7 @@ const MCValue = union(enum) {
 .indirect,
 .undef,
 .load_symbol,
+ .addr_symbol,
 .air_ref,
 => false,
 
@@ -188,10 +190,14 @@ const MCValue = union(enum) {
 .immediate,
 .ptr_stack_offset,
 .register_offset,
+ .register_pair,
+ .register,
 .undef,
 .air_ref,
+ .addr_symbol,
 => unreachable, // not in memory
 
+ .load_symbol => |sym_off| .{ .addr_symbol = sym_off },
 .memory => |addr| .{ .immediate = addr },
 .stack_offset => |off| .{ .ptr_stack_offset = off },
 .indirect => |reg_off| switch (reg_off.off) {
@@ -219,6 +225,7 @@ const MCValue = union(enum) {
 .ptr_stack_offset => |off| .{ .stack_offset = off },
 .register => |reg| .{ .indirect = .{ .reg = reg } },
 .register_offset => |reg_off| .{ .indirect = reg_off },
+ .addr_symbol => |sym_off| .{ .load_symbol = sym_off },
 };
 }
 
@@ -235,6 +242,7 @@ const MCValue = union(enum) {
 .indirect,
 .stack_offset,
 .load_symbol,
+ .addr_symbol,
 => switch (off) {
 0 => mcv,
 else => unreachable, // not offsettable
@@ -801,6 +809,43 @@ fn ensureProcessDeathCapacity(self: *Self, additional_count: usize) !void {
 try table.ensureUnusedCapacity(self.gpa, additional_count);
 }
 
+fn splitType(self: *Self, ty: Type) ![2]Type {
+ const mod = self.bin_file.comp.module.?;
+ const classes = mem.sliceTo(&abi.classifySystemV(ty, mod), .none);
+ var parts: [2]Type = undefined;
+ if (classes.len == 2) for (&parts, classes, 0..)

|*part, class, part_i| { + part.* = switch (class) { + .integer => switch (part_i) { + 0 => Type.u64, + 1 => part: { + const elem_size = ty.abiAlignment(mod).minStrict(.@"8").toByteUnitsOptional().?; + const elem_ty = try mod.intType(.unsigned, @intCast(elem_size * 8)); + break :part switch (@divExact(ty.abiSize(mod) - 8, elem_size)) { + 1 => elem_ty, + else => |len| try mod.arrayType(.{ .len = len, .child = elem_ty.toIntern() }), + }; + }, + else => unreachable, + }, + else => break, + }; + } else if (parts[0].abiSize(mod) + parts[1].abiSize(mod) == ty.abiSize(mod)) return parts; + return std.debug.panic("TODO implement splitType for {}", .{ty.fmt(mod)}); +} + +fn symbolIndex(self: *Self) !u32 { + const mod = self.bin_file.comp.module.?; + const decl_index = mod.funcOwnerDeclIndex(self.func_index); + return switch (self.bin_file.tag) { + .elf => blk: { + const elf_file = self.bin_file.cast(link.File.Elf).?; + const atom_index = try elf_file.zigObjectPtr().?.getOrCreateMetadataForDecl(elf_file, decl_index); + break :blk atom_index; + }, + else => return self.fail("TODO genSetReg load_symbol for {s}", .{@tagName(self.bin_file.tag)}), + }; +} + fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: Alignment) !u32 { self.stack_align = self.stack_align.max(abi_align); // TODO find a free slot instead of always appending @@ -1610,40 +1655,41 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { fn airSlicePtr(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { - const mcv = try self.resolveInst(ty_op.operand); - break :result try self.slicePtr(mcv); + const result = result: { + const src_mcv = try self.resolveInst(ty_op.operand); + if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result src_mcv; + + const dst_mcv = try self.allocRegOrMem(inst, true); + const dst_ty = self.typeOfIndex(inst); + try self.genCopy(dst_ty, dst_mcv, src_mcv); + break :result dst_mcv; }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn slicePtr(self: *Self, mcv: MCValue) !MCValue { - switch (mcv) { - .dead, .unreach, .none => unreachable, - .register => unreachable, // a slice doesn't fit in one register - .stack_offset => |off| { - return MCValue{ .stack_offset = off }; - }, - .memory => |addr| { - return MCValue{ .memory = addr }; - }, - else => return self.fail("TODO slicePtr {s}", .{@tagName(mcv)}), - } -} - fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { - const ptr_bits = 64; - const ptr_bytes = @divExact(ptr_bits, 8); - const mcv = try self.resolveInst(ty_op.operand); - switch (mcv) { - .dead, .unreach, .none => unreachable, - .register => unreachable, // a slice doesn't fit in one register + const src_mcv = try self.resolveInst(ty_op.operand); + switch (src_mcv) { .stack_offset => |off| { - break :result MCValue{ .stack_offset = off + ptr_bytes }; + const len_mcv: MCValue = .{ .stack_offset = off + 8 }; + if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result len_mcv; + + const dst_mcv = try self.allocRegOrMem(inst, true); + try self.genCopy(Type.usize, dst_mcv, len_mcv); + break :result dst_mcv; }, - else => return self.fail("TODO airSliceLen for {}", .{mcv}), + .register_pair => |pair| { + const len_mcv: MCValue = 
.{ .register = pair[1] }; + + if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result len_mcv; + + const dst_mcv = try self.allocRegOrMem(inst, true); + try self.genCopy(Type.usize, dst_mcv, len_mcv); + break :result dst_mcv; + }, + else => return self.fail("TODO airSliceLen for {}", .{src_mcv}), } }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); @@ -1978,6 +2024,7 @@ fn load(self: *Self, dst_mcv: MCValue, ptr_mcv: MCValue, ptr_ty: Type) InnerErro .register, .register_offset, .ptr_stack_offset, + .addr_symbol, => try self.genCopy(dst_ty, dst_mcv, ptr_mcv.deref()), .memory, @@ -2019,11 +2066,6 @@ fn store(self: *Self, pointer: MCValue, value: MCValue, ptr_ty: Type, value_ty: log.debug("storing {}:{} in {}:{}", .{ value, value_ty.fmt(mod), pointer, ptr_ty.fmt(mod) }); - if (value_ty.isSlice(mod)) { - // cheat a bit by loading in two parts - - } - switch (pointer) { .none => unreachable, .undef => unreachable, @@ -2192,7 +2234,11 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void { const dst_mcv = switch (src_mcv) { .register => |src_reg| dst: { - try self.register_manager.getReg(src_reg, null); + self.register_manager.getRegAssumeFree(src_reg, null); + break :dst src_mcv; + }, + .register_pair => |pair| dst: { + for (pair) |reg| self.register_manager.getRegAssumeFree(reg, null); break :dst src_mcv; }, else => return self.fail("TODO: airArg {s}", .{@tagName(src_mcv)}), @@ -3056,6 +3102,8 @@ fn iterateBigTomb(self: *Self, inst: Air.Inst.Index, operand_count: usize) !BigT /// Sets the value without any modifications to register allocation metadata or stack allocation metadata. fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) !void { + const mod = self.bin_file.comp.module.?; + // There isn't anything to store if (dst_mcv == .none) return; @@ -3066,7 +3114,6 @@ fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) !void { switch (dst_mcv) { .register => |reg| return self.genSetReg(ty, reg, src_mcv), - .register_pair => |pair| return self.genSetRegPair(ty, pair, src_mcv), .register_offset => |dst_reg_off| try self.genSetReg(ty, dst_reg_off.reg, switch (src_mcv) { .none, .unreach, @@ -3084,7 +3131,47 @@ fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) !void { }), .stack_offset => |off| return self.genSetStack(ty, off, src_mcv), .memory => |addr| return self.genSetMem(ty, addr, src_mcv), - else => return self.fail("TODO: genCopy {s} with {s}", .{ @tagName(dst_mcv), @tagName(src_mcv) }), + .register_pair => |dst_regs| { + const src_info: ?struct { addr_reg: Register, addr_lock: RegisterLock } = switch (src_mcv) { + .register_pair, .memory, .indirect, .stack_offset => null, + .load_symbol => src: { + const src_addr_reg, const src_addr_lock = try self.allocReg(); + errdefer self.register_manager.unlockReg(src_addr_lock); + + try self.genSetReg(Type.usize, src_addr_reg, src_mcv.address()); + break :src .{ .addr_reg = src_addr_reg, .addr_lock = src_addr_lock }; + }, + .air_ref => |src_ref| return self.genCopy( + ty, + dst_mcv, + try self.resolveInst(src_ref), + ), + else => return self.fail("TODO implement genCopy for {s} of {}", .{ + @tagName(src_mcv), ty.fmt(mod), + }), + }; + defer if (src_info) |info| self.register_manager.unlockReg(info.addr_lock); + + switch (ty.zigTypeTag(mod)) { + .Optional => return, + else => {}, + } + + var part_disp: i32 = 0; + for (dst_regs, try self.splitType(ty), 0..) 
|dst_reg, dst_ty, part_i| { + try self.genSetReg(dst_ty, dst_reg, switch (src_mcv) { + .register_pair => |src_regs| .{ .register = src_regs[part_i] }, + .memory, .indirect, .stack_offset => src_mcv.address().offset(part_disp).deref(), + .load_symbol => .{ .indirect = .{ + .reg = src_info.?.addr_reg, + .off = part_disp, + } }, + else => unreachable, + }); + part_disp += @intCast(dst_ty.abiSize(mod)); + } + }, + else => return std.debug.panic("TODO: genCopy {s} with {s}", .{ @tagName(dst_mcv), @tagName(src_mcv) }), } } @@ -3168,14 +3255,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, src_mcv: MCValue) Inner try self.genSetReg(ptr_ty, src_reg, .{ .ptr_stack_offset = offset }); }, .load_symbol => |sym_off| { - const atom_index = atom: { - const decl_index = mod.funcOwnerDeclIndex(self.func_index); - - if (self.bin_file.cast(link.File.Elf)) |elf_file| { - const atom_index = try elf_file.zigObjectPtr().?.getOrCreateMetadataForDecl(elf_file, decl_index); - break :atom atom_index; - } else return self.fail("TODO genSetStack for {s}", .{@tagName(self.bin_file.tag)}); - }; + const atom_index = try self.symbolIndex(); // setup the src pointer _ = try self.addInst(.{ @@ -3443,16 +3523,8 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_mcv: MCValue) InnerError! .load_symbol => |sym_off| { assert(sym_off.off == 0); - const decl_index = mod.funcOwnerDeclIndex(self.func_index); + const atom_index = try self.symbolIndex(); - const atom_index = switch (self.bin_file.tag) { - .elf => blk: { - const elf_file = self.bin_file.cast(link.File.Elf).?; - const atom_index = try elf_file.zigObjectPtr().?.getOrCreateMetadataForDecl(elf_file, decl_index); - break :blk atom_index; - }, - else => return self.fail("TODO genSetReg load_symbol for {s}", .{@tagName(self.bin_file.tag)}), - }; _ = try self.addInst(.{ .tag = .load_symbol, .data = .{ @@ -3485,27 +3557,23 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_mcv: MCValue) InnerError! }, }); }, - else => return self.fail("TODO: genSetReg {s}", .{@tagName(src_mcv)}), - } -} + .addr_symbol => |sym_off| { + assert(sym_off.off == 0); -fn genSetRegPair(self: *Self, ty: Type, pair: [2]Register, src_mcv: MCValue) InnerError!void { - const mod = self.bin_file.comp.module.?; - const abi_size: u32 = @intCast(ty.abiSize(mod)); + const atom_index = try self.symbolIndex(); - assert(abi_size > 8 and abi_size <= 16); // must fit only fit into two registers - - switch (src_mcv) { - .air_ref => |ref| return self.genSetRegPair(ty, pair, try self.resolveInst(ref)), - .load_symbol => |sym_off| { - _ = sym_off; - // return self.fail("TODO: genSetRegPair load_symbol", .{}); - // commented out just for testing. - - // plan here is to load the address into a temporary register and - // copy into the pair. 
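One detail behind the lui/addi (or lui/ld) pairs used for load_symbol and addr_symbol: the low 12-bit immediate is sign-extended when it is applied, so the upper 20 bits have to absorb a carry whenever bit 11 of the target is set. The hi20/lo12 computation in genSetReg's immediate path earlier in this file does this with an explicit carry; the equivalent bias trick, as a self-contained sketch (my illustration, not patch code):

const std = @import("std");

fn splitImm(addr: u32) struct { hi20: u32, lo12: i12 } {
    const lo12: i12 = @bitCast(@as(u12, @truncate(addr)));
    // Adding 0x800 before shifting compensates for the sign extension of lo12.
    return .{ .hi20 = (addr +% 0x800) >> 12, .lo12 = lo12 };
}

test "hi20/lo12 reassembly" {
    const parts = splitImm(0x12345FFF);
    const lo: u32 = @bitCast(@as(i32, parts.lo12)); // sign-extended, as addi sees it
    try std.testing.expectEqual(@as(u32, 0x12345FFF), (parts.hi20 << 12) +% lo);
}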
+ _ = try self.addInst(.{
+ .tag = .load_symbol,
+ .data = .{
+ .payload = try self.addExtra(Mir.LoadSymbolPayload{
+ .register = reg.id(),
+ .atom_index = atom_index,
+ .sym_index = sym_off.sym,
+ }),
+ },
+ });
 },
- else => return self.fail("TODO: genSetRegPair {s}", .{@tagName(src_mcv)}),
+ else => return self.fail("TODO: genSetReg {s}", .{@tagName(src_mcv)}),
 }
 }
 
diff --git a/src/arch/riscv64/Emit.zig b/src/arch/riscv64/Emit.zig
index 6a0b5a0559..8579f33b38 100644
--- a/src/arch/riscv64/Emit.zig
+++ b/src/arch/riscv64/Emit.zig
@@ -398,7 +398,7 @@ fn mirPsuedo(emit: *Emit, inst: Mir.Inst.Index) !void {
 .j => {
 const offset = @as(i64, @intCast(emit.code_offset_mapping.get(data.inst).?)) - @as(i64, @intCast(emit.code.items.len));
- try emit.writeInstruction(Instruction.jal(.s0, @intCast(offset)));
+ try emit.writeInstruction(Instruction.jal(.zero, @intCast(offset)));
 },
 
 else => unreachable,
@@ -443,27 +443,40 @@ fn mirLoadSymbol(emit: *Emit, inst: Mir.Inst.Index) !void {
 const data = emit.mir.extraData(Mir.LoadSymbolPayload, payload).data;
 const reg = @as(Register, @enumFromInt(data.register));
 
- const end_offset = @as(u32, @intCast(emit.code.items.len));
+ const start_offset = @as(u32, @intCast(emit.code.items.len));
 try emit.writeInstruction(Instruction.lui(reg, 0));
- try emit.writeInstruction(Instruction.lw(reg, 0, reg));
 
 switch (emit.bin_file.tag) {
 .elf => {
 const elf_file = emit.bin_file.cast(link.File.Elf).?;
 const atom_ptr = elf_file.symbol(data.atom_index).atom(elf_file).?;
+ const sym_index = elf_file.zigObjectPtr().?.symbol(data.sym_index);
+ const sym = elf_file.symbol(sym_index);
 
- const hi_r_type = @intFromEnum(std.elf.R_RISCV.HI20);
+ var hi_r_type: u32 = @intFromEnum(std.elf.R_RISCV.HI20);
+ var lo_r_type: u32 = @intFromEnum(std.elf.R_RISCV.LO12_I);
+
+ if (sym.flags.needs_zig_got) {
+ _ = try sym.getOrCreateZigGotEntry(sym_index, elf_file);
+
+ hi_r_type = Elf.R_ZIG_GOT_HI20;
+ lo_r_type = Elf.R_ZIG_GOT_LO12;
+
+ // we need to deref once if we are getting from zig_got, as it'll
+ // relocate to the address of the address in the got.
+ try emit.writeInstruction(Instruction.ld(reg, 0, reg));
+ } else {
+ try emit.writeInstruction(Instruction.addi(reg, reg, 0));
+ }
 
 try atom_ptr.addReloc(elf_file, .{
- .r_offset = end_offset,
+ .r_offset = start_offset,
 .r_info = (@as(u64, @intCast(data.sym_index)) << 32) | hi_r_type,
 .r_addend = 0,
 });
 
- const lo_r_type = @intFromEnum(std.elf.R_RISCV.LO12_I);
-
 try atom_ptr.addReloc(elf_file, .{
- .r_offset = end_offset + 4,
+ .r_offset = start_offset + 4,
 .r_info = (@as(u64, @intCast(data.sym_index)) << 32) | lo_r_type,
 .r_addend = 0,
 });
@@ -587,6 +600,7 @@ const bits = @import("bits.zig");
 const abi = @import("abi.zig");
 const link = @import("../../link.zig");
 const Module = @import("../../Module.zig");
+const Elf = @import("../../link/Elf.zig");
 const ErrorMsg = Module.ErrorMsg;
 const assert = std.debug.assert;
 const Instruction = bits.Instruction;
diff --git a/src/arch/riscv64/Mir.zig b/src/arch/riscv64/Mir.zig
index 6ba0930232..1b8c8bb3d3 100644
--- a/src/arch/riscv64/Mir.zig
+++ b/src/arch/riscv64/Mir.zig
@@ -118,7 +118,9 @@ pub const Inst = struct {
 /// function epilogue
 psuedo_epilogue,
 
- // TODO: add description
+ /// Loads the address of a value that hasn't yet been allocated in memory.
+ ///
+ /// Uses the Mir.LoadSymbolPayload payload.
load_symbol, // TODO: add description diff --git a/src/arch/riscv64/abi.zig b/src/arch/riscv64/abi.zig index 198ff437f8..e586d297ae 100644 --- a/src/arch/riscv64/abi.zig +++ b/src/arch/riscv64/abi.zig @@ -5,7 +5,7 @@ const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager; const Type = @import("../../type.zig").Type; const Module = @import("../../Module.zig"); -pub const Class = enum { memory, byval, integer, double_integer, fields }; +pub const Class = enum { memory, byval, integer, double_integer, fields, none }; pub fn classifyType(ty: Type, mod: *Module) Class { const target = mod.getTarget(); @@ -91,6 +91,37 @@ pub fn classifyType(ty: Type, mod: *Module) Class { } } +/// There are a maximum of 8 possible return slots. Returned values are in +/// the beginning of the array; unused slots are filled with .none. +pub fn classifySystemV(ty: Type, mod: *Module) [8]Class { + const memory_class = [_]Class{ + .memory, .none, .none, .none, + .none, .none, .none, .none, + }; + var result = [1]Class{.none} ** 8; + switch (ty.zigTypeTag(mod)) { + .Pointer => switch (ty.ptrSize(mod)) { + .Slice => { + result[0] = .integer; + result[1] = .integer; + return result; + }, + else => { + result[0] = .integer; + return result; + }, + }, + .Optional => { + if (ty.isPtrLikeOptional(mod)) { + result[0] = .integer; + return result; + } + return memory_class; + }, + else => return result, + } +} + pub const callee_preserved_regs = [_]Register{ .s0, .s1, .s2, .s3, .s4, .s5, .s6, .s7, .s8, .s9, .s10, .s11, }; diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 04a5af8bb0..5b42a701ff 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -11132,6 +11132,7 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Bu } return o.builder.structType(.normal, types[0..types_len]); }, + .none => unreachable, } }, // TODO investigate C ABI for other architectures @@ -11389,6 +11390,7 @@ const ParamTypeIterator = struct { it.llvm_index += it.types_len - 1; return .multiple_llvm_types; }, + .none => unreachable, } }, // TODO investigate C ABI for other architectures diff --git a/src/link/Elf.zig b/src/link/Elf.zig index a1e23945ee..8a3192f93e 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -6409,6 +6409,8 @@ const RelaSectionTable = std.AutoArrayHashMapUnmanaged(u32, RelaSection); // TODO: add comptime check we don't clobber any reloc for any ISA pub const R_ZIG_GOT32: u32 = 0xff00; pub const R_ZIG_GOTPCREL: u32 = 0xff01; +pub const R_ZIG_GOT_HI20: u32 = 0xff02; +pub const R_ZIG_GOT_LO12: u32 = 0xff03; fn defaultEntrySymbolName(cpu_arch: std.Target.Cpu.Arch) []const u8 { return switch (cpu_arch) { diff --git a/src/link/Elf/Atom.zig b/src/link/Elf/Atom.zig index 90056cc4c5..239186ffaa 100644 --- a/src/link/Elf/Atom.zig +++ b/src/link/Elf/Atom.zig @@ -2025,7 +2025,15 @@ const riscv = struct { .SUB32, => {}, - else => try atom.reportUnhandledRelocError(rel, elf_file), + else => |x| switch (@intFromEnum(x)) { + Elf.R_ZIG_GOT_HI20, + Elf.R_ZIG_GOT_LO12, + => { + assert(symbol.flags.has_zig_got); + }, + + else => try atom.reportUnhandledRelocError(rel, elf_file), + }, } } @@ -2046,7 +2054,6 @@ const riscv = struct { const P, const A, const S, const GOT, const G, const TP, const DTP, const ZIG_GOT = args; _ = TP; _ = DTP; - _ = ZIG_GOT; switch (r_type) { .NONE => unreachable, @@ -2136,7 +2143,22 @@ const riscv = struct { } }, - else => try atom.reportUnhandledRelocError(rel, elf_file), + else => |x| switch (@intFromEnum(x)) { + // Zig custom relocations + 
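The two custom relocations handled below patch the instruction pair that mirLoadSymbol emits. The difference from the plain HI20/LO12_I case is one extra load, because a zig-got slot stores the symbol's address rather than the symbol itself. Roughly, as an annotation of mine rather than patch code:

// direct symbol:                via zig-got slot:
//   lui  rd, %hi(sym)             lui rd, %hi(slot)
//   addi rd, rd, %lo(sym)         ld  rd, %lo(slot)(rd)
//   => rd = &sym                  => rd = *&slot = &sym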
Elf.R_ZIG_GOT_HI20 => {
+ assert(target.flags.has_zig_got);
+ const disp: u32 = @bitCast(math.cast(i32, G + ZIG_GOT + A) orelse return error.Overflow);
+ riscv_util.writeInstU(code[r_offset..][0..4], disp);
+ },
+
+ Elf.R_ZIG_GOT_LO12 => {
+ assert(target.flags.has_zig_got);
+ const value: u32 = @bitCast(math.cast(i32, G + ZIG_GOT + A) orelse return error.Overflow);
+ riscv_util.writeInstI(code[r_offset..][0..4], value);
+ },
+
+ else => try atom.reportUnhandledRelocError(rel, elf_file),
+ },
 }
 }

From 26ce82d98efce4847307c0b73d1765be7a104256 Mon Sep 17 00:00:00 2001
From: David Rubin
Date: Fri, 29 Mar 2024 06:59:32 -0700
Subject: [PATCH 28/44] riscv: correctly dereference `load_symbol` in genSetReg

---
 src/arch/riscv64/CodeGen.zig | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index cc5a731c28..1ec0d0ab6e 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -3535,6 +3535,27 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_mcv: MCValue) InnerError!
 }),
 },
 });
+
+ const tag: Mir.Inst.Tag = switch (abi_size) {
+ 1 => .lb,
+ 2 => .lh,
+ 4 => .lw,
+ 8 => .ld,
+ else => return self.fail("TODO: genSetReg for size {d}", .{abi_size}),
+ };
+
+ _ = try self.addInst(.{
+ .tag = tag,
+ .data = .{
+ .i_type = .{
+ .rd = reg,
+ .rs1 = reg,
+ .imm12 = 0,
+ },
+ },
+ });
+
+ unreachable;
 },
 .air_ref => |ref| try self.genSetReg(ty, reg, try self.resolveInst(ref)),
 .indirect => |reg_off| {

From ece70e08a09e24ac27f354579ee70446563cc4bf Mon Sep 17 00:00:00 2001
From: David Rubin
Date: Fri, 29 Mar 2024 07:30:04 -0700
Subject: [PATCH 29/44] riscv: pass optionals by register_pair for resolveCallingConventionValues

---
 src/arch/riscv64/CodeGen.zig | 9 +--------
 src/arch/riscv64/abi.zig     | 9 +++------
 2 files changed, 4 insertions(+), 14 deletions(-)

diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index 1ec0d0ab6e..0cd2d2759d 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -830,7 +830,7 @@ fn splitType(self: *Self, ty: Type) ![2]Type {
 else => break,
 };
 } else if (parts[0].abiSize(mod) + parts[1].abiSize(mod) == ty.abiSize(mod)) return parts;
- return std.debug.panic("TODO implement splitType for {}", .{ty.fmt(mod)});
+ return self.fail("TODO implement splitType for {}", .{ty.fmt(mod)});
 }
 
 fn symbolIndex(self: *Self) !u32 {
@@ -3152,11 +3152,6 @@ fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) !void {
 };
 defer if (src_info) |info| self.register_manager.unlockReg(info.addr_lock);
 
- switch (ty.zigTypeTag(mod)) {
- .Optional => return,
- else => {},
- }
-
 var part_disp: i32 = 0;
 for (dst_regs, try self.splitType(ty), 0..) |dst_reg, dst_ty, part_i| {
 try self.genSetReg(dst_ty, dst_reg, switch (src_mcv) {
@@ -3554,8 +3549,6 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_mcv: MCValue) InnerError!
 },
 },
 });
-
- unreachable;
 },
 .air_ref => |ref| try self.genSetReg(ty, reg, try self.resolveInst(ref)),
 .indirect => |reg_off| {
diff --git a/src/arch/riscv64/abi.zig b/src/arch/riscv64/abi.zig
index e586d297ae..90aef62f09 100644
--- a/src/arch/riscv64/abi.zig
+++ b/src/arch/riscv64/abi.zig
@@ -94,10 +94,6 @@ pub fn classifyType(ty: Type, mod: *Module) Class {
 
 /// There are a maximum of 8 possible return slots. Returned values are in
 /// the beginning of the array; unused slots are filled with .none.
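For the cases handled so far, the classifications come out as below (my summary of the switch that follows, reflecting this patch's treatment of optionals):

// []u8 (slice)          -> .{ .integer, .integer, .none, ... }  ptr + len
// *u8 (single pointer)  -> .{ .integer, .none, ... }
// ?*u8 (ptr-like opt)   -> all .none, falls through to the default
// ?u32 (other optional) -> .{ .integer, .integer, .none, ... }  payload + presence flag
// everything else       -> all .none for now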
pub fn classifySystemV(ty: Type, mod: *Module) [8]Class { - const memory_class = [_]Class{ - .memory, .none, .none, .none, - .none, .none, .none, .none, - }; var result = [1]Class{.none} ** 8; switch (ty.zigTypeTag(mod)) { .Pointer => switch (ty.ptrSize(mod)) { @@ -113,10 +109,11 @@ pub fn classifySystemV(ty: Type, mod: *Module) [8]Class { }, .Optional => { if (ty.isPtrLikeOptional(mod)) { - result[0] = .integer; return result; } - return memory_class; + result[0] = .integer; + result[1] = .integer; + return result; }, else => return result, } From 4ce85f930e82c987c0759f528e4876fb45286389 Mon Sep 17 00:00:00 2001 From: David Rubin Date: Fri, 29 Mar 2024 07:51:56 -0700 Subject: [PATCH 30/44] riscv: implement `structFieldPtr` and `retLoad` --- src/arch/riscv64/CodeGen.zig | 57 ++++++++++++++++++++++++------------ 1 file changed, 38 insertions(+), 19 deletions(-) diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 0cd2d2759d..256d2efbfd 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -2114,23 +2114,39 @@ fn store(self: *Self, pointer: MCValue, value: MCValue, ptr_ty: Type, value_ty: fn airStructFieldPtr(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.StructField, ty_pl.payload).data; - const result = try self.structFieldPtr(inst, extra.struct_operand, ty_pl.ty, extra.field_index); + const result = try self.structFieldPtr(inst, extra.struct_operand, extra.field_index); return self.finishAir(inst, result, .{ extra.struct_operand, .none, .none }); } fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result = try self.structFieldPtr(inst, ty_op.operand, ty_op.ty, index); + const result = try self.structFieldPtr(inst, ty_op.operand, index); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, ty: Air.Inst.Ref, index: u32) !MCValue { - _ = inst; - _ = operand; - _ = ty; - _ = index; +fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue { + const mod = self.bin_file.comp.module.?; + const ptr_field_ty = self.typeOfIndex(inst); + const ptr_container_ty = self.typeOf(operand); + const ptr_container_ty_info = ptr_container_ty.ptrInfo(mod); + const container_ty = ptr_container_ty.childType(mod); - return self.fail("TODO: structFieldPtr", .{}); + const field_offset: i32 = if (mod.typeToPackedStruct(container_ty)) |struct_obj| + if (ptr_field_ty.ptrInfo(mod).packed_offset.host_size == 0) + @divExact(mod.structPackedFieldBitOffset(struct_obj, index) + + ptr_container_ty_info.packed_offset.bit_offset, 8) + else + 0 + else + @intCast(container_ty.structFieldOffset(index, mod)); + + const src_mcv = try self.resolveInst(operand); + const dst_mcv = if (switch (src_mcv) { + .immediate, .ptr_stack_offset => true, + .register, .register_offset => self.reuseOperand(inst, operand, 0, src_mcv), + else => false, + }) src_mcv else try self.copyToNewRegister(inst, src_mcv); + return dst_mcv.offset(field_offset); } fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { @@ -2402,6 +2418,8 @@ fn genCall( } fn airRet(self: *Self, inst: Air.Inst.Index, safety: bool) !void { + const mod = self.bin_file.comp.module.?; + if (safety) { // safe } else { @@ -2416,17 +2434,15 @@ fn airRet(self: *Self, inst: 
Air.Inst.Index, safety: bool) !void { .data = .{ .nop = {} }, }); - try self.ret(operand); + const ret_ty = self.fn_type.fnReturnType(mod); + try self.genCopy(ret_ty, self.ret_mcv, operand); + + try self.ret(); return self.finishAir(inst, .dead, .{ un_op, .none, .none }); } -fn ret(self: *Self, mcv: MCValue) !void { - const mod = self.bin_file.comp.module.?; - - const ret_ty = self.fn_type.fnReturnType(mod); - try self.genCopy(ret_ty, self.ret_mcv, mcv); - +fn ret(self: *Self) !void { _ = try self.addInst(.{ .tag = .psuedo_epilogue, .data = .{ .nop = {} }, @@ -2444,9 +2460,13 @@ fn ret(self: *Self, mcv: MCValue) !void { fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const ptr = try self.resolveInst(un_op); - _ = ptr; - return self.fail("TODO implement airRetLoad for {}", .{self.target.cpu.arch}); - //return self.finishAir(inst, .dead, .{ un_op, .none, .none }); + const ptr_ty = self.typeOf(un_op); + + try self.load(self.ret_mcv, ptr, ptr_ty); + + try self.ret(); + + return self.finishAir(inst, .dead, .{ un_op, .none, .none }); } fn airCmp(self: *Self, inst: Air.Inst.Index) !void { @@ -3790,7 +3810,6 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { return MCValue{ .none = {} }; const inst_index = inst.toIndex() orelse return self.genTypedValue((try self.air.value(inst, mod)).?); - return self.getResolvedInstValue(inst_index); } From c0629c3539f1c944636b6cc9cb531113759b089a Mon Sep 17 00:00:00 2001 From: David Rubin Date: Fri, 29 Mar 2024 08:19:52 -0700 Subject: [PATCH 31/44] riscv: implement `airNot` --- src/arch/riscv64/CodeGen.zig | 36 +++++++++++++++++++++++++++++++++++- src/arch/riscv64/Emit.zig | 2 ++ src/arch/riscv64/Mir.zig | 5 ++++- 3 files changed, 41 insertions(+), 2 deletions(-) diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 256d2efbfd..6625b6da7b 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -1036,7 +1036,41 @@ fn airIntFromBool(self: *Self, inst: Air.Inst.Index) !void { fn airNot(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement NOT for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const mod = self.bin_file.comp.module.?; + + const operand = try self.resolveInst(ty_op.operand); + const ty = self.typeOf(ty_op.operand); + + switch (ty.zigTypeTag(mod)) { + .Bool => { + const operand_reg = blk: { + if (operand == .register) break :blk operand.register; + break :blk try self.copyToTmpRegister(ty, operand); + }; + + const dst_reg: Register = + if (self.reuseOperand(inst, ty_op.operand, 0, operand) and operand == .register) + operand.register + else + try self.register_manager.allocReg(inst, gp); + + _ = try self.addInst(.{ + .tag = .not, + .data = .{ + .rr = .{ + .rs = operand_reg, + .rd = dst_reg, + }, + }, + }); + + break :result .{ .register = dst_reg }; + }, + .Int => return self.fail("TODO: airNot ints", .{}), + else => unreachable, + } + }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } diff --git a/src/arch/riscv64/Emit.zig b/src/arch/riscv64/Emit.zig index 8579f33b38..8161e4a99b 100644 --- a/src/arch/riscv64/Emit.zig +++ b/src/arch/riscv64/Emit.zig @@ -83,6 +83,7 @@ pub fn emitMir( .j => try emit.mirPsuedo(inst), .mv => try emit.mirRR(inst), 
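The .not arm added to this dispatch (and lowered in mirRR below) becomes xori rd, rs, 1, which is only a correct NOT for 0/1 boolean values; that is why airNot routes .Int to a TODO. The identities involved, as a quick illustration:

test "xori as not" {
    const std = @import("std");
    var b: u1 = 0;
    b ^= 1; // xori rd, rs, 1 flips a boolean
    try std.testing.expectEqual(@as(u1, 1), b);

    // A full integer bitwise NOT needs an all-ones immediate instead:
    const x: i64 = 5;
    try std.testing.expectEqual(~x, x ^ -1);
}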
+ .not => try emit.mirRR(inst), .nop => try emit.mirNop(inst), .ret => try emit.mirNop(inst), @@ -414,6 +415,7 @@ fn mirRR(emit: *Emit, inst: Mir.Inst.Index) !void { switch (tag) { .mv => try emit.writeInstruction(Instruction.addi(rd, rs, 0)), + .not => try emit.writeInstruction(Instruction.xori(rd, rs, 1)), else => unreachable, } } diff --git a/src/arch/riscv64/Mir.zig b/src/arch/riscv64/Mir.zig index 1b8c8bb3d3..d8c8775a6d 100644 --- a/src/arch/riscv64/Mir.zig +++ b/src/arch/riscv64/Mir.zig @@ -76,7 +76,7 @@ pub const Inst = struct { /// Immediate `==`, uses i_type cmp_imm_eq, - /// Immediate `<=`, uses i_typei + /// Immediate `<=`, uses i_type cmp_imm_lte, /// Branch if equal, Uses b_type @@ -84,6 +84,9 @@ pub const Inst = struct { /// Branch if not equal, Uses b_type bne, + /// Boolean NOT, Uses rr payload + not, + nop, ret, From 8ac239ebcea0eacfd99680d51489371e28266ec3 Mon Sep 17 00:00:00 2001 From: David Rubin Date: Sat, 30 Mar 2024 05:49:16 -0700 Subject: [PATCH 32/44] riscv: add enough components to get a test runner working --- lib/compiler/test_runner.zig | 20 +- lib/std/start.zig | 18 +- src/arch/riscv64/CodeGen.zig | 994 +++++++++++++++++++++++++---------- src/arch/riscv64/Emit.zig | 22 +- src/arch/riscv64/Mir.zig | 6 + src/arch/riscv64/bits.zig | 8 +- 6 files changed, 783 insertions(+), 285 deletions(-) diff --git a/lib/compiler/test_runner.zig b/lib/compiler/test_runner.zig index 08a2e5721b..41dbbf0986 100644 --- a/lib/compiler/test_runner.zig +++ b/lib/compiler/test_runner.zig @@ -12,9 +12,9 @@ var cmdline_buffer: [4096]u8 = undefined; var fba = std.heap.FixedBufferAllocator.init(&cmdline_buffer); pub fn main() void { - if (builtin.zig_backend == .stage2_aarch64 or - builtin.zig_backend == .stage2_riscv64) - { + if (builtin.zig_backend == .stage2_riscv64) return mainExtraSimple() catch @panic("test failure"); + + if (builtin.zig_backend == .stage2_aarch64) { return mainSimple() catch @panic("test failure"); } @@ -249,3 +249,17 @@ pub fn mainSimple() anyerror!void { if (failed != 0) std.process.exit(1); } } + +pub fn mainExtraSimple() !void { + var pass_count: u8 = 0; + + for (builtin.test_functions) |test_fn| { + test_fn.func() catch |err| { + if (err != error.SkipZigTest) { + @panic(test_fn.name); + } + continue; + }; + pass_count += 1; + } +} diff --git a/lib/std/start.zig b/lib/std/start.zig index 68ad3f67ac..0228ffdc2b 100644 --- a/lib/std/start.zig +++ b/lib/std/start.zig @@ -20,7 +20,6 @@ pub const simplified_logic = builtin.zig_backend == .stage2_x86 or builtin.zig_backend == .stage2_aarch64 or builtin.zig_backend == .stage2_arm or - builtin.zig_backend == .stage2_riscv64 or builtin.zig_backend == .stage2_sparc64 or builtin.cpu.arch == .spirv32 or builtin.cpu.arch == .spirv64; @@ -61,6 +60,10 @@ comptime { } else if (@typeInfo(@TypeOf(root.main)).Fn.calling_convention != .C) { @export(main, .{ .name = "main" }); } + } else if (native_arch.isRISCV()) { + if (!@hasDecl(root, "_start")) { + @export(riscv_start, .{ .name = "_start" }); + } } else if (native_os == .windows) { if (!@hasDecl(root, "WinMain") and !@hasDecl(root, "WinMainCRTStartup") and !@hasDecl(root, "wWinMain") and !@hasDecl(root, "wWinMainCRTStartup")) @@ -151,14 +154,6 @@ fn exit2(code: usize) noreturn { : "memory", "cc" ); }, - .riscv64 => { - asm volatile ("ecall" - : - : [number] "{a7}" (94), - [arg1] "{a0}" (code), - : "rcx", "r11", "memory" - ); - }, .sparc64 => { asm volatile ("ta 0x6d" : @@ -212,6 +207,11 @@ fn wasi_start() callconv(.C) void { } } +fn riscv_start() callconv(.C) noreturn { + const 
code = @call(.always_inline, callMain, .{}); + std.process.exit(code); +} + fn EfiMain(handle: uefi.Handle, system_table: *uefi.tables.SystemTable) callconv(.C) usize { uefi.handle = handle; uefi.system_table = system_table; diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 6625b6da7b..bbb672649e 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -167,9 +167,7 @@ const MCValue = union(enum) { .immediate, .memory, .ptr_stack_offset, - .indirect, .undef, - .load_symbol, .addr_symbol, .air_ref, => false, @@ -178,6 +176,8 @@ const MCValue = union(enum) { .register_pair, .register_offset, .stack_offset, + .load_symbol, + .indirect, => true, }; } @@ -265,7 +265,7 @@ const Branch = struct { }; const StackAllocation = struct { - inst: Air.Inst.Index, + inst: ?Air.Inst.Index, /// TODO: make the size inferred from the bits of the inst size: u32, }; @@ -410,6 +410,8 @@ pub fn generate( // need to at least decrease the sp by -8 .stack_size = @max(8, mem.alignForward(u32, function.max_end_stack, 16)), .save_reg_list = save_reg_list, + .output_mode = lf.comp.config.output_mode, + .link_mode = lf.comp.config.link_mode, }; defer emit.deinit(); @@ -633,7 +635,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .mul_add => try self.airMulAdd(inst), .addrspace_cast => return self.fail("TODO: addrspace_cast", .{}), - .@"try" => return self.fail("TODO: try", .{}), + .@"try" => try self.airTry(inst), .try_ptr => return self.fail("TODO: try_ptr", .{}), .dbg_var_ptr, @@ -846,7 +848,7 @@ fn symbolIndex(self: *Self) !u32 { }; } -fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: Alignment) !u32 { +fn allocMem(self: *Self, inst: ?Air.Inst.Index, abi_size: u32, abi_align: Alignment) !u32 { self.stack_align = self.stack_align.max(abi_align); // TODO find a free slot instead of always appending const offset: u32 = @intCast(abi_align.forward(self.next_stack_offset)); @@ -905,6 +907,36 @@ fn allocReg(self: *Self) !struct { Register, RegisterLock } { return .{ reg, lock }; } +fn elemOffset(self: *Self, index_ty: Type, index: MCValue, elem_size: u64) !Register { + log.debug("elemOffset: {}", .{index}); + const reg: Register = blk: { + switch (index) { + .immediate => |imm| { + // Optimisation: if index MCValue is an immediate, we can multiply in `comptime` + // and set the register directly to the scaled offset as an immediate. 
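Both elemOffset paths compute the same quantity, index times element size; the immediate branch just folds the multiply at compile time, while the fallback (continuing below) materializes the index in a register and emits a runtime mul. In plain terms (illustrative only):

fn elemByteOffset(index: u64, elem_size: u64) u64 {
    // immediate index: this product is evaluated by the compiler itself;
    // runtime index: the same product comes from an emitted mul instruction.
    return index * elem_size;
}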
+ const reg = try self.register_manager.allocReg(null, gp);
+ try self.genSetReg(index_ty, reg, .{ .immediate = imm * elem_size });
+ break :blk reg;
+ },
+ else => {
+ const reg = try self.copyToTmpRegister(index_ty, index);
+ const lock = self.register_manager.lockRegAssumeUnused(reg);
+ defer self.register_manager.unlockReg(lock);
+
+ try self.binOpMir(
+ .mul,
+ null,
+ index_ty,
+ .{ .register = reg },
+ .{ .immediate = elem_size },
+ );
+ break :blk reg;
+ },
+ }
+ };
+ return reg;
+}
+
 pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void {
 const mod = self.bin_file.comp.module.?;
 const elem_ty = self.typeOfIndex(inst);
@@ -1104,6 +1136,22 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
 return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
 }
 
+fn supportImmediate(tag: Air.Inst.Tag) bool {
+ return switch (tag) {
+ .add,
+ .sub,
+ .cmp_eq,
+ .cmp_neq,
+ .cmp_gt,
+ .cmp_gte,
+ .cmp_lt,
+ .cmp_lte,
+ => true,
+
+ else => false,
+ };
+}
+
 /// For all your binary operation needs, this function will generate
 /// the corresponding Mir instruction(s). Returns the location of the
 /// result.
@@ -1132,6 +1180,7 @@ fn binOp(
 // Arithmetic operations on integers and floats
 .add,
 .sub,
+ .mul,
 .cmp_eq,
 .cmp_neq,
 .cmp_gt,
@@ -1146,7 +1195,7 @@ fn binOp(
 assert(lhs_ty.eql(rhs_ty, mod));
 const int_info = lhs_ty.intInfo(mod);
 if (int_info.bits <= 64) {
- if (rhs == .immediate) {
+ if (rhs == .immediate and supportImmediate(tag)) {
 return self.binOpImm(tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
 }
 return self.binOpRegister(tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
@@ -1154,9 +1203,10 @@ fn binOp(
 return self.fail("TODO binary operations on int with bits > 64", .{});
 }
 },
- else => unreachable,
+ else => |x| return self.fail("TODO: binOp {s}", .{@tagName(x)}),
 }
 },
+
 .ptr_add,
 .ptr_sub,
 => {
@@ -1178,7 +1228,24 @@ fn binOp(
 
 return try self.binOpRegister(base_tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
 } else {
- return self.fail("TODO ptr_add with elem_size > 1", .{});
+ const offset = try self.binOp(
+ .mul,
+ null,
+ rhs,
+ .{ .immediate = elem_size },
+ Type.usize,
+ Type.usize,
+ );
+
+ const addr = try self.binOp(
+ tag,
+ null,
+ lhs,
+ offset,
+ Type.manyptr_u8,
+ Type.usize,
+ );
+ return addr;
 }
 },
 else => unreachable,
@@ -1206,7 +1273,7 @@ fn binOp(
 else => unreachable,
 }
 },
- else => unreachable,
+ else => return self.fail("TODO binOp {}", .{tag}),
 }
}
 /// Don't call this function directly. Use binOp instead.
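With the ptr_add/ptr_sub change above, pointer arithmetic on wide elements no longer fails: the operation decomposes into a multiply by the element size followed by the add or sub. As plain Zig (a model of the decomposition, not patch code):

fn ptrAddModel(base: usize, index: usize, elem_size: usize) usize {
    if (elem_size == 1) return base + index; // single add, fast path
    return base + index * elem_size; // mul into a temporary, then add
}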
@@ -1252,6 +1319,7 @@ fn binOpRegister( const mir_tag: Mir.Inst.Tag = switch (tag) { .add => .add, .sub => .sub, + .mul => .mul, .cmp_eq => .cmp_eq, .cmp_neq => .cmp_neq, .cmp_gt => .cmp_gt, @@ -1314,7 +1382,9 @@ fn binOpImm( .shr => .srli, .cmp_gte => .cmp_imm_gte, .cmp_eq => .cmp_imm_eq, + .cmp_neq => .cmp_imm_neq, .cmp_lte => .cmp_imm_lte, + .cmp_lt => .cmp_imm_lt, .add => .addi, .sub => .addiw, else => return self.fail("TODO: binOpImm {s}", .{@tagName(tag)}), @@ -1326,7 +1396,9 @@ fn binOpImm( .srli, .addi, .cmp_imm_eq, + .cmp_imm_neq, .cmp_imm_lte, + .cmp_imm_lt, => { _ = try self.addInst(.{ .tag = mir_tag, @@ -1369,6 +1441,40 @@ fn binOpImm( return MCValue{ .register = dest_reg }; } +fn binOpMir( + self: *Self, + mir_tag: Mir.Inst.Tag, + maybe_inst: ?Air.Inst.Index, + ty: Type, + dst_mcv: MCValue, + src_mcv: MCValue, +) !void { + const mod = self.bin_file.comp.module.?; + const abi_size: u32 = @intCast(ty.abiSize(mod)); + + _ = abi_size; + _ = maybe_inst; + + switch (dst_mcv) { + .register => |dst_reg| { + const src_reg = try self.copyToTmpRegister(ty, src_mcv); + + _ = try self.addInst(.{ + .tag = mir_tag, + .data = .{ + .r_type = .{ + .rd = dst_reg, + .rs1 = dst_reg, + .rs2 = src_reg, + }, + }, + }); + }, + + else => return self.fail("TODO: binOpMir {s}", .{@tagName(dst_mcv)}), + } +} + fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; @@ -1520,8 +1626,101 @@ fn airSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { } fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { - _ = inst; - return self.fail("TODO implement airMulWithOverflow for {}", .{self.target.cpu.arch}); + //const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)]; + const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; + const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; + const mod = self.bin_file.comp.module.?; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const lhs = try self.resolveInst(extra.lhs); + const rhs = try self.resolveInst(extra.rhs); + const lhs_ty = self.typeOf(extra.lhs); + const rhs_ty = self.typeOf(extra.rhs); + + switch (lhs_ty.zigTypeTag(mod)) { + else => |x| return self.fail("TODO: airMulWithOverflow {s}", .{@tagName(x)}), + .Int => { + assert(lhs_ty.eql(rhs_ty, mod)); + const int_info = lhs_ty.intInfo(mod); + switch (int_info.bits) { + 1...32 => { + if (self.hasFeature(.m)) { + const dest = try self.binOp(.mul, null, lhs, rhs, lhs_ty, rhs_ty); + + const add_result_lock = self.register_manager.lockRegAssumeUnused(dest.register); + defer self.register_manager.unlockReg(add_result_lock); + + const tuple_ty = self.typeOfIndex(inst); + + // TODO: optimization, set this to true. needs the other struct access stuff to support + // accessing registers. 
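The overflow check that follows works by masking: the product is computed in a full-width register, and any bits above the result type's width mean the value wrapped. The andi/cmp_neq sequence below implements exactly this comparison; here it is as ordinary Zig (the function and names are mine):

const std = @import("std");

fn mulOverflows(comptime T: type, a: u32, b: u32) bool {
    const wide = a * b; // full product, as left in the register
    const masked = wide & std.math.maxInt(T); // andi masked, wide, max_val
    return masked != wide; // cmp_neq masked, wide
}

test {
    try std.testing.expect(!mulOverflows(u8, 15, 17)); // 255 still fits
    try std.testing.expect(mulOverflows(u8, 16, 16)); // 256 wraps to 0
}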
+ const result_mcv = try self.allocRegOrMem(inst, false); + const offset = result_mcv.stack_offset; + + const result_offset = tuple_ty.structFieldOffset(0, mod) + offset; + + try self.genSetStack(lhs_ty, @intCast(result_offset), dest); + + if (int_info.bits >= 8 and math.isPowerOfTwo(int_info.bits)) { + if (int_info.signedness == .unsigned) { + switch (int_info.bits) { + 1...8 => { + const max_val = std.math.pow(u16, 2, int_info.bits) - 1; + + const overflow_reg, const overflow_lock = try self.allocReg(); + defer self.register_manager.unlockReg(overflow_lock); + + const add_reg, const add_lock = blk: { + if (dest == .register) break :blk .{ dest.register, null }; + + const add_reg, const add_lock = try self.allocReg(); + try self.genSetReg(lhs_ty, add_reg, dest); + break :blk .{ add_reg, add_lock }; + }; + defer if (add_lock) |lock| self.register_manager.unlockReg(lock); + + _ = try self.addInst(.{ + .tag = .andi, + .data = .{ .i_type = .{ + .rd = overflow_reg, + .rs1 = add_reg, + .imm12 = @intCast(max_val), + } }, + }); + + const overflow_mcv = try self.binOp( + .cmp_neq, + null, + .{ .register = overflow_reg }, + .{ .register = add_reg }, + lhs_ty, + lhs_ty, + ); + + const overflow_offset = tuple_ty.structFieldOffset(1, mod) + offset; + try self.genSetStack(Type.u1, @intCast(overflow_offset), overflow_mcv); + + break :result result_mcv; + }, + + else => return self.fail("TODO: airMulWithOverflow check for size {d}", .{int_info.bits}), + } + } else { + return self.fail("TODO: airMulWithOverflow calculate overflow for signed multiplication", .{}); + } + } else { + return self.fail("TODO: airMulWithOverflow with < 8 bits or non-pow of 2", .{}); + } + } else { + return self.fail("TODO: emulate mul for targets without M feature", .{}); + } + }, + else => return self.fail("TODO: airMulWithOverflow larger than 32-bit mul", .{}), + } + }, + } + }; + + return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none }); } fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { @@ -1610,16 +1809,98 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement unwrap error union error for {}", .{self.target.cpu.arch}); + const mod = self.bin_file.comp.module.?; + const err_union_ty = self.typeOf(ty_op.operand); + const err_ty = err_union_ty.errorUnionSet(mod); + const payload_ty = err_union_ty.errorUnionPayload(mod); + const operand = try self.resolveInst(ty_op.operand); + + const result: MCValue = result: { + if (err_ty.errorSetIsEmpty(mod)) { + break :result .{ .immediate = 0 }; + } + + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + break :result operand; + } + + const err_off: u32 = @intCast(errUnionErrorOffset(payload_ty, mod)); + + switch (operand) { + .register => |reg| { + const eu_lock = self.register_manager.lockReg(reg); + defer if (eu_lock) |lock| self.register_manager.unlockReg(lock); + + var result = try self.copyToNewRegister(inst, operand); + + if (err_off > 0) { + result = try self.binOp( + .shr, + null, + result, + .{ .immediate = @as(u6, @intCast(err_off * 8)) }, + err_union_ty, + Type.u8, + ); + } + break :result result; + }, + else => return self.fail("TODO implement unwrap_err_err for {}", .{operand}), + } + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } fn airUnwrapErrPayload(self: 
*Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement unwrap error union payload for {}", .{self.target.cpu.arch}); + const operand_ty = self.typeOf(ty_op.operand); + const operand = try self.resolveInst(ty_op.operand); + const result = try self.genUnwrapErrUnionPayloadMir(operand_ty, operand); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } +fn genUnwrapErrUnionPayloadMir( + self: *Self, + err_union_ty: Type, + err_union: MCValue, +) !MCValue { + const mod = self.bin_file.comp.module.?; + + const payload_ty = err_union_ty.errorUnionPayload(mod); + + const result: MCValue = result: { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none; + + const payload_off: u32 = @intCast(errUnionPayloadOffset(payload_ty, mod)); + switch (err_union) { + .stack_offset => |off| break :result .{ .stack_offset = off + payload_off }, + .register => |reg| { + const eu_lock = self.register_manager.lockReg(reg); + defer if (eu_lock) |lock| self.register_manager.unlockReg(lock); + + var result: MCValue = .{ .register = try self.copyToTmpRegister(err_union_ty, err_union) }; + + if (payload_off > 0) { + result = try self.binOp( + .shr, + null, + result, + .{ .immediate = @as(u6, @intCast(payload_off * 8)) }, + err_union_ty, + Type.u8, + ); + } + + break :result result; + }, + else => return self.fail("TODO implement genUnwrapErrUnionPayloadMir for {}", .{err_union}), + } + }; + + return result; +} + // *(E!T) -> E fn airUnwrapErrErrPtr(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; @@ -1682,11 +1963,108 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { /// E to E!T fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.comp.module.?; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement wrap errunion error for {}", .{self.target.cpu.arch}); + + const eu_ty = ty_op.ty.toType(); + const pl_ty = eu_ty.errorUnionPayload(mod); + const err_ty = eu_ty.errorUnionSet(mod); + + const result: MCValue = result: { + if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result try self.resolveInst(ty_op.operand); + + const stack_off = try self.allocMem(null, @intCast(eu_ty.abiSize(mod)), eu_ty.abiAlignment(mod)); + const pl_off: u32 = @intCast(errUnionPayloadOffset(pl_ty, mod)); + const err_off: u32 = @intCast(errUnionErrorOffset(pl_ty, mod)); + try self.genSetStack(pl_ty, stack_off + pl_off, .undef); + const operand = try self.resolveInst(ty_op.operand); + try self.genSetStack(err_ty, stack_off + err_off, operand); + break :result .{ .stack_offset = stack_off }; + }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } +fn airTry(self: *Self, inst: Air.Inst.Index) !void { + const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; + const extra = self.air.extraData(Air.Try, pl_op.payload); + const body: []const Air.Inst.Index = @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]); + const operand_ty = self.typeOf(pl_op.operand); + const result = try self.genTry(inst, pl_op.operand, body, operand_ty, false); + return self.finishAir(inst, result, .{ .none, .none, .none }); +} + +fn genTry( + self: *Self, + inst: Air.Inst.Index, 
+ operand: Air.Inst.Ref, + body: []const Air.Inst.Index, + operand_ty: Type, + operand_is_ptr: bool, +) !MCValue { + const liveness_condbr = self.liveness.getCondBr(inst); + + _ = operand_is_ptr; + + const operand_mcv = try self.resolveInst(operand); + const is_err_mcv = try self.isErr(null, operand_ty, operand_mcv); + + const cond_reg = try self.register_manager.allocReg(inst, gp); + const cond_reg_lock = self.register_manager.lockRegAssumeUnused(cond_reg); + defer self.register_manager.unlockReg(cond_reg_lock); + + // A branch to the false section. Uses beq. 1 is the default "true" state. + const reloc = try self.condBr(Type.anyerror, is_err_mcv, cond_reg); + + if (self.liveness.operandDies(inst, 0)) { + if (operand.toIndex()) |op_inst| self.processDeath(op_inst); + } + + // Save state + const parent_next_stack_offset = self.next_stack_offset; + const parent_free_registers = self.register_manager.free_registers; + var parent_stack = try self.stack.clone(self.gpa); + defer parent_stack.deinit(self.gpa); + const parent_registers = self.register_manager.registers; + + try self.branch_stack.append(.{}); + errdefer { + _ = self.branch_stack.pop(); + } + + try self.ensureProcessDeathCapacity(liveness_condbr.else_deaths.len); + for (liveness_condbr.else_deaths) |op| { + self.processDeath(op); + } + + try self.genBody(body); + + // Restore state + var saved_then_branch = self.branch_stack.pop(); + defer saved_then_branch.deinit(self.gpa); + + self.register_manager.registers = parent_registers; + + self.stack.deinit(self.gpa); + self.stack = parent_stack; + parent_stack = .{}; + + self.next_stack_offset = parent_next_stack_offset; + self.register_manager.free_registers = parent_free_registers; + + try self.performReloc(reloc, @intCast(self.mir_instructions.len)); + + try self.ensureProcessDeathCapacity(liveness_condbr.then_deaths.len); + for (liveness_condbr.then_deaths) |op| { + self.processDeath(op); + } + + const result = if (self.liveness.isUnused(inst)) + .unreach + else + try self.genUnwrapErrUnionPayloadMir(operand_ty, operand_mcv); + return result; +} + fn airSlicePtr(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result = result: { @@ -1742,9 +2120,36 @@ fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) !void { } fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.comp.module.?; const is_volatile = false; // TODO const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (!is_volatile and self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement slice_elem_val for {}", .{self.target.cpu.arch}); + + if (!is_volatile and self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none }); + const result: MCValue = result: { + const slice_mcv = try self.resolveInst(bin_op.lhs); + const index_mcv = try self.resolveInst(bin_op.rhs); + + const slice_ty = self.typeOf(bin_op.lhs); + + const slice_ptr_field_type = slice_ty.slicePtrFieldType(mod); + + const index_lock: ?RegisterLock = if (index_mcv == .register) + self.register_manager.lockRegAssumeUnused(index_mcv.register) + else + null; + defer if (index_lock) |reg| self.register_manager.unlockReg(reg); + + const base_mcv: MCValue = switch (slice_mcv) { + .stack_offset => |off| .{ .register = try self.copyToTmpRegister(slice_ptr_field_type, .{ .stack_offset = off }) }, + else => return self.fail("TODO slice_elem_val when slice is 
{}", .{slice_mcv}), + }; + + const dest = try self.allocRegOrMem(inst, true); + const addr = try self.binOp(.ptr_add, null, base_mcv, index_mcv, slice_ptr_field_type, Type.usize); + try self.load(dest, addr, slice_ptr_field_type); + + break :result dest; + }; return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } @@ -1763,21 +2168,44 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { const array_mcv = try self.resolveInst(bin_op.lhs); const index_mcv = try self.resolveInst(bin_op.rhs); + const index_ty = self.typeOf(bin_op.rhs); const elem_ty = array_ty.childType(mod); const elem_abi_size = elem_ty.abiSize(mod); + const addr_reg, const addr_reg_lock = try self.allocReg(); + defer self.register_manager.unlockReg(addr_reg_lock); + switch (array_mcv) { - // all we need to do is calculate the offset that the elem exits at. - .stack_offset => |off| { - if (index_mcv == .immediate) { - const true_offset: u32 = @intCast(index_mcv.immediate * elem_abi_size); - break :result MCValue{ .stack_offset = off + true_offset }; - } - return self.fail("TODO: airArrayElemVal with runtime index", .{}); + .register => { + const stack_offset = try self.allocMem( + null, + @intCast(array_ty.abiSize(mod)), + array_ty.abiAlignment(mod), + ); + try self.genSetStack(array_ty, stack_offset, array_mcv); + try self.genSetReg(Type.usize, addr_reg, .{ .ptr_stack_offset = stack_offset }); }, - else => return self.fail("TODO: airArrayElemVal {s}", .{@tagName(array_mcv)}), + .stack_offset => |off| { + try self.genSetReg(Type.usize, addr_reg, .{ .ptr_stack_offset = off }); + }, + else => try self.genSetReg(Type.usize, addr_reg, array_mcv.address()), } + + const offset_reg = try self.elemOffset(index_ty, index_mcv, elem_abi_size); + const offset_lock = self.register_manager.lockRegAssumeUnused(offset_reg); + defer self.register_manager.unlockReg(offset_lock); + + const dst_mcv = try self.allocRegOrMem(inst, false); + try self.binOpMir( + .add, + null, + Type.usize, + .{ .register = addr_reg }, + .{ .register = offset_reg }, + ); + try self.genCopy(elem_ty, dst_mcv, .{ .indirect = .{ .reg = addr_reg } }); + break :result dst_mcv; }; return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } @@ -2094,54 +2522,37 @@ fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void { } /// Loads `value` into the "payload" of `pointer`. 
-fn store(self: *Self, pointer: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) !void { +fn store(self: *Self, ptr_mcv: MCValue, src_mcv: MCValue, ptr_ty: Type, src_ty: Type) !void { const mod = self.bin_file.comp.module.?; - const value_abi_size = value_ty.abiSize(mod); - log.debug("storing {}:{} in {}:{}", .{ value, value_ty.fmt(mod), pointer, ptr_ty.fmt(mod) }); + log.debug("storing {}:{} in {}:{}", .{ src_mcv, src_ty.fmt(mod), ptr_mcv, ptr_ty.fmt(mod) }); - switch (pointer) { + switch (ptr_mcv) { .none => unreachable, .undef => unreachable, .unreach => unreachable, .dead => unreachable, - .ptr_stack_offset => |off| try self.genSetStack(value_ty, off, value), + .register_pair => unreachable, - .stack_offset => { - const pointer_reg, const lock = try self.allocReg(); - defer self.register_manager.unlockReg(lock); + .immediate, + .register, + .register_offset, + .addr_symbol, + .ptr_stack_offset, + => try self.genCopy(src_ty, ptr_mcv.deref(), src_mcv), - try self.genSetReg(ptr_ty, pointer_reg, pointer); + .memory, + .indirect, + .load_symbol, + .stack_offset, + => { + const addr_reg = try self.copyToTmpRegister(ptr_ty, ptr_mcv); + const addr_lock = self.register_manager.lockRegAssumeUnused(addr_reg); + defer self.register_manager.unlockReg(addr_lock); - return self.store(.{ .register = pointer_reg }, value, ptr_ty, value_ty); + try self.genCopy(src_ty, .{ .indirect = .{ .reg = addr_reg } }, src_mcv); }, - - .register => |reg| { - const value_reg = try self.copyToTmpRegister(value_ty, value); - - switch (value_abi_size) { - 1, 2, 4, 8 => { - const tag: Mir.Inst.Tag = switch (value_abi_size) { - 1 => .sb, - 2 => .sh, - 4 => .sw, - 8 => .sd, - else => unreachable, - }; - - _ = try self.addInst(.{ - .tag = tag, - .data = .{ .i_type = .{ - .rd = value_reg, - .rs1 = reg, - .imm12 = 0, - } }, - }); - }, - else => return self.fail("TODO: genSetStack for size={d}", .{value_abi_size}), - } - }, - else => return self.fail("TODO implement storing to MCValue.{s}", .{@tagName(pointer)}), + .air_ref => |ptr_ref| try self.store(try self.resolveInst(ptr_ref), src_mcv, ptr_ty, src_ty), } } @@ -2405,45 +2816,58 @@ fn genCall( // Due to incremental compilation, how function calls are generated depends // on linking. 
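// Editor's note, not part of the patch: a hedged sketch of the machine
// sequence the ELF path below is expected to lower to (exact encodings are
// emitted elsewhere; the %hi/%lo split of got_addr is an assumption for
// illustration only):
//
//   lui  ra, %hi(got_addr)      // materialize the ZIG GOT slot address
//   ld   ra, %lo(got_addr)(ra)  // load the callee pointer from the GOT
//   jalr ra, 0(ra)              // indirect call; return address in ra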
switch (info) { - .air => |callee| if (try self.air.value(callee, mod)) |func_value| { - const func_key = mod.intern_pool.indexToKey(func_value.ip_index); - switch (switch (func_key) { - else => func_key, - .ptr => |ptr| switch (ptr.addr) { - .decl => |decl| mod.intern_pool.indexToKey(mod.declPtr(decl).val.toIntern()), + .air => |callee| { + if (try self.air.value(callee, mod)) |func_value| { + const func_key = mod.intern_pool.indexToKey(func_value.ip_index); + switch (switch (func_key) { else => func_key, - }, - }) { - .func => |func| { - if (self.bin_file.cast(link.File.Elf)) |elf_file| { - const sym_index = try elf_file.zigObjectPtr().?.getOrCreateMetadataForDecl(elf_file, func.owner_decl); - const sym = elf_file.symbol(sym_index); - _ = try sym.getOrCreateZigGotEntry(sym_index, elf_file); - const got_addr = sym.zigGotAddress(elf_file); - try self.genSetReg(Type.usize, .ra, .{ .memory = got_addr }); - _ = try self.addInst(.{ - .tag = .jalr, - .data = .{ .i_type = .{ - .rd = .ra, - .rs1 = .ra, - .imm12 = 0, - } }, - }); - } else if (self.bin_file.cast(link.File.Coff)) |_| { - return self.fail("TODO implement calling in COFF for {}", .{self.target.cpu.arch}); - } else if (self.bin_file.cast(link.File.MachO)) |_| { - unreachable; // unsupported architecture for MachO - } else if (self.bin_file.cast(link.File.Plan9)) |_| { - return self.fail("TODO implement call on plan9 for {}", .{self.target.cpu.arch}); - } else unreachable; - }, - .extern_func => { - return self.fail("TODO: extern func calls", .{}); - }, - else => return self.fail("TODO implement calling bitcasted functions", .{}), + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl| mod.intern_pool.indexToKey(mod.declPtr(decl).val.toIntern()), + else => func_key, + }, + }) { + .func => |func| { + if (self.bin_file.cast(link.File.Elf)) |elf_file| { + const sym_index = try elf_file.zigObjectPtr().?.getOrCreateMetadataForDecl(elf_file, func.owner_decl); + const sym = elf_file.symbol(sym_index); + _ = try sym.getOrCreateZigGotEntry(sym_index, elf_file); + const got_addr = sym.zigGotAddress(elf_file); + try self.genSetReg(Type.usize, .ra, .{ .memory = got_addr }); + _ = try self.addInst(.{ + .tag = .jalr, + .data = .{ .i_type = .{ + .rd = .ra, + .rs1 = .ra, + .imm12 = 0, + } }, + }); + } else if (self.bin_file.cast(link.File.Coff)) |_| { + return self.fail("TODO implement calling in COFF for {}", .{self.target.cpu.arch}); + } else if (self.bin_file.cast(link.File.MachO)) |_| { + unreachable; // unsupported architecture for MachO + } else if (self.bin_file.cast(link.File.Plan9)) |_| { + return self.fail("TODO implement call on plan9 for {}", .{self.target.cpu.arch}); + } else unreachable; + }, + .extern_func => { + return self.fail("TODO: extern func calls", .{}); + }, + else => return self.fail("TODO implement calling bitcasted functions", .{}), + } + } else { + assert(self.typeOf(callee).zigTypeTag(mod) == .Pointer); + const addr_reg, const addr_lock = try self.allocReg(); + defer self.register_manager.unlockReg(addr_lock); + try self.genSetReg(Type.usize, addr_reg, .{ .air_ref = callee }); + _ = try self.addInst(.{ + .tag = .jalr, + .data = .{ .i_type = .{ + .rd = .ra, + .rs1 = addr_reg, + .imm12 = 0, + } }, + }); } - } else { - return self.fail("TODO: call function pointers", .{}); }, .lib => return self.fail("TODO: lib func calls", .{}), } @@ -2506,21 +2930,41 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { fn airCmp(self: *Self, inst: Air.Inst.Index) !void { const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)]; 
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - if (self.liveness.isUnused(inst)) - return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none }); - const ty = self.typeOf(bin_op.lhs); const mod = self.bin_file.comp.module.?; - assert(ty.eql(self.typeOf(bin_op.rhs), mod)); - if (ty.zigTypeTag(mod) == .ErrorSet) - return self.fail("TODO implement cmp for errors", .{}); + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const lhs = try self.resolveInst(bin_op.lhs); + const rhs = try self.resolveInst(bin_op.rhs); + const lhs_ty = self.typeOf(bin_op.lhs); - const lhs = try self.resolveInst(bin_op.lhs); - const rhs = try self.resolveInst(bin_op.rhs); - const lhs_ty = self.typeOf(bin_op.lhs); - const rhs_ty = self.typeOf(bin_op.rhs); + const int_ty = switch (lhs_ty.zigTypeTag(mod)) { + .Vector => unreachable, // Handled by cmp_vector. + .Enum => lhs_ty.intTagType(mod), + .Int => lhs_ty, + .Bool => Type.u1, + .Pointer => Type.usize, + .ErrorSet => Type.u16, + .Optional => blk: { + const payload_ty = lhs_ty.optionalChild(mod); + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + break :blk Type.u1; + } else if (lhs_ty.isPtrLikeOptional(mod)) { + break :blk Type.usize; + } else { + return self.fail("TODO riscv cmp non-pointer optionals", .{}); + } + }, + .Float => return self.fail("TODO riscv cmp floats", .{}), + else => unreachable, + }; - const result = try self.binOp(tag, null, lhs, rhs, lhs_ty, rhs_ty); + const int_info = int_ty.intInfo(mod); + if (int_info.bits <= 64) { + break :result try self.binOp(tag, null, lhs, rhs, int_ty, int_ty); + } else { + return self.fail("TODO riscv cmp for ints > 64 bits", .{}); + } + }; return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } @@ -2555,9 +2999,7 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void { fn airDbgInlineBlock(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.DbgInlineBlock, ty_pl.payload); - _ = extra; - // TODO: emit debug info for this block - return self.finishAir(inst, .dead, .{ .none, .none, .none }); + try self.lowerBlock(inst, @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len])); } fn airDbgVar(self: *Self, inst: Air.Inst.Index) !void { @@ -2632,9 +3074,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { // that death now instead of later as this has an effect on // whether it needs to be spilled in the branches if (self.liveness.operandDies(inst, 0)) { - if (pl_op.operand.toIndex()) |op_index| { - self.processDeath(op_index); - } + if (pl_op.operand.toIndex()) |op_inst| self.processDeath(op_inst); } // Save state @@ -2654,11 +3094,8 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { self.processDeath(operand); } try self.genBody(then_body); - // point at the to-be-generated else case - try self.performReloc(reloc, @intCast(self.mir_instructions.len)); - - // Revert to the previous register and stack allocation state. 
+ // Restore state var saved_then_branch = self.branch_stack.pop(); defer saved_then_branch.deinit(self.gpa); @@ -2674,6 +3111,8 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { const else_branch = self.branch_stack.addOneAssumeCapacity(); else_branch.* = .{}; + try self.performReloc(reloc, @intCast(self.mir_instructions.len)); + try self.ensureProcessDeathCapacity(liveness_condbr.else_deaths.len); for (liveness_condbr.else_deaths) |operand| { self.processDeath(operand); @@ -2772,34 +3211,6 @@ fn condBr(self: *Self, cond_ty: Type, condition: MCValue, cond_reg: Register) !M }); } -fn isNull(self: *Self, operand: MCValue) !MCValue { - _ = operand; - // Here you can specialize this instruction if it makes sense to, otherwise the default - // will call isNonNull and invert the result. - return self.fail("TODO call isNonNull and invert the result", .{}); -} - -fn isNonNull(self: *Self, operand: MCValue) !MCValue { - _ = operand; - // Here you can specialize this instruction if it makes sense to, otherwise the default - // will call isNull and invert the result. - return self.fail("TODO call isNull and invert the result", .{}); -} - -fn isErr(self: *Self, operand: MCValue) !MCValue { - _ = operand; - // Here you can specialize this instruction if it makes sense to, otherwise the default - // will call isNonNull and invert the result. - return self.fail("TODO call isNonErr and invert the result", .{}); -} - -fn isNonErr(self: *Self, operand: MCValue) !MCValue { - _ = operand; - // Here you can specialize this instruction if it makes sense to, otherwise the default - // will call isNull and invert the result. - return self.fail("TODO call isErr and invert the result", .{}); -} - fn airIsNull(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { @@ -2827,6 +3238,13 @@ fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ un_op, .none, .none }); } +fn isNull(self: *Self, operand: MCValue) !MCValue { + _ = operand; + // Here you can specialize this instruction if it makes sense to, otherwise the default + // will call isNonNull and invert the result. + return self.fail("TODO call isNonNull and invert the result", .{}); +} + fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { @@ -2836,6 +3254,13 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ un_op, .none, .none }); } +fn isNonNull(self: *Self, operand: MCValue) !MCValue { + _ = operand; + // Here you can specialize this instruction if it makes sense to, otherwise the default + // will call isNull and invert the result. 
+ return self.fail("TODO call isNull and invert the result", .{}); +} + fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { @@ -2858,12 +3283,14 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand = try self.resolveInst(un_op); - break :result try self.isErr(operand); + const operand_ty = self.typeOf(un_op); + break :result try self.isErr(inst, operand_ty, operand); }; return self.finishAir(inst, result, .{ un_op, .none, .none }); } fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.comp.module.?; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_ptr = try self.resolveInst(un_op); @@ -2876,21 +3303,98 @@ fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void { } }; try self.load(operand, operand_ptr, self.typeOf(un_op)); - break :result try self.isErr(operand); + const operand_ptr_ty = self.typeOf(un_op); + const operand_ty = operand_ptr_ty.childType(mod); + + break :result try self.isErr(inst, operand_ty, operand); }; return self.finishAir(inst, result, .{ un_op, .none, .none }); } +/// Generates a compare instruction which will indicate if `eu_mcv` is an error. +/// +/// Result is in the return register. +fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, eu_ty: Type, eu_mcv: MCValue) !MCValue { + const mod = self.bin_file.comp.module.?; + const err_ty = eu_ty.errorUnionSet(mod); + if (err_ty.errorSetIsEmpty(mod)) return MCValue{ .immediate = 0 }; // always false + + _ = maybe_inst; + + const err_off = errUnionErrorOffset(eu_ty.errorUnionPayload(mod), mod); + + switch (eu_mcv) { + .register => |reg| { + const eu_lock = self.register_manager.lockReg(reg); + defer if (eu_lock) |lock| self.register_manager.unlockReg(lock); + + const return_reg = try self.copyToTmpRegister(eu_ty, eu_mcv); + const return_lock = self.register_manager.lockRegAssumeUnused(return_reg); + defer self.register_manager.unlockReg(return_lock); + + var return_mcv: MCValue = .{ .register = return_reg }; + + if (err_off > 0) { + return_mcv = try self.binOp( + .shr, + null, + return_mcv, + .{ .immediate = @as(u6, @intCast(err_off * 8)) }, + eu_ty, + Type.u8, + ); + } + + try self.binOpMir( + .cmp_neq, + null, + Type.anyerror, + return_mcv, + .{ .immediate = 0 }, + ); + + return return_mcv; + }, + else => return self.fail("TODO implement isErr for {}", .{eu_mcv}), + } +} + fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand = try self.resolveInst(un_op); - break :result try self.isNonErr(operand); + const ty = self.typeOf(un_op); + break :result try self.isNonErr(inst, ty, operand); }; return self.finishAir(inst, result, .{ un_op, .none, .none }); } +fn isNonErr(self: *Self, inst: Air.Inst.Index, eu_ty: Type, eu_mcv: MCValue) !MCValue { + const is_err_res = try self.isErr(inst, eu_ty, eu_mcv); + switch (is_err_res) { + .register => |reg| { + _ = try self.addInst(.{ + .tag = .not, + .data = .{ + .rr = .{ + .rd = reg, + .rs = reg, + }, + }, + }); + return is_err_res; 
+ }, + // always false case + .immediate => |imm| { + assert(imm == 0); + return MCValue{ .immediate = @intFromBool(imm == 0) }; + }, + else => unreachable, + } +} + fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.comp.module.?; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_ptr = try self.resolveInst(un_op); @@ -2902,8 +3406,11 @@ fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void { break :blk try self.allocRegOrMem(inst, true); } }; + const operand_ptr_ty = self.typeOf(un_op); + const operand_ty = operand_ptr_ty.childType(mod); + try self.load(operand, operand_ptr, self.typeOf(un_op)); - break :result try self.isNonErr(operand); + break :result try self.isNonErr(inst, operand_ty, operand); }; return self.finishAir(inst, result, .{ un_op, .none, .none }); } @@ -2914,7 +3421,7 @@ fn airLoop(self: *Self, inst: Air.Inst.Index) !void { const loop = self.air.extraData(Air.Block, ty_pl.payload); const body: []const Air.Inst.Index = @ptrCast(self.air.extra[loop.end..][0..loop.data.body_len]); - const start_index: Mir.Inst.Index = @intCast(self.code.items.len); + const start_index: Mir.Inst.Index = @intCast(self.mir_instructions.len); try self.genBody(body); try self.jump(start_index); @@ -2933,6 +3440,12 @@ fn jump(self: *Self, index: Mir.Inst.Index) !void { } fn airBlock(self: *Self, inst: Air.Inst.Index) !void { + const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; + const extra = self.air.extraData(Air.Block, ty_pl.payload); + try self.lowerBlock(inst, @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len])); +} + +fn lowerBlock(self: *Self, inst: Air.Inst.Index, body: []const Air.Inst.Index) !void { try self.blocks.putNoClobber(self.gpa, inst, .{ // A block is a setup to be able to jump to the end. 
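// Editor's note (an assumption based on brVoid and performReloc below):
// each `br` targeting this block appends a `.j` instruction with an
// undefined target to `relocs`; once the block body has been generated,
// every entry is backpatched to point just past the block.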
.relocs = .{}, @@ -2945,9 +3458,6 @@ fn airBlock(self: *Self, inst: Air.Inst.Index) !void { }); defer self.blocks.getPtr(inst).?.relocs.deinit(self.gpa); - const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; - const extra = self.air.extraData(Air.Block, ty_pl.payload); - const body: []const Air.Inst.Index = @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]); // TODO emit debug info lexical block try self.genBody(body); @@ -2982,6 +3492,8 @@ fn performReloc(self: *Self, inst: Mir.Inst.Index, target: Mir.Inst.Index) !void => self.mir_instructions.items(.data)[inst].b_type.inst = target, .jal, => self.mir_instructions.items(.data)[inst].j_type.inst = target, + .j, + => self.mir_instructions.items(.data)[inst].inst = target, else => return self.fail("TODO: performReloc {s}", .{@tagName(tag)}), } } @@ -3023,13 +3535,8 @@ fn brVoid(self: *Self, block: Air.Inst.Index) !void { try block_data.relocs.ensureUnusedCapacity(self.gpa, 1); block_data.relocs.appendAssumeCapacity(try self.addInst(.{ - .tag = .jal, - .data = .{ - .j_type = .{ - .rd = .ra, - .inst = undefined, - }, - }, + .tag = .j, + .data = .{ .inst = undefined }, })); } @@ -3273,59 +3780,22 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, src_mcv: MCValue) Inner else => unreachable, // register can hold a max of 8 bytes } }, - .stack_offset, .load_symbol => { - switch (src_mcv) { - .stack_offset => |off| if (off == stack_offset) return, - else => {}, - } + .stack_offset, + .indirect, + .load_symbol, + => { + if (src_mcv == .stack_offset and src_mcv.stack_offset == stack_offset) return; if (abi_size <= 8) { const reg = try self.copyToTmpRegister(ty, src_mcv); return self.genSetStack(ty, stack_offset, .{ .register = reg }); } - const ptr_ty = try mod.singleMutPtrType(ty); - - // TODO call extern memcpy - const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null }, gp); - const regs_locks = self.register_manager.lockRegsAssumeUnused(5, regs); - defer for (regs_locks) |reg| { - self.register_manager.unlockReg(reg); - }; - - const src_reg = regs[0]; - const dst_reg = regs[1]; - const len_reg = regs[2]; - const count_reg = regs[3]; - const tmp_reg = regs[4]; - - switch (src_mcv) { - .stack_offset => |offset| { - try self.genSetReg(ptr_ty, src_reg, .{ .ptr_stack_offset = offset }); - }, - .load_symbol => |sym_off| { - const atom_index = try self.symbolIndex(); - - // setup the src pointer - _ = try self.addInst(.{ - .tag = .load_symbol, - .data = .{ - .payload = try self.addExtra(Mir.LoadSymbolPayload{ - .register = src_reg.id(), - .atom_index = atom_index, - .sym_index = sym_off.sym, - }), - }, - }); - }, - else => return self.fail("TODO: genSetStack unreachable {s}", .{@tagName(src_mcv)}), - } - - try self.genSetReg(ptr_ty, dst_reg, .{ .ptr_stack_offset = stack_offset }); - try self.genSetReg(Type.usize, len_reg, .{ .immediate = abi_size }); - - // memcpy(src, dst, len) - try self.genInlineMemcpy(src_reg, dst_reg, len_reg, count_reg, tmp_reg); + try self.genInlineMemcpy( + .{ .ptr_stack_offset = stack_offset }, + src_mcv.address(), + .{ .immediate = abi_size }, + ); }, .air_ref => |ref| try self.genSetStack(ty, stack_offset, try self.resolveInst(ref)), else => return self.fail("TODO: genSetStack {s}", .{@tagName(src_mcv)}), @@ -3344,13 +3814,22 @@ fn genSetMem(self: *Self, ty: Type, addr: u64, src_mcv: MCValue) InnerError!void fn genInlineMemcpy( self: *Self, - src: Register, - dst: Register, - len: Register, - count: Register, - tmp: Register, + dst_ptr: MCValue, + 
src_ptr: MCValue, + len: MCValue, ) !void { - try self.genSetReg(Type.usize, count, .{ .register = len }); + const regs = try self.register_manager.allocRegs(4, .{null} ** 4, tp); + const locks = self.register_manager.lockRegsAssumeUnused(4, regs); + defer for (locks) |lock| self.register_manager.unlockReg(lock); + + const count = regs[0]; + const tmp = regs[1]; + const src = regs[2]; + const dst = regs[3]; + + try self.genSetReg(Type.usize, count, len); + try self.genSetReg(Type.usize, src, src_ptr); + try self.genSetReg(Type.usize, dst, dst_ptr); // lb tmp, 0(src) const first_inst = try self.addInst(.{ @@ -3437,6 +3916,14 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_mcv: MCValue) InnerError! const mod = self.bin_file.comp.module.?; const abi_size: u32 = @intCast(ty.abiSize(mod)); + const load_tag: Mir.Inst.Tag = switch (abi_size) { + 1 => .lb, + 2 => .lh, + 4 => .lw, + 8 => .ld, + else => return self.fail("TODO: genSetReg for size {d}", .{abi_size}), + }; + switch (src_mcv) { .dead => unreachable, .ptr_stack_offset => |off| { @@ -3550,16 +4037,8 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_mcv: MCValue) InnerError! }); }, .stack_offset => |off| { - const tag: Mir.Inst.Tag = switch (abi_size) { - 1 => .lb, - 2 => .lh, - 4 => .lw, - 8 => .ld, - else => return self.fail("TODO: genSetReg for size {d}", .{abi_size}), - }; - _ = try self.addInst(.{ - .tag = tag, + .tag = load_tag, .data = .{ .i_type = .{ .rd = reg, .rs1 = .sp, @@ -3569,53 +4048,14 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, src_mcv: MCValue) InnerError! } }, }); }, - .load_symbol => |sym_off| { - assert(sym_off.off == 0); - - const atom_index = try self.symbolIndex(); - - _ = try self.addInst(.{ - .tag = .load_symbol, - .data = .{ - .payload = try self.addExtra(Mir.LoadSymbolPayload{ - .register = reg.id(), - .atom_index = atom_index, - .sym_index = sym_off.sym, - }), - }, - }); - - const tag: Mir.Inst.Tag = switch (abi_size) { - 1 => .lb, - 2 => .lh, - 4 => .lw, - 8 => .ld, - else => return self.fail("TODO: genSetReg for size {d}", .{abi_size}), - }; - - _ = try self.addInst(.{ - .tag = tag, - .data = .{ - .i_type = .{ - .rd = reg, - .rs1 = reg, - .imm12 = 0, - }, - }, - }); + .load_symbol => { + try self.genSetReg(ty, reg, src_mcv.address()); + try self.genSetReg(ty, reg, .{ .indirect = .{ .reg = reg } }); }, .air_ref => |ref| try self.genSetReg(ty, reg, try self.resolveInst(ref)), .indirect => |reg_off| { - const tag: Mir.Inst.Tag = switch (abi_size) { - 1 => .lb, - 2 => .lh, - 4 => .lw, - 8 => .ld, - else => return self.fail("TODO: genSetReg for size {d}", .{abi_size}), - }; - _ = try self.addInst(.{ - .tag = tag, + .tag = load_tag, .data = .{ .i_type = .{ .rd = reg, @@ -4026,3 +4466,25 @@ fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type { fn hasFeature(self: *Self, feature: Target.riscv.Feature) bool { return Target.riscv.featureSetHas(self.target.cpu.features, feature); } + +pub fn errUnionPayloadOffset(payload_ty: Type, mod: *Module) u64 { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return 0; + const payload_align = payload_ty.abiAlignment(mod); + const error_align = Type.anyerror.abiAlignment(mod); + if (payload_align.compare(.gte, error_align) or !payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + return 0; + } else { + return payload_align.forward(Type.anyerror.abiSize(mod)); + } +} + +pub fn errUnionErrorOffset(payload_ty: Type, mod: *Module) u64 { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return 0; + const payload_align = payload_ty.abiAlignment(mod); + 
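// Editor's worked example, not part of the patch (the concrete numbers
// assume anyerror has a 2-byte ABI size and alignment here): for
// anyerror!u64 the payload alignment (8) dominates, so the payload sits at
// offset 0 and the error at error_align.forward(8) = 8; for anyerror!u8
// the error alignment dominates, so the error sits at offset 0 and the
// payload at payload_align.forward(2) = 2.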
const error_align = Type.anyerror.abiAlignment(mod); + if (payload_align.compare(.gte, error_align) and payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + return error_align.forward(payload_ty.abiSize(mod)); + } else { + return 0; + } +} diff --git a/src/arch/riscv64/Emit.zig b/src/arch/riscv64/Emit.zig index 8161e4a99b..a3156fc499 100644 --- a/src/arch/riscv64/Emit.zig +++ b/src/arch/riscv64/Emit.zig @@ -4,6 +4,8 @@ mir: Mir, bin_file: *link.File, debug_output: DebugInfoOutput, +output_mode: std.builtin.OutputMode, +link_mode: std.builtin.LinkMode, target: *const std.Target, err_msg: ?*ErrorMsg = null, src_loc: Module.SrcLoc, @@ -47,6 +49,7 @@ pub fn emitMir( switch (tag) { .add => try emit.mirRType(inst), .sub => try emit.mirRType(inst), + .mul => try emit.mirRType(inst), .@"or" => try emit.mirRType(inst), .cmp_eq => try emit.mirRType(inst), @@ -56,7 +59,9 @@ pub fn emitMir( .cmp_lt => try emit.mirRType(inst), .cmp_imm_gte => try emit.mirRType(inst), .cmp_imm_eq => try emit.mirIType(inst), + .cmp_imm_neq => try emit.mirIType(inst), .cmp_imm_lte => try emit.mirIType(inst), + .cmp_imm_lt => try emit.mirIType(inst), .beq => try emit.mirBType(inst), .bne => try emit.mirBType(inst), @@ -186,6 +191,7 @@ fn mirRType(emit: *Emit, inst: Mir.Inst.Index) !void { switch (tag) { .add => try emit.writeInstruction(Instruction.add(rd, rs1, rs2)), .sub => try emit.writeInstruction(Instruction.sub(rd, rs1, rs2)), + .mul => try emit.writeInstruction(Instruction.mul(rd, rs1, rs2)), .cmp_gt => { // rs1 > rs2 try emit.writeInstruction(Instruction.sltu(rd, rs2, rs1)); @@ -284,6 +290,14 @@ fn mirIType(emit: *Emit, inst: Mir.Inst.Index) !void { try emit.writeInstruction(Instruction.xori(rd, rs1, imm12)); try emit.writeInstruction(Instruction.sltiu(rd, rd, 1)); }, + .cmp_imm_neq => { + try emit.writeInstruction(Instruction.xori(rd, rs1, imm12)); + try emit.writeInstruction(Instruction.sltu(rd, .x0, rd)); + }, + + .cmp_imm_lt => { + try emit.writeInstruction(Instruction.slti(rd, rs1, imm12)); + }, .cmp_imm_lte => { try emit.writeInstruction(Instruction.sltiu(rd, rs1, @bitCast(imm12))); @@ -447,6 +461,7 @@ fn mirLoadSymbol(emit: *Emit, inst: Mir.Inst.Index) !void { const start_offset = @as(u32, @intCast(emit.code.items.len)); try emit.writeInstruction(Instruction.lui(reg, 0)); + try emit.writeInstruction(Instruction.addi(reg, reg, 0)); switch (emit.bin_file.tag) { .elf => { @@ -463,12 +478,6 @@ fn mirLoadSymbol(emit: *Emit, inst: Mir.Inst.Index) !void { hi_r_type = Elf.R_ZIG_GOT_HI20; lo_r_type = Elf.R_ZIG_GOT_LO12; - - // we need to deref once if we are getting from zig_got, as itll - // reloc an address of the address in the got. - try emit.writeInstruction(Instruction.ld(reg, 0, reg)); - } else { - try emit.writeInstruction(Instruction.addi(reg, reg, 0)); } try atom_ptr.addReloc(elf_file, .{ @@ -544,6 +553,7 @@ fn instructionSize(emit: *Emit, inst: Mir.Inst.Index) usize { .cmp_eq, .cmp_neq, .cmp_imm_eq, + .cmp_imm_neq, .cmp_gte, .load_symbol, .abs, diff --git a/src/arch/riscv64/Mir.zig b/src/arch/riscv64/Mir.zig index d8c8775a6d..dd9064b4a4 100644 --- a/src/arch/riscv64/Mir.zig +++ b/src/arch/riscv64/Mir.zig @@ -33,6 +33,8 @@ pub const Inst = struct { add, /// Subtraction sub, + /// Multiply, uses r_type. Needs the M extension. + mul, /// Absolute Value, uses i_type payload. abs, @@ -76,8 +78,12 @@ pub const Inst = struct { /// Immediate `==`, uses i_type cmp_imm_eq, + /// Immediate `!=`, uses i_type. 
+ cmp_imm_neq, /// Immediate `<=`, uses i_type cmp_imm_lte, + /// Immediate `<`, uses i_type + cmp_imm_lt, /// Branch if equal, Uses b_type beq, diff --git a/src/arch/riscv64/bits.zig b/src/arch/riscv64/bits.zig index b7efdef765..0e87478025 100644 --- a/src/arch/riscv64/bits.zig +++ b/src/arch/riscv64/bits.zig @@ -112,7 +112,7 @@ pub const Instruction = union(enum) { // -- less burden on callsite, bonus semantic checking fn bType(op: u7, fn3: u3, r1: Register, r2: Register, imm: i13) Instruction { const umm = @as(u13, @bitCast(imm)); - assert(umm % 2 == 0); // misaligned branch target + assert(umm % 4 == 0); // misaligned branch target return Instruction{ .B = .{ @@ -201,6 +201,12 @@ pub const Instruction = union(enum) { return rType(0b0110011, 0b011, 0b0000000, rd, r1, r2); } + // M extension operations + + pub fn mul(rd: Register, r1: Register, r2: Register) Instruction { + return rType(0b0110011, 0b000, 0b0000001, rd, r1, r2); + } + // Arithmetic/Logical, Register-Register (32-bit) pub fn addw(rd: Register, r1: Register, r2: Register) Instruction { From 9d0bb6371df31dd25e86b7ef4161852740f39f07 Mon Sep 17 00:00:00 2001 From: David Rubin Date: Sat, 30 Mar 2024 06:43:13 -0700 Subject: [PATCH 33/44] riscv: almost `@errorName` but not loading correctly --- src/arch/riscv64/CodeGen.zig | 62 ++++++++++++++++++++++++++++++++---- 1 file changed, 56 insertions(+), 6 deletions(-) diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index bbb672649e..4920cb5baa 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -4194,13 +4194,63 @@ fn airTagName(self: *Self, inst: Air.Inst.Index) !void { } fn airErrorName(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.comp.module.?; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const operand = try self.resolveInst(un_op); - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else { - _ = operand; - return self.fail("TODO implement airErrorName for riscv64", .{}); - }; - return self.finishAir(inst, result, .{ un_op, .none, .none }); + + const err_ty = self.typeOf(un_op); + const err_mcv = try self.resolveInst(un_op); + + const err_reg = try self.copyToTmpRegister(err_ty, err_mcv); + const err_lock = self.register_manager.lockRegAssumeUnused(err_reg); + defer self.register_manager.unlockReg(err_lock); + + const addr_reg, const addr_lock = try self.allocReg(); + defer self.register_manager.unlockReg(addr_lock); + + const lazy_sym = link.File.LazySymbol.initDecl(.const_data, null, mod); + if (self.bin_file.cast(link.File.Elf)) |elf_file| { + const sym_index = elf_file.zigObjectPtr().?.getOrCreateMetadataForLazySymbol(elf_file, lazy_sym) catch |err| + return self.fail("{s} creating lazy symbol", .{@errorName(err)}); + const sym = elf_file.symbol(sym_index); + try self.genSetReg(Type.usize, addr_reg, .{ .load_symbol = .{ .sym = sym.esym_index } }); + } else { + return self.fail("TODO: riscv non-elf", .{}); + } + + const start_reg, const start_lock = try self.allocReg(); + defer self.register_manager.unlockReg(start_lock); + + const end_reg, const end_lock = try self.allocReg(); + defer self.register_manager.unlockReg(end_lock); + + _ = try self.addInst(.{ + .tag = .slli, + .data = .{ + .i_type = .{ + .rd = err_reg, + .rs1 = err_reg, + .imm12 = 4, + }, + }, + }); + + try self.binOpMir( + .add, + null, + Type.usize, + .{ .register = err_reg }, + .{ .register = addr_reg }, + ); + + try self.genSetReg(Type.usize, start_reg, .{ .indirect = .{ .reg = err_reg } 
}); + try self.genSetReg(Type.usize, end_reg, .{ .indirect = .{ .reg = err_reg, .off = 8 } }); + + const dst_mcv = try self.allocRegOrMem(inst, false); + + try self.genSetStack(Type.usize, dst_mcv.stack_offset, .{ .register = start_reg }); + try self.genSetStack(Type.usize, dst_mcv.stack_offset + 8, .{ .register = end_reg }); + + return self.finishAir(inst, dst_mcv, .{ un_op, .none, .none }); } fn airSplat(self: *Self, inst: Air.Inst.Index) !void { From 6740c1f0849dd2615859e4d65df355087165e073 Mon Sep 17 00:00:00 2001 From: David Rubin Date: Wed, 3 Apr 2024 00:15:56 -0700 Subject: [PATCH 34/44] riscv: big rewrite to use latest liveness this one is even harder to document than the last large overhaul. TLDR; - split apart Emit.zig into an Emit.zig and a Lower.zig - created separate files for the encoding, and now adding a new instruction is as simple as just adding it to a couple of switch statements and providing the encoding. - relocs are handled in a more sane manner, and we have a clear defining boundary between lea_symbol and load_symbol now. - a lot of different abstractions for things like the stack, memory, registers, and others. - we're using x86_64's FrameIndex now, which simplifies a lot of the tougher design process. - a lot more that I don't have the energy to document. at this point, just read the commit itself :p --- lib/compiler/test_runner.zig | 6 +- lib/std/builtin.zig | 10 +- lib/std/start.zig | 3 +- src/arch/riscv64/CodeGen.zig | 2766 +++++++++++++++++++++------------ src/arch/riscv64/Emit.zig | 693 ++------- src/arch/riscv64/Encoding.zig | 333 ++++ src/arch/riscv64/Lower.zig | 222 +++ src/arch/riscv64/Mir.zig | 275 ++-- src/arch/riscv64/abi.zig | 30 +- src/arch/riscv64/bits.zig | 571 ++----- src/arch/riscv64/encoder.zig | 49 + src/link/riscv.zig | 51 +- src/register_manager.zig | 1 + src/target.zig | 2 +- test/behavior/align.zig | 17 + test/behavior/array.zig | 95 ++ 16 files changed, 3021 insertions(+), 2103 deletions(-) create mode 100644 src/arch/riscv64/Encoding.zig create mode 100644 src/arch/riscv64/Lower.zig create mode 100644 src/arch/riscv64/encoder.zig diff --git a/lib/compiler/test_runner.zig b/lib/compiler/test_runner.zig index 41dbbf0986..5c674cecce 100644 --- a/lib/compiler/test_runner.zig +++ b/lib/compiler/test_runner.zig @@ -252,12 +252,16 @@ pub fn mainSimple() anyerror!void { pub fn mainExtraSimple() !void { var pass_count: u8 = 0; + var skip_count: u8 = 0; + var fail_count: u8 = 0; for (builtin.test_functions) |test_fn| { test_fn.func() catch |err| { if (err != error.SkipZigTest) { - @panic(test_fn.name); + fail_count += 1; + continue; } + skip_count += 1; continue; }; pass_count += 1; diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig index 15d405eed9..c5ddf02188 100644 --- a/lib/std/builtin.zig +++ b/lib/std/builtin.zig @@ -775,15 +775,7 @@ pub fn default_panic(msg: []const u8, error_return_trace: ?*StackTrace, ret_addr } if (builtin.zig_backend == .stage2_riscv64) { - asm volatile ("ecall" - : - : [number] "{a7}" (64), - [arg1] "{a0}" (1), - [arg2] "{a1}" (@intFromPtr(msg.ptr)), - [arg3] "{a2}" (msg.len), - : "rcx", "r11", "memory" - ); - std.posix.exit(127); + unreachable; } switch (builtin.os.tag) { diff --git a/lib/std/start.zig b/lib/std/start.zig index 0228ffdc2b..5fad443956 100644 --- a/lib/std/start.zig +++ b/lib/std/start.zig @@ -208,8 +208,7 @@ fn wasi_start() callconv(.C) void { } fn riscv_start() callconv(.C) noreturn { - const code = @call(.always_inline, callMain, .{}); - std.process.exit(code); + 
std.process.exit(@call(.always_inline, callMain, .{})); } fn EfiMain(handle: uefi.Handle, system_table: *uefi.tables.SystemTable) callconv(.C) usize { diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 4920cb5baa..36014d64ba 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -19,7 +19,8 @@ const Allocator = mem.Allocator; const trace = @import("../../tracy.zig").trace; const DW = std.dwarf; const leb128 = std.leb; -const log = std.log.scoped(.codegen); +const log = std.log.scoped(.riscv_codegen); +const tracking_log = std.log.scoped(.tracking); const build_options = @import("build_options"); const codegen = @import("../../codegen.zig"); const Alignment = InternPool.Alignment; @@ -31,6 +32,9 @@ const DebugInfoOutput = codegen.DebugInfoOutput; const bits = @import("bits.zig"); const abi = @import("abi.zig"); const Register = bits.Register; +const Immediate = bits.Immediate; +const Memory = bits.Memory; +const FrameIndex = bits.FrameIndex; const RegisterManager = abi.RegisterManager; const RegisterLock = RegisterManager.RegisterLock; const callee_preserved_regs = abi.callee_preserved_regs; @@ -58,11 +62,10 @@ code: *std.ArrayList(u8), debug_output: DebugInfoOutput, err_msg: ?*ErrorMsg, args: []MCValue, -ret_mcv: MCValue, +ret_mcv: InstTracking, fn_type: Type, arg_index: usize, src_loc: Module.SrcLoc, -stack_align: Alignment, /// MIR Instructions mir_instructions: std.MultiArrayList(Mir.Inst) = .{}, @@ -73,6 +76,8 @@ mir_extra: std.ArrayListUnmanaged(u32) = .{}, end_di_line: u32, end_di_column: u32, +scope_generation: u32, + /// The value is an offset into the `Function` `code` from the beginning. /// To perform the reloc, write 32-bit signed little-endian integer /// which is a relative jump, based on the address following the reloc. @@ -91,14 +96,12 @@ branch_stack: *std.ArrayList(Branch), blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .{}, register_manager: RegisterManager = .{}, -/// Maps offset to what is stored there. -stack: std.AutoHashMapUnmanaged(u32, StackAllocation) = .{}, +const_tracking: ConstTrackingMap = .{}, +inst_tracking: InstTrackingMap = .{}, -/// Offset from the stack base, representing the end of the stack frame. -max_end_stack: u32 = 0, -/// Represents the current end stack offset. If there is no existing slot -/// to place a new stack allocation, it goes here, and then bumps `max_end_stack`. -next_stack_offset: u32 = 0, +frame_allocs: std.MultiArrayList(FrameAlloc) = .{}, +free_frame_indices: std.AutoArrayHashMapUnmanaged(FrameIndex, void) = .{}, +frame_locs: std.MultiArrayList(Mir.FrameLoc) = .{}, /// Debug field, used to find bugs in the compiler. air_bookkeeping: @TypeOf(air_bookkeeping_init) = air_bookkeeping_init, @@ -107,6 +110,7 @@ const air_bookkeeping_init = if (std.debug.runtime_safety) @as(usize, 0) else {} const SymbolOffset = struct { sym: u32, off: i32 = 0 }; const RegisterOffset = struct { reg: Register, off: i32 = 0 }; +pub const FrameAddr = struct { index: FrameIndex, off: i32 = 0 }; const MCValue = union(enum) { /// No runtime bits. `void` types, empty structs, u0, enums with 1 tag, etc. @@ -116,7 +120,8 @@ const MCValue = union(enum) { /// Control flow will not allow this value to be observed. unreach, /// No more references to this value remain. - dead, + /// The payload is the value of scope_generation at the point where the death occurred + dead: u32, /// The value is undefined. undef, /// A pointer-sized integer that fits in a register. 
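// Editor's note, not part of the patch: the lea_*/load_* naming convention
// this rewrite introduces (per the commit message) separates computing an
// address from dereferencing it; `fi` is a hypothetical frame index used
// only for illustration:
//
//   .lea_frame  = .{ .index = fi, .off = 8 } // address of frame slot + 8
//   .load_frame = .{ .index = fi, .off = 8 } // value stored at that address
//
// lea_symbol likewise materializes a symbol's address, while load_symbol
// loads the value stored there.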
@@ -125,7 +130,7 @@ const MCValue = union(enum) { /// The value doesn't exist in memory yet. load_symbol: SymbolOffset, /// The address of the memory location not-yet-allocated by the linker. - addr_symbol: SymbolOffset, + lea_symbol: SymbolOffset, /// The value is in a target-specific register. register: Register, /// The value is split across two registers @@ -133,16 +138,21 @@ const MCValue = union(enum) { /// The value is in memory at a hard-coded address. /// If the type is a pointer, it means the pointer address is at this memory location. memory: u64, - /// The value is one of the stack variables. - /// If the type is a pointer, it means the pointer address is in the stack at this offset. - stack_offset: u32, - /// The value is a pointer to one of the stack variables (payload is stack offset). - ptr_stack_offset: u32, + /// The value stored at an offset from a frame index + /// Payload is a frame address. + load_frame: FrameAddr, + /// The address of an offset from a frame index + /// Payload is a frame address. + lea_frame: FrameAddr, air_ref: Air.Inst.Ref, /// The value is in memory at a constant offset from the address in a register. indirect: RegisterOffset, /// The value is a constant offset from the value in a register. register_offset: RegisterOffset, + /// This indicates that we have already allocated a frame index for this instruction, + /// but it has not been spilled there yet in the current control flow. + /// Payload is a frame index. + reserved_frame: FrameIndex, fn isMemory(mcv: MCValue) bool { return switch (mcv) { @@ -166,16 +176,17 @@ const MCValue = union(enum) { .immediate, .memory, - .ptr_stack_offset, + .lea_frame, .undef, - .addr_symbol, + .lea_symbol, .air_ref, + .reserved_frame, => false, .register, .register_pair, .register_offset, - .stack_offset, + .load_frame, .load_symbol, .indirect, => true, @@ -188,18 +199,19 @@ const MCValue = union(enum) { .unreach, .dead, .immediate, - .ptr_stack_offset, + .lea_frame, .register_offset, .register_pair, .register, .undef, .air_ref, - .addr_symbol, + .lea_symbol, + .reserved_frame, => unreachable, // not in memory - .load_symbol => |sym_off| .{ .addr_symbol = sym_off }, + .load_symbol => |sym_off| .{ .lea_symbol = sym_off }, .memory => |addr| .{ .immediate = addr }, - .stack_offset => |off| .{ .ptr_stack_offset = off }, + .load_frame => |off| .{ .lea_frame = off }, .indirect => |reg_off| switch (reg_off.off) { 0 => .{ .register = reg_off.reg }, else => .{ .register_offset = reg_off }, @@ -216,16 +228,17 @@ const MCValue = union(enum) { .indirect, .undef, .air_ref, - .stack_offset, + .load_frame, .register_pair, .load_symbol, + .reserved_frame, => unreachable, // not a pointer .immediate => |addr| .{ .memory = addr }, - .ptr_stack_offset => |off| .{ .stack_offset = off }, + .lea_frame => |off| .{ .load_frame = off }, .register => |reg| .{ .indirect = .{ .reg = reg } }, .register_offset => |reg_off| .{ .indirect = reg_off }, - .addr_symbol => |sym_off| .{ .load_symbol = sym_off }, + .lea_symbol => |sym_off| .{ .load_symbol = sym_off }, }; } @@ -236,13 +249,14 @@ const MCValue = union(enum) { .dead, .undef, .air_ref, + .reserved_frame, => unreachable, // not valid .register_pair, .memory, .indirect, - .stack_offset, + .load_frame, .load_symbol, - .addr_symbol, + .lea_symbol, => switch (off) { 0 => mcv, else => unreachable, // not offsettable @@ -250,7 +264,26 @@ const MCValue = union(enum) { .immediate => |imm| .{ .immediate = @bitCast(@as(i64, @bitCast(imm)) +% off) }, .register => |reg| .{ .register_offset = .{ .reg = 
reg, .off = off } }, .register_offset => |reg_off| .{ .register_offset = .{ .reg = reg_off.reg, .off = reg_off.off + off } }, - .ptr_stack_offset => |stack_off| .{ .ptr_stack_offset = @intCast(@as(i64, @intCast(stack_off)) +% off) }, + .lea_frame => |frame_addr| .{ + .lea_frame = .{ .index = frame_addr.index, .off = frame_addr.off + off }, + }, + }; + } + + fn getReg(mcv: MCValue) ?Register { + return switch (mcv) { + .register => |reg| reg, + .register_offset, .indirect => |ro| ro.reg, + else => null, + }; + } + + fn getRegs(mcv: *const MCValue) []const Register { + return switch (mcv.*) { + .register => |*reg| @as(*const [1]Register, reg), + .register_pair => |*regs| regs, + .register_offset, .indirect => |*ro| @as(*const [1]Register, &ro.reg), + else => &.{}, }; } }; @@ -264,6 +297,265 @@ const Branch = struct { } }; +const InstTrackingMap = std.AutoArrayHashMapUnmanaged(Air.Inst.Index, InstTracking); +const ConstTrackingMap = std.AutoArrayHashMapUnmanaged(InternPool.Index, InstTracking); +const InstTracking = struct { + long: MCValue, + short: MCValue, + + fn init(result: MCValue) InstTracking { + return .{ .long = switch (result) { + .none, + .unreach, + .undef, + .immediate, + .memory, + .load_frame, + .lea_frame, + .load_symbol, + .lea_symbol, + => result, + .dead, + .reserved_frame, + .air_ref, + => unreachable, + .register, + .register_pair, + .register_offset, + .indirect, + => .none, + }, .short = result }; + } + + fn getReg(self: InstTracking) ?Register { + return self.short.getReg(); + } + + fn getRegs(self: *const InstTracking) []const Register { + return self.short.getRegs(); + } + + fn spill(self: *InstTracking, function: *Self, inst: Air.Inst.Index) !void { + if (std.meta.eql(self.long, self.short)) return; // Already spilled + // Allocate or reuse frame index + switch (self.long) { + .none => self.long = try function.allocRegOrMem(inst, false), + .load_frame => {}, + .reserved_frame => |index| self.long = .{ .load_frame = .{ .index = index } }, + else => unreachable, + } + tracking_log.debug("spill %{d} from {} to {}", .{ inst, self.short, self.long }); + try function.genCopy(function.typeOfIndex(inst), self.long, self.short); + } + + fn reuseFrame(self: *InstTracking) void { + switch (self.long) { + .reserved_frame => |index| self.long = .{ .load_frame = .{ .index = index } }, + else => {}, + } + self.short = switch (self.long) { + .none, + .unreach, + .undef, + .immediate, + .memory, + .load_frame, + .lea_frame, + .load_symbol, + .lea_symbol, + => self.long, + .dead, + .register, + .register_pair, + .register_offset, + .indirect, + .reserved_frame, + .air_ref, + => unreachable, + }; + } + + fn trackSpill(self: *InstTracking, function: *Self, inst: Air.Inst.Index) !void { + try function.freeValue(self.short); + self.reuseFrame(); + tracking_log.debug("%{d} => {} (spilled)", .{ inst, self.* }); + } + + fn verifyMaterialize(self: InstTracking, target: InstTracking) void { + switch (self.long) { + .none, + .unreach, + .undef, + .immediate, + .memory, + .lea_frame, + .load_symbol, + .lea_symbol, + => assert(std.meta.eql(self.long, target.long)), + .load_frame, + .reserved_frame, + => switch (target.long) { + .none, + .load_frame, + .reserved_frame, + => {}, + else => unreachable, + }, + .dead, + .register, + .register_pair, + .register_offset, + .indirect, + .air_ref, + => unreachable, + } + } + + fn materialize( + self: *InstTracking, + function: *Self, + inst: Air.Inst.Index, + target: InstTracking, + ) !void { + self.verifyMaterialize(target); + try 
self.materializeUnsafe(function, inst, target); + } + + fn materializeUnsafe( + self: InstTracking, + function: *Self, + inst: Air.Inst.Index, + target: InstTracking, + ) !void { + const ty = function.typeOfIndex(inst); + if ((self.long == .none or self.long == .reserved_frame) and target.long == .load_frame) + try function.genCopy(ty, target.long, self.short); + try function.genCopy(ty, target.short, self.short); + } + + fn trackMaterialize(self: *InstTracking, inst: Air.Inst.Index, target: InstTracking) void { + self.verifyMaterialize(target); + // Don't clobber reserved frame indices + self.long = if (target.long == .none) switch (self.long) { + .load_frame => |addr| .{ .reserved_frame = addr.index }, + .reserved_frame => self.long, + else => target.long, + } else target.long; + self.short = target.short; + tracking_log.debug("%{d} => {} (materialize)", .{ inst, self.* }); + } + + fn resurrect(self: *InstTracking, inst: Air.Inst.Index, scope_generation: u32) void { + switch (self.short) { + .dead => |die_generation| if (die_generation >= scope_generation) { + self.reuseFrame(); + tracking_log.debug("%{d} => {} (resurrect)", .{ inst, self.* }); + }, + else => {}, + } + } + + fn die(self: *InstTracking, function: *Self, inst: Air.Inst.Index) !void { + if (self.short == .dead) return; + try function.freeValue(self.short); + self.short = .{ .dead = function.scope_generation }; + tracking_log.debug("%{d} => {} (death)", .{ inst, self.* }); + } + + fn reuse( + self: *InstTracking, + function: *Self, + new_inst: ?Air.Inst.Index, + old_inst: Air.Inst.Index, + ) void { + self.short = .{ .dead = function.scope_generation }; + if (new_inst) |inst| + tracking_log.debug("%{d} => {} (reuse %{d})", .{ inst, self.*, old_inst }) + else + tracking_log.debug("tmp => {} (reuse %{d})", .{ self.*, old_inst }); + } + + fn liveOut(self: *InstTracking, function: *Self, inst: Air.Inst.Index) void { + for (self.getRegs()) |reg| { + if (function.register_manager.isRegFree(reg)) { + tracking_log.debug("%{d} => {} (live-out)", .{ inst, self.* }); + continue; + } + + const index = RegisterManager.indexOfRegIntoTracked(reg).?; + const tracked_inst = function.register_manager.registers[index]; + const tracking = function.getResolvedInstValue(tracked_inst); + + // Disable death. + var found_reg = false; + var remaining_reg: Register = .zero; + for (tracking.getRegs()) |tracked_reg| if (tracked_reg.id() == reg.id()) { + assert(!found_reg); + found_reg = true; + } else { + assert(remaining_reg == .zero); + remaining_reg = tracked_reg; + }; + assert(found_reg); + tracking.short = switch (remaining_reg) { + .zero => .{ .dead = function.scope_generation }, + else => .{ .register = remaining_reg }, + }; + + // Perform side-effects of freeValue manually. 
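+            // (freeReg here mirrors what freeValue would do for this one register; the
+            // tracking update above already removed `reg` from the value's registers.)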
+ function.register_manager.freeReg(reg); + + tracking_log.debug("%{d} => {} (live-out %{d})", .{ inst, self.*, tracked_inst }); + } + } + + pub fn format( + self: InstTracking, + comptime _: []const u8, + _: std.fmt.FormatOptions, + writer: anytype, + ) @TypeOf(writer).Error!void { + if (!std.meta.eql(self.long, self.short)) try writer.print("|{}| ", .{self.long}); + try writer.print("{}", .{self.short}); + } +}; + +const FrameAlloc = struct { + abi_size: u31, + spill_pad: u3, + abi_align: Alignment, + ref_count: u16, + + fn init(alloc_abi: struct { size: u64, pad: u3 = 0, alignment: Alignment }) FrameAlloc { + return .{ + .abi_size = @intCast(alloc_abi.size), + .spill_pad = alloc_abi.pad, + .abi_align = alloc_abi.alignment, + .ref_count = 0, + }; + } + fn initType(ty: Type, zcu: *Module) FrameAlloc { + return init(.{ + .size = ty.abiSize(zcu), + .alignment = ty.abiAlignment(zcu), + }); + } + fn initSpill(ty: Type, zcu: *Module) FrameAlloc { + const abi_size = ty.abiSize(zcu); + const spill_size = if (abi_size < 8) + math.ceilPowerOfTwoAssert(u64, abi_size) + else + std.mem.alignForward(u64, abi_size, 8); + return init(.{ + .size = spill_size, + .pad = @intCast(spill_size - abi_size), + .alignment = ty.abiAlignment(zcu).maxStrict( + Alignment.fromNonzeroByteUnits(@min(spill_size, 8)), + ), + }); + } +}; + const StackAllocation = struct { inst: ?Air.Inst.Index, /// TODO: make the size inferred from the bits of the inst @@ -271,36 +563,127 @@ const StackAllocation = struct { }; const BlockData = struct { - relocs: std.ArrayListUnmanaged(Mir.Inst.Index), - /// The first break instruction encounters `null` here and chooses a - /// machine code value for the block result, populating this field. - /// Following break instructions encounter that value and use it for - /// the location to store their block results. 
- mcv: MCValue, + relocs: std.ArrayListUnmanaged(Mir.Inst.Index) = .{}, + state: State, + + fn deinit(self: *BlockData, gpa: Allocator) void { + self.relocs.deinit(gpa); + self.* = undefined; + } }; -const BigTomb = struct { - function: *Self, - inst: Air.Inst.Index, - lbt: Liveness.BigTomb, +const State = struct { + registers: RegisterManager.TrackedRegisters, + reg_tracking: [RegisterManager.RegisterBitSet.bit_length]InstTracking, + free_registers: RegisterManager.RegisterBitSet, + inst_tracking_len: u32, + scope_generation: u32, +}; - fn feed(bt: *BigTomb, op_ref: Air.Inst.Ref) void { - const dies = bt.lbt.feed(); - const op_index = op_ref.toIndex() orelse return; - if (!dies) return; - bt.function.processDeath(op_index); +fn initRetroactiveState(self: *Self) State { + var state: State = undefined; + state.inst_tracking_len = @intCast(self.inst_tracking.count()); + state.scope_generation = self.scope_generation; + return state; +} + +fn saveRetroactiveState(self: *Self, state: *State) !void { + const free_registers = self.register_manager.free_registers; + var it = free_registers.iterator(.{ .kind = .unset }); + while (it.next()) |index| { + const tracked_inst = self.register_manager.registers[index]; + state.registers[index] = tracked_inst; + state.reg_tracking[index] = self.inst_tracking.get(tracked_inst).?; + } + state.free_registers = free_registers; +} + +fn saveState(self: *Self) !State { + var state = self.initRetroactiveState(); + try self.saveRetroactiveState(&state); + return state; +} + +fn restoreState(self: *Self, state: State, deaths: []const Air.Inst.Index, comptime opts: struct { + emit_instructions: bool, + update_tracking: bool, + resurrect: bool, + close_scope: bool, +}) !void { + if (opts.close_scope) { + for ( + self.inst_tracking.keys()[state.inst_tracking_len..], + self.inst_tracking.values()[state.inst_tracking_len..], + ) |inst, *tracking| try tracking.die(self, inst); + self.inst_tracking.shrinkRetainingCapacity(state.inst_tracking_len); } - fn finishAir(bt: *BigTomb, result: MCValue) void { - const is_used = !bt.function.liveness.isUnused(bt.inst); - if (is_used) { - log.debug("%{d} => {}", .{ bt.inst, result }); - const branch = &bt.function.branch_stack.items[bt.function.branch_stack.items.len - 1]; - branch.inst_table.putAssumeCapacityNoClobber(bt.inst, result); + if (opts.resurrect) for ( + self.inst_tracking.keys()[0..state.inst_tracking_len], + self.inst_tracking.values()[0..state.inst_tracking_len], + ) |inst, *tracking| tracking.resurrect(inst, state.scope_generation); + for (deaths) |death| try self.processDeath(death); + + const ExpectedContents = [@typeInfo(RegisterManager.TrackedRegisters).Array.len]RegisterLock; + var stack align(@max(@alignOf(ExpectedContents), @alignOf(std.heap.StackFallbackAllocator(0)))) = + if (opts.update_tracking) + {} else std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa); + + var reg_locks = if (opts.update_tracking) {} else try std.ArrayList(RegisterLock).initCapacity( + stack.get(), + @typeInfo(ExpectedContents).Array.len, + ); + defer if (!opts.update_tracking) { + for (reg_locks.items) |lock| self.register_manager.unlockReg(lock); + reg_locks.deinit(); + }; + + for (0..state.registers.len) |index| { + const current_maybe_inst = if (self.register_manager.free_registers.isSet(index)) + null + else + self.register_manager.registers[index]; + const target_maybe_inst = if (state.free_registers.isSet(index)) + null + else + state.registers[index]; + if (std.debug.runtime_safety) if (target_maybe_inst) 
|target_inst| + assert(self.inst_tracking.getIndex(target_inst).? < state.inst_tracking_len); + if (opts.emit_instructions) { + if (current_maybe_inst) |current_inst| { + try self.inst_tracking.getPtr(current_inst).?.spill(self, current_inst); + } + if (target_maybe_inst) |target_inst| { + const target_tracking = self.inst_tracking.getPtr(target_inst).?; + try target_tracking.materialize(self, target_inst, state.reg_tracking[index]); + } } - bt.function.finishAirBookkeeping(); + if (opts.update_tracking) { + if (current_maybe_inst) |current_inst| { + try self.inst_tracking.getPtr(current_inst).?.trackSpill(self, current_inst); + } + { + const reg = RegisterManager.regAtTrackedIndex(@intCast(index)); + self.register_manager.freeReg(reg); + self.register_manager.getRegAssumeFree(reg, target_maybe_inst); + } + if (target_maybe_inst) |target_inst| { + self.inst_tracking.getPtr(target_inst).?.trackMaterialize( + target_inst, + state.reg_tracking[index], + ); + } + } else if (target_maybe_inst) |_| + try reg_locks.append(self.register_manager.lockRegIndexAssumeUnused(@intCast(index))); } -}; + + if (opts.update_tracking and std.debug.runtime_safety) { + assert(self.register_manager.free_registers.eql(state.free_registers)); + var used_reg_it = state.free_registers.iterator(.{ .kind = .unset }); + while (used_reg_it.next()) |index| + assert(self.register_manager.registers[index] == state.registers[index]); + } +} const Self = @This(); @@ -310,7 +693,7 @@ const CallView = enum(u1) { }; pub fn generate( - lf: *link.File, + bin_file: *link.File, src_loc: Module.SrcLoc, func_index: InternPool.Index, air: Air, @@ -318,14 +701,17 @@ pub fn generate( code: *std.ArrayList(u8), debug_output: DebugInfoOutput, ) CodeGenError!Result { - const gpa = lf.comp.gpa; - const zcu = lf.comp.module.?; + const comp = bin_file.comp; + const gpa = comp.gpa; + const zcu = comp.module.?; + const ip = &zcu.intern_pool; const func = zcu.funcInfo(func_index); const fn_owner_decl = zcu.declPtr(func.owner_decl); assert(fn_owner_decl.has_tv); const fn_type = fn_owner_decl.typeOf(zcu); const namespace = zcu.namespacePtr(fn_owner_decl.src_namespace); const target = &namespace.file_scope.mod.resolved_target.result; + const mod = namespace.file_scope.mod; var branch_stack = std.ArrayList(Branch).init(gpa); defer { @@ -340,7 +726,7 @@ pub fn generate( .air = air, .liveness = liveness, .target = target, - .bin_file = lf, + .bin_file = bin_file, .func_index = func_index, .code = code, .debug_output = debug_output, @@ -351,15 +737,39 @@ pub fn generate( .arg_index = 0, .branch_stack = &branch_stack, .src_loc = src_loc, - .stack_align = undefined, .end_di_line = func.rbrace_line, .end_di_column = func.rbrace_column, + .scope_generation = 0, }; - defer function.stack.deinit(gpa); - defer function.blocks.deinit(gpa); - defer function.exitlude_jump_relocs.deinit(gpa); + defer { + function.frame_allocs.deinit(gpa); + function.free_frame_indices.deinit(gpa); + function.frame_locs.deinit(gpa); + var block_it = function.blocks.valueIterator(); + while (block_it.next()) |block| block.deinit(gpa); + function.blocks.deinit(gpa); + function.inst_tracking.deinit(gpa); + function.const_tracking.deinit(gpa); + function.exitlude_jump_relocs.deinit(gpa); + function.mir_instructions.deinit(gpa); + function.mir_extra.deinit(gpa); + } - var call_info = function.resolveCallingConventionValues(fn_type, .callee) catch |err| switch (err) { + try function.frame_allocs.resize(gpa, FrameIndex.named_count); + function.frame_allocs.set( + 
@intFromEnum(FrameIndex.stack_frame), + FrameAlloc.init(.{ + .size = 0, + .alignment = func.analysis(ip).stack_alignment.max(.@"1"), + }), + ); + function.frame_allocs.set( + @intFromEnum(FrameIndex.call_frame), + FrameAlloc.init(.{ .size = 0, .alignment = .@"1" }), + ); + + const fn_info = zcu.typeToFunc(fn_type).?; + var call_info = function.resolveCallingConventionValues(fn_info) catch |err| switch (err) { error.CodegenFail => return Result{ .fail = function.err_msg.? }, error.OutOfRegisters => return Result{ .fail = try ErrorMsg.create(gpa, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}), @@ -371,8 +781,25 @@ pub fn generate( function.args = call_info.args; function.ret_mcv = call_info.return_value; - function.stack_align = call_info.stack_align; - function.max_end_stack = call_info.stack_byte_count; + function.frame_allocs.set(@intFromEnum(FrameIndex.ret_addr), FrameAlloc.init(.{ + .size = Type.usize.abiSize(zcu), + .alignment = Type.usize.abiAlignment(zcu).min(call_info.stack_align), + })); + function.frame_allocs.set(@intFromEnum(FrameIndex.base_ptr), FrameAlloc.init(.{ + .size = Type.usize.abiSize(zcu), + .alignment = Alignment.min( + call_info.stack_align, + Alignment.fromNonzeroByteUnits(function.target.stackAlignment()), + ), + })); + function.frame_allocs.set(@intFromEnum(FrameIndex.args_frame), FrameAlloc.init(.{ + .size = call_info.stack_byte_count, + .alignment = call_info.stack_align, + })); + function.frame_allocs.set(@intFromEnum(FrameIndex.spill_frame), FrameAlloc.init(.{ + .size = 0, + .alignment = Type.usize.abiAlignment(zcu), + })); function.gen() catch |err| switch (err) { error.CodegenFail => return Result{ .fail = function.err_msg.? }, @@ -382,41 +809,47 @@ pub fn generate( else => |e| return e, }; - // Create list of registers to save in the prologue. - var save_reg_list = Mir.RegisterList{}; - for (callee_preserved_regs) |reg| { - if (function.register_manager.isRegAllocated(reg)) { - save_reg_list.push(&callee_preserved_regs, reg); - } - } - var mir = Mir{ .instructions = function.mir_instructions.toOwnedSlice(), .extra = try function.mir_extra.toOwnedSlice(gpa), + .frame_locs = function.frame_locs.toOwnedSlice(), }; defer mir.deinit(gpa); var emit = Emit{ - .mir = mir, - .bin_file = lf, + .lower = .{ + .bin_file = bin_file, + .allocator = gpa, + .mir = mir, + .cc = fn_info.cc, + .src_loc = src_loc, + .output_mode = comp.config.output_mode, + .link_mode = comp.config.link_mode, + .pic = mod.pic, + }, .debug_output = debug_output, - .target = target, - .src_loc = src_loc, .code = code, .prev_di_pc = 0, .prev_di_line = func.lbrace_line, .prev_di_column = func.lbrace_column, - .code_offset_mapping = .{}, - // need to at least decrease the sp by -8 - .stack_size = @max(8, mem.alignForward(u32, function.max_end_stack, 16)), - .save_reg_list = save_reg_list, - .output_mode = lf.comp.config.output_mode, - .link_mode = lf.comp.config.link_mode, }; defer emit.deinit(); emit.emitMir() catch |err| switch (err) { - error.EmitFail => return Result{ .fail = emit.err_msg.? }, + error.LowerFail, error.EmitFail => return Result{ .fail = emit.lower.err_msg.? 
}, + error.InvalidInstruction => |e| { + const msg = switch (e) { + error.InvalidInstruction => "CodeGen failed to find a viable instruction.", + }; + return Result{ + .fail = try ErrorMsg.create( + gpa, + src_loc, + "{s} This is a bug in the Zig compiler.", + .{msg}, + ), + }; + }, else => |e| return e, }; @@ -438,9 +871,26 @@ fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index { } fn addNop(self: *Self) error{OutOfMemory}!Mir.Inst.Index { - return try self.addInst(.{ + return self.addInst(.{ .tag = .nop, - .data = .{ .nop = {} }, + .ops = .none, + .data = undefined, + }); +} + +fn addPseudoNone(self: *Self, ops: Mir.Inst.Ops) !void { + _ = try self.addInst(.{ + .tag = .pseudo, + .ops = ops, + .data = undefined, + }); +} + +fn addPseudo(self: *Self, ops: Mir.Inst.Ops) !Mir.Inst.Index { + return self.addInst(.{ + .tag = .pseudo, + .ops = ops, + .data = undefined, }); } @@ -464,22 +914,132 @@ pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 { } fn gen(self: *Self) !void { - _ = try self.addInst(.{ - .tag = .psuedo_prologue, - .data = .{ .nop = {} }, // Backpatched later. - }); + const mod = self.bin_file.comp.module.?; + const fn_info = mod.typeToFunc(self.fn_type).?; - _ = try self.addInst(.{ - .tag = .dbg_prologue_end, - .data = .{ .nop = {} }, - }); + if (fn_info.cc != .Naked) { + try self.addPseudoNone(.pseudo_dbg_prologue_end); - try self.genBody(self.air.getMainBody()); + const backpatch_stack_alloc = try self.addPseudo(.pseudo_dead); + const backpatch_ra_spill = try self.addPseudo(.pseudo_dead); + const backpatch_fp_spill = try self.addPseudo(.pseudo_dead); + const backpatch_fp_add = try self.addPseudo(.pseudo_dead); + const backpatch_spill_callee_preserved_regs = try self.addPseudo(.pseudo_dead); + + try self.genBody(self.air.getMainBody()); + + for (self.exitlude_jump_relocs.items) |jmp_reloc| { + self.mir_instructions.items(.data)[jmp_reloc].inst = + @intCast(self.mir_instructions.len); + } + + try self.addPseudoNone(.pseudo_dbg_epilogue_begin); + + const backpatch_restore_callee_preserved_regs = try self.addPseudo(.pseudo_dead); + const backpatch_ra_restore = try self.addPseudo(.pseudo_dead); + const backpatch_fp_restore = try self.addPseudo(.pseudo_dead); + const backpatch_stack_alloc_restore = try self.addPseudo(.pseudo_dead); + try self.addPseudoNone(.pseudo_ret); + + const frame_layout = try self.computeFrameLayout(); + const need_save_reg = frame_layout.save_reg_list.count() > 0; + + self.mir_instructions.set(backpatch_stack_alloc, .{ + .tag = .addi, + .ops = .rri, + .data = .{ .i_type = .{ + .rd = .sp, + .rs1 = .sp, + .imm12 = Immediate.s(-@as(i32, @intCast(frame_layout.stack_adjust))), + } }, + }); + self.mir_instructions.set(backpatch_ra_spill, .{ + .tag = .pseudo, + .ops = .pseudo_store_rm, + .data = .{ .rm = .{ + .r = .ra, + .m = .{ + .base = .{ .frame = .ret_addr }, + .mod = .{ .rm = .{ .size = .dword } }, + }, + } }, + }); + self.mir_instructions.set(backpatch_ra_restore, .{ + .tag = .pseudo, + .ops = .pseudo_load_rm, + .data = .{ .rm = .{ + .r = .ra, + .m = .{ + .base = .{ .frame = .ret_addr }, + .mod = .{ .rm = .{ .size = .dword } }, + }, + } }, + }); + self.mir_instructions.set(backpatch_fp_spill, .{ + .tag = .pseudo, + .ops = .pseudo_store_rm, + .data = .{ .rm = .{ + .r = .s0, + .m = .{ + .base = .{ .frame = .base_ptr }, + .mod = .{ .rm = .{ .size = .dword } }, + }, + } }, + }); + self.mir_instructions.set(backpatch_fp_restore, .{ + .tag = .pseudo, + .ops = .pseudo_load_rm, + .data = .{ .rm = .{ + .r = .s0, + .m = .{ + .base = 
.{ .frame = .base_ptr }, + .mod = .{ .rm = .{ .size = .dword } }, + }, + } }, + }); + self.mir_instructions.set(backpatch_fp_add, .{ + .tag = .addi, + .ops = .rri, + .data = .{ .i_type = .{ + .rd = .s0, + .rs1 = .sp, + .imm12 = Immediate.s(@intCast(frame_layout.stack_adjust)), + } }, + }); + self.mir_instructions.set(backpatch_stack_alloc_restore, .{ + .tag = .addi, + .ops = .rri, + .data = .{ .i_type = .{ + .rd = .sp, + .rs1 = .sp, + .imm12 = Immediate.s(@intCast(frame_layout.stack_adjust)), + } }, + }); + + if (need_save_reg) { + self.mir_instructions.set(backpatch_spill_callee_preserved_regs, .{ + .tag = .pseudo, + .ops = .pseudo_spill_regs, + .data = .{ .reg_list = frame_layout.save_reg_list }, + }); + + self.mir_instructions.set(backpatch_restore_callee_preserved_regs, .{ + .tag = .pseudo, + .ops = .pseudo_restore_regs, + .data = .{ .reg_list = frame_layout.save_reg_list }, + }); + } + } else { + try self.addPseudoNone(.pseudo_dbg_prologue_end); + try self.genBody(self.air.getMainBody()); + try self.addPseudoNone(.pseudo_dbg_epilogue_begin); + } // Drop them off at the rbrace. _ = try self.addInst(.{ - .tag = .dbg_line, - .data = .{ .dbg_line_column = .{ + .tag = .pseudo, + .ops = .pseudo_dbg_line_column, + .data = .{ .pseudo_dbg_line_column = .{ .line = self.end_di_line, .column = self.end_di_column, } }, @@ -487,18 +1047,15 @@ fn gen(self: *Self) !void { } fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { - const mod = self.bin_file.comp.module.?; - const ip = &mod.intern_pool; + const zcu = self.bin_file.comp.module.?; + const ip = &zcu.intern_pool; const air_tags = self.air.instructions.items(.tag); for (body) |inst| { - // TODO: remove now-redundant isUnused calls from AIR handler functions - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) - continue; + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) continue; const old_air_bookkeeping = self.air_bookkeeping; - try self.ensureProcessDeathCapacity(Liveness.bpi); - + try self.inst_tracking.ensureUnusedCapacity(self.gpa, 1); switch (air_tags[@intFromEnum(inst)]) { // zig fmt: off .ptr_add => try self.airPtrArithmetic(inst, .ptr_add), @@ -731,30 +1288,58 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .work_group_id => unreachable, // zig fmt: on } + + assert(!self.register_manager.lockedRegsExist()); + if (std.debug.runtime_safety) { if (self.air_bookkeeping < old_air_bookkeeping + 1) { std.debug.panic("in codegen.zig, handling of AIR instruction %{d} ('{}') did not do proper bookkeeping. Look for a missing call to finishAir.", .{ inst, air_tags[@intFromEnum(inst)] }); } + + { // check consistency of tracked registers + var it = self.register_manager.free_registers.iterator(.{ .kind = .unset }); + while (it.next()) |index| { + const tracked_inst = self.register_manager.registers[index]; + const tracking = self.getResolvedInstValue(tracked_inst); + for (tracking.getRegs()) |reg| { + if (RegisterManager.indexOfRegIntoTracked(reg).? 
== index) break;
+                    } else return self.fail(
+                        \\%{} takes up these regs: {any}, however those regs don't track it
+                    , .{ index, tracking.getRegs() });
+                }
+            }
        }
    }
}

+fn getValue(self: *Self, value: MCValue, inst: ?Air.Inst.Index) !void {
+    for (value.getRegs()) |reg| try self.register_manager.getReg(reg, inst);
+}
+
+fn getValueIfFree(self: *Self, value: MCValue, inst: ?Air.Inst.Index) void {
+    for (value.getRegs()) |reg| if (self.register_manager.isRegFree(reg))
+        self.register_manager.getRegAssumeFree(reg, inst);
+}
+
+fn freeValue(self: *Self, value: MCValue) !void {
+    switch (value) {
+        .register => |reg| self.register_manager.freeReg(reg),
+        .register_pair => |regs| for (regs) |reg| self.register_manager.freeReg(reg),
+        .register_offset => |reg_off| self.register_manager.freeReg(reg_off.reg),
+        else => {}, // TODO process stack allocation death
+    }
+}
+
fn feed(self: *Self, bt: *Liveness.BigTomb, operand: Air.Inst.Ref) !void {
-    if (bt.feed()) if (operand.toIndex()) |inst| self.processDeath(inst);
+    if (bt.feed()) if (operand.toIndex()) |inst| {
+        log.debug("feed inst: %{}", .{inst});
+        try self.processDeath(inst);
+    };
}

/// Asserts there is already capacity to insert into top branch inst_table.
-fn processDeath(self: *Self, inst: Air.Inst.Index) void {
-    // When editing this function, note that the logic must synchronize with `reuseOperand`.
-    const prev_value = self.getResolvedInstValue(inst);
-    const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
-    branch.inst_table.putAssumeCapacity(inst, .dead);
-    switch (prev_value) {
-        .register => |reg| {
-            self.register_manager.freeReg(reg);
-        },
-        else => {}, // TODO process stack allocation death by freeing it to be reused later
-    }
+fn processDeath(self: *Self, inst: Air.Inst.Index) !void {
+    try self.inst_tracking.getPtr(inst).?.die(self, inst);
}

/// Called when there are no operands, and the instruction is always unreferenced.
@@ -769,23 +1354,12 @@ fn finishAirResult(self: *Self, inst: Air.Inst.Index, result: MCValue) void {
        .none, .dead, .unreach => {},
        else => unreachable, // Why didn't the result die?
    } else {
-        log.debug("%{d} => {}", .{ inst, result });
-        const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
-        branch.inst_table.putAssumeCapacityNoClobber(inst, result);
-
-        switch (result) {
-            .register => |reg| {
-                // In some cases (such as bitcast), an operand
-                // may be the same MCValue as the result. If
-                // that operand died and was a register, it
-                // was freed by processDeath. We have to
-                // "re-allocate" the register.
-                if (self.register_manager.isRegFree(reg)) {
-                    self.register_manager.getRegAssumeFree(reg, inst);
-                }
-            },
-            else => {},
-        }
+        tracking_log.debug("%{d} => {} (birth)", .{ inst, result });
+        self.inst_tracking.putAssumeCapacityNoClobber(inst, InstTracking.init(result));
+        // In some cases, an operand may be reused as the result.
+        // If that operand died and was a register, it was freed by
+        // processDeath, so we have to "re-allocate" the register.
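+        // getValueIfFree only claims registers that are currently marked free, so this
+        // is a no-op when the result's registers are still allocated.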
+        self.getValueIfFree(result, inst);
    }
    self.finishAirBookkeeping();
}

@@ -801,43 +1375,153 @@ fn finishAir(
        const dies = @as(u1, @truncate(tomb_bits)) != 0;
        tomb_bits >>= 1;
        if (!dies) continue;
-        self.processDeath(op.toIndexAllowNone() orelse continue);
+        try self.processDeath(op.toIndexAllowNone() orelse continue);
    }
    self.finishAirResult(inst, result);
}

+const FrameLayout = struct {
+    stack_adjust: u32,
+    save_reg_list: Mir.RegisterList,
+};
+
+fn setFrameLoc(
+    self: *Self,
+    frame_index: FrameIndex,
+    base: Register,
+    offset: *i32,
+    comptime aligned: bool,
+) void {
+    const frame_i = @intFromEnum(frame_index);
+    if (aligned) {
+        const alignment: InternPool.Alignment = self.frame_allocs.items(.abi_align)[frame_i];
+        offset.* = if (math.sign(offset.*) < 0)
+            -1 * @as(i32, @intCast(alignment.backward(@intCast(@abs(offset.*)))))
+        else
+            @intCast(alignment.forward(@intCast(@abs(offset.*))));
+    }
+    self.frame_locs.set(frame_i, .{ .base = base, .disp = offset.* });
+    offset.* += self.frame_allocs.items(.abi_size)[frame_i];
+}
+
+fn computeFrameLayout(self: *Self) !FrameLayout {
+    const frame_allocs_len = self.frame_allocs.len;
+    try self.frame_locs.resize(self.gpa, frame_allocs_len);
+    const stack_frame_order = try self.gpa.alloc(FrameIndex, frame_allocs_len - FrameIndex.named_count);
+    defer self.gpa.free(stack_frame_order);
+
+    const frame_size = self.frame_allocs.items(.abi_size);
+    const frame_align = self.frame_allocs.items(.abi_align);
+
+    for (stack_frame_order, FrameIndex.named_count..) |*frame_order, frame_index|
+        frame_order.* = @enumFromInt(frame_index);
+
+    {
+        const SortContext = struct {
+            frame_align: @TypeOf(frame_align),
+            pub fn lessThan(context: @This(), lhs: FrameIndex, rhs: FrameIndex) bool {
+                return context.frame_align[@intFromEnum(lhs)].compare(.gt, context.frame_align[@intFromEnum(rhs)]);
+            }
+        };
+        const sort_context = SortContext{ .frame_align = frame_align };
+        mem.sort(FrameIndex, stack_frame_order, sort_context, SortContext.lessThan);
+    }
+
+    var save_reg_list = Mir.RegisterList{};
+    for (callee_preserved_regs) |reg| {
+        if (self.register_manager.isRegAllocated(reg)) {
+            save_reg_list.push(&callee_preserved_regs, reg);
+        }
+    }
+
+    const total_alloc_size: i32 = blk: {
+        var i: i32 = 0;
+        for (stack_frame_order) |frame_index| {
+            i += frame_size[@intFromEnum(frame_index)];
+        }
+        break :blk i;
+    };
+    const saved_reg_size = save_reg_list.size();
+
+    frame_size[@intFromEnum(FrameIndex.spill_frame)] = @intCast(saved_reg_size);
+
+    // The total frame size is the sum of the saved callee-preserved registers (8 bytes each,
+    // accounted for in the spill frame), the other frame allocations, the args frame, and 16
+    // more bytes for the spilled ra and s0 registers. Finally we align the frame size to the
+    // alignment of the base pointer.
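+    // For example (hypothetical sizes): 24 bytes of sorted allocations, an empty args
+    // frame, and two saved s-registers (a 16-byte spill frame) give
+    // 24 (allocs) + 16 (ra/s0) + 0 (args) + 16 (spill) = 56 bytes, which a 16-byte
+    // base pointer alignment rounds up to 64.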
+ const acc_frame_size: i32 = std.mem.alignForward( + i32, + total_alloc_size + 16 + frame_size[@intFromEnum(FrameIndex.args_frame)] + frame_size[@intFromEnum(FrameIndex.spill_frame)], + @intCast(frame_align[@intFromEnum(FrameIndex.base_ptr)].toByteUnits().?), + ); + log.debug("frame size: {}", .{acc_frame_size}); + + // store the ra at total_size - 8, so it's the very first thing in the stack + // relative to the fp + self.frame_locs.set( + @intFromEnum(FrameIndex.ret_addr), + .{ .base = .sp, .disp = acc_frame_size - 8 }, + ); + self.frame_locs.set( + @intFromEnum(FrameIndex.base_ptr), + .{ .base = .sp, .disp = acc_frame_size - 16 }, + ); + + // now we grow the stack frame from the bottom of total frame in order to + // not need to know the size of the first allocation. Stack offsets point at the "bottom" + // of variables. + var s0_offset: i32 = -acc_frame_size; + self.setFrameLoc(.stack_frame, .s0, &s0_offset, true); + for (stack_frame_order) |frame_index| self.setFrameLoc(frame_index, .s0, &s0_offset, true); + self.setFrameLoc(.args_frame, .s0, &s0_offset, true); + self.setFrameLoc(.call_frame, .s0, &s0_offset, true); + self.setFrameLoc(.spill_frame, .s0, &s0_offset, true); + + return .{ + .stack_adjust = @intCast(acc_frame_size), + .save_reg_list = save_reg_list, + }; +} + fn ensureProcessDeathCapacity(self: *Self, additional_count: usize) !void { const table = &self.branch_stack.items[self.branch_stack.items.len - 1].inst_table; try table.ensureUnusedCapacity(self.gpa, additional_count); } -fn splitType(self: *Self, ty: Type) ![2]Type { +fn memSize(self: *Self, ty: Type) Memory.Size { const mod = self.bin_file.comp.module.?; - const classes = mem.sliceTo(&abi.classifySystemV(ty, mod), .none); + return switch (ty.zigTypeTag(mod)) { + .Float => Memory.Size.fromBitSize(ty.floatBits(self.target.*)), + else => Memory.Size.fromSize(@intCast(ty.abiSize(mod))), + }; +} + +fn splitType(self: *Self, ty: Type) ![2]Type { + const zcu = self.bin_file.comp.module.?; + const classes = mem.sliceTo(&abi.classifySystem(ty, zcu), .none); var parts: [2]Type = undefined; if (classes.len == 2) for (&parts, classes, 0..) 
|*part, class, part_i| { part.* = switch (class) { .integer => switch (part_i) { 0 => Type.u64, 1 => part: { - const elem_size = ty.abiAlignment(mod).minStrict(.@"8").toByteUnitsOptional().?; - const elem_ty = try mod.intType(.unsigned, @intCast(elem_size * 8)); - break :part switch (@divExact(ty.abiSize(mod) - 8, elem_size)) { + const elem_size = ty.abiAlignment(zcu).minStrict(.@"8").toByteUnits().?; + const elem_ty = try zcu.intType(.unsigned, @intCast(elem_size * 8)); + break :part switch (@divExact(ty.abiSize(zcu) - 8, elem_size)) { 1 => elem_ty, - else => |len| try mod.arrayType(.{ .len = len, .child = elem_ty.toIntern() }), + else => |len| try zcu.arrayType(.{ .len = len, .child = elem_ty.toIntern() }), }; }, else => unreachable, }, else => break, }; - } else if (parts[0].abiSize(mod) + parts[1].abiSize(mod) == ty.abiSize(mod)) return parts; - return self.fail("TODO implement splitType for {}", .{ty.fmt(mod)}); + } else if (parts[0].abiSize(zcu) + parts[1].abiSize(zcu) == ty.abiSize(zcu)) return parts; + return self.fail("TODO implement splitType for {}", .{ty.fmt(zcu)}); } fn symbolIndex(self: *Self) !u32 { - const mod = self.bin_file.comp.module.?; - const decl_index = mod.funcOwnerDeclIndex(self.func_index); + const zcu = self.bin_file.comp.module.?; + const decl_index = zcu.funcOwnerDeclIndex(self.func_index); return switch (self.bin_file.tag) { .elf => blk: { const elf_file = self.bin_file.cast(link.File.Elf).?; @@ -848,41 +1532,49 @@ fn symbolIndex(self: *Self) !u32 { }; } -fn allocMem(self: *Self, inst: ?Air.Inst.Index, abi_size: u32, abi_align: Alignment) !u32 { - self.stack_align = self.stack_align.max(abi_align); - // TODO find a free slot instead of always appending - const offset: u32 = @intCast(abi_align.forward(self.next_stack_offset)); - self.next_stack_offset = offset + abi_size; - if (self.next_stack_offset > self.max_end_stack) - self.max_end_stack = self.next_stack_offset; - try self.stack.putNoClobber(self.gpa, offset, .{ - .inst = inst, - .size = abi_size, - }); - return offset; +fn allocFrameIndex(self: *Self, alloc: FrameAlloc) !FrameIndex { + const frame_allocs_slice = self.frame_allocs.slice(); + const frame_size = frame_allocs_slice.items(.abi_size); + const frame_align = frame_allocs_slice.items(.abi_align); + + const stack_frame_align = &frame_align[@intFromEnum(FrameIndex.stack_frame)]; + stack_frame_align.* = stack_frame_align.max(alloc.abi_align); + + for (self.free_frame_indices.keys(), 0..) |frame_index, free_i| { + const abi_size = frame_size[@intFromEnum(frame_index)]; + if (abi_size != alloc.abi_size) continue; + const abi_align = &frame_align[@intFromEnum(frame_index)]; + abi_align.* = abi_align.max(alloc.abi_align); + + _ = self.free_frame_indices.swapRemoveAt(free_i); + return frame_index; + } + const frame_index: FrameIndex = @enumFromInt(self.frame_allocs.len); + try self.frame_allocs.append(self.gpa, alloc); + log.debug("allocated frame {}", .{frame_index}); + return frame_index; } /// Use a pointer instruction as the basis for allocating stack memory. 
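+/// Returns the `FrameIndex` of a fresh frame allocation sized for the pointee type and
+/// aligned to the pointer type's alignment.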
-fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
-    const mod = self.bin_file.comp.module.?;
-    const elem_ty = self.typeOfIndex(inst).childType(mod);
-    const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse {
-        return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
-    };
-    // TODO swap this for inst.ty.ptrAlign
-    const abi_align = elem_ty.abiAlignment(mod);
-    return self.allocMem(inst, abi_size, abi_align);
+fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !FrameIndex {
+    const zcu = self.bin_file.comp.module.?;
+    const ptr_ty = self.typeOfIndex(inst);
+    const val_ty = ptr_ty.childType(zcu);
+    return self.allocFrameIndex(FrameAlloc.init(.{
+        .size = math.cast(u32, val_ty.abiSize(zcu)) orelse {
+            return self.fail("type '{}' too big to fit into stack frame", .{val_ty.fmt(zcu)});
+        },
+        .alignment = ptr_ty.ptrAlignment(zcu).max(.@"1"),
+    }));
}

fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
-    const mod = self.bin_file.comp.module.?;
+    const zcu = self.bin_file.comp.module.?;
    const elem_ty = self.typeOfIndex(inst);
-    const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse {
-        return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
+    const abi_size = math.cast(u32, elem_ty.abiSize(zcu)) orelse {
+        return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(zcu)});
    };
-    const abi_align = elem_ty.abiAlignment(mod);
-    self.stack_align = self.stack_align.max(abi_align);

    if (reg_ok) {
        // Make sure the type can fit in a register before we try to allocate one.
@@ -894,8 +1586,9 @@ fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
        }
    }
-    const stack_offset = try self.allocMem(inst, abi_size, abi_align);
-    return .{ .stack_offset = stack_offset };
+
+    const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(elem_ty, zcu));
+    return .{ .load_frame = .{ .index = frame_index } };
}

/// Allocates a register from the general purpose set and returns the Register and the Lock.
@@ -938,19 +1631,12 @@ fn elemOffset(self: *Self, index_ty: Type, index: MCValue, elem_size: u64) !Regi
}

pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void {
-    const mod = self.bin_file.comp.module.?;
-    const elem_ty = self.typeOfIndex(inst);
-
-    // there isn't anything to spill
-    if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return;
-
-    const stack_mcv = try self.allocRegOrMem(inst, false);
-    log.debug("spilling {d} to stack mcv {any}", .{ inst, stack_mcv });
-    const reg_mcv = self.getResolvedInstValue(inst);
-    assert(reg == reg_mcv.register);
-    const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
-    try branch.inst_table.put(self.gpa, inst, stack_mcv);
-    try self.genSetStack(self.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv);
+    const tracking = self.inst_tracking.getPtr(inst) orelse return;
+    for (tracking.getRegs()) |tracked_reg| {
+        if (tracked_reg.id() == reg.id()) break;
+    } else unreachable; // spilled reg not tracked with spilled instruction
+    try tracking.spill(self, inst);
+    try tracking.trackSpill(self, inst);
}

/// Copies a value to a register without tracking the register.
The register is not considered @@ -972,39 +1658,48 @@ fn copyToNewRegister(self: *Self, reg_owner: Air.Inst.Index, mcv: MCValue) !MCVa } fn airAlloc(self: *Self, inst: Air.Inst.Index) !void { - const stack_offset = try self.allocMemPtr(inst); - log.debug("airAlloc offset: {}", .{stack_offset}); - return self.finishAir(inst, .{ .ptr_stack_offset = stack_offset }, .{ .none, .none, .none }); + const result = MCValue{ .lea_frame = .{ .index = try self.allocMemPtr(inst) } }; + return self.finishAir(inst, result, .{ .none, .none, .none }); } fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void { - const stack_offset = try self.allocMemPtr(inst); - return self.finishAir(inst, .{ .ptr_stack_offset = stack_offset }, .{ .none, .none, .none }); + const result: MCValue = switch (self.ret_mcv.long) { + else => unreachable, + .none => .{ .lea_frame = .{ .index = try self.allocMemPtr(inst) } }, + .load_frame => .{ .register_offset = .{ + .reg = (try self.copyToNewRegister( + inst, + self.ret_mcv.long, + )).register, + .off = self.ret_mcv.short.indirect.off, + } }, + }; + return self.finishAir(inst, result, .{ .none, .none, .none }); } fn airFptrunc(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airFptrunc for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement airFptrunc for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } fn airFpext(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airFpext for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement airFpext for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const zcu = self.bin_file.comp.module.?; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const src_ty = self.typeOf(ty_op.operand); const dst_ty = self.typeOfIndex(inst); const result: MCValue = result: { - const dst_abi_size: u32 = @intCast(dst_ty.abiSize(mod)); + const dst_abi_size: u32 = @intCast(dst_ty.abiSize(zcu)); - const src_int_info = src_ty.intInfo(mod); - const dst_int_info = dst_ty.intInfo(mod); + const src_int_info = src_ty.intInfo(zcu); + const dst_int_info = dst_ty.intInfo(zcu); const extend = switch (src_int_info.signedness) { .signed => dst_int_info, .unsigned => src_int_info, @@ -1019,7 +1714,7 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { const src_storage_bits: u16 = switch (src_mcv) { .register => 64, - .stack_offset => src_int_info.bits, + .load_frame => src_int_info.bits, else => return self.fail("airIntCast from {s}", .{@tagName(src_mcv)}), }; @@ -1042,7 +1737,7 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { break :result dst_mcv; } orelse return self.fail("TODO implement airIntCast from {} to {}", .{ - src_ty.fmt(mod), dst_ty.fmt(mod), + src_ty.fmt(zcu), dst_ty.fmt(zcu), }); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); @@ -1051,7 +1746,7 @@ fn airIntCast(self: *Self, inst: 
Air.Inst.Index) !void { fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; if (self.liveness.isUnused(inst)) - return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none }); + return self.finishAir(inst, .unreach, .{ ty_op.operand, .none, .none }); const operand = try self.resolveInst(ty_op.operand); _ = operand; @@ -1062,19 +1757,19 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { fn airIntFromBool(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const operand = try self.resolveInst(un_op); - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else operand; + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else operand; return self.finishAir(inst, result, .{ un_op, .none, .none }); } fn airNot(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { - const mod = self.bin_file.comp.module.?; + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { + const zcu = self.bin_file.comp.module.?; const operand = try self.resolveInst(ty_op.operand); const ty = self.typeOf(ty_op.operand); - switch (ty.zigTypeTag(mod)) { + switch (ty.zigTypeTag(zcu)) { .Bool => { const operand_reg = blk: { if (operand == .register) break :blk operand.register; @@ -1089,6 +1784,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void { _ = try self.addInst(.{ .tag = .not, + .ops = .rr, .data = .{ .rr = .{ .rs = operand_reg, @@ -1108,20 +1804,20 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void { fn airMin(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement min for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement min for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } fn airMax(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement max for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement max for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } fn airSlice(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement slice for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement slice for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } @@ -1132,7 +1828,7 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { const lhs_ty = self.typeOf(bin_op.lhs); const rhs_ty = self.typeOf(bin_op.rhs); - const result: MCValue = if (self.liveness.isUnused(inst)) 
.dead else try self.binOp(tag, inst, lhs, rhs, lhs_ty, rhs_ty); + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else try self.binOp(tag, inst, lhs, rhs, lhs_ty, rhs_ty); return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } @@ -1175,7 +1871,8 @@ fn binOp( lhs_ty: Type, rhs_ty: Type, ) InnerError!MCValue { - const mod = self.bin_file.comp.module.?; + const zcu = self.bin_file.comp.module.?; + switch (tag) { // Arithmetic operations on integers and floats .add, @@ -1188,12 +1885,12 @@ fn binOp( .cmp_lt, .cmp_lte, => { - switch (lhs_ty.zigTypeTag(mod)) { + switch (lhs_ty.zigTypeTag(zcu)) { .Float => return self.fail("TODO binary operations on floats", .{}), .Vector => return self.fail("TODO binary operations on vectors", .{}), .Int => { - assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(mod); + assert(lhs_ty.eql(rhs_ty, zcu)); + const int_info = lhs_ty.intInfo(zcu); if (int_info.bits <= 64) { if (rhs == .immediate and supportImmediate(tag)) { return self.binOpImm(tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty); @@ -1210,14 +1907,14 @@ fn binOp( .ptr_add, .ptr_sub, => { - switch (lhs_ty.zigTypeTag(mod)) { + switch (lhs_ty.zigTypeTag(zcu)) { .Pointer => { const ptr_ty = lhs_ty; - const elem_ty = switch (ptr_ty.ptrSize(mod)) { - .One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type - else => ptr_ty.childType(mod), + const elem_ty = switch (ptr_ty.ptrSize(zcu)) { + .One => ptr_ty.childType(zcu).childType(zcu), // ptr to array, so get array element type + else => ptr_ty.childType(zcu), }; - const elem_size = elem_ty.abiSize(mod); + const elem_size = elem_ty.abiSize(zcu); if (elem_size == 1) { const base_tag: Air.Inst.Tag = switch (tag) { @@ -1256,11 +1953,11 @@ fn binOp( .shr, .shl, => { - switch (lhs_ty.zigTypeTag(mod)) { + switch (lhs_ty.zigTypeTag(zcu)) { .Float => return self.fail("TODO binary operations on floats", .{}), .Vector => return self.fail("TODO binary operations on vectors", .{}), .Int => { - const int_info = lhs_ty.intInfo(mod); + const int_info = lhs_ty.intInfo(zcu); if (int_info.bits <= 64) { if (rhs == .immediate) { return self.binOpImm(tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty); @@ -1332,6 +2029,7 @@ fn binOpRegister( _ = try self.addInst(.{ .tag = mir_tag, + .ops = .rrr, .data = .{ .r_type = .{ .rd = dest_reg, @@ -1402,24 +2100,26 @@ fn binOpImm( => { _ = try self.addInst(.{ .tag = mir_tag, + .ops = .rri, .data = .{ .i_type = .{ .rd = dest_reg, .rs1 = lhs_reg, - .imm12 = math.cast(i12, rhs.immediate) orelse { + .imm12 = Immediate.s(math.cast(i12, rhs.immediate) orelse { return self.fail("TODO: binOpImm larger than i12 i_type payload", .{}); - }, + }), } }, }); }, .addiw => { _ = try self.addInst(.{ .tag = mir_tag, + .ops = .rri, .data = .{ .i_type = .{ .rd = dest_reg, .rs1 = lhs_reg, - .imm12 = -(math.cast(i12, rhs.immediate) orelse { + .imm12 = Immediate.s(-(math.cast(i12, rhs.immediate) orelse { return self.fail("TODO: binOpImm larger than i12 i_type payload", .{}); - }), + })), } }, }); }, @@ -1428,6 +2128,7 @@ fn binOpImm( _ = try self.addInst(.{ .tag = mir_tag, + .ops = .rrr, .data = .{ .r_type = .{ .rd = dest_reg, .rs1 = imm_reg, @@ -1449,8 +2150,8 @@ fn binOpMir( dst_mcv: MCValue, src_mcv: MCValue, ) !void { - const mod = self.bin_file.comp.module.?; - const abi_size: u32 = @intCast(ty.abiSize(mod)); + const zcu = self.bin_file.comp.module.?; + const abi_size: u32 = @intCast(ty.abiSize(zcu)); _ = abi_size; _ = maybe_inst; @@ -1461,6 +2162,7 @@ fn binOpMir( _ = try self.addInst(.{ 
        .tag = mir_tag,
+        .ops = .rrr,
        .data = .{
            .r_type = .{
                .rd = dst_reg,
@@ -1483,25 +2185,25 @@ fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void
    const lhs_ty = self.typeOf(bin_op.lhs);
    const rhs_ty = self.typeOf(bin_op.rhs);

-    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else try self.binOp(tag, inst, lhs, rhs, lhs_ty, rhs_ty);
+    const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else try self.binOp(tag, inst, lhs, rhs, lhs_ty, rhs_ty);
    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}

fn airAddWrap(self: *Self, inst: Air.Inst.Index) !void {
    const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
-    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement addwrap for {}", .{self.target.cpu.arch});
+    const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement addwrap for {}", .{self.target.cpu.arch});
    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}

fn airAddSat(self: *Self, inst: Air.Inst.Index) !void {
    const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
-    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement add_sat for {}", .{self.target.cpu.arch});
+    const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement add_sat for {}", .{self.target.cpu.arch});
    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}

fn airSubWrap(self: *Self, inst: Air.Inst.Index) !void {
    const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
-    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+    const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: {
        // RISC-V arithmetic instructions already wrap, so this is simply a sub binOp with
        // no overflow checks.
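+        // e.g. a 64-bit `sub` computes (lhs - rhs) mod 2^64, which matches Zig's
+        // wrapping subtraction semantics for a full-width integer.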
const lhs = try self.resolveInst(bin_op.lhs); @@ -1516,34 +2218,34 @@ fn airSubWrap(self: *Self, inst: Air.Inst.Index) !void { fn airSubSat(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement sub_sat for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement sub_sat for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } fn airMul(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement mul for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement mul for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } fn airMulWrap(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement mulwrap for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement mulwrap for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } fn airMulSat(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement mul_sat for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement mul_sat for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } fn airAddWithOverflow(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const zcu = self.bin_file.comp.module.?; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { const lhs = try self.resolveInst(extra.lhs); const rhs = try self.resolveInst(extra.rhs); const lhs_ty = self.typeOf(extra.lhs); @@ -1554,16 +2256,21 @@ fn airAddWithOverflow(self: *Self, inst: Air.Inst.Index) !void { defer self.register_manager.unlockReg(add_result_lock); const tuple_ty = self.typeOfIndex(inst); - const int_info = lhs_ty.intInfo(mod); + const int_info = lhs_ty.intInfo(zcu); // TODO: optimization, set this to true. needs the other struct access stuff to support // accessing registers. 
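+        // Passing `reg_ok = false` forces the (value, overflow) result tuple into a
+        // stack frame slot rather than a register.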
const result_mcv = try self.allocRegOrMem(inst, false); - const offset = result_mcv.stack_offset; + const offset = result_mcv.load_frame; - const result_offset = tuple_ty.structFieldOffset(0, mod) + offset; - - try self.genSetStack(lhs_ty, @intCast(result_offset), add_result_mcv); + try self.genSetStack( + lhs_ty, + .{ + .index = offset.index, + .off = offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(0, zcu))), + }, + add_result_mcv, + ); if (int_info.bits >= 8 and math.isPowerOfTwo(int_info.bits)) { if (int_info.signedness == .unsigned) { @@ -1585,10 +2292,11 @@ fn airAddWithOverflow(self: *Self, inst: Air.Inst.Index) !void { _ = try self.addInst(.{ .tag = .andi, + .ops = .rri, .data = .{ .i_type = .{ .rd = overflow_reg, .rs1 = add_reg, - .imm12 = @intCast(max_val), + .imm12 = Immediate.s(max_val), } }, }); @@ -1601,8 +2309,14 @@ fn airAddWithOverflow(self: *Self, inst: Air.Inst.Index) !void { lhs_ty, ); - const overflow_offset = tuple_ty.structFieldOffset(1, mod) + offset; - try self.genSetStack(Type.u1, @intCast(overflow_offset), overflow_mcv); + try self.genSetStack( + Type.u1, + .{ + .index = offset.index, + .off = offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(1, zcu))), + }, + overflow_mcv, + ); break :result result_mcv; }, @@ -1629,18 +2343,18 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { //const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)]; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; - const mod = self.bin_file.comp.module.?; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const zcu = self.bin_file.comp.module.?; + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { const lhs = try self.resolveInst(extra.lhs); const rhs = try self.resolveInst(extra.rhs); const lhs_ty = self.typeOf(extra.lhs); const rhs_ty = self.typeOf(extra.rhs); - switch (lhs_ty.zigTypeTag(mod)) { + switch (lhs_ty.zigTypeTag(zcu)) { else => |x| return self.fail("TODO: airMulWithOverflow {s}", .{@tagName(x)}), .Int => { - assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(mod); + assert(lhs_ty.eql(rhs_ty, zcu)); + const int_info = lhs_ty.intInfo(zcu); switch (int_info.bits) { 1...32 => { if (self.hasFeature(.m)) { @@ -1654,11 +2368,11 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { // TODO: optimization, set this to true. needs the other struct access stuff to support // accessing registers. 
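+                        // As in airAddWithOverflow above: keep the result tuple in
+                        // memory until struct field access supports registers.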
const result_mcv = try self.allocRegOrMem(inst, false);
-                        const offset = result_mcv.stack_offset;
-                        const result_offset = tuple_ty.structFieldOffset(0, mod) + offset;
+                        const result_off: i32 = @intCast(tuple_ty.structFieldOffset(0, zcu));
+                        const overflow_off: i32 = @intCast(tuple_ty.structFieldOffset(1, zcu));

-                        try self.genSetStack(lhs_ty, @intCast(result_offset), dest);
+                        try self.genSetStack(lhs_ty, result_mcv.offset(result_off).load_frame, dest);

                        if (int_info.bits >= 8 and math.isPowerOfTwo(int_info.bits)) {
                            if (int_info.signedness == .unsigned) {
@@ -1680,10 +2394,11 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {

                                _ = try self.addInst(.{
                                    .tag = .andi,
+                                    .ops = .rri,
                                    .data = .{ .i_type = .{
                                        .rd = overflow_reg,
                                        .rs1 = add_reg,
-                                        .imm12 = @intCast(max_val),
+                                        .imm12 = Immediate.s(max_val),
                                    } },
                                });

@@ -1696,8 +2411,11 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                                    lhs_ty,
                                );

-                        const overflow_offset = tuple_ty.structFieldOffset(1, mod) + offset;
-                        try self.genSetStack(Type.u1, @intCast(overflow_offset), overflow_mcv);
+                        try self.genSetStack(
+                            Type.u1,
+                            result_mcv.offset(overflow_off).load_frame,
+                            overflow_mcv,
+                        );

                        break :result result_mcv;
                    },
@@ -1730,43 +2448,43 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {

fn airDiv(self: *Self, inst: Air.Inst.Index) !void {
    const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
-    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement div for {}", .{self.target.cpu.arch});
+    const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement div for {}", .{self.target.cpu.arch});
    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}

fn airRem(self: *Self, inst: Air.Inst.Index) !void {
    const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
-    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement rem for {}", .{self.target.cpu.arch});
+    const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement rem for {}", .{self.target.cpu.arch});
    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}

fn airMod(self: *Self, inst: Air.Inst.Index) !void {
    const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
-    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement mod for {}", .{self.target.cpu.arch});
+    const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement mod for {}", .{self.target.cpu.arch});
    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}

fn airBitAnd(self: *Self, inst: Air.Inst.Index) !void {
    const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
-    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement bitwise and for {}", .{self.target.cpu.arch});
+    const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement bitwise and for {}", .{self.target.cpu.arch});
    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}

fn airBitOr(self: *Self, inst: Air.Inst.Index) !void {
    const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
-    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement bitwise or for {}",
.{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement bitwise or for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } fn airXor(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement xor for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement xor for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } fn airShl(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const lhs_ty = self.typeOf(bin_op.lhs); @@ -1779,52 +2497,52 @@ fn airShl(self: *Self, inst: Air.Inst.Index) !void { fn airShlSat(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement shl_sat for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement shl_sat for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } fn airShr(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement shr for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement shr for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement .optional_payload for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement .optional_payload for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } fn airOptionalPayloadPtr(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement .optional_payload_ptr for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement .optional_payload_ptr for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return 
self.fail("TODO implement .optional_payload_ptr_set for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement .optional_payload_ptr_set for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const mod = self.bin_file.comp.module.?; + const zcu = self.bin_file.comp.module.?; const err_union_ty = self.typeOf(ty_op.operand); - const err_ty = err_union_ty.errorUnionSet(mod); - const payload_ty = err_union_ty.errorUnionPayload(mod); + const err_ty = err_union_ty.errorUnionSet(zcu); + const payload_ty = err_union_ty.errorUnionPayload(zcu); const operand = try self.resolveInst(ty_op.operand); const result: MCValue = result: { - if (err_ty.errorSetIsEmpty(mod)) { + if (err_ty.errorSetIsEmpty(zcu)) { break :result .{ .immediate = 0 }; } - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) { break :result operand; } - const err_off: u32 = @intCast(errUnionErrorOffset(payload_ty, mod)); + const err_off: u32 = @intCast(errUnionErrorOffset(payload_ty, zcu)); switch (operand) { .register => |reg| { @@ -1865,16 +2583,18 @@ fn genUnwrapErrUnionPayloadMir( err_union_ty: Type, err_union: MCValue, ) !MCValue { - const mod = self.bin_file.comp.module.?; - - const payload_ty = err_union_ty.errorUnionPayload(mod); + const zcu = self.bin_file.comp.module.?; + const payload_ty = err_union_ty.errorUnionPayload(zcu); const result: MCValue = result: { - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none; + if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .none; - const payload_off: u32 = @intCast(errUnionPayloadOffset(payload_ty, mod)); + const payload_off: u31 = @intCast(errUnionPayloadOffset(payload_ty, zcu)); switch (err_union) { - .stack_offset => |off| break :result .{ .stack_offset = off + payload_off }, + .load_frame => |frame_addr| break :result .{ .load_frame = .{ + .index = frame_addr.index, + .off = frame_addr.off + payload_off, + } }, .register => |reg| { const eu_lock = self.register_manager.lockReg(reg); defer if (eu_lock) |lock| self.register_manager.unlockReg(lock); @@ -1904,26 +2624,26 @@ fn genUnwrapErrUnionPayloadMir( // *(E!T) -> E fn airUnwrapErrErrPtr(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement unwrap error union error ptr for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement unwrap error union error ptr for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } // *(E!T) -> *T fn airUnwrapErrPayloadPtr(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement unwrap error union payload ptr for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement unwrap error union payload ptr for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none 
}); } fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement .errunion_payload_ptr_set for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement .errunion_payload_ptr_set for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } fn airErrReturnTrace(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = if (self.liveness.isUnused(inst)) - .dead + .unreach else return self.fail("TODO implement airErrReturnTrace for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ .none, .none, .none }); @@ -1941,12 +2661,12 @@ fn airSaveErrReturnTraceIndex(self: *Self, inst: Air.Inst.Index) !void { fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { - const mod = self.bin_file.comp.module.?; + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { + const zcu = self.bin_file.comp.module.?; const optional_ty = self.typeOfIndex(inst); // Optional with a zero-bit payload type is just a boolean true - if (optional_ty.abiSize(mod) == 1) + if (optional_ty.abiSize(zcu) == 1) break :result MCValue{ .immediate = 1 }; return self.fail("TODO implement wrap optional for {}", .{self.target.cpu.arch}); @@ -1957,29 +2677,29 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { /// T to E!T fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement wrap errunion payload for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement wrap errunion payload for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } /// E to E!T fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const zcu = self.bin_file.comp.module.?; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const eu_ty = ty_op.ty.toType(); - const pl_ty = eu_ty.errorUnionPayload(mod); - const err_ty = eu_ty.errorUnionSet(mod); + const pl_ty = eu_ty.errorUnionPayload(zcu); + const err_ty = eu_ty.errorUnionSet(zcu); const result: MCValue = result: { - if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result try self.resolveInst(ty_op.operand); + if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result try self.resolveInst(ty_op.operand); - const stack_off = try self.allocMem(null, @intCast(eu_ty.abiSize(mod)), eu_ty.abiAlignment(mod)); - const pl_off: u32 = @intCast(errUnionPayloadOffset(pl_ty, mod)); - const err_off: u32 = @intCast(errUnionErrorOffset(pl_ty, mod)); - try self.genSetStack(pl_ty, stack_off + pl_off, .undef); + const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(eu_ty, zcu)); + const pl_off: i32 = @intCast(errUnionPayloadOffset(pl_ty, zcu)); + const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, zcu)); + try self.genSetStack(pl_ty, .{ .index = frame_index, .off = pl_off }, 
.undef); const operand = try self.resolveInst(ty_op.operand); - try self.genSetStack(err_ty, stack_off + err_off, operand); - break :result .{ .stack_offset = stack_off }; + try self.genSetStack(err_ty, .{ .index = frame_index, .off = err_off }, operand); + break :result .{ .load_frame = .{ .index = frame_index } }; }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } @@ -2001,67 +2721,41 @@ fn genTry( operand_ty: Type, operand_is_ptr: bool, ) !MCValue { - const liveness_condbr = self.liveness.getCondBr(inst); - _ = operand_is_ptr; + const liveness_cond_br = self.liveness.getCondBr(inst); + const operand_mcv = try self.resolveInst(operand); const is_err_mcv = try self.isErr(null, operand_ty, operand_mcv); - const cond_reg = try self.register_manager.allocReg(inst, gp); - const cond_reg_lock = self.register_manager.lockRegAssumeUnused(cond_reg); - defer self.register_manager.unlockReg(cond_reg_lock); - // A branch to the false section. Uses beq. 1 is the default "true" state. - const reloc = try self.condBr(Type.anyerror, is_err_mcv, cond_reg); + const reloc = try self.condBr(Type.anyerror, is_err_mcv); if (self.liveness.operandDies(inst, 0)) { - if (operand.toIndex()) |op_inst| self.processDeath(op_inst); + if (operand.toIndex()) |operand_inst| try self.processDeath(operand_inst); } - // Save state - const parent_next_stack_offset = self.next_stack_offset; - const parent_free_registers = self.register_manager.free_registers; - var parent_stack = try self.stack.clone(self.gpa); - defer parent_stack.deinit(self.gpa); - const parent_registers = self.register_manager.registers; - - try self.branch_stack.append(.{}); - errdefer { - _ = self.branch_stack.pop(); - } - - try self.ensureProcessDeathCapacity(liveness_condbr.else_deaths.len); - for (liveness_condbr.else_deaths) |op| { - self.processDeath(op); - } + self.scope_generation += 1; + const state = try self.saveState(); + for (liveness_cond_br.else_deaths) |death| try self.processDeath(death); try self.genBody(body); + try self.restoreState(state, &.{}, .{ + .emit_instructions = false, + .update_tracking = true, + .resurrect = true, + .close_scope = true, + }); - // Restore state - var saved_then_branch = self.branch_stack.pop(); - defer saved_then_branch.deinit(self.gpa); + self.performReloc(reloc); - self.register_manager.registers = parent_registers; - - self.stack.deinit(self.gpa); - self.stack = parent_stack; - parent_stack = .{}; - - self.next_stack_offset = parent_next_stack_offset; - self.register_manager.free_registers = parent_free_registers; - - try self.performReloc(reloc, @intCast(self.mir_instructions.len)); - - try self.ensureProcessDeathCapacity(liveness_condbr.then_deaths.len); - for (liveness_condbr.then_deaths) |op| { - self.processDeath(op); - } + for (liveness_cond_br.then_deaths) |death| try self.processDeath(death); const result = if (self.liveness.isUnused(inst)) .unreach else try self.genUnwrapErrUnionPayloadMir(operand_ty, operand_mcv); + return result; } @@ -2081,11 +2775,14 @@ fn airSlicePtr(self: *Self, inst: Air.Inst.Index) !void { fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { const src_mcv = try self.resolveInst(ty_op.operand); switch (src_mcv) { - .stack_offset => |off| { - const len_mcv: MCValue = .{ .stack_offset = off + 8 }; + 
.load_frame => |frame_addr| { + const len_mcv: MCValue = .{ .load_frame = .{ + .index = frame_addr.index, + .off = frame_addr.off + 8, + } }; if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result len_mcv; const dst_mcv = try self.allocRegOrMem(inst, true); @@ -2109,29 +2806,33 @@ fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void { fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement ptr_slice_len_ptr for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement ptr_slice_len_ptr for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement ptr_slice_ptr_ptr for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement ptr_slice_ptr_ptr for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const zcu = self.bin_file.comp.module.?; const is_volatile = false; // TODO const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - if (!is_volatile and self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none }); + if (!is_volatile and self.liveness.isUnused(inst)) return self.finishAir( + inst, + .unreach, + .{ bin_op.lhs, bin_op.rhs, .none }, + ); const result: MCValue = result: { const slice_mcv = try self.resolveInst(bin_op.lhs); const index_mcv = try self.resolveInst(bin_op.rhs); const slice_ty = self.typeOf(bin_op.lhs); - const slice_ptr_field_type = slice_ty.slicePtrFieldType(mod); + const slice_ptr_field_type = slice_ty.slicePtrFieldType(zcu); const index_lock: ?RegisterLock = if (index_mcv == .register) self.register_manager.lockRegAssumeUnused(index_mcv.register) @@ -2140,7 +2841,9 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { defer if (index_lock) |reg| self.register_manager.unlockReg(reg); const base_mcv: MCValue = switch (slice_mcv) { - .stack_offset => |off| .{ .register = try self.copyToTmpRegister(slice_ptr_field_type, .{ .stack_offset = off }) }, + .load_frame, + .load_symbol, + => .{ .register = try self.copyToTmpRegister(slice_ptr_field_type, slice_mcv) }, else => return self.fail("TODO slice_elem_val when slice is {}", .{slice_mcv}), }; @@ -2156,38 +2859,34 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement slice_elem_ptr for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement slice_elem_ptr for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none }); } 
fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const zcu = self.bin_file.comp.module.?; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { const array_ty = self.typeOf(bin_op.lhs); const array_mcv = try self.resolveInst(bin_op.lhs); const index_mcv = try self.resolveInst(bin_op.rhs); const index_ty = self.typeOf(bin_op.rhs); - const elem_ty = array_ty.childType(mod); - const elem_abi_size = elem_ty.abiSize(mod); + const elem_ty = array_ty.childType(zcu); + const elem_abi_size = elem_ty.abiSize(zcu); const addr_reg, const addr_reg_lock = try self.allocReg(); defer self.register_manager.unlockReg(addr_reg_lock); switch (array_mcv) { .register => { - const stack_offset = try self.allocMem( - null, - @intCast(array_ty.abiSize(mod)), - array_ty.abiAlignment(mod), - ); - try self.genSetStack(array_ty, stack_offset, array_mcv); - try self.genSetReg(Type.usize, addr_reg, .{ .ptr_stack_offset = stack_offset }); + const frame_index = try self.allocFrameIndex(FrameAlloc.initType(array_ty, zcu)); + try self.genSetStack(array_ty, .{ .index = frame_index }, array_mcv); + try self.genSetReg(Type.usize, addr_reg, .{ .lea_frame = .{ .index = frame_index } }); }, - .stack_offset => |off| { - try self.genSetReg(Type.usize, addr_reg, .{ .ptr_stack_offset = off }); + .load_frame => |frame_addr| { + try self.genSetReg(Type.usize, addr_reg, .{ .lea_frame = frame_addr }); }, else => try self.genSetReg(Type.usize, addr_reg, array_mcv.address()), } @@ -2213,14 +2912,14 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void { const is_volatile = false; // TODO const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (!is_volatile and self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement ptr_elem_val for {}", .{self.target.cpu.arch}); + const result: MCValue = if (!is_volatile and self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement ptr_elem_val for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement ptr_elem_ptr for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement ptr_elem_ptr for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none }); } @@ -2233,19 +2932,19 @@ fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void { fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airGetUnionTag for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement airGetUnionTag for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none 
}); } fn airClz(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airClz for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement airClz for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } fn airCtz(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { const operand = try self.resolveInst(ty_op.operand); const operand_ty = self.typeOf(ty_op.operand); @@ -2270,8 +2969,8 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void { } fn ctz(self: *Self, src: Register, dst: Register, ty: Type) !void { - const mod = self.bin_file.comp.module.?; - const length = (ty.abiSize(mod) * 8) - 1; + const zcu = self.bin_file.comp.module.?; + const length = (ty.abiSize(zcu) * 8) - 1; const count_reg, const count_lock = try self.allocReg(); defer self.register_manager.unlockReg(count_lock); @@ -2282,17 +2981,6 @@ fn ctz(self: *Self, src: Register, dst: Register, ty: Type) !void { try self.genSetReg(Type.usize, count_reg, .{ .immediate = 0 }); try self.genSetReg(Type.usize, len_reg, .{ .immediate = length }); - _ = try self.addInst(.{ - .tag = .beq, - .data = .{ - .b_type = .{ - .rs1 = count_reg, - .rs2 = len_reg, - .inst = @intCast(self.mir_instructions.len + 0), - }, - }, - }); - _ = src; _ = dst; @@ -2301,23 +2989,23 @@ fn ctz(self: *Self, src: Register, dst: Register, ty: Type) !void { fn airPopcount(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airPopcount for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement airPopcount for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } fn airAbs(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const zcu = self.bin_file.comp.module.?; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { const ty = self.typeOf(ty_op.operand); - const scalar_ty = ty.scalarType(mod); + const scalar_ty = ty.scalarType(zcu); const operand = try self.resolveInst(ty_op.operand); - switch (scalar_ty.zigTypeTag(mod)) { - .Int => if (ty.zigTypeTag(mod) == .Vector) { - return self.fail("TODO implement airAbs for {}", .{ty.fmt(mod)}); + switch (scalar_ty.zigTypeTag(zcu)) { + .Int => if (ty.zigTypeTag(zcu) == .Vector) { + return self.fail("TODO implement airAbs for {}", .{ty.fmt(zcu)}); } else { - const int_bits = ty.intInfo(mod).bits; + const int_bits = ty.intInfo(zcu).bits; if (int_bits > 32) { return self.fail("TODO: airAbs for larger than 32 bits", .{}); @@ -2330,18 +3018,19 @@ fn airAbs(self: *Self, inst: Air.Inst.Index) !void { _ = try self.addInst(.{ .tag = .abs, + .ops = .rri, .data = .{ .i_type = .{ .rs1 = 
src_mcv.register, .rd = temp_reg, - .imm12 = @intCast(int_bits - 1), + .imm12 = Immediate.s(int_bits - 1), }, }, }); break :result src_mcv; }, - else => return self.fail("TODO: implement airAbs {}", .{scalar_ty.fmt(mod)}), + else => return self.fail("TODO: implement airAbs {}", .{scalar_ty.fmt(zcu)}), } }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); @@ -2349,12 +3038,12 @@ fn airAbs(self: *Self, inst: Air.Inst.Index) !void { fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { - const mod = self.bin_file.comp.module.?; + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { + const zcu = self.bin_file.comp.module.?; const ty = self.typeOf(ty_op.operand); const operand = try self.resolveInst(ty_op.operand); - const int_bits = ty.intInfo(mod).bits; + const int_bits = ty.intInfo(zcu).bits; // bytes are no-op if (int_bits == 8 and self.reuseOperand(inst, ty_op.operand, 0, operand)) { @@ -2372,14 +3061,16 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void { assert(temp == .register); _ = try self.addInst(.{ .tag = .slli, + .ops = .rri, .data = .{ .i_type = .{ - .imm12 = 8, + .imm12 = Immediate.s(8), .rd = dest_reg, .rs1 = dest_reg, } }, }); _ = try self.addInst(.{ .tag = .@"or", + .ops = .rrr, .data = .{ .r_type = .{ .rd = dest_reg, .rs1 = dest_reg, @@ -2397,62 +3088,78 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void { fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airBitReverse for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement airBitReverse for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } fn airUnaryMath(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) - .dead + .unreach else return self.fail("TODO implement airUnaryMath for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ un_op, .none, .none }); } -fn reuseOperand(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, op_index: Liveness.OperandInt, mcv: MCValue) bool { +fn reuseOperand( + self: *Self, + inst: Air.Inst.Index, + operand: Air.Inst.Ref, + op_index: Liveness.OperandInt, + mcv: MCValue, +) bool { + return self.reuseOperandAdvanced(inst, operand, op_index, mcv, inst); +} + +fn reuseOperandAdvanced( + self: *Self, + inst: Air.Inst.Index, + operand: Air.Inst.Ref, + op_index: Liveness.OperandInt, + mcv: MCValue, + maybe_tracked_inst: ?Air.Inst.Index, +) bool { if (!self.liveness.operandDies(inst, op_index)) return false; switch (mcv) { - .register => |reg| { - // If it's in the registers table, need to associate the register with the + .register, + .register_pair, + => for (mcv.getRegs()) |reg| { + // If it's in the registers table, need to associate the register(s) with the // new instruction.
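+ // When no tracked instruction is supplied, the register is freed instead of + // being re-associated (see the `else` branch below).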
- if (RegisterManager.indexOfRegIntoTracked(reg)) |index| { + if (maybe_tracked_inst) |tracked_inst| { if (!self.register_manager.isRegFree(reg)) { - self.register_manager.registers[index] = inst; + if (RegisterManager.indexOfRegIntoTracked(reg)) |index| { + self.register_manager.registers[index] = tracked_inst; + } } - } - log.debug("%{d} => {} (reused)", .{ inst, reg }); - }, - .stack_offset => |off| { - log.debug("%{d} => stack offset {d} (reused)", .{ inst, off }); + } else self.register_manager.freeReg(reg); }, + .load_frame => |frame_addr| if (frame_addr.index.isNamed()) return false, else => return false, } // Prevent the operand deaths processing code from deallocating it. self.liveness.clearOperandDeath(inst, op_index); - - // That makes us responsible for doing the rest of the stuff that processDeath would have done. - const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; - branch.inst_table.putAssumeCapacity(operand.toIndex().?, .dead); + const op_inst = operand.toIndex().?; + self.getResolvedInstValue(op_inst).reuse(self, maybe_tracked_inst, op_inst); return true; } fn airLoad(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const zcu = self.bin_file.comp.module.?; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const elem_ty = self.typeOfIndex(inst); const result: MCValue = result: { - if (!elem_ty.hasRuntimeBits(mod)) + if (!elem_ty.hasRuntimeBits(zcu)) break :result .none; const ptr = try self.resolveInst(ty_op.operand); - const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(mod); + const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(zcu); if (self.liveness.isUnused(inst) and !is_volatile) - break :result .dead; + break :result .unreach; const dst_mcv: MCValue = blk: { if (self.reuseOperand(inst, ty_op.operand, 0, ptr)) { @@ -2462,6 +3169,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { break :blk try self.allocRegOrMem(inst, true); } }; + try self.load(dst_mcv, ptr, self.typeOf(ty_op.operand)); break :result dst_mcv; }; @@ -2469,10 +3177,10 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { } fn load(self: *Self, dst_mcv: MCValue, ptr_mcv: MCValue, ptr_ty: Type) InnerError!void { - const mod = self.bin_file.comp.module.?; - const dst_ty = ptr_ty.childType(mod); + const zcu = self.bin_file.comp.module.?; + const dst_ty = ptr_ty.childType(zcu); - log.debug("loading {}:{} into {}", .{ ptr_mcv, ptr_ty.fmt(mod), dst_mcv }); + log.debug("loading {}:{} into {}", .{ ptr_mcv, ptr_ty.fmt(zcu), dst_mcv }); switch (ptr_mcv) { .none, @@ -2480,19 +3188,20 @@ fn load(self: *Self, dst_mcv: MCValue, ptr_mcv: MCValue, ptr_ty: Type) InnerErro .unreach, .dead, .register_pair, + .reserved_frame, => unreachable, // not a valid pointer .immediate, .register, .register_offset, - .ptr_stack_offset, - .addr_symbol, + .lea_frame, + .lea_symbol, => try self.genCopy(dst_ty, dst_mcv, ptr_mcv.deref()), .memory, .indirect, .load_symbol, - .stack_offset, + .load_frame, => { const addr_reg = try self.copyToTmpRegister(ptr_ty, ptr_mcv); const addr_lock = self.register_manager.lockRegAssumeUnused(addr_reg); @@ -2518,14 +3227,14 @@ fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void { try self.store(ptr, value, ptr_ty, value_ty); - return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none }); + return self.finishAir(inst, .none, .{ bin_op.lhs, bin_op.rhs, .none }); } /// Loads `value` into the "payload" of `pointer`. 
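+/// Pointers that themselves live in memory (`.memory`, `.indirect`, `.load_symbol`, +/// `.load_frame`) are first materialized into a temporary register below.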
fn store(self: *Self, ptr_mcv: MCValue, src_mcv: MCValue, ptr_ty: Type, src_ty: Type) !void { - const mod = self.bin_file.comp.module.?; + const zcu = self.bin_file.comp.module.?; - log.debug("storing {}:{} in {}:{}", .{ src_mcv, src_ty.fmt(mod), ptr_mcv, ptr_ty.fmt(mod) }); + log.debug("storing {}:{} in {}:{}", .{ src_mcv, src_ty.fmt(zcu), ptr_mcv, ptr_ty.fmt(zcu) }); switch (ptr_mcv) { .none => unreachable, @@ -2533,18 +3242,19 @@ fn store(self: *Self, ptr_mcv: MCValue, src_mcv: MCValue, ptr_ty: Type, src_ty: .unreach => unreachable, .dead => unreachable, .register_pair => unreachable, + .reserved_frame => unreachable, .immediate, .register, .register_offset, - .addr_symbol, - .ptr_stack_offset, + .lea_symbol, + .lea_frame, => try self.genCopy(src_ty, ptr_mcv.deref(), src_mcv), .memory, .indirect, .load_symbol, - .stack_offset, + .load_frame, => { const addr_reg = try self.copyToTmpRegister(ptr_ty, ptr_mcv); const addr_lock = self.register_manager.lockRegAssumeUnused(addr_reg); @@ -2570,24 +3280,24 @@ fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void { } fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue { - const mod = self.bin_file.comp.module.?; + const zcu = self.bin_file.comp.module.?; const ptr_field_ty = self.typeOfIndex(inst); const ptr_container_ty = self.typeOf(operand); - const ptr_container_ty_info = ptr_container_ty.ptrInfo(mod); - const container_ty = ptr_container_ty.childType(mod); + const ptr_container_ty_info = ptr_container_ty.ptrInfo(zcu); + const container_ty = ptr_container_ty.childType(zcu); - const field_offset: i32 = if (mod.typeToPackedStruct(container_ty)) |struct_obj| - if (ptr_field_ty.ptrInfo(mod).packed_offset.host_size == 0) - @divExact(mod.structPackedFieldBitOffset(struct_obj, index) + + const field_offset: i32 = if (zcu.typeToPackedStruct(container_ty)) |struct_obj| + if (ptr_field_ty.ptrInfo(zcu).packed_offset.host_size == 0) + @divExact(zcu.structPackedFieldBitOffset(struct_obj, index) + ptr_container_ty_info.packed_offset.bit_offset, 8) else 0 else - @intCast(container_ty.structFieldOffset(index, mod)); + @intCast(container_ty.structFieldOffset(index, zcu)); const src_mcv = try self.resolveInst(operand); const dst_mcv = if (switch (src_mcv) { - .immediate, .ptr_stack_offset => true, + .immediate, .lea_frame => true, .register, .register_offset => self.reuseOperand(inst, operand, 0, src_mcv), else => false, }) src_mcv else try self.copyToNewRegister(inst, src_mcv); @@ -2595,21 +3305,24 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde } fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.comp.module.?; + const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.StructField, ty_pl.payload).data; const operand = extra.struct_operand; const index = extra.field_index; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { - const mod = self.bin_file.comp.module.?; + + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { + const zcu = self.bin_file.comp.module.?; const src_mcv = try self.resolveInst(operand); const struct_ty = self.typeOf(operand); - const field_ty = struct_ty.structFieldType(index, mod); - if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none; + const field_ty = struct_ty.structFieldType(index, zcu); + if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .none; 
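+ // note: field_off is measured in bits so that packed struct fields, which may + // start at arbitrary bit offsets, share one code path with auto/extern layouts.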
- const field_off: u32 = switch (struct_ty.containerLayout(mod)) { - .auto, .@"extern" => @intCast(struct_ty.structFieldOffset(index, mod) * 8), - .@"packed" => if (mod.typeToStruct(struct_ty)) |struct_type| - mod.structPackedFieldBitOffset(struct_type, index) + const field_off: u32 = switch (struct_ty.containerLayout(zcu)) { + .auto, .@"extern" => @intCast(struct_ty.structFieldOffset(index, zcu) * 8), + .@"packed" => if (zcu.typeToStruct(struct_ty)) |struct_type| + zcu.structPackedFieldBitOffset(struct_type, index) else 0, }; @@ -2632,13 +3345,12 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { if (field_off > 0) { _ = try self.addInst(.{ .tag = .srli, - .data = .{ - .i_type = .{ - .imm12 = @intCast(field_off), - .rd = dst_reg, - .rs1 = dst_reg, - }, - }, + .ops = .rri, + .data = .{ .i_type = .{ + .imm12 = Immediate.s(@intCast(field_off)), + .rd = dst_reg, + .rs1 = dst_reg, + } }, }); return self.fail("TODO: airStructFieldVal register with field_off > 0", .{}); @@ -2646,10 +3358,49 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { break :result if (field_off == 0) dst_mcv else try self.copyToNewRegister(inst, dst_mcv); }, - .stack_offset => |off| { - log.debug("airStructFieldVal off: {}", .{field_off}); - const field_byte_off: u32 = @divExact(field_off, 8); - break :result MCValue{ .stack_offset = off + field_byte_off }; + .load_frame => { + const field_abi_size: u32 = @intCast(field_ty.abiSize(mod)); + if (field_off % 8 == 0) { + const field_byte_off = @divExact(field_off, 8); + const off_mcv = src_mcv.address().offset(@intCast(field_byte_off)).deref(); + const field_bit_size = field_ty.bitSize(mod); + + if (field_abi_size <= 8) { + const int_ty = try mod.intType( + if (field_ty.isAbiInt(mod)) field_ty.intInfo(mod).signedness else .unsigned, + @intCast(field_bit_size), + ); + + const dst_reg, const dst_lock = try self.allocReg(); + const dst_mcv = MCValue{ .register = dst_reg }; + defer self.register_manager.unlockReg(dst_lock); + + try self.genCopy(int_ty, dst_mcv, off_mcv); + break :result try self.copyToNewRegister(inst, dst_mcv); + } + + const container_abi_size: u32 = @intCast(struct_ty.abiSize(mod)); + const dst_mcv = if (field_byte_off + field_abi_size <= container_abi_size and + self.reuseOperand(inst, operand, 0, src_mcv)) + off_mcv + else dst: { + const dst_mcv = try self.allocRegOrMem(inst, true); + try self.genCopy(field_ty, dst_mcv, off_mcv); + break :dst dst_mcv; + }; + if (field_abi_size * 8 > field_bit_size and dst_mcv.isMemory()) { + const tmp_reg, const tmp_lock = try self.allocReg(); + defer self.register_manager.unlockReg(tmp_lock); + + const hi_mcv = + dst_mcv.address().offset(@intCast(field_bit_size / 64 * 8)).deref(); + try self.genSetReg(Type.usize, tmp_reg, hi_mcv); + try self.genCopy(Type.usize, hi_mcv, .{ .register = tmp_reg }); + } + break :result dst_mcv; + } + + return self.fail("TODO: airStructFieldVal load_frame field_off not a multiple of 8", .{}); }, else => return self.fail("TODO: airStructField {s}", .{@tagName(src_mcv)}), } @@ -2664,18 +3415,18 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void { } fn genArgDbgInfo(self: Self, inst: Air.Inst.Index, mcv: MCValue) !void { - const mod = self.bin_file.comp.module.?; + const zcu = self.bin_file.comp.module.?; const arg = self.air.instructions.items(.data)[@intFromEnum(inst)].arg; const ty = arg.ty.toType(); - const owner_decl = mod.funcOwnerDeclIndex(self.func_index); - const name = mod.getParamName(self.func_index, arg.src_index); + const owner_decl =
zcu.funcOwnerDeclIndex(self.func_index); + const name = zcu.getParamName(self.func_index, arg.src_index); switch (self.debug_output) { .dwarf => |dw| switch (mcv) { .register => |reg| try dw.genArgDbgInfo(name, ty, owner_decl, .{ .register = reg.dwarfLocOp(), }), - .stack_offset => {}, + .load_frame => {}, else => {}, }, .plan9 => {}, @@ -2694,12 +3445,8 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void { const src_mcv = self.args[arg_index]; const dst_mcv = switch (src_mcv) { - .register => |src_reg| dst: { - self.register_manager.getRegAssumeFree(src_reg, null); - break :dst src_mcv; - }, - .register_pair => |pair| dst: { - for (pair) |reg| self.register_manager.getRegAssumeFree(reg, null); + .register, .register_pair, .load_frame => dst: { + for (src_mcv.getRegs()) |reg| self.register_manager.getRegAssumeFree(reg, inst); break :dst src_mcv; }, else => return self.fail("TODO: airArg {s}", .{@tagName(src_mcv)}), @@ -2715,7 +3462,8 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void { fn airTrap(self: *Self) !void { _ = try self.addInst(.{ .tag = .unimp, - .data = .{ .nop = {} }, + .ops = .none, + .data = undefined, }); return self.finishAirBookkeeping(); } @@ -2723,19 +3471,22 @@ fn airTrap(self: *Self) !void { fn airBreakpoint(self: *Self) !void { _ = try self.addInst(.{ .tag = .ebreak, - .data = .{ .nop = {} }, + .ops = .none, + .data = undefined, }); return self.finishAirBookkeeping(); } fn airRetAddr(self: *Self, inst: Air.Inst.Index) !void { - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airRetAddr for riscv64", .{}); - return self.finishAir(inst, result, .{ .none, .none, .none }); + const dst_mcv = try self.allocRegOrMem(inst, true); + try self.genCopy(Type.usize, dst_mcv, .{ .load_frame = .{ .index = .ret_addr } }); + return self.finishAir(inst, dst_mcv, .{ .none, .none, .none }); } fn airFrameAddress(self: *Self, inst: Air.Inst.Index) !void { - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airFrameAddress for riscv64", .{}); - return self.finishAir(inst, result, .{ .none, .none, .none }); + const dst_mcv = try self.allocRegOrMem(inst, true); + try self.genCopy(Type.usize, dst_mcv, .{ .lea_frame = .{ .index = .base_ptr } }); + return self.finishAir(inst, dst_mcv, .{ .none, .none, .none }); } fn airFence(self: *Self) !void { @@ -2790,39 +3541,55 @@ fn genCall( arg_tys: []const Type, args: []const MCValue, ) !MCValue { - const mod = self.bin_file.comp.module.?; + const zcu = self.bin_file.comp.module.?; const fn_ty = switch (info) { .air => |callee| fn_info: { const callee_ty = self.typeOf(callee); - break :fn_info switch (callee_ty.zigTypeTag(mod)) { + break :fn_info switch (callee_ty.zigTypeTag(zcu)) { .Fn => callee_ty, - .Pointer => callee_ty.childType(mod), + .Pointer => callee_ty.childType(zcu), else => unreachable, }; }, - .lib => |lib| try mod.funcType(.{ + .lib => |lib| try zcu.funcType(.{ .param_types = lib.param_types, .return_type = lib.return_type, .cc = .C, }), }; - var call_info = try self.resolveCallingConventionValues(fn_ty, .caller); + const fn_info = zcu.typeToFunc(fn_ty).?; + var call_info = try self.resolveCallingConventionValues(fn_info); defer call_info.deinit(self); + // We need a properly aligned and sized call frame to be able to call this function. 
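+ // note: FrameIndex.call_frame is shared by every call site in the function; it + // is only ever grown here, so the prologue ends up reserving enough stack for + // the largest outgoing call.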
+ { + const needed_call_frame = FrameAlloc.init(.{ + .size = call_info.stack_byte_count, + .alignment = call_info.stack_align, + }); + const frame_allocs_slice = self.frame_allocs.slice(); + const stack_frame_size = + &frame_allocs_slice.items(.abi_size)[@intFromEnum(FrameIndex.call_frame)]; + stack_frame_size.* = @max(stack_frame_size.*, needed_call_frame.abi_size); + const stack_frame_align = + &frame_allocs_slice.items(.abi_align)[@intFromEnum(FrameIndex.call_frame)]; + stack_frame_align.* = stack_frame_align.max(needed_call_frame.abi_align); + } + for (call_info.args, 0..) |mc_arg, arg_i| try self.genCopy(arg_tys[arg_i], mc_arg, args[arg_i]); // Due to incremental compilation, how function calls are generated depends // on linking. switch (info) { .air => |callee| { - if (try self.air.value(callee, mod)) |func_value| { - const func_key = mod.intern_pool.indexToKey(func_value.ip_index); + if (try self.air.value(callee, zcu)) |func_value| { + const func_key = zcu.intern_pool.indexToKey(func_value.ip_index); switch (switch (func_key) { else => func_key, .ptr => |ptr| switch (ptr.addr) { - .decl => |decl| mod.intern_pool.indexToKey(mod.declPtr(decl).val.toIntern()), + .decl => |decl| zcu.intern_pool.indexToKey(zcu.declPtr(decl).val.toIntern()), else => func_key, }, }) { @@ -2835,10 +3602,11 @@ fn genCall( try self.genSetReg(Type.usize, .ra, .{ .memory = got_addr }); _ = try self.addInst(.{ .tag = .jalr, + .ops = .rri, .data = .{ .i_type = .{ .rd = .ra, .rs1 = .ra, - .imm12 = 0, + .imm12 = Immediate.s(0), } }, }); } else if (self.bin_file.cast(link.File.Coff)) |_| { @@ -2855,16 +3623,17 @@ fn genCall( else => return self.fail("TODO implement calling bitcasted functions", .{}), } } else { - assert(self.typeOf(callee).zigTypeTag(mod) == .Pointer); + assert(self.typeOf(callee).zigTypeTag(zcu) == .Pointer); const addr_reg, const addr_lock = try self.allocReg(); defer self.register_manager.unlockReg(addr_lock); try self.genSetReg(Type.usize, addr_reg, .{ .air_ref = callee }); _ = try self.addInst(.{ .tag = .jalr, + .ops = .rri, .data = .{ .i_type = .{ .rd = .ra, .rs1 = addr_reg, - .imm12 = 0, + .imm12 = Immediate.s(0), } }, }); } @@ -2872,11 +3641,12 @@ fn genCall( .lib => return self.fail("TODO: lib func calls", .{}), } - return call_info.return_value; + return call_info.return_value.short; } fn airRet(self: *Self, inst: Air.Inst.Index, safety: bool) !void { - const mod = self.bin_file.comp.module.?; + const zcu = self.bin_file.comp.module.?; + const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; if (safety) { // safe @@ -2884,32 +3654,35 @@ fn airRet(self: *Self, inst: Air.Inst.Index, safety: bool) !void { // not safe } - const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const operand = try self.resolveInst(un_op); + const ret_ty = self.fn_type.fnReturnType(zcu); + switch (self.ret_mcv.short) { + .none => {}, + .register, + .register_pair, + => try self.genCopy(ret_ty, self.ret_mcv.short, .{ .air_ref = un_op }), + .indirect => |reg_off| { + try self.register_manager.getReg(reg_off.reg, null); + const lock = self.register_manager.lockRegAssumeUnused(reg_off.reg); + defer self.register_manager.unlockReg(lock); - _ = try self.addInst(.{ - .tag = .dbg_epilogue_begin, - .data = .{ .nop = {} }, - }); + try self.genSetReg(Type.usize, reg_off.reg, self.ret_mcv.long); + try self.genCopy( + ret_ty, + .{ .register_offset = reg_off }, + .{ .air_ref = un_op }, + ); + }, + else => unreachable, + } - const ret_ty = self.fn_type.fnReturnType(mod); - try 
self.genCopy(ret_ty, self.ret_mcv, operand); + self.ret_mcv.liveOut(self, inst); + try self.finishAir(inst, .unreach, .{ un_op, .none, .none }); - try self.ret(); - - return self.finishAir(inst, .dead, .{ un_op, .none, .none }); -} - -fn ret(self: *Self) !void { - _ = try self.addInst(.{ - .tag = .psuedo_epilogue, - .data = .{ .nop = {} }, - }); - - // Just add space for an instruction, patch this later + // Just add space for an instruction, to be relocated later const index = try self.addInst(.{ - .tag = .ret, - .data = .{ .nop = {} }, + .tag = .pseudo, + .ops = .pseudo_j, + .data = .{ .inst = undefined }, }); try self.exitlude_jump_relocs.append(self.gpa, index); @@ -2918,37 +3691,49 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const ptr = try self.resolveInst(un_op); + const ptr_ty = self.typeOf(un_op); + switch (self.ret_mcv.short) { + .none => {}, + .register, .register_pair => try self.load(self.ret_mcv.short, ptr, ptr_ty), + .indirect => |reg_off| try self.genSetReg(ptr_ty, reg_off.reg, ptr), + else => unreachable, + } + self.ret_mcv.liveOut(self, inst); + try self.finishAir(inst, .unreach, .{ un_op, .none, .none }); - try self.load(self.ret_mcv, ptr, ptr_ty); + // Just add space for an instruction, to be relocated later + const index = try self.addInst(.{ + .tag = .pseudo, + .ops = .pseudo_j, + .data = .{ .inst = undefined }, }); - try self.ret(); - - return self.finishAir(inst, .dead, .{ un_op, .none, .none }); + try self.exitlude_jump_relocs.append(self.gpa, index); } fn airCmp(self: *Self, inst: Air.Inst.Index) !void { const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)]; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const mod = self.bin_file.comp.module.?; + const zcu = self.bin_file.comp.module.?; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const lhs_ty = self.typeOf(bin_op.lhs); - const int_ty = switch (lhs_ty.zigTypeTag(mod)) { + const int_ty = switch (lhs_ty.zigTypeTag(zcu)) { .Vector => unreachable, // Handled by cmp_vector.
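+ // Non-integer comparables (enums, bools, pointers, error sets, optionals) are + // mapped to an equivalent integer type by the arms below; the compare itself is + // then lowered as a plain integer binOp.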
- .Enum => lhs_ty.intTagType(mod), + .Enum => lhs_ty.intTagType(zcu), .Int => lhs_ty, .Bool => Type.u1, .Pointer => Type.usize, .ErrorSet => Type.u16, .Optional => blk: { - const payload_ty = lhs_ty.optionalChild(mod); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + const payload_ty = lhs_ty.optionalChild(zcu); + if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) { break :blk Type.u1; - } else if (lhs_ty.isPtrLikeOptional(mod)) { + } else if (lhs_ty.isPtrLikeOptional(zcu)) { break :blk Type.usize; } else { return self.fail("TODO riscv cmp non-pointer optionals", .{}); @@ -2958,7 +3743,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index) !void { else => unreachable, }; - const int_info = int_ty.intInfo(mod); + const int_info = int_ty.intInfo(zcu); if (int_info.bits <= 64) { break :result try self.binOp(tag, null, lhs, rhs, int_ty, int_ty); } else { @@ -2978,7 +3763,7 @@ fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const operand = try self.resolveInst(un_op); _ = operand; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airCmpLtErrorsLen for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement airCmpLtErrorsLen for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ un_op, .none, .none }); } @@ -2986,8 +3771,9 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void { const dbg_stmt = self.air.instructions.items(.data)[@intFromEnum(inst)].dbg_stmt; _ = try self.addInst(.{ - .tag = .dbg_line, - .data = .{ .dbg_line_column = .{ + .tag = .pseudo, + .ops = .pseudo_dbg_line_column, + .data = .{ .pseudo_dbg_line_column = .{ .line = dbg_stmt.line, .column = dbg_stmt.column, } }, @@ -3023,7 +3809,7 @@ fn genVarDbgInfo( mcv: MCValue, name: [:0]const u8, ) !void { - const mod = self.bin_file.comp.module.?; + const zcu = self.bin_file.comp.module.?; const is_ptr = switch (tag) { .dbg_var_ptr => true, .dbg_var_val => false, @@ -3043,11 +3829,11 @@ fn genVarDbgInfo( .undef => .undef, .none => .none, else => blk: { - log.debug("TODO generate debug info for {}", .{mcv}); + log.warn("TODO generate debug info for {}", .{mcv}); break :blk .nop; }, }; - try dw.genVarDbgInfo(name, ty, mod.funcOwnerDeclIndex(self.func_index), is_ptr, loc); + try dw.genVarDbgInfo(name, ty, zcu.funcOwnerDeclIndex(self.func_index), is_ptr, loc); }, .plan9 => {}, .none => {}, @@ -3061,146 +3847,49 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { const extra = self.air.extraData(Air.CondBr, pl_op.payload); const then_body: []const Air.Inst.Index = @ptrCast(self.air.extra[extra.end..][0..extra.data.then_body_len]); const else_body: []const Air.Inst.Index = @ptrCast(self.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]); - const liveness_condbr = self.liveness.getCondBr(inst); - - const cond_reg = try self.register_manager.allocReg(inst, gp); - const cond_reg_lock = self.register_manager.lockRegAssumeUnused(cond_reg); - defer self.register_manager.unlockReg(cond_reg_lock); - - // A branch to the false section. Uses beq. 1 is the default "true" state. 
- const reloc = try self.condBr(cond_ty, cond, cond_reg); + const liveness_cond_br = self.liveness.getCondBr(inst); // If the condition dies here in this condbr instruction, process // that death now instead of later as this has an effect on // whether it needs to be spilled in the branches if (self.liveness.operandDies(inst, 0)) { - if (pl_op.operand.toIndex()) |op_inst| self.processDeath(op_inst); + if (pl_op.operand.toIndex()) |op_inst| try self.processDeath(op_inst); } - // Save state - const parent_next_stack_offset = self.next_stack_offset; - const parent_free_registers = self.register_manager.free_registers; - var parent_stack = try self.stack.clone(self.gpa); - defer parent_stack.deinit(self.gpa); - const parent_registers = self.register_manager.registers; + self.scope_generation += 1; + const state = try self.saveState(); + const reloc = try self.condBr(cond_ty, cond); - try self.branch_stack.append(.{}); - errdefer { - _ = self.branch_stack.pop(); - } - - try self.ensureProcessDeathCapacity(liveness_condbr.then_deaths.len); - for (liveness_condbr.then_deaths) |operand| { - self.processDeath(operand); - } + for (liveness_cond_br.then_deaths) |death| try self.processDeath(death); try self.genBody(then_body); + try self.restoreState(state, &.{}, .{ + .emit_instructions = false, + .update_tracking = true, + .resurrect = true, + .close_scope = true, + }); - // Restore state - var saved_then_branch = self.branch_stack.pop(); - defer saved_then_branch.deinit(self.gpa); + self.performReloc(reloc); - self.register_manager.registers = parent_registers; - - self.stack.deinit(self.gpa); - self.stack = parent_stack; - parent_stack = .{}; - - self.next_stack_offset = parent_next_stack_offset; - self.register_manager.free_registers = parent_free_registers; - - const else_branch = self.branch_stack.addOneAssumeCapacity(); - else_branch.* = .{}; - - try self.performReloc(reloc, @intCast(self.mir_instructions.len)); - - try self.ensureProcessDeathCapacity(liveness_condbr.else_deaths.len); - for (liveness_condbr.else_deaths) |operand| { - self.processDeath(operand); - } + for (liveness_cond_br.else_deaths) |death| try self.processDeath(death); try self.genBody(else_body); + try self.restoreState(state, &.{}, .{ + .emit_instructions = false, + .update_tracking = true, + .resurrect = true, + .close_scope = true, + }); - // At this point, each branch will possibly have conflicting values for where - // each instruction is stored. They agree, however, on which instructions are alive/dead. - // We use the first ("then") branch as canonical, and here emit - // instructions into the second ("else") branch to make it conform. - // We continue respect the data structure semantic guarantees of the else_branch so - // that we can use all the code emitting abstractions. This is why at the bottom we - // assert that parent_branch.free_registers equals the saved_then_branch.free_registers - // rather than assigning it. - const parent_branch = &self.branch_stack.items[self.branch_stack.items.len - 2]; - try parent_branch.inst_table.ensureUnusedCapacity(self.gpa, else_branch.inst_table.count()); - const else_slice = else_branch.inst_table.entries.slice(); - const else_keys = else_slice.items(.key); - const else_values = else_slice.items(.value); - for (else_keys, 0..) |else_key, else_idx| { - const else_value = else_values[else_idx]; - const canon_mcv = if (saved_then_branch.inst_table.fetchSwapRemove(else_key)) |then_entry| blk: { - // The instruction's MCValue is overridden in both branches. 
- log.debug("condBr put branch table (key = %{d}, value = {})", .{ else_key, then_entry.value }); - parent_branch.inst_table.putAssumeCapacity(else_key, then_entry.value); - if (else_value == .dead) { - assert(then_entry.value == .dead); - continue; - } - break :blk then_entry.value; - } else blk: { - if (else_value == .dead) - continue; - // The instruction is only overridden in the else branch. - var i: usize = self.branch_stack.items.len - 2; - while (true) { - i -= 1; // If this overflows, the question is: why wasn't the instruction marked dead? - if (self.branch_stack.items[i].inst_table.get(else_key)) |mcv| { - assert(mcv != .dead); - break :blk mcv; - } - } - }; - log.debug("consolidating else_entry {d} {}=>{}", .{ else_key, else_value, canon_mcv }); - // TODO make sure the destination stack offset / register does not already have something - // going on there. - try self.genCopy(self.typeOfIndex(else_key), canon_mcv, else_value); - // TODO track the new register / stack allocation - } - try parent_branch.inst_table.ensureUnusedCapacity(self.gpa, saved_then_branch.inst_table.count()); - const then_slice = saved_then_branch.inst_table.entries.slice(); - const then_keys = then_slice.items(.key); - const then_values = then_slice.items(.value); - for (then_keys, 0..) |then_key, then_idx| { - const then_value = then_values[then_idx]; - // We already deleted the items from this table that matched the else_branch. - // So these are all instructions that are only overridden in the then branch. - parent_branch.inst_table.putAssumeCapacity(then_key, then_value); - if (then_value == .dead) - continue; - const parent_mcv = blk: { - var i: usize = self.branch_stack.items.len - 2; - while (true) { - i -= 1; - if (self.branch_stack.items[i].inst_table.get(then_key)) |mcv| { - assert(mcv != .dead); - break :blk mcv; - } - } - }; - log.debug("consolidating then_entry {d} {}=>{}", .{ then_key, parent_mcv, then_value }); - // TODO make sure the destination stack offset / register does not already have something - // going on there. - try self.genCopy(self.typeOfIndex(then_key), parent_mcv, then_value); - // TODO track the new register / stack allocation - } - - { - var item = self.branch_stack.pop(); - item.deinit(self.gpa); - } + // We already took care of pl_op.operand earlier, so there's nothing left to do. 
+ self.finishAirBookkeeping(); } -fn condBr(self: *Self, cond_ty: Type, condition: MCValue, cond_reg: Register) !Mir.Inst.Index { - try self.genSetReg(cond_ty, cond_reg, condition); +fn condBr(self: *Self, cond_ty: Type, condition: MCValue) !Mir.Inst.Index { + const cond_reg = try self.copyToTmpRegister(cond_ty, condition); return try self.addInst(.{ .tag = .beq, + .ops = .rr_inst, .data = .{ .b_type = .{ .rs1 = cond_reg, @@ -3213,7 +3902,7 @@ fn condBr(self: *Self, cond_ty: Type, condition: MCValue, cond_reg: Register) !M fn airIsNull(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { const operand = try self.resolveInst(un_op); break :result try self.isNull(operand); }; @@ -3222,7 +3911,7 @@ fn airIsNull(self: *Self, inst: Air.Inst.Index) !void { fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { const operand_ptr = try self.resolveInst(un_op); const operand: MCValue = blk: { if (self.reuseOperand(inst, un_op, 0, operand_ptr)) { @@ -3247,7 +3936,7 @@ fn isNull(self: *Self, operand: MCValue) !MCValue { fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { const operand = try self.resolveInst(un_op); break :result try self.isNonNull(operand); }; @@ -3263,7 +3952,7 @@ fn isNonNull(self: *Self, operand: MCValue) !MCValue { fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { const operand_ptr = try self.resolveInst(un_op); const operand: MCValue = blk: { if (self.reuseOperand(inst, un_op, 0, operand_ptr)) { @@ -3281,7 +3970,7 @@ fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void { fn airIsErr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { const operand = try self.resolveInst(un_op); const operand_ty = self.typeOf(un_op); break :result try self.isErr(inst, operand_ty, operand); @@ -3290,9 +3979,9 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index) !void { } fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const zcu = self.bin_file.comp.module.?; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { const operand_ptr = try self.resolveInst(un_op); const operand: MCValue = blk: { if (self.reuseOperand(inst, un_op, 0, operand_ptr)) { @@ -3304,7 +3993,7 @@ fn 
airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void { }; try self.load(operand, operand_ptr, self.typeOf(un_op)); const operand_ptr_ty = self.typeOf(un_op); - const operand_ty = operand_ptr_ty.childType(mod); + const operand_ty = operand_ptr_ty.childType(zcu); break :result try self.isErr(inst, operand_ty, operand); }; @@ -3315,13 +4004,13 @@ fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void { /// /// Result is in the return register. fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, eu_ty: Type, eu_mcv: MCValue) !MCValue { - const mod = self.bin_file.comp.module.?; - const err_ty = eu_ty.errorUnionSet(mod); - if (err_ty.errorSetIsEmpty(mod)) return MCValue{ .immediate = 0 }; // always false + const zcu = self.bin_file.comp.module.?; + const err_ty = eu_ty.errorUnionSet(zcu); + if (err_ty.errorSetIsEmpty(zcu)) return MCValue{ .immediate = 0 }; // always false _ = maybe_inst; - const err_off = errUnionErrorOffset(eu_ty.errorUnionPayload(mod), mod); + const err_off = errUnionErrorOffset(eu_ty.errorUnionPayload(zcu), zcu); switch (eu_mcv) { .register => |reg| { @@ -3361,7 +4050,7 @@ fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, eu_ty: Type, eu_mcv: MCValue) fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { const operand = try self.resolveInst(un_op); const ty = self.typeOf(un_op); break :result try self.isNonErr(inst, ty, operand); @@ -3375,6 +4064,7 @@ fn isNonErr(self: *Self, inst: Air.Inst.Index, eu_ty: Type, eu_mcv: MCValue) !MC .register => |reg| { _ = try self.addInst(.{ .tag = .not, + .ops = .rr, .data = .{ .rr = .{ .rd = reg, @@ -3394,9 +4084,9 @@ fn isNonErr(self: *Self, inst: Air.Inst.Index, eu_ty: Type, eu_mcv: MCValue) !MC } fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const zcu = self.bin_file.comp.module.?; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { const operand_ptr = try self.resolveInst(un_op); const operand: MCValue = blk: { if (self.reuseOperand(inst, un_op, 0, operand_ptr)) { @@ -3407,7 +4097,7 @@ fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void { } }; const operand_ptr_ty = self.typeOf(un_op); - const operand_ty = operand_ptr_ty.childType(mod); + const operand_ty = operand_ptr_ty.childType(zcu); try self.load(operand, operand_ptr, self.typeOf(un_op)); break :result try self.isNonErr(inst, operand_ty, operand); @@ -3421,18 +4111,27 @@ fn airLoop(self: *Self, inst: Air.Inst.Index) !void { const loop = self.air.extraData(Air.Block, ty_pl.payload); const body: []const Air.Inst.Index = @ptrCast(self.air.extra[loop.end..][0..loop.data.body_len]); - const start_index: Mir.Inst.Index = @intCast(self.mir_instructions.len); + self.scope_generation += 1; + const state = try self.saveState(); + const jmp_target: Mir.Inst.Index = @intCast(self.mir_instructions.len); try self.genBody(body); - try self.jump(start_index); + try self.restoreState(state, &.{}, .{ + .emit_instructions = true, + .update_tracking = false, + .resurrect = false, + .close_scope = true, + }); + _ = try self.jump(jmp_target); - return self.finishAirBookkeeping(); + self.finishAirBookkeeping(); 
} /// Send control flow to the `index` of `self.code`. -fn jump(self: *Self, index: Mir.Inst.Index) !void { - _ = try self.addInst(.{ - .tag = .j, +fn jump(self: *Self, index: Mir.Inst.Index) !Mir.Inst.Index { + return self.addInst(.{ + .tag = .pseudo, + .ops = .pseudo_j, .data = .{ .inst = index, }, @@ -3446,33 +4145,34 @@ fn airBlock(self: *Self, inst: Air.Inst.Index) !void { } fn lowerBlock(self: *Self, inst: Air.Inst.Index, body: []const Air.Inst.Index) !void { - try self.blocks.putNoClobber(self.gpa, inst, .{ - // A block is a setup to be able to jump to the end. - .relocs = .{}, - // It also acts as a receptacle for break operands. - // Here we use `MCValue.none` to represent a null value so that the first - // break instruction will choose a MCValue for the block result and overwrite - // this field. Following break instructions will use that MCValue to put their - // block results. - .mcv = MCValue{ .none = {} }, - }); - defer self.blocks.getPtr(inst).?.relocs.deinit(self.gpa); + // A block is a setup to be able to jump to the end. + const inst_tracking_i = self.inst_tracking.count(); + self.inst_tracking.putAssumeCapacityNoClobber(inst, InstTracking.init(.unreach)); + + self.scope_generation += 1; + try self.blocks.putNoClobber(self.gpa, inst, .{ .state = self.initRetroactiveState() }); + const liveness = self.liveness.getBlock(inst); // TODO emit debug info lexical block try self.genBody(body); - for (self.blocks.getPtr(inst).?.relocs.items) |reloc| { - // here we are relocing to point at the instruction after the block. - // [then case] - // [jump to end] // this is reloced - // [else case] - // [jump to end] // this is reloced - // [this isn't generated yet] // point to here - try self.performReloc(reloc, @intCast(self.mir_instructions.len)); + var block_data = self.blocks.fetchRemove(inst).?; + defer block_data.value.deinit(self.gpa); + if (block_data.value.relocs.items.len > 0) { + try self.restoreState(block_data.value.state, liveness.deaths, .{ + .emit_instructions = false, + .update_tracking = true, + .resurrect = true, + .close_scope = true, + }); + for (block_data.value.relocs.items) |reloc| self.performReloc(reloc); } - const result = self.blocks.getPtr(inst).?.mcv; - return self.finishAir(inst, result, .{ .none, .none, .none }); + if (std.debug.runtime_safety) assert(self.inst_tracking.getIndex(inst).? 
== inst_tracking_i); + const tracking = &self.inst_tracking.values()[inst_tracking_i]; + if (self.liveness.isUnused(inst)) try tracking.die(self, inst); + self.getValueIfFree(tracking.short, inst); + self.finishAirBookkeeping(); } fn airSwitch(self: *Self, inst: Air.Inst.Index) !void { @@ -3483,8 +4183,10 @@ fn airSwitch(self: *Self, inst: Air.Inst.Index) !void { // return self.finishAir(inst, .dead, .{ condition, .none, .none }); } -fn performReloc(self: *Self, inst: Mir.Inst.Index, target: Mir.Inst.Index) !void { +fn performReloc(self: *Self, inst: Mir.Inst.Index) void { const tag = self.mir_instructions.items(.tag)[inst]; + const ops = self.mir_instructions.items(.ops)[inst]; + const target: Mir.Inst.Index = @intCast(self.mir_instructions.len); switch (tag) { .bne, @@ -3492,67 +4194,99 @@ fn performReloc(self: *Self, inst: Mir.Inst.Index, target: Mir.Inst.Index) !void => self.mir_instructions.items(.data)[inst].b_type.inst = target, .jal, => self.mir_instructions.items(.data)[inst].j_type.inst = target, - .j, - => self.mir_instructions.items(.data)[inst].inst = target, - else => return self.fail("TODO: performReloc {s}", .{@tagName(tag)}), + .pseudo => switch (ops) { + .pseudo_j => self.mir_instructions.items(.data)[inst].inst = target, + else => std.debug.panic("TODO: performReloc {s}", .{@tagName(ops)}), + }, + else => std.debug.panic("TODO: performReloc {s}", .{@tagName(tag)}), } } fn airBr(self: *Self, inst: Air.Inst.Index) !void { - const branch = self.air.instructions.items(.data)[@intFromEnum(inst)].br; - try self.br(branch.block_inst, branch.operand); - return self.finishAir(inst, .dead, .{ branch.operand, .none, .none }); + const mod = self.bin_file.comp.module.?; + const br = self.air.instructions.items(.data)[@intFromEnum(inst)].br; + + const block_ty = self.typeOfIndex(br.block_inst); + const block_unused = + !block_ty.hasRuntimeBitsIgnoreComptime(mod) or self.liveness.isUnused(br.block_inst); + const block_tracking = self.inst_tracking.getPtr(br.block_inst).?; + const block_data = self.blocks.getPtr(br.block_inst).?; + const first_br = block_data.relocs.items.len == 0; + const block_result = result: { + if (block_unused) break :result .none; + + if (!first_br) try self.getValue(block_tracking.short, null); + const src_mcv = try self.resolveInst(br.operand); + + if (self.reuseOperandAdvanced(inst, br.operand, 0, src_mcv, br.block_inst)) { + if (first_br) break :result src_mcv; + + try self.getValue(block_tracking.short, br.block_inst); + // .long = .none to avoid merging operand and block result stack frames. + const current_tracking: InstTracking = .{ .long = .none, .short = src_mcv }; + try current_tracking.materializeUnsafe(self, br.block_inst, block_tracking.*); + for (current_tracking.getRegs()) |src_reg| self.register_manager.freeReg(src_reg); + break :result block_tracking.short; + } + + const dst_mcv = if (first_br) try self.allocRegOrMem(br.block_inst, true) else dst: { + try self.getValue(block_tracking.short, br.block_inst); + break :dst block_tracking.short; + }; + try self.genCopy(block_ty, dst_mcv, try self.resolveInst(br.operand)); + break :result dst_mcv; + }; + + // Process operand death so that it is properly accounted for in the State below. 
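+    // (Otherwise the block state captured by saveRetroactiveState below would still
+    // consider the operand alive.)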
+ if (self.liveness.operandDies(inst, 0)) { + if (br.operand.toIndex()) |op_inst| try self.processDeath(op_inst); + } + + if (first_br) { + block_tracking.* = InstTracking.init(block_result); + try self.saveRetroactiveState(&block_data.state); + } else try self.restoreState(block_data.state, &.{}, .{ + .emit_instructions = true, + .update_tracking = false, + .resurrect = false, + .close_scope = false, + }); + + // Emit a jump with a relocation. It will be patched up after the block ends. + // Leave the jump offset undefined + const jmp_reloc = try self.jump(undefined); + try block_data.relocs.append(self.gpa, jmp_reloc); + + // Stop tracking block result without forgetting tracking info + try self.freeValue(block_tracking.short); + + self.finishAirBookkeeping(); } fn airBoolOp(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const air_tags = self.air.instructions.items(.tag); _ = air_tags; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement boolean operations for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement boolean operations for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } -fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void { - const block_data = self.blocks.getPtr(block).?; - - const mod = self.bin_file.comp.module.?; - if (self.typeOf(operand).hasRuntimeBits(mod)) { - const operand_mcv = try self.resolveInst(operand); - const block_mcv = block_data.mcv; - if (block_mcv == .none) { - block_data.mcv = operand_mcv; - } else { - try self.genCopy(self.typeOfIndex(block), block_mcv, operand_mcv); - } - } - return self.brVoid(block); -} - -fn brVoid(self: *Self, block: Air.Inst.Index) !void { - const block_data = self.blocks.getPtr(block).?; - - // Emit a jump with a relocation. It will be patched up after the block ends. - try block_data.relocs.ensureUnusedCapacity(self.gpa, 1); - - block_data.relocs.appendAssumeCapacity(try self.addInst(.{ - .tag = .j, - .data = .{ .inst = undefined }, - })); -} - fn airAsm(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Asm, ty_pl.payload); const is_volatile = @as(u1, @truncate(extra.data.flags >> 31)) != 0; const clobbers_len: u31 = @truncate(extra.data.flags); var extra_i: usize = extra.end; - const outputs: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra_i..][0..extra.data.outputs_len]); + const outputs: []const Air.Inst.Ref = + @ptrCast(self.air.extra[extra_i..][0..extra.data.outputs_len]); extra_i += outputs.len; const inputs: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra_i..][0..extra.data.inputs_len]); extra_i += inputs.len; + log.debug("airAsm input: {any}", .{inputs}); + const dead = !is_volatile and self.liveness.isUnused(inst); - const result: MCValue = if (dead) .dead else result: { + const result: MCValue = if (dead) .unreach else result: { if (outputs.len > 1) { return self.fail("TODO implement codegen for asm with more than 1 output", .{}); } @@ -3599,19 +4333,25 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { // for the string, we still use the next u32 for the null terminator. 
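+            // e.g. a clobber of exactly 4 bytes takes 4 / 4 + 1 = 2 u32s: one for the
+            // bytes, one for the terminator.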
extra_i += clobber.len / 4 + 1; - // TODO honor these + if (std.mem.eql(u8, clobber, "") or std.mem.eql(u8, clobber, "memory")) { + // nothing really to do + } else { + try self.register_manager.getReg(parseRegName(clobber) orelse + return self.fail("invalid clobber: '{s}'", .{clobber}), null); + } } } const asm_source = std.mem.sliceAsBytes(self.air.extra[extra_i..])[0..extra.data.source_len]; - if (mem.eql(u8, asm_source, "ecall")) { + if (std.meta.stringToEnum(Mir.Inst.Tag, asm_source)) |tag| { _ = try self.addInst(.{ - .tag = .ecall, - .data = .{ .nop = {} }, + .tag = tag, + .ops = .none, + .data = undefined, }); } else { - return self.fail("TODO implement support for more riscv64 assembly instructions", .{}); + return self.fail("TODO: asm_source {s}", .{asm_source}); } if (output_constraint) |output| { @@ -3621,11 +4361,12 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { const reg_name = output[2 .. output.len - 1]; const reg = parseRegName(reg_name) orelse return self.fail("unrecognized register: '{s}'", .{reg_name}); - break :result MCValue{ .register = reg }; + break :result .{ .register = reg }; } else { - break :result MCValue{ .none = {} }; + break :result .{ .none = {} }; } }; + simple: { var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1); var buf_index: usize = 0; @@ -3640,30 +4381,15 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { @memcpy(buf[buf_index..][0..inputs.len], inputs); return self.finishAir(inst, result, buf); } - var bt = try self.iterateBigTomb(inst, outputs.len + inputs.len); - for (outputs) |output| { - if (output == .none) continue; - - bt.feed(output); - } - for (inputs) |input| { - bt.feed(input); - } - return bt.finishAir(result); -} - -fn iterateBigTomb(self: *Self, inst: Air.Inst.Index, operand_count: usize) !BigTomb { - try self.ensureProcessDeathCapacity(operand_count + 1); - return BigTomb{ - .function = self, - .inst = inst, - .lbt = self.liveness.iterateBigTomb(inst), - }; + var bt = self.liveness.iterateBigTomb(inst); + for (outputs) |output| if (output != .none) try self.feed(&bt, output); + for (inputs) |input| try self.feed(&bt, input); + return self.finishAirResult(inst, result); } /// Sets the value without any modifications to register allocation metadata or stack allocation metadata. 
fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) !void { - const mod = self.bin_file.comp.module.?; + const zcu = self.bin_file.comp.module.?; // There isn't anything to store if (dst_mcv == .none) return; @@ -3690,11 +4416,11 @@ fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) !void { .off = -dst_reg_off.off, } }, }), - .stack_offset => |off| return self.genSetStack(ty, off, src_mcv), - .memory => |addr| return self.genSetMem(ty, addr, src_mcv), + .load_frame => |frame| return self.genSetStack(ty, frame, src_mcv), + .memory => return self.fail("TODO: genCopy memory", .{}), .register_pair => |dst_regs| { const src_info: ?struct { addr_reg: Register, addr_lock: RegisterLock } = switch (src_mcv) { - .register_pair, .memory, .indirect, .stack_offset => null, + .register_pair, .memory, .indirect, .load_frame => null, .load_symbol => src: { const src_addr_reg, const src_addr_lock = try self.allocReg(); errdefer self.register_manager.unlockReg(src_addr_lock); @@ -3708,7 +4434,7 @@ fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) !void { try self.resolveInst(src_ref), ), else => return self.fail("TODO implement genCopy for {s} of {}", .{ - @tagName(src_mcv), ty.fmt(mod), + @tagName(src_mcv), ty.fmt(zcu), }), }; defer if (src_info) |info| self.register_manager.unlockReg(info.addr_lock); @@ -3717,34 +4443,38 @@ fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) !void { for (dst_regs, try self.splitType(ty), 0..) |dst_reg, dst_ty, part_i| { try self.genSetReg(dst_ty, dst_reg, switch (src_mcv) { .register_pair => |src_regs| .{ .register = src_regs[part_i] }, - .memory, .indirect, .stack_offset => src_mcv.address().offset(part_disp).deref(), + .memory, .indirect, .load_frame => src_mcv.address().offset(part_disp).deref(), .load_symbol => .{ .indirect = .{ .reg = src_info.?.addr_reg, .off = part_disp, } }, else => unreachable, }); - part_disp += @intCast(dst_ty.abiSize(mod)); + part_disp += @intCast(dst_ty.abiSize(zcu)); } }, else => return std.debug.panic("TODO: genCopy {s} with {s}", .{ @tagName(dst_mcv), @tagName(src_mcv) }), } } -/// Sets the value of `src_mcv` into stack memory at `stack_offset`. -fn genSetStack(self: *Self, ty: Type, stack_offset: u32, src_mcv: MCValue) InnerError!void { - const mod = self.bin_file.comp.module.?; - const abi_size: u32 = @intCast(ty.abiSize(mod)); +fn genSetStack( + self: *Self, + ty: Type, + frame: FrameAddr, + src_mcv: MCValue, +) InnerError!void { + const zcu = self.bin_file.comp.module.?; + const abi_size: u32 = @intCast(ty.abiSize(zcu)); switch (src_mcv) { .none => return, .dead => unreachable, .undef => { if (!self.wantSafety()) return; - try self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaaaaaaaaaa }); + try self.genSetStack(ty, frame, .{ .immediate = 0xaaaaaaaaaaaaaaaa }); }, .immediate, - .ptr_stack_offset, + .lea_frame, => { // TODO: remove this lock in favor of a copyToTmpRegister when we load 64 bit immediates with // a register allocation. 
@@ -3753,26 +4483,24 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, src_mcv: MCValue) Inner
             try self.genSetReg(ty, reg, src_mcv);
 
-            return self.genSetStack(ty, stack_offset, .{ .register = reg });
+            return self.genSetStack(ty, frame, .{ .register = reg });
         },
         .register => |reg| {
             switch (abi_size) {
                 1, 2, 4, 8 => {
-                    const tag: Mir.Inst.Tag = switch (abi_size) {
-                        1 => .sb,
-                        2 => .sh,
-                        4 => .sw,
-                        8 => .sd,
-                        else => unreachable,
-                    };
-                    _ = try self.addInst(.{
-                        .tag = tag,
-                        .data = .{ .i_type = .{
-                            .rd = reg,
-                            .rs1 = .sp,
-                            .imm12 = math.cast(i12, stack_offset) orelse {
-                                return self.fail("TODO: genSetStack bigger stack values", .{});
+                    _ = try self.addInst(.{
+                        .tag = .pseudo,
+                        .ops = .pseudo_store_rm,
+                        .data = .{ .rm = .{
+                            .r = reg,
+                            .m = .{
+                                .base = .{ .frame = frame.index },
+                                .mod = .{
+                                    .rm = .{
+                                        .size = self.memSize(ty),
+                                        .disp = frame.off,
+                                    },
+                                },
                             },
                         } },
                     });
@@ -3780,38 +4508,26 @@
                 else => unreachable, // register can hold a max of 8 bytes
             }
         },
-        .stack_offset,
+        .load_frame,
         .indirect,
         .load_symbol,
         => {
-            if (src_mcv == .stack_offset and src_mcv.stack_offset == stack_offset) return;
-
             if (abi_size <= 8) {
                 const reg = try self.copyToTmpRegister(ty, src_mcv);
-                return self.genSetStack(ty, stack_offset, .{ .register = reg });
+                return self.genSetStack(ty, frame, .{ .register = reg });
             }
 
             try self.genInlineMemcpy(
-                .{ .ptr_stack_offset = stack_offset },
+                .{ .lea_frame = frame },
                 src_mcv.address(),
                 .{ .immediate = abi_size },
             );
         },
-        .air_ref => |ref| try self.genSetStack(ty, stack_offset, try self.resolveInst(ref)),
+        .air_ref => |ref| try self.genSetStack(ty, frame, try self.resolveInst(ref)),
         else => return self.fail("TODO: genSetStack {s}", .{@tagName(src_mcv)}),
     }
 }
 
-fn genSetMem(self: *Self, ty: Type, addr: u64, src_mcv: MCValue) InnerError!void {
-    const mod = self.bin_file.comp.module.?;
-    const abi_size: u32 = @intCast(ty.abiSize(mod));
-    _ = abi_size;
-    _ = addr;
-    _ = src_mcv;
-
-    return self.fail("TODO: genSetMem", .{});
-}
-
 fn genInlineMemcpy(
     self: *Self,
     dst_ptr: MCValue,
@@ -3834,11 +4550,12 @@
     // lb tmp, 0(src)
     const first_inst = try self.addInst(.{
         .tag = .lb,
+        .ops = .rri,
         .data = .{
             .i_type = .{
                 .rd = tmp,
                 .rs1 = src,
-                .imm12 = 0,
+                .imm12 = Immediate.s(0),
             },
         },
     });
@@ -3846,11 +4563,12 @@
     // sb tmp, 0(dst)
     _ = try self.addInst(.{
         .tag = .sb,
+        .ops = .rri,
         .data = .{
             .i_type = .{
                 .rd = tmp,
                 .rs1 = dst,
-                .imm12 = 0,
+                .imm12 = Immediate.s(0),
             },
         },
     });
@@ -3858,11 +4576,12 @@
    // dec count by 1
     _ = try self.addInst(.{
         .tag = .addi,
+        .ops = .rri,
         .data = .{
             .i_type = .{
                 .rd = count,
                 .rs1 = count,
-                .imm12 = -1,
+                .imm12 = Immediate.s(-1),
             },
         },
     });
@@ -3870,6 +4589,7 @@
     // branch if count is 0
     _ = try self.addInst(.{
         .tag = .beq,
+        .ops = .rr_inst,
         .data = .{
             .b_type = .{
                 .inst = @intCast(self.mir_instructions.len + 4), // points after the last inst
@@ -3882,29 +4602,32 @@
     // increment the pointers
     _ = try self.addInst(.{
         .tag = .addi,
+        .ops = .rri,
         .data = .{
             .i_type = .{
                 .rd = src,
                 .rs1 = src,
-                .imm12 = 1,
+                .imm12 = Immediate.s(1),
             },
         },
     });
 
     _ = try self.addInst(.{
         .tag = .addi,
+        .ops = .rri,
         .data = .{
             .i_type = .{
                 .rd = dst,
                 .rs1 = dst,
-                .imm12 = 1,
+                .imm12 = Immediate.s(1),
             },
         },
     });
 
     // jump back to start of loop
     _ = try self.addInst(.{
-        .tag = .j,
+        .tag = .pseudo,
+        .ops = .pseudo_j,
         .data = .{
             .inst = first_inst,
         },
@@ -3913,31 +4636,13 @@
 /// Sets the value of `src_mcv` into `reg`. Assumes you have a lock on it.
 fn genSetReg(self: *Self, ty: Type, reg: Register, src_mcv: MCValue) InnerError!void {
-    const mod = self.bin_file.comp.module.?;
-    const abi_size: u32 = @intCast(ty.abiSize(mod));
+    const zcu = self.bin_file.comp.module.?;
+    const abi_size: u32 = @intCast(ty.abiSize(zcu));
 
-    const load_tag: Mir.Inst.Tag = switch (abi_size) {
-        1 => .lb,
-        2 => .lh,
-        4 => .lw,
-        8 => .ld,
-        else => return self.fail("TODO: genSetReg for size {d}", .{abi_size}),
-    };
+    if (abi_size > 8) return self.fail("tried to set reg with size {}", .{abi_size});
 
     switch (src_mcv) {
         .dead => unreachable,
-        .ptr_stack_offset => |off| {
-            _ = try self.addInst(.{
-                .tag = .addi,
-                .data = .{ .i_type = .{
-                    .rd = reg,
-                    .rs1 = .sp,
-                    .imm12 = math.cast(i12, off) orelse {
-                        return self.fail("TODO: bigger stack sizes", .{});
-                    },
-                } },
-            });
-        },
         .unreach, .none => return, // Nothing to do.
         .undef => {
             if (!self.wantSafety())
@@ -3950,10 +4655,11 @@
             if (math.minInt(i12) <= x and x <= math.maxInt(i12)) {
                 _ = try self.addInst(.{
                     .tag = .addi,
+                    .ops = .rri,
                     .data = .{ .i_type = .{
                         .rd = reg,
                         .rs1 = .zero,
-                        .imm12 = @intCast(x),
+                        .imm12 = Immediate.s(@intCast(x)),
                     } },
                 });
             } else if (math.minInt(i32) <= x and x <= math.maxInt(i32)) {
@@ -3963,17 +4669,19 @@
                 _ = try self.addInst(.{
                     .tag = .lui,
+                    .ops = .ri,
                     .data = .{ .u_type = .{
                         .rd = reg,
-                        .imm20 = hi20,
+                        .imm20 = Immediate.s(hi20),
                     } },
                 });
                 _ = try self.addInst(.{
                     .tag = .addi,
+                    .ops = .rri,
                     .data = .{ .i_type = .{
                         .rd = reg,
                         .rs1 = reg,
-                        .imm12 = lo12,
+                        .imm12 = Immediate.s(lo12),
                     } },
                 });
             } else {
@@ -3992,15 +4700,17 @@
                 _ = try self.addInst(.{
                     .tag = .slli,
+                    .ops = .rri,
                     .data = .{ .i_type = .{
-                        .imm12 = 32,
                         .rd = reg,
                         .rs1 = reg,
+                        .imm12 = Immediate.s(32),
                     } },
                 });
 
                 _ = try self.addInst(.{
                     .tag = .add,
+                    .ops = .rrr,
                     .data = .{ .r_type = .{
                         .rd = reg,
                         .rs1 = reg,
@@ -4016,7 +4726,8 @@
             // mov reg, src_reg
             _ = try self.addInst(.{
-                .tag = .mv,
+                .tag = .pseudo,
+                .ops = .pseudo_mv,
                 .data = .{ .rr = .{
                     .rd = reg,
                     .rs = src_reg,
@@ -4029,21 +4740,46 @@
             _ = try self.addInst(.{
                 .tag = .ld,
+                .ops = .rri,
                 .data = .{ .i_type = .{
                     .rd = reg,
                     .rs1 = reg,
-                    .imm12 = 0,
+                    .imm12 = Immediate.s(0),
                 } },
             });
         },
-        .stack_offset => |off| {
-            _ = try self.addInst(.{
-                .tag = load_tag,
-                .data = .{ .i_type = .{
-                    .rd = reg,
-                    .rs1 = .sp,
-                    .imm12 = math.cast(i12, off) orelse {
-                        return self.fail("TODO: genSetReg support larger stack sizes", .{});
+        .load_frame => |frame| {
+            _ = try self.addInst(.{
+                .tag = .pseudo,
+                .ops = .pseudo_load_rm,
+                .data = .{ .rm = .{
+                    .r = reg,
+                    .m = .{
+                        .base = .{ .frame = frame.index },
+                        .mod = .{
+                            .rm = .{
+                                .size = self.memSize(ty),
+                                .disp = frame.off,
+                            },
+                        },
+                    },
+                } },
+            });
+        },
+        .lea_frame => |frame| {
+            _ = try self.addInst(.{
+                .tag = .pseudo,
+                .ops = .pseudo_lea_rm,
+                .data = .{ .rm = .{
+                    .r = reg,
+                    .m = .{
+                        .base = .{ .frame = frame.index },
+                        .mod = .{
+                            .rm = .{
+                                .size = self.memSize(ty),
+                                .disp = frame.off,
+                            },
+                        },
                     },
                 } },
             });
         },
@@ -4052,35 +4788,41 @@
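+            // Materialize the address into `reg` first, then replace it with the value
+            // loaded through that address.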
try self.genSetReg(ty, reg, src_mcv.address()); try self.genSetReg(ty, reg, .{ .indirect = .{ .reg = reg } }); }, - .air_ref => |ref| try self.genSetReg(ty, reg, try self.resolveInst(ref)), .indirect => |reg_off| { + const load_tag: Mir.Inst.Tag = switch (abi_size) { + 1 => .lb, + 2 => .lh, + 4 => .lw, + 8 => .ld, + else => return self.fail("TODO: genSetReg for size {d}", .{abi_size}), + }; + _ = try self.addInst(.{ .tag = load_tag, - .data = .{ - .i_type = .{ - .rd = reg, - .rs1 = reg_off.reg, - .imm12 = @intCast(reg_off.off), - }, - }, + .ops = .rri, + .data = .{ .i_type = .{ + .rd = reg, + .rs1 = reg_off.reg, + .imm12 = Immediate.s(reg_off.off), + } }, }); }, - .addr_symbol => |sym_off| { + .lea_symbol => |sym_off| { assert(sym_off.off == 0); const atom_index = try self.symbolIndex(); _ = try self.addInst(.{ - .tag = .load_symbol, - .data = .{ - .payload = try self.addExtra(Mir.LoadSymbolPayload{ - .register = reg.id(), - .atom_index = atom_index, - .sym_index = sym_off.sym, - }), - }, + .tag = .pseudo, + .ops = .pseudo_load_symbol, + .data = .{ .payload = try self.addExtra(Mir.LoadSymbolPayload{ + .register = reg.id(), + .atom_index = atom_index, + .sym_index = sym_off.sym, + }) }, }); }, + .air_ref => |ref| try self.genSetReg(ty, reg, try self.resolveInst(ref)), else => return self.fail("TODO: genSetReg {s}", .{@tagName(src_mcv)}), } } @@ -4100,27 +4842,44 @@ fn airIntFromPtr(self: *Self, inst: Air.Inst.Index) !void { } fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { + const zcu = self.bin_file.comp.module.?; + const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result = if (self.liveness.isUnused(inst)) .dead else result: { - const operand = try self.resolveInst(ty_op.operand); - if (self.reuseOperand(inst, ty_op.operand, 0, operand)) break :result operand; + const result = if (self.liveness.isUnused(inst)) .unreach else result: { + const src_mcv = try self.resolveInst(ty_op.operand); - const operand_lock = switch (operand) { - .register => |reg| self.register_manager.lockReg(reg), - else => null, + const dst_ty = self.typeOfIndex(inst); + const src_ty = self.typeOf(ty_op.operand); + + const src_lock = if (src_mcv.getReg()) |reg| self.register_manager.lockReg(reg) else null; + defer if (src_lock) |lock| self.register_manager.unlockReg(lock); + + const dst_mcv = if (dst_ty.abiSize(zcu) <= src_ty.abiSize(zcu) and + self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) src_mcv else dst: { + const dst_mcv = try self.allocRegOrMem(inst, true); + try self.genCopy(switch (math.order(dst_ty.abiSize(zcu), src_ty.abiSize(zcu))) { + .lt => dst_ty, + .eq => if (!dst_mcv.isMemory() or src_mcv.isMemory()) dst_ty else src_ty, + .gt => src_ty, + }, dst_mcv, src_mcv); + break :dst dst_mcv; }; - defer if (operand_lock) |lock| self.register_manager.unlockReg(lock); - const dest = try self.allocRegOrMem(inst, true); - try self.genCopy(self.typeOfIndex(inst), dest, operand); - break :result dest; + if (dst_ty.isAbiInt(zcu) and src_ty.isAbiInt(zcu) and + dst_ty.intInfo(zcu).signedness == src_ty.intInfo(zcu).signedness) break :result dst_mcv; + + const abi_size = dst_ty.abiSize(zcu); + const bit_size = dst_ty.bitSize(zcu); + if (abi_size * 8 <= bit_size) break :result dst_mcv; + + return self.fail("TODO: airBitCast {} to {}", .{ src_ty.fmt(zcu), dst_ty.fmt(zcu) }); }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { const ty_op = 
self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airArrayToSlice for {}", .{ + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement airArrayToSlice for {}", .{ self.target.cpu.arch, }); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); @@ -4128,7 +4887,7 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { fn airFloatFromInt(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airFloatFromInt for {}", .{ + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement airFloatFromInt for {}", .{ self.target.cpu.arch, }); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); @@ -4136,7 +4895,7 @@ fn airFloatFromInt(self: *Self, inst: Air.Inst.Index) !void { fn airIntFromFloat(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airIntFromFloat for {}", .{ + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement airIntFromFloat for {}", .{ self.target.cpu.arch, }); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); @@ -4186,7 +4945,7 @@ fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void { fn airTagName(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const operand = try self.resolveInst(un_op); - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else { + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else { _ = operand; return self.fail("TODO implement airTagName for riscv64", .{}); }; @@ -4194,7 +4953,7 @@ fn airTagName(self: *Self, inst: Air.Inst.Index) !void { } fn airErrorName(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const zcu = self.bin_file.comp.module.?; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const err_ty = self.typeOf(un_op); @@ -4207,7 +4966,7 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void { const addr_reg, const addr_lock = try self.allocReg(); defer self.register_manager.unlockReg(addr_lock); - const lazy_sym = link.File.LazySymbol.initDecl(.const_data, null, mod); + const lazy_sym = link.File.LazySymbol.initDecl(.const_data, null, zcu); if (self.bin_file.cast(link.File.Elf)) |elf_file| { const sym_index = elf_file.zigObjectPtr().?.getOrCreateMetadataForLazySymbol(elf_file, lazy_sym) catch |err| return self.fail("{s} creating lazy symbol", .{@errorName(err)}); @@ -4223,69 +4982,45 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void { const end_reg, const end_lock = try self.allocReg(); defer self.register_manager.unlockReg(end_lock); - _ = try self.addInst(.{ - .tag = .slli, - .data = .{ - .i_type = .{ - .rd = err_reg, - .rs1 = err_reg, - .imm12 = 4, - }, - }, - }); + _ = start_reg; + _ = end_reg; - try self.binOpMir( - .add, - null, - Type.usize, - .{ .register = err_reg }, - .{ .register = addr_reg }, - ); - - try self.genSetReg(Type.usize, start_reg, .{ .indirect = .{ .reg = err_reg } }); - try 
self.genSetReg(Type.usize, end_reg, .{ .indirect = .{ .reg = err_reg, .off = 8 } }); - - const dst_mcv = try self.allocRegOrMem(inst, false); - - try self.genSetStack(Type.usize, dst_mcv.stack_offset, .{ .register = start_reg }); - try self.genSetStack(Type.usize, dst_mcv.stack_offset + 8, .{ .register = end_reg }); - - return self.finishAir(inst, dst_mcv, .{ un_op, .none, .none }); + return self.fail("TODO: airErrorName", .{}); } fn airSplat(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airSplat for riscv64", .{}); + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement airSplat for riscv64", .{}); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } fn airSelect(self: *Self, inst: Air.Inst.Index) !void { const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const extra = self.air.extraData(Air.Bin, pl_op.payload).data; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airSelect for riscv64", .{}); + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement airSelect for riscv64", .{}); return self.finishAir(inst, result, .{ pl_op.operand, extra.lhs, extra.rhs }); } fn airShuffle(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airShuffle for riscv64", .{}); + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement airShuffle for riscv64", .{}); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } fn airReduce(self: *Self, inst: Air.Inst.Index) !void { const reduce = self.air.instructions.items(.data)[@intFromEnum(inst)].reduce; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airReduce for riscv64", .{}); + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement airReduce for riscv64", .{}); return self.finishAir(inst, result, .{ reduce.operand, .none, .none }); } fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const zcu = self.bin_file.comp.module.?; const vector_ty = self.typeOfIndex(inst); - const len = vector_ty.vectorLen(mod); + const len = vector_ty.vectorLen(zcu); const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const elements: []const Air.Inst.Ref = @ptrCast(self.air.extra[ty_pl.payload..][0..len]); const result: MCValue = res: { - if (self.liveness.isUnused(inst)) break :res MCValue.dead; + if (self.liveness.isUnused(inst)) break :res .unreach; return self.fail("TODO implement airAggregateInit for riscv64", .{}); }; @@ -4294,11 +5029,9 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { @memcpy(buf[0..elements.len], elements); return self.finishAir(inst, result, buf); } - var bt = try self.iterateBigTomb(inst, elements.len); - for (elements) |elem| { - bt.feed(elem); - } - return bt.finishAir(result); + var bt = self.liveness.iterateBigTomb(inst); + for (elements) |elem| try self.feed(&bt, elem); + return self.finishAirResult(inst, result); } fn airUnionInit(self: *Self, 
inst: Air.Inst.Index) !void { @@ -4313,49 +5046,55 @@ fn airPrefetch(self: *Self, inst: Air.Inst.Index) !void { const prefetch = self.air.instructions.items(.data)[@intFromEnum(inst)].prefetch; // TODO: RISC-V does have prefetch instruction variants. // see here: https://raw.githubusercontent.com/riscv/riscv-CMOs/master/specifications/cmobase-v1.0.1.pdf - return self.finishAir(inst, MCValue.dead, .{ prefetch.ptr, .none, .none }); + return self.finishAir(inst, .unreach, .{ prefetch.ptr, .none, .none }); } fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const extra = self.air.extraData(Air.Bin, pl_op.payload).data; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else { + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else { return self.fail("TODO implement airMulAdd for riscv64", .{}); }; return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, pl_op.operand }); } -fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { - const mod = self.bin_file.comp.module.?; +fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { + const zcu = self.bin_file.comp.module.?; // If the type has no codegen bits, no need to store it. - const inst_ty = self.typeOf(inst); - if (!inst_ty.hasRuntimeBits(mod)) - return MCValue{ .none = {} }; + const inst_ty = self.typeOf(ref); + if (!inst_ty.hasRuntimeBits(zcu)) + return .none; - const inst_index = inst.toIndex() orelse return self.genTypedValue((try self.air.value(inst, mod)).?); - return self.getResolvedInstValue(inst_index); + const mcv = if (ref.toIndex()) |inst| mcv: { + break :mcv self.inst_tracking.getPtr(inst).?.short; + } else mcv: { + const ip_index = ref.toInterned().?; + const gop = try self.const_tracking.getOrPut(self.gpa, ip_index); + if (!gop.found_existing) gop.value_ptr.* = InstTracking.init( + try self.genTypedValue(Value.fromInterned(ip_index)), + ); + break :mcv gop.value_ptr.short; + }; + + return mcv; } -fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue { - // Treat each stack item as a "layer" on top of the previous one. - var i: usize = self.branch_stack.items.len; - while (true) { - i -= 1; - if (self.branch_stack.items[i].inst_table.get(inst)) |mcv| { - assert(mcv != .dead); - return mcv; - } - } +fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) *InstTracking { + const tracking = self.inst_tracking.getPtr(inst).?; + return switch (tracking.short) { + .none, .unreach, .dead => unreachable, + else => tracking, + }; } fn genTypedValue(self: *Self, val: Value) InnerError!MCValue { - const mod = self.bin_file.comp.module.?; + const zcu = self.bin_file.comp.module.?; const result = try codegen.genTypedValue( self.bin_file, self.src_loc, val, - mod.funcOwnerDeclIndex(self.func_index), + zcu.funcOwnerDeclIndex(self.func_index), ); const mcv: MCValue = switch (result) { .mcv => |mcv| switch (mcv) { @@ -4378,8 +5117,8 @@ fn genTypedValue(self: *Self, val: Value) InnerError!MCValue { const CallMCValues = struct { args: []MCValue, - return_value: MCValue, - stack_byte_count: u32, + return_value: InstTracking, + stack_byte_count: u31, stack_align: Alignment, fn deinit(self: *CallMCValues, func: *Self) void { @@ -4389,86 +5128,115 @@ const CallMCValues = struct { }; /// Caller must call `CallMCValues.deinit`. 
-fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: CallView) !CallMCValues { - const mod = self.bin_file.comp.module.?; - const ip = &mod.intern_pool; +fn resolveCallingConventionValues( + self: *Self, + fn_info: InternPool.Key.FuncType, +) !CallMCValues { + const zcu = self.bin_file.comp.module.?; + const ip = &zcu.intern_pool; - _ = role; + const param_types = try self.gpa.alloc(Type, fn_info.param_types.len); + defer self.gpa.free(param_types); + + for (param_types[0..fn_info.param_types.len], fn_info.param_types.get(ip)) |*dest, src| { + dest.* = Type.fromInterned(src); + } - const fn_info = mod.typeToFunc(fn_ty).?; const cc = fn_info.cc; var result: CallMCValues = .{ - .args = try self.gpa.alloc(MCValue, fn_info.param_types.len), + .args = try self.gpa.alloc(MCValue, param_types.len), // These undefined values must be populated before returning from this function. .return_value = undefined, - .stack_byte_count = undefined, + .stack_byte_count = 0, .stack_align = undefined, }; errdefer self.gpa.free(result.args); - const ret_ty = fn_ty.fnReturnType(mod); + const ret_ty = Type.fromInterned(fn_info.return_type); switch (cc) { .Naked => { assert(result.args.len == 0); - result.return_value = .{ .unreach = {} }; - result.stack_byte_count = 0; - result.stack_align = .@"1"; - return result; + result.return_value = InstTracking.init(.unreach); + result.stack_align = .@"8"; }, - .Unspecified, .C => { + .C, .Unspecified => { if (result.args.len > 8) { - return self.fail("TODO: support more than 8 function args", .{}); + return self.fail("RISC-V calling convention does not support more than 8 arguments", .{}); } - var fa_reg_i: u32 = 0; + var ret_int_reg_i: u32 = 0; + var param_int_reg_i: u32 = 0; - // spill the needed argument registers - for (fn_info.param_types.get(ip), result.args) |ty, *result_arg| { - const param_ty = Type.fromInterned(ty); - const param_size = param_ty.abiSize(mod); - - switch (param_size) { - 1...8 => { - const arg_reg: Register = abi.function_arg_regs[fa_reg_i]; - fa_reg_i += 1; - try self.register_manager.getReg(arg_reg, null); - result_arg.* = .{ .register = arg_reg }; - }, - 9...16 => { - const arg_regs: [2]Register = abi.function_arg_regs[fa_reg_i..][0..2].*; - fa_reg_i += 2; - for (arg_regs) |reg| try self.register_manager.getReg(reg, null); - result_arg.* = .{ .register_pair = arg_regs }; - }, - else => return self.fail("TODO: support args of size {}", .{param_size}), - } - } - - result.stack_byte_count = self.max_end_stack; result.stack_align = .@"16"; + + // Return values + if (ret_ty.zigTypeTag(zcu) == .NoReturn) { + result.return_value = InstTracking.init(.unreach); + } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu)) { + result.return_value = InstTracking.init(.none); + } else { + var ret_tracking: [2]InstTracking = undefined; + var ret_tracking_i: usize = 0; + + const classes = mem.sliceTo(&abi.classifySystem(ret_ty, zcu), .none); + + for (classes) |class| switch (class) { + .integer => { + const ret_int_reg = abi.function_arg_regs[ret_int_reg_i]; + ret_int_reg_i += 1; + + ret_tracking[ret_tracking_i] = InstTracking.init(.{ .register = ret_int_reg }); + ret_tracking_i += 1; + }, + else => return self.fail("TODO: C calling convention return class {}", .{class}), + }; + + result.return_value = switch (ret_tracking_i) { + else => return self.fail("ty {} took {} tracking return indices", .{ ret_ty.fmt(zcu), ret_tracking_i }), + 1 => ret_tracking[0], + 2 => InstTracking.init(.{ .register_pair = .{ + ret_tracking[0].short.register, 
ret_tracking[1].short.register, + } }), + }; + } + + for (param_types, result.args) |ty, *arg| { + assert(ty.hasRuntimeBitsIgnoreComptime(zcu)); + + var arg_mcv: [2]MCValue = undefined; + var arg_mcv_i: usize = 0; + + const classes = mem.sliceTo(&abi.classifySystem(ty, zcu), .none); + + for (classes) |class| switch (class) { + .integer => { + const param_int_regs = abi.function_arg_regs; + if (param_int_reg_i >= param_int_regs.len) break; + + const param_int_reg = param_int_regs[param_int_reg_i]; + param_int_reg_i += 1; + + arg_mcv[arg_mcv_i] = .{ .register = param_int_reg }; + arg_mcv_i += 1; + }, + else => return self.fail("TODO: C calling convention arg class {}", .{class}), + } else { + arg.* = switch (arg_mcv_i) { + else => return self.fail("ty {} took {} tracking arg indices", .{ ty.fmt(zcu), arg_mcv_i }), + 1 => arg_mcv[0], + 2 => .{ .register_pair = .{ arg_mcv[0].register, arg_mcv[1].register } }, + }; + continue; + } + + return self.fail("TODO: pass args by stack", .{}); + } }, else => return self.fail("TODO implement function parameters for {} on riscv64", .{cc}), } - if (ret_ty.zigTypeTag(mod) == .NoReturn) { - result.return_value = .{ .unreach = {} }; - } else if (!ret_ty.hasRuntimeBits(mod)) { - result.return_value = .{ .none = {} }; - } else switch (cc) { - .Naked => unreachable, - .Unspecified, .C => { - const ret_ty_size: u32 = @intCast(ret_ty.abiSize(mod)); - if (ret_ty_size <= 8) { - result.return_value = .{ .register = .a0 }; - } else if (ret_ty_size <= 16) { - return self.fail("TODO support returning with a0 + a1", .{}); - } else { - return self.fail("TODO support return by reference", .{}); - } - }, - else => return self.fail("TODO implement function return values for {}", .{cc}), - } + result.stack_byte_count = @intCast(result.stack_align.forward(result.stack_byte_count)); return result; } @@ -4504,36 +5272,36 @@ fn parseRegName(name: []const u8) ?Register { } fn typeOf(self: *Self, inst: Air.Inst.Ref) Type { - const mod = self.bin_file.comp.module.?; - return self.air.typeOf(inst, &mod.intern_pool); + const zcu = self.bin_file.comp.module.?; + return self.air.typeOf(inst, &zcu.intern_pool); } fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type { - const mod = self.bin_file.comp.module.?; - return self.air.typeOfIndex(inst, &mod.intern_pool); + const zcu = self.bin_file.comp.module.?; + return self.air.typeOfIndex(inst, &zcu.intern_pool); } fn hasFeature(self: *Self, feature: Target.riscv.Feature) bool { return Target.riscv.featureSetHas(self.target.cpu.features, feature); } -pub fn errUnionPayloadOffset(payload_ty: Type, mod: *Module) u64 { - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return 0; - const payload_align = payload_ty.abiAlignment(mod); - const error_align = Type.anyerror.abiAlignment(mod); - if (payload_align.compare(.gte, error_align) or !payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { +pub fn errUnionPayloadOffset(payload_ty: Type, zcu: *Module) u64 { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) return 0; + const payload_align = payload_ty.abiAlignment(zcu); + const error_align = Type.anyerror.abiAlignment(zcu); + if (payload_align.compare(.gte, error_align) or !payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) { return 0; } else { - return payload_align.forward(Type.anyerror.abiSize(mod)); + return payload_align.forward(Type.anyerror.abiSize(zcu)); } } -pub fn errUnionErrorOffset(payload_ty: Type, mod: *Module) u64 { - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return 0; - const payload_align = payload_ty.abiAlignment(mod); - 
const error_align = Type.anyerror.abiAlignment(mod); - if (payload_align.compare(.gte, error_align) and payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { - return error_align.forward(payload_ty.abiSize(mod)); +pub fn errUnionErrorOffset(payload_ty: Type, zcu: *Module) u64 { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) return 0; + const payload_align = payload_ty.abiAlignment(zcu); + const error_align = Type.anyerror.abiAlignment(zcu); + if (payload_align.compare(.gte, error_align) and payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) { + return error_align.forward(payload_ty.abiSize(zcu)); } else { return 0; } diff --git a/src/arch/riscv64/Emit.zig b/src/arch/riscv64/Emit.zig index a3156fc499..0e1decd42f 100644 --- a/src/arch/riscv64/Emit.zig +++ b/src/arch/riscv64/Emit.zig @@ -1,620 +1,163 @@ -//! This file contains the functionality for lowering RISCV64 MIR into -//! machine code +//! This file contains the functionality for emitting RISC-V MIR as machine code -mir: Mir, -bin_file: *link.File, +lower: Lower, debug_output: DebugInfoOutput, -output_mode: std.builtin.OutputMode, -link_mode: std.builtin.LinkMode, -target: *const std.Target, -err_msg: ?*ErrorMsg = null, -src_loc: Module.SrcLoc, code: *std.ArrayList(u8), -/// List of registers to save in the prologue. -save_reg_list: Mir.RegisterList, - prev_di_line: u32, prev_di_column: u32, /// Relative to the beginning of `code`. prev_di_pc: usize, -/// Function's stack size. Used for backpatching. -stack_size: u32, - -/// For backward branches: stores the code offset of the target -/// instruction -/// -/// For forward branches: stores the code offset of the branch -/// instruction code_offset_mapping: std.AutoHashMapUnmanaged(Mir.Inst.Index, usize) = .{}, +relocs: std.ArrayListUnmanaged(Reloc) = .{}, -const log = std.log.scoped(.emit); - -const InnerError = error{ - OutOfMemory, +pub const Error = Lower.Error || error{ EmitFail, }; -pub fn emitMir( - emit: *Emit, -) InnerError!void { - const mir_tags = emit.mir.instructions.items(.tag); +pub fn emitMir(emit: *Emit) Error!void { + log.debug("mir instruction len: {}", .{emit.lower.mir.instructions.len}); + for (0..emit.lower.mir.instructions.len) |mir_i| { + const mir_index: Mir.Inst.Index = @intCast(mir_i); + try emit.code_offset_mapping.putNoClobber( + emit.lower.allocator, + mir_index, + @intCast(emit.code.items.len), + ); + const lowered = try emit.lower.lowerMir(mir_index); + var lowered_relocs = lowered.relocs; + for (lowered.insts, 0..) |lowered_inst, lowered_index| { + const start_offset: u32 = @intCast(emit.code.items.len); + try lowered_inst.encode(emit.code.writer()); - try emit.lowerMir(); + while (lowered_relocs.len > 0 and + lowered_relocs[0].lowered_inst_index == lowered_index) : ({ + lowered_relocs = lowered_relocs[1..]; + }) switch (lowered_relocs[0].target) { + .inst => |target| try emit.relocs.append(emit.lower.allocator, .{ + .source = start_offset, + .target = target, + .offset = 0, + .enc = std.meta.activeTag(lowered_inst.encoding.data), + }), + else => |x| return emit.fail("TODO: emitMir {s}", .{@tagName(x)}), + }; + } + std.debug.assert(lowered_relocs.len == 0); - for (mir_tags, 0..) 
|tag, index| { - const inst = @as(u32, @intCast(index)); - log.debug("emitMir: {s}", .{@tagName(tag)}); - switch (tag) { - .add => try emit.mirRType(inst), - .sub => try emit.mirRType(inst), - .mul => try emit.mirRType(inst), - .@"or" => try emit.mirRType(inst), + if (lowered.insts.len == 0) { + const mir_inst = emit.lower.mir.instructions.get(mir_index); + switch (mir_inst.tag) { + else => unreachable, + .pseudo => switch (mir_inst.ops) { + else => unreachable, + .pseudo_dbg_prologue_end => { + switch (emit.debug_output) { + .dwarf => |dw| { + try dw.setPrologueEnd(); + log.debug("mirDbgPrologueEnd (line={d}, col={d})", .{ + emit.prev_di_line, emit.prev_di_column, + }); + try emit.dbgAdvancePCAndLine(emit.prev_di_line, emit.prev_di_column); + }, + .plan9 => {}, + .none => {}, + } + }, + .pseudo_dbg_line_column => try emit.dbgAdvancePCAndLine( + mir_inst.data.pseudo_dbg_line_column.line, + mir_inst.data.pseudo_dbg_line_column.column, + ), + .pseudo_dbg_epilogue_begin => { + switch (emit.debug_output) { + .dwarf => |dw| { + try dw.setEpilogueBegin(); + log.debug("mirDbgEpilogueBegin (line={d}, col={d})", .{ + emit.prev_di_line, emit.prev_di_column, + }); + try emit.dbgAdvancePCAndLine(emit.prev_di_line, emit.prev_di_column); + }, + .plan9 => {}, + .none => {}, + } + }, + .pseudo_dead => {}, + }, + } + } + } + try emit.fixupRelocs(); +} - .cmp_eq => try emit.mirRType(inst), - .cmp_neq => try emit.mirRType(inst), - .cmp_gt => try emit.mirRType(inst), - .cmp_gte => try emit.mirRType(inst), - .cmp_lt => try emit.mirRType(inst), - .cmp_imm_gte => try emit.mirRType(inst), - .cmp_imm_eq => try emit.mirIType(inst), - .cmp_imm_neq => try emit.mirIType(inst), - .cmp_imm_lte => try emit.mirIType(inst), - .cmp_imm_lt => try emit.mirIType(inst), +pub fn deinit(emit: *Emit) void { + emit.relocs.deinit(emit.lower.allocator); + emit.code_offset_mapping.deinit(emit.lower.allocator); + emit.* = undefined; +} - .beq => try emit.mirBType(inst), - .bne => try emit.mirBType(inst), +const Reloc = struct { + /// Offset of the instruction. + source: usize, + /// Target of the relocation. + target: Mir.Inst.Index, + /// Offset of the relocation within the instruction. + offset: u32, + /// Encoding of the instruction, used to determine how to modify it. 
+ enc: Encoding.InstEnc, +}; - .addi => try emit.mirIType(inst), - .addiw => try emit.mirIType(inst), - .andi => try emit.mirIType(inst), - .jalr => try emit.mirIType(inst), - .abs => try emit.mirIType(inst), +fn fixupRelocs(emit: *Emit) Error!void { + for (emit.relocs.items) |reloc| { + log.debug("target inst: {}", .{emit.lower.mir.instructions.get(reloc.target)}); + const target = emit.code_offset_mapping.get(reloc.target) orelse + return emit.fail("relocation target not found!", .{}); - .jal => try emit.mirJType(inst), + const disp = @as(i32, @intCast(target)) - @as(i32, @intCast(reloc.source)); + const code: *[4]u8 = emit.code.items[reloc.source + reloc.offset ..][0..4]; - .ebreak => try emit.mirSystem(inst), - .ecall => try emit.mirSystem(inst), - .unimp => try emit.mirSystem(inst), + log.debug("disp: {x}", .{disp}); - .dbg_line => try emit.mirDbgLine(inst), - .dbg_prologue_end => try emit.mirDebugPrologueEnd(), - .dbg_epilogue_begin => try emit.mirDebugEpilogueBegin(), - - .psuedo_prologue => try emit.mirPsuedo(inst), - .psuedo_epilogue => try emit.mirPsuedo(inst), - - .j => try emit.mirPsuedo(inst), - - .mv => try emit.mirRR(inst), - .not => try emit.mirRR(inst), - - .nop => try emit.mirNop(inst), - .ret => try emit.mirNop(inst), - - .lui => try emit.mirUType(inst), - - .ld => try emit.mirIType(inst), - .lw => try emit.mirIType(inst), - .lh => try emit.mirIType(inst), - .lb => try emit.mirIType(inst), - - .sd => try emit.mirIType(inst), - .sw => try emit.mirIType(inst), - .sh => try emit.mirIType(inst), - .sb => try emit.mirIType(inst), - - .srlw => try emit.mirRType(inst), - .sllw => try emit.mirRType(inst), - - .srli => try emit.mirIType(inst), - .slli => try emit.mirIType(inst), - - .ldr_ptr_stack => try emit.mirIType(inst), - - .load_symbol => try emit.mirLoadSymbol(inst), + switch (reloc.enc) { + .J => riscv_util.writeInstJ(code, @bitCast(disp)), + else => return emit.fail("tried to reloc encoding type {s}", .{@tagName(reloc.enc)}), } } } -pub fn deinit(emit: *Emit) void { - const comp = emit.bin_file.comp; - const gpa = comp.gpa; - - emit.code_offset_mapping.deinit(gpa); - emit.* = undefined; -} - -fn writeInstruction(emit: *Emit, instruction: Instruction) !void { - const endian = emit.target.cpu.arch.endian(); - std.mem.writeInt(u32, try emit.code.addManyAsArray(4), instruction.toU32(), endian); -} - -fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError { - @setCold(true); - assert(emit.err_msg == null); - const comp = emit.bin_file.comp; - const gpa = comp.gpa; - emit.err_msg = try ErrorMsg.create(gpa, emit.src_loc, format, args); - return error.EmitFail; -} - -fn dbgAdvancePCAndLine(emit: *Emit, line: u32, column: u32) !void { - const delta_line = @as(i32, @intCast(line)) - @as(i32, @intCast(emit.prev_di_line)); +fn dbgAdvancePCAndLine(emit: *Emit, line: u32, column: u32) Error!void { + const delta_line = @as(i33, line) - @as(i33, emit.prev_di_line); const delta_pc: usize = emit.code.items.len - emit.prev_di_pc; + log.debug(" (advance pc={d} and line={d})", .{ delta_pc, delta_line }); switch (emit.debug_output) { .dwarf => |dw| { if (column != emit.prev_di_column) try dw.setColumn(column); - if (delta_line == 0) return; // TODO: remove this + if (delta_line == 0) return; // TODO: fix these edge cases. 
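+            // (The PC delta is not lost: `prev_di_pc` is left untouched, so the skipped
+            // advance folds into the next line change.)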
try dw.advancePCAndLine(delta_line, delta_pc); emit.prev_di_line = line; emit.prev_di_column = column; emit.prev_di_pc = emit.code.items.len; }, - .plan9 => |dbg_out| { - if (delta_pc <= 0) return; // only do this when the pc changes - - // increasing the line number - try link.File.Plan9.changeLine(&dbg_out.dbg_line, delta_line); - // increasing the pc - const d_pc_p9 = @as(i64, @intCast(delta_pc)) - dbg_out.pc_quanta; - if (d_pc_p9 > 0) { - // minus one because if its the last one, we want to leave space to change the line which is one pc quanta - try dbg_out.dbg_line.append(@as(u8, @intCast(@divExact(d_pc_p9, dbg_out.pc_quanta) + 128)) - dbg_out.pc_quanta); - if (dbg_out.pcop_change_index) |pci| - dbg_out.dbg_line.items[pci] += 1; - dbg_out.pcop_change_index = @as(u32, @intCast(dbg_out.dbg_line.items.len - 1)); - } else if (d_pc_p9 == 0) { - // we don't need to do anything, because adding the pc quanta does it for us - } else unreachable; - if (dbg_out.start_line == null) - dbg_out.start_line = emit.prev_di_line; - dbg_out.end_line = line; - // only do this if the pc changed - emit.prev_di_line = line; - emit.prev_di_column = column; - emit.prev_di_pc = emit.code.items.len; - }, - .none => {}, - } -} - -fn mirRType(emit: *Emit, inst: Mir.Inst.Index) !void { - const tag = emit.mir.instructions.items(.tag)[inst]; - const r_type = emit.mir.instructions.items(.data)[inst].r_type; - - const rd = r_type.rd; - const rs1 = r_type.rs1; - const rs2 = r_type.rs2; - - switch (tag) { - .add => try emit.writeInstruction(Instruction.add(rd, rs1, rs2)), - .sub => try emit.writeInstruction(Instruction.sub(rd, rs1, rs2)), - .mul => try emit.writeInstruction(Instruction.mul(rd, rs1, rs2)), - .cmp_gt => { - // rs1 > rs2 - try emit.writeInstruction(Instruction.sltu(rd, rs2, rs1)); - }, - .cmp_gte => { - // rs1 >= rs2 - try emit.writeInstruction(Instruction.sltu(rd, rs1, rs2)); - try emit.writeInstruction(Instruction.xori(rd, rd, 1)); - }, - .cmp_eq => { - // rs1 == rs2 - - try emit.writeInstruction(Instruction.xor(rd, rs1, rs2)); - try emit.writeInstruction(Instruction.sltiu(rd, rd, 1)); // seqz - }, - .cmp_neq => { - // rs1 != rs2 - - try emit.writeInstruction(Instruction.xor(rd, rs1, rs2)); - try emit.writeInstruction(Instruction.sltu(rd, .zero, rd)); // snez - }, - .cmp_lt => { - // rd = 1 if rs1 < rs2 - try emit.writeInstruction(Instruction.slt(rd, rs1, rs2)); - }, - .sllw => try emit.writeInstruction(Instruction.sllw(rd, rs1, rs2)), - .srlw => try emit.writeInstruction(Instruction.srlw(rd, rs1, rs2)), - .@"or" => try emit.writeInstruction(Instruction.@"or"(rd, rs1, rs2)), - .cmp_imm_gte => { - // rd = 1 if rs1 >= imm12 - // see the docstring of cmp_imm_gte to see why we use r_type here - - // (rs1 >= imm12) == !(imm12 > rs1) - try emit.writeInstruction(Instruction.sltu(rd, rs1, rs2)); - }, - else => unreachable, - } -} - -fn mirBType(emit: *Emit, inst: Mir.Inst.Index) !void { - const tag = emit.mir.instructions.items(.tag)[inst]; - const b_type = emit.mir.instructions.items(.data)[inst].b_type; - - const offset = @as(i64, @intCast(emit.code_offset_mapping.get(b_type.inst).?)) - @as(i64, @intCast(emit.code.items.len)); - - switch (tag) { - .beq => { - log.debug("beq: {} offset={}", .{ inst, offset }); - try emit.writeInstruction(Instruction.beq(b_type.rs1, b_type.rs2, @intCast(offset))); - }, - .bne => { - log.debug("bne: {} offset={}", .{ inst, offset }); - try emit.writeInstruction(Instruction.bne(b_type.rs1, b_type.rs2, @intCast(offset))); - }, - else => unreachable, - } -} - -fn mirIType(emit: 
*Emit, inst: Mir.Inst.Index) !void { - const tag = emit.mir.instructions.items(.tag)[inst]; - const i_type = emit.mir.instructions.items(.data)[inst].i_type; - - const rd = i_type.rd; - const rs1 = i_type.rs1; - const imm12 = i_type.imm12; - - switch (tag) { - .addi => try emit.writeInstruction(Instruction.addi(rd, rs1, imm12)), - .addiw => try emit.writeInstruction(Instruction.addiw(rd, rs1, imm12)), - .jalr => try emit.writeInstruction(Instruction.jalr(rd, imm12, rs1)), - - .andi => try emit.writeInstruction(Instruction.andi(rd, rs1, imm12)), - - .ld => try emit.writeInstruction(Instruction.ld(rd, imm12, rs1)), - .lw => try emit.writeInstruction(Instruction.lw(rd, imm12, rs1)), - .lh => try emit.writeInstruction(Instruction.lh(rd, imm12, rs1)), - .lb => try emit.writeInstruction(Instruction.lb(rd, imm12, rs1)), - - .sd => try emit.writeInstruction(Instruction.sd(rd, imm12, rs1)), - .sw => try emit.writeInstruction(Instruction.sw(rd, imm12, rs1)), - .sh => try emit.writeInstruction(Instruction.sh(rd, imm12, rs1)), - .sb => try emit.writeInstruction(Instruction.sb(rd, imm12, rs1)), - - .ldr_ptr_stack => try emit.writeInstruction(Instruction.add(rd, rs1, .sp)), - - .abs => { - try emit.writeInstruction(Instruction.sraiw(rd, rs1, @intCast(imm12))); - try emit.writeInstruction(Instruction.xor(rs1, rs1, rd)); - try emit.writeInstruction(Instruction.subw(rs1, rs1, rd)); - }, - - .srli => try emit.writeInstruction(Instruction.srli(rd, rs1, @intCast(imm12))), - .slli => try emit.writeInstruction(Instruction.slli(rd, rs1, @intCast(imm12))), - - .cmp_imm_eq => { - try emit.writeInstruction(Instruction.xori(rd, rs1, imm12)); - try emit.writeInstruction(Instruction.sltiu(rd, rd, 1)); - }, - .cmp_imm_neq => { - try emit.writeInstruction(Instruction.xori(rd, rs1, imm12)); - try emit.writeInstruction(Instruction.sltu(rd, .x0, rd)); - }, - - .cmp_imm_lt => { - try emit.writeInstruction(Instruction.slti(rd, rs1, imm12)); - }, - - .cmp_imm_lte => { - try emit.writeInstruction(Instruction.sltiu(rd, rs1, @bitCast(imm12))); - }, - - else => unreachable, - } -} - -fn mirJType(emit: *Emit, inst: Mir.Inst.Index) !void { - const tag = emit.mir.instructions.items(.tag)[inst]; - const j_type = emit.mir.instructions.items(.data)[inst].j_type; - - const offset = @as(i64, @intCast(emit.code_offset_mapping.get(j_type.inst).?)) - @as(i64, @intCast(emit.code.items.len)); - - switch (tag) { - .jal => { - log.debug("jal: {} offset={}", .{ inst, offset }); - try emit.writeInstruction(Instruction.jal(j_type.rd, @intCast(offset))); - }, - else => unreachable, - } -} - -fn mirSystem(emit: *Emit, inst: Mir.Inst.Index) !void { - const tag = emit.mir.instructions.items(.tag)[inst]; - - switch (tag) { - .ebreak => try emit.writeInstruction(Instruction.ebreak), - .ecall => try emit.writeInstruction(Instruction.ecall), - .unimp => try emit.writeInstruction(Instruction.unimp), - else => unreachable, - } -} - -fn mirDbgLine(emit: *Emit, inst: Mir.Inst.Index) !void { - const tag = emit.mir.instructions.items(.tag)[inst]; - const dbg_line_column = emit.mir.instructions.items(.data)[inst].dbg_line_column; - - switch (tag) { - .dbg_line => try emit.dbgAdvancePCAndLine(dbg_line_column.line, dbg_line_column.column), - else => unreachable, - } -} - -fn mirDebugPrologueEnd(emit: *Emit) !void { - switch (emit.debug_output) { - .dwarf => |dw| { - try dw.setPrologueEnd(); - try emit.dbgAdvancePCAndLine(emit.prev_di_line, emit.prev_di_column); - }, .plan9 => {}, .none => {}, } } -fn mirDebugEpilogueBegin(emit: *Emit) !void { - switch 
(emit.debug_output) { - .dwarf => |dw| { - try dw.setEpilogueBegin(); - try emit.dbgAdvancePCAndLine(emit.prev_di_line, emit.prev_di_column); - }, - .plan9 => {}, - .none => {}, - } -} - -fn mirPsuedo(emit: *Emit, inst: Mir.Inst.Index) !void { - const tag = emit.mir.instructions.items(.tag)[inst]; - const data = emit.mir.instructions.items(.data)[inst]; - - switch (tag) { - .psuedo_prologue => { - const stack_size: i12 = math.cast(i12, emit.stack_size) orelse { - return emit.fail("TODO: mirPsuedo support larger stack sizes", .{}); - }; - - // Decrement sp by (num s registers * 8) + local var space - try emit.writeInstruction(Instruction.addi(.sp, .sp, -stack_size)); - - // Spill ra - try emit.writeInstruction(Instruction.sd(.ra, 0, .sp)); - - // Spill callee saved registers. - var s_reg_iter = emit.save_reg_list.iterator(.{}); - var i: i12 = 8; - while (s_reg_iter.next()) |reg_i| { - const reg = abi.callee_preserved_regs[reg_i]; - try emit.writeInstruction(Instruction.sd(reg, i, .sp)); - i += 8; - } - }, - .psuedo_epilogue => { - const stack_size: i12 = math.cast(i12, emit.stack_size) orelse { - return emit.fail("TODO: mirPsuedo support larger stack sizes", .{}); - }; - - // Restore ra - try emit.writeInstruction(Instruction.ld(.ra, 0, .sp)); - - // Restore spilled callee saved registers - var s_reg_iter = emit.save_reg_list.iterator(.{}); - var i: i12 = 8; - while (s_reg_iter.next()) |reg_i| { - const reg = abi.callee_preserved_regs[reg_i]; - try emit.writeInstruction(Instruction.ld(reg, i, .sp)); - i += 8; - } - - // Increment sp back to previous value - try emit.writeInstruction(Instruction.addi(.sp, .sp, stack_size)); - }, - - .j => { - const offset = @as(i64, @intCast(emit.code_offset_mapping.get(data.inst).?)) - @as(i64, @intCast(emit.code.items.len)); - try emit.writeInstruction(Instruction.jal(.zero, @intCast(offset))); - }, - - else => unreachable, - } -} - -fn mirRR(emit: *Emit, inst: Mir.Inst.Index) !void { - const tag = emit.mir.instructions.items(.tag)[inst]; - const rr = emit.mir.instructions.items(.data)[inst].rr; - - const rd = rr.rd; - const rs = rr.rs; - - switch (tag) { - .mv => try emit.writeInstruction(Instruction.addi(rd, rs, 0)), - .not => try emit.writeInstruction(Instruction.xori(rd, rs, 1)), - else => unreachable, - } -} - -fn mirUType(emit: *Emit, inst: Mir.Inst.Index) !void { - const tag = emit.mir.instructions.items(.tag)[inst]; - const u_type = emit.mir.instructions.items(.data)[inst].u_type; - - switch (tag) { - .lui => try emit.writeInstruction(Instruction.lui(u_type.rd, u_type.imm20)), - else => unreachable, - } -} - -fn mirNop(emit: *Emit, inst: Mir.Inst.Index) !void { - const tag = emit.mir.instructions.items(.tag)[inst]; - - switch (tag) { - .nop => try emit.writeInstruction(Instruction.addi(.zero, .zero, 0)), - .ret => try emit.writeInstruction(Instruction.jalr(.zero, 0, .ra)), - else => unreachable, - } -} - -fn mirLoadSymbol(emit: *Emit, inst: Mir.Inst.Index) !void { - const payload = emit.mir.instructions.items(.data)[inst].payload; - const data = emit.mir.extraData(Mir.LoadSymbolPayload, payload).data; - const reg = @as(Register, @enumFromInt(data.register)); - - const start_offset = @as(u32, @intCast(emit.code.items.len)); - try emit.writeInstruction(Instruction.lui(reg, 0)); - try emit.writeInstruction(Instruction.addi(reg, reg, 0)); - - switch (emit.bin_file.tag) { - .elf => { - const elf_file = emit.bin_file.cast(link.File.Elf).?; - const atom_ptr = elf_file.symbol(data.atom_index).atom(elf_file).?; - const sym_index = 
elf_file.zigObjectPtr().?.symbol(data.sym_index); - const sym = elf_file.symbol(sym_index); - - var hi_r_type: u32 = @intFromEnum(std.elf.R_RISCV.HI20); - var lo_r_type: u32 = @intFromEnum(std.elf.R_RISCV.LO12_I); - - if (sym.flags.needs_zig_got) { - _ = try sym.getOrCreateZigGotEntry(sym_index, elf_file); - - hi_r_type = Elf.R_ZIG_GOT_HI20; - lo_r_type = Elf.R_ZIG_GOT_LO12; - } - - try atom_ptr.addReloc(elf_file, .{ - .r_offset = start_offset, - .r_info = (@as(u64, @intCast(data.sym_index)) << 32) | hi_r_type, - .r_addend = 0, - }); - - try atom_ptr.addReloc(elf_file, .{ - .r_offset = start_offset + 4, - .r_info = (@as(u64, @intCast(data.sym_index)) << 32) | lo_r_type, - .r_addend = 0, - }); - }, - else => unreachable, - } -} - -fn isStore(tag: Mir.Inst.Tag) bool { - return switch (tag) { - .sb => true, - .sh => true, - .sw => true, - .sd => true, - .addi => true, // needed for ptr_stack_offset stores - else => false, +fn fail(emit: *Emit, comptime format: []const u8, args: anytype) Error { + return switch (emit.lower.fail(format, args)) { + error.LowerFail => error.EmitFail, + else => |e| e, }; } -fn isLoad(tag: Mir.Inst.Tag) bool { - return switch (tag) { - .lb => true, - .lh => true, - .lw => true, - .ld => true, - else => false, - }; -} - -pub fn isBranch(tag: Mir.Inst.Tag) bool { - return switch (tag) { - .beq => true, - .bne => true, - .jal => true, - .j => true, - else => false, - }; -} - -pub fn branchTarget(emit: *Emit, inst: Mir.Inst.Index) Mir.Inst.Index { - const tag = emit.mir.instructions.items(.tag)[inst]; - const data = emit.mir.instructions.items(.data)[inst]; - - switch (tag) { - .bne, - .beq, - => return data.b_type.inst, - .jal => return data.j_type.inst, - .j => return data.inst, - else => std.debug.panic("branchTarget {s}", .{@tagName(tag)}), - } -} - -fn instructionSize(emit: *Emit, inst: Mir.Inst.Index) usize { - const tag = emit.mir.instructions.items(.tag)[inst]; - - return switch (tag) { - .dbg_line, - .dbg_epilogue_begin, - .dbg_prologue_end, - => 0, - - .cmp_eq, - .cmp_neq, - .cmp_imm_eq, - .cmp_imm_neq, - .cmp_gte, - .load_symbol, - .abs, - => 8, - - .psuedo_epilogue, .psuedo_prologue => size: { - const count = emit.save_reg_list.count() * 4; - break :size count + 8; - }, - - else => 4, - }; -} - -fn lowerMir(emit: *Emit) !void { - const comp = emit.bin_file.comp; - const gpa = comp.gpa; - const mir_tags = emit.mir.instructions.items(.tag); - const mir_datas = emit.mir.instructions.items(.data); - - const proglogue_size: u32 = @intCast(emit.save_reg_list.size()); - emit.stack_size += proglogue_size; - - for (mir_tags, 0..) 
|tag, index| { - const inst: u32 = @intCast(index); - - if (isStore(tag) or isLoad(tag)) { - const data = mir_datas[inst].i_type; - if (data.rs1 == .sp) { - const offset = mir_datas[inst].i_type.imm12; - mir_datas[inst].i_type.imm12 = offset + @as(i12, @intCast(proglogue_size)) + 8; - } - } - - if (isBranch(tag)) { - const target_inst = emit.branchTarget(inst); - try emit.code_offset_mapping.put(gpa, target_inst, 0); - } - } - var current_code_offset: usize = 0; - - for (0..mir_tags.len) |index| { - const inst = @as(u32, @intCast(index)); - if (emit.code_offset_mapping.getPtr(inst)) |offset| { - offset.* = current_code_offset; - } - current_code_offset += emit.instructionSize(inst); - } -} - -const Emit = @This(); -const std = @import("std"); -const math = std.math; -const Mir = @import("Mir.zig"); -const bits = @import("bits.zig"); -const abi = @import("abi.zig"); const link = @import("../../link.zig"); -const Module = @import("../../Module.zig"); -const Elf = @import("../../link/Elf.zig"); -const ErrorMsg = Module.ErrorMsg; -const assert = std.debug.assert; -const Instruction = bits.Instruction; -const Register = bits.Register; +const log = std.log.scoped(.emit); +const mem = std.mem; +const std = @import("std"); + const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput; +const Emit = @This(); +const Lower = @import("Lower.zig"); +const Mir = @import("Mir.zig"); +const riscv_util = @import("../../link/riscv.zig"); +const Encoding = @import("Encoding.zig"); diff --git a/src/arch/riscv64/Encoding.zig b/src/arch/riscv64/Encoding.zig new file mode 100644 index 0000000000..1510185944 --- /dev/null +++ b/src/arch/riscv64/Encoding.zig @@ -0,0 +1,333 @@ +mnemonic: Mnemonic, +data: Data, + +pub const Mnemonic = enum { + // R Type + add, + + // I Type + ld, + lw, + lwu, + lh, + lhu, + lb, + lbu, + + addi, + jalr, + + // U Type + lui, + + // S Type + sd, + sw, + sh, + sb, + + // J Type + jal, + + // System + ecall, + ebreak, + unimp, + + pub fn encoding(mnem: Mnemonic) Enc { + return switch (mnem) { + // zig fmt: off + .add => .{ .opcode = 0b0110011, .funct3 = 0b000, .funct7 = 0b0000000 }, + + .ld => .{ .opcode = 0b0000011, .funct3 = 0b011, .funct7 = null }, + .lw => .{ .opcode = 0b0000011, .funct3 = 0b010, .funct7 = null }, + .lwu => .{ .opcode = 0b0000011, .funct3 = 0b110, .funct7 = null }, + .lh => .{ .opcode = 0b0000011, .funct3 = 0b001, .funct7 = null }, + .lhu => .{ .opcode = 0b0000011, .funct3 = 0b101, .funct7 = null }, + .lb => .{ .opcode = 0b0000011, .funct3 = 0b000, .funct7 = null }, + .lbu => .{ .opcode = 0b0000011, .funct3 = 0b100, .funct7 = null }, + + + .addi => .{ .opcode = 0b0010011, .funct3 = 0b000, .funct7 = null }, + .jalr => .{ .opcode = 0b1100111, .funct3 = 0b000, .funct7 = null }, + + .lui => .{ .opcode = 0b0110111, .funct3 = null, .funct7 = null }, + + .sd => .{ .opcode = 0b0100011, .funct3 = 0b011, .funct7 = null }, + .sw => .{ .opcode = 0b0100011, .funct3 = 0b010, .funct7 = null }, + .sh => .{ .opcode = 0b0100011, .funct3 = 0b001, .funct7 = null }, + .sb => .{ .opcode = 0b0100011, .funct3 = 0b000, .funct7 = null }, + + .jal => .{ .opcode = 0b1101111, .funct3 = null, .funct7 = null }, + + .ecall => .{ .opcode = 0b1110011, .funct3 = 0b000, .funct7 = null }, + .ebreak => .{ .opcode = 0b1110011, .funct3 = 0b000, .funct7 = null }, + .unimp => .{ .opcode = 0b0000000, .funct3 = 0b000, .funct7 = null }, + // zig fmt: on + }; + } +}; + +pub const InstEnc = enum { + R, + I, + S, + B, + U, + J, + + /// extras that have unusual op counts + system, + + pub fn 
fromMnemonic(mnem: Mnemonic) InstEnc { + return switch (mnem) { + .add, + => .R, + + .addi, + .ld, + .lw, + .lwu, + .lh, + .lhu, + .lb, + .lbu, + .jalr, + => .I, + + .lui, + => .U, + + .sd, + .sw, + .sh, + .sb, + => .S, + + .jal, + => .J, + + .ecall, + .ebreak, + .unimp, + => .system, + }; + } + + pub fn opsList(enc: InstEnc) [4]std.meta.FieldEnum(Operand) { + return switch (enc) { + .R => .{ .reg, .reg, .reg, .none }, + .I => .{ .reg, .reg, .imm, .none }, + .S => .{ .reg, .reg, .imm, .none }, + .B => .{ .imm, .reg, .reg, .imm }, + .U => .{ .reg, .imm, .none, .none }, + .J => .{ .reg, .imm, .none, .none }, + .system => .{ .none, .none, .none, .none }, + }; + } +}; + +pub const Data = union(InstEnc) { + R: packed struct { + opcode: u7, + rd: u5, + funct3: u3, + rs1: u5, + rs2: u5, + funct7: u7, + }, + I: packed struct { + opcode: u7, + rd: u5, + funct3: u3, + rs1: u5, + imm0_11: u12, + }, + S: packed struct { + opcode: u7, + imm0_4: u5, + funct3: u3, + rs1: u5, + rs2: u5, + imm5_11: u7, + }, + B: packed struct { + opcode: u7, + imm11: u1, + imm1_4: u4, + funct3: u3, + rs1: u5, + rs2: u5, + imm5_10: u6, + imm12: u1, + }, + U: packed struct { + opcode: u7, + rd: u5, + imm12_31: u20, + }, + J: packed struct { + opcode: u7, + rd: u5, + imm12_19: u8, + imm11: u1, + imm1_10: u10, + imm20: u1, + }, + system: void, + + pub fn toU32(self: Data) u32 { + return switch (self) { + .R => |v| @as(u32, @bitCast(v)), + .I => |v| @as(u32, @bitCast(v)), + .S => |v| @as(u32, @bitCast(v)), + .B => |v| @as(u32, @intCast(v.opcode)) + (@as(u32, @intCast(v.imm11)) << 7) + (@as(u32, @intCast(v.imm1_4)) << 8) + (@as(u32, @intCast(v.funct3)) << 12) + (@as(u32, @intCast(v.rs1)) << 15) + (@as(u32, @intCast(v.rs2)) << 20) + (@as(u32, @intCast(v.imm5_10)) << 25) + (@as(u32, @intCast(v.imm12)) << 31), + .U => |v| @as(u32, @bitCast(v)), + .J => |v| @as(u32, @bitCast(v)), + .system => unreachable, + }; + } + + pub fn construct(mnem: Mnemonic, ops: []const Operand) !Data { + const inst_enc = InstEnc.fromMnemonic(mnem); + + const enc = mnem.encoding(); + + // special mnemonics + switch (mnem) { + .ecall, + .ebreak, + .unimp, + => { + assert(ops.len == 0); + return .{ + .I = .{ + .rd = Register.zero.id(), + .rs1 = Register.zero.id(), + .imm0_11 = switch (mnem) { + .ecall => 0x000, + .ebreak => 0x001, + .unimp => 0, + else => unreachable, + }, + + .opcode = enc.opcode, + .funct3 = enc.funct3.?, + }, + }; + }, + else => {}, + } + + switch (inst_enc) { + .R => { + assert(ops.len == 3); + return .{ + .R = .{ + .rd = ops[0].reg.id(), + .rs1 = ops[1].reg.id(), + .rs2 = ops[2].reg.id(), + + .opcode = enc.opcode, + .funct3 = enc.funct3.?, + .funct7 = enc.funct7.?, + }, + }; + }, + .S => { + assert(ops.len == 3); + const umm = ops[2].imm.asBits(u12); + + return .{ + .S = .{ + .imm0_4 = @truncate(umm), + .rs1 = ops[0].reg.id(), + .rs2 = ops[1].reg.id(), + .imm5_11 = @truncate(umm >> 5), + + .opcode = enc.opcode, + .funct3 = enc.funct3.?, + }, + }; + }, + .I => { + assert(ops.len == 3); + return .{ + .I = .{ + .rd = ops[0].reg.id(), + .rs1 = ops[1].reg.id(), + .imm0_11 = ops[2].imm.asBits(u12), + + .opcode = enc.opcode, + .funct3 = enc.funct3.?, + }, + }; + }, + .U => { + assert(ops.len == 2); + return .{ + .U = .{ + .rd = ops[0].reg.id(), + .imm12_31 = ops[1].imm.asBits(u20), + + .opcode = enc.opcode, + }, + }; + }, + .J => { + assert(ops.len == 2); + + const umm = ops[1].imm.asBits(u21); + assert(umm % 4 == 0); // misaligned jump target + + return .{ + .J = .{ + .rd = ops[0].reg.id(), + .imm1_10 = @truncate(umm >> 1), + .imm11 = 
@truncate(umm >> 11), + .imm12_19 = @truncate(umm >> 12), + .imm20 = @truncate(umm >> 20), + + .opcode = enc.opcode, + }, + }; + }, + + else => std.debug.panic("TODO: construct {s}", .{@tagName(inst_enc)}), + } + } +}; + +pub fn findByMnemonic(mnem: Mnemonic, ops: []const Operand) !?Encoding { + if (!verifyOps(mnem, ops)) return null; + + return .{ + .mnemonic = mnem, + .data = try Data.construct(mnem, ops), + }; +} + +const Enc = struct { + opcode: u7, + funct3: ?u3, + funct7: ?u7, +}; + +fn verifyOps(mnem: Mnemonic, ops: []const Operand) bool { + const inst_enc = InstEnc.fromMnemonic(mnem); + const list = std.mem.sliceTo(&inst_enc.opsList(), .none); + for (list, ops) |l, o| if (l != std.meta.activeTag(o)) return false; + return true; +} + +const std = @import("std"); +const assert = std.debug.assert; +const log = std.log.scoped(.encoding); + +const Encoding = @This(); +const bits = @import("bits.zig"); +const Register = bits.Register; +const encoder = @import("encoder.zig"); +const Instruction = encoder.Instruction; +const Operand = Instruction.Operand; +const OperandEnum = std.meta.FieldEnum(Operand); diff --git a/src/arch/riscv64/Lower.zig b/src/arch/riscv64/Lower.zig new file mode 100644 index 0000000000..5a3e375e05 --- /dev/null +++ b/src/arch/riscv64/Lower.zig @@ -0,0 +1,222 @@ +//! This file contains the functionality for lowering RISC-V MIR to Instructions + +bin_file: *link.File, +output_mode: std.builtin.OutputMode, +link_mode: std.builtin.LinkMode, +pic: bool, +allocator: Allocator, +mir: Mir, +cc: std.builtin.CallingConvention, +err_msg: ?*ErrorMsg = null, +src_loc: Module.SrcLoc, +result_insts_len: u8 = undefined, +result_relocs_len: u8 = undefined, +result_insts: [ + @max( + 1, // non-pseudo instruction + abi.callee_preserved_regs.len, // spill / restore regs, + ) +]Instruction = undefined, +result_relocs: [1]Reloc = undefined, + +pub const Error = error{ + OutOfMemory, + LowerFail, + InvalidInstruction, +}; + +pub const Reloc = struct { + lowered_inst_index: u8, + target: Target, + + const Target = union(enum) { + inst: Mir.Inst.Index, + linker_reloc: bits.Symbol, + }; +}; + +/// The returned slice is overwritten by the next call to lowerMir. 
+pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct {
+ insts: []const Instruction,
+ relocs: []const Reloc,
+} {
+ lower.result_insts = undefined;
+ lower.result_relocs = undefined;
+ errdefer lower.result_insts = undefined;
+ errdefer lower.result_relocs = undefined;
+ lower.result_insts_len = 0;
+ lower.result_relocs_len = 0;
+ defer lower.result_insts_len = undefined;
+ defer lower.result_relocs_len = undefined;
+
+ const inst = lower.mir.instructions.get(index);
+ log.debug("lowerMir {}", .{inst});
+ switch (inst.tag) {
+ else => try lower.generic(inst),
+ .pseudo => switch (inst.ops) {
+ .pseudo_dbg_line_column,
+ .pseudo_dbg_epilogue_begin,
+ .pseudo_dbg_prologue_end,
+ .pseudo_dead,
+ => {},
+ .pseudo_load_rm, .pseudo_store_rm => {
+ const rm = inst.data.rm;
+
+ const frame_loc = rm.m.toFrameLoc(lower.mir);
+
+ switch (inst.ops) {
+ .pseudo_load_rm => {
+ const tag: Encoding.Mnemonic = switch (rm.m.mod.rm.size) {
+ .byte => .lb,
+ .hword => .lh,
+ .word => .lw,
+ .dword => .ld,
+ };
+
+ try lower.emit(tag, &.{
+ .{ .reg = rm.r },
+ .{ .reg = frame_loc.base },
+ .{ .imm = Immediate.s(frame_loc.disp) },
+ });
+ },
+ .pseudo_store_rm => {
+ const tag: Encoding.Mnemonic = switch (rm.m.mod.rm.size) {
+ .byte => .sb,
+ .hword => .sh,
+ .word => .sw,
+ .dword => .sd,
+ };
+
+ try lower.emit(tag, &.{
+ .{ .reg = frame_loc.base },
+ .{ .reg = rm.r },
+ .{ .imm = Immediate.s(frame_loc.disp) },
+ });
+ },
+ else => unreachable,
+ }
+ },
+
+ .pseudo_mv => {
+ const rr = inst.data.rr;
+
+ try lower.emit(.addi, &.{
+ .{ .reg = rr.rd },
+ .{ .reg = rr.rs },
+ .{ .imm = Immediate.s(0) },
+ });
+ },
+ .pseudo_ret => {
+ try lower.emit(.jalr, &.{
+ .{ .reg = .zero },
+ .{ .reg = .ra },
+ .{ .imm = Immediate.s(0) },
+ });
+ },
+ .pseudo_j => {
+ try lower.emit(.jal, &.{
+ .{ .reg = .zero },
+ .{ .imm = lower.reloc(.{ .inst = inst.data.inst }) },
+ });
+ },
+
+ .pseudo_spill_regs => try lower.pushPopRegList(true, inst.data.reg_list),
+ .pseudo_restore_regs => try lower.pushPopRegList(false, inst.data.reg_list),
+
+ else => return lower.fail("TODO: pseudo {s}", .{@tagName(inst.ops)}),
+ },
+ }
+
+ return .{
+ .insts = lower.result_insts[0..lower.result_insts_len],
+ .relocs = lower.result_relocs[0..lower.result_relocs_len],
+ };
+}
+
+fn generic(lower: *Lower, inst: Mir.Inst) Error!void {
+ const mnemonic = std.meta.stringToEnum(Encoding.Mnemonic, @tagName(inst.tag)) orelse {
+ return lower.fail("generic inst name {s}-{s} doesn't match with a mnemonic", .{
+ @tagName(inst.tag),
+ @tagName(inst.ops),
+ });
+ };
+ try lower.emit(mnemonic, switch (inst.ops) {
+ .none => &.{},
+ .ri => &.{
+ .{ .reg = inst.data.u_type.rd },
+ .{ .imm = inst.data.u_type.imm20 },
+ },
+ .rri => &.{
+ .{ .reg = inst.data.i_type.rd },
+ .{ .reg = inst.data.i_type.rs1 },
+ .{ .imm = inst.data.i_type.imm12 },
+ },
+ else => return lower.fail("TODO: generic lower ops {s}", .{@tagName(inst.ops)}),
+ });
+}
+
+fn emit(lower: *Lower, mnemonic: Encoding.Mnemonic, ops: []const Instruction.Operand) !void {
+ lower.result_insts[lower.result_insts_len] =
+ try Instruction.new(mnemonic, ops);
+ lower.result_insts_len += 1;
+}
+
+fn reloc(lower: *Lower, target: Reloc.Target) Immediate {
+ lower.result_relocs[lower.result_relocs_len] = .{
+ .lowered_inst_index = lower.result_insts_len,
+ .target = target,
+ };
+ lower.result_relocs_len += 1;
+ return Immediate.s(0);
+}
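+
+// Note how emit() and reloc() pair up: reloc() records the index of the
+// next lowered instruction and returns a placeholder immediate of zero,
+// which emit() encodes; Emit later patches that instruction word once the
+// relocation target's final code offset is known.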
+
+fn pushPopRegList(lower: *Lower, comptime spilling: bool, reg_list: Mir.RegisterList) !void {
+ var it = reg_list.iterator(.{ .direction = if (spilling) .forward else .reverse });
+
+ var reg_i: u31 = 0;
+ while (it.next()) |i| {
+ const frame = lower.mir.frame_locs.get(@intFromEnum(bits.FrameIndex.spill_frame));
+
+ if (spilling) {
+ try lower.emit(.sd, &.{
+ .{ .reg = frame.base },
+ .{ .reg = abi.callee_preserved_regs[i] },
+ .{ .imm = Immediate.s(frame.disp + reg_i) },
+ });
+ } else {
+ try lower.emit(.ld, &.{
+ .{ .reg = abi.callee_preserved_regs[i] },
+ .{ .reg = frame.base },
+ .{ .imm = Immediate.s(frame.disp + reg_i) },
+ });
+ }
+
+ reg_i += 8;
+ }
+}
+
+pub fn fail(lower: *Lower, comptime format: []const u8, args: anytype) Error {
+ @setCold(true);
+ assert(lower.err_msg == null);
+ lower.err_msg = try ErrorMsg.create(lower.allocator, lower.src_loc, format, args);
+ return error.LowerFail;
+}
+
+const Lower = @This();
+
+const abi = @import("abi.zig");
+const assert = std.debug.assert;
+const bits = @import("bits.zig");
+const encoder = @import("encoder.zig");
+const link = @import("../../link.zig");
+const Encoding = @import("Encoding.zig");
+const std = @import("std");
+const log = std.log.scoped(.lower);
+
+const Air = @import("../../Air.zig");
+const Allocator = std.mem.Allocator;
+const ErrorMsg = Module.ErrorMsg;
+const Mir = @import("Mir.zig");
+const Module = @import("../../Module.zig");
+const Instruction = encoder.Instruction;
+const Immediate = bits.Immediate;
diff --git a/src/arch/riscv64/Mir.zig b/src/arch/riscv64/Mir.zig
index dd9064b4a4..162aeb23c4 100644
--- a/src/arch/riscv64/Mir.zig
+++ b/src/arch/riscv64/Mir.zig
@@ -9,22 +9,32 @@ instructions: std.MultiArrayList(Inst).Slice,
 /// The meaning of this data is determined by `Inst.Tag` value.
 extra: []const u32,
+frame_locs: std.MultiArrayList(FrameLoc).Slice,

 pub const Inst = struct {
 tag: Tag,
- /// The meaning of this depends on `tag`.
 data: Data,
+ ops: Ops,
+
+ /// The position of an MIR instruction within the `Mir` instructions array.
+ pub const Index = u32;

 pub const Tag = enum(u16) {
+ /// Add immediate. Uses i_type payload.
 addi,
+
+ /// Add immediate and produce a sign-extended result.
+ ///
+ /// Uses i-type payload.
 addiw,
+
 jalr,
 lui,
 mv,
- unimp,
 ebreak,
 ecall,
+ unimp,

 /// OR instruction. Uses r_type payload.
 @"or",
@@ -48,9 +58,11 @@
 /// Register Logical Right Shift, uses r_type payload
 srlw,

+ /// Jumps, but stores the address of the instruction following the
+ /// jump in `rd`.
+ ///
+ /// Uses j_type payload.
 jal,
- /// Jumps. Uses `inst` payload.
- j, /// Immediate AND, uses i_type payload andi, @@ -93,55 +105,34 @@ pub const Inst = struct { /// Boolean NOT, Uses rr payload not, + /// Generates a NO-OP, uses nop payload nop, - ret, - /// Load double (64 bits) + /// Load double (64 bits), uses i_type payload ld, - /// Store double (64 bits) - sd, - /// Load word (32 bits) + /// Load word (32 bits), uses i_type payload lw, - /// Store word (32 bits) - sw, - /// Load half (16 bits) + /// Load half (16 bits), uses i_type payload lh, - /// Store half (16 bits) - sh, - /// Load byte (8 bits) + /// Load byte (8 bits), uses i_type payload lb, - /// Store byte (8 bits) + + /// Store double (64 bits), uses s_type payload + sd, + /// Store word (32 bits), uses s_type payload + sw, + /// Store half (16 bits), uses s_type payload + sh, + /// Store byte (8 bits), uses s_type payload sb, - /// Pseudo-instruction: End of prologue - dbg_prologue_end, - /// Pseudo-instruction: Beginning of epilogue - dbg_epilogue_begin, - /// Pseudo-instruction: Update debug line - dbg_line, - - /// Psuedo-instruction that will generate a backpatched - /// function prologue. - psuedo_prologue, - /// Psuedo-instruction that will generate a backpatched - /// function epilogue - psuedo_epilogue, - - /// Loads the address of a value that hasn't yet been allocated in memory. - /// - /// uses the Mir.LoadSymbolPayload payload. - load_symbol, - - // TODO: add description - // this is bad, remove this - ldr_ptr_stack, + /// A pseudo-instruction. Used for anything that isn't 1:1 with an + /// assembly instruction. + pseudo, }; - /// The position of an MIR instruction within the `Mir` instructions array. - pub const Index = u32; - /// All instructions have a 4-byte payload, which is contained within - /// this union. `Tag` determines which union field is active, as well as + /// this union. `Ops` determines which union field is active, as well as /// how to interpret the data within. pub const Data = union { /// No additional data @@ -152,22 +143,69 @@ pub const Inst = struct { /// /// Used by e.g. b inst: Index, - /// A 16-bit immediate value. - /// - /// Used by e.g. svc - imm16: i16, - /// A 12-bit immediate value. - /// - /// Used by e.g. psuedo_prologue - imm12: i12, /// Index into `extra`. Meaning of what can be found there is context-dependent. /// /// Used by e.g. load_memory payload: u32, + + r_type: struct { + rd: Register, + rs1: Register, + rs2: Register, + }, + + i_type: struct { + rd: Register, + rs1: Register, + imm12: Immediate, + }, + + s_type: struct { + rs1: Register, + rs2: Register, + imm5: Immediate, + imm7: Immediate, + }, + + b_type: struct { + rs1: Register, + rs2: Register, + inst: Inst.Index, + }, + + u_type: struct { + rd: Register, + imm20: Immediate, + }, + + j_type: struct { + rd: Register, + inst: Inst.Index, + }, + + /// Debug info: line and column + /// + /// Used by e.g. pseudo_dbg_line + pseudo_dbg_line_column: struct { + line: u32, + column: u32, + }, + + // Custom types to be lowered + + /// Register + Memory + rm: struct { + r: Register, + m: Memory, + }, + + reg_list: Mir.RegisterList, + /// A register /// /// Used by e.g. blr reg: Register, + /// Two registers /// /// Used by e.g. mv @@ -175,51 +213,84 @@ pub const Inst = struct { rd: Register, rs: Register, }, - /// I-Type + }; + + pub const Ops = enum { + /// No data associated with this instruction (only mnemonic is used). + none, + /// Two registers + rr, + /// Three registers + rrr, + + /// Two registers + immediate, uses the i_type payload. 
+ rri, + /// Two registers + Two Immediates + rrii, + + /// Two registers + another instruction. + rr_inst, + + /// Register + Memory + rm, + + /// Register + Immediate + ri, + + /// Another instruction. + inst, + + /// Pseudo-instruction that will generate a backpatched + /// function prologue. + pseudo_prologue, + /// Pseudo-instruction that will generate a backpatched + /// function epilogue + pseudo_epilogue, + + /// Pseudo-instruction: End of prologue + pseudo_dbg_prologue_end, + /// Pseudo-instruction: Beginning of epilogue + pseudo_dbg_epilogue_begin, + /// Pseudo-instruction: Update debug line + pseudo_dbg_line_column, + + /// Pseudo-instruction that loads from memory into a register. /// - /// Used by e.g. jalr - i_type: struct { - rd: Register, - rs1: Register, - imm12: i12, - }, - /// R-Type + /// Uses `rm` payload. + pseudo_load_rm, + /// Pseudo-instruction that stores from a register into memory /// - /// Used by e.g. add - r_type: struct { - rd: Register, - rs1: Register, - rs2: Register, - }, - /// B-Type + /// Uses `rm` payload. + pseudo_store_rm, + + /// Pseudo-instruction that loads the address of memory into a register. /// - /// Used by e.g. beq - b_type: struct { - rs1: Register, - rs2: Register, - inst: Inst.Index, - }, - /// J-Type + /// Uses `rm` payload. + pseudo_lea_rm, + + /// Shorthand for returning, aka jumping to ra register. /// - /// Used by e.g. jal - j_type: struct { - rd: Register, - inst: Inst.Index, - }, - /// U-Type + /// Uses nop payload. + pseudo_ret, + + /// Jumps. Uses `inst` payload. + pseudo_j, + + /// Dead inst, ignored by the emitter. + pseudo_dead, + + /// Loads the address of a value that hasn't yet been allocated in memory. /// - /// Used by e.g. lui - u_type: struct { - rd: Register, - imm20: i20, - }, - /// Debug info: line and column + /// uses the Mir.LoadSymbolPayload payload. + pseudo_load_symbol, + + /// Moves the value of rs1 to rd. /// - /// Used by e.g. dbg_line - dbg_line_column: struct { - line: u32, - column: u32, - }, + /// uses the `rr` payload. + pseudo_mv, + + pseudo_restore_regs, + pseudo_spill_regs, }; // Make sure we don't accidentally make instructions bigger than expected. @@ -229,14 +300,32 @@ pub const Inst = struct { // assert(@sizeOf(Inst) == 8); // } // } + + pub fn format( + inst: Inst, + comptime fmt: []const u8, + options: std.fmt.FormatOptions, + writer: anytype, + ) !void { + assert(fmt.len == 0); + _ = options; + + try writer.print("Tag: {s}, Ops: {s}", .{ @tagName(inst.tag), @tagName(inst.ops) }); + } }; pub fn deinit(mir: *Mir, gpa: std.mem.Allocator) void { mir.instructions.deinit(gpa); + mir.frame_locs.deinit(gpa); gpa.free(mir.extra); mir.* = undefined; } +pub const FrameLoc = struct { + base: Register, + disp: i32, +}; + /// Returns the requested data, as well as the new index which is at the start of the /// trailers for the object. 
pub fn extraData(mir: Mir, comptime T: type, index: usize) struct { data: T, end: usize } {
@@ -291,11 +380,11 @@ pub const RegisterList = struct {
 return self.bitset.iterator(options);
 }

- pub fn count(self: Self) u32 {
+ pub fn count(self: Self) i32 {
 return @intCast(self.bitset.count());
 }

- pub fn size(self: Self) u32 {
+ pub fn size(self: Self) i32 {
 return @intCast(self.bitset.count() * 8);
 }
};
@@ -307,4 +396,8 @@
 const assert = std.debug.assert;
 const bits = @import("bits.zig");
 const Register = bits.Register;
+const Immediate = bits.Immediate;
+const Memory = bits.Memory;
+const FrameIndex = bits.FrameIndex;
+const FrameAddr = @import("CodeGen.zig").FrameAddr;
 const IntegerBitSet = std.bit_set.IntegerBitSet;
diff --git a/src/arch/riscv64/abi.zig b/src/arch/riscv64/abi.zig
index 90aef62f09..d09baab761 100644
--- a/src/arch/riscv64/abi.zig
+++ b/src/arch/riscv64/abi.zig
@@ -93,7 +93,7 @@ pub fn classifyType(ty: Type, mod: *Module) Class {

 /// There are a maximum of 8 possible return slots. Returned values are in
 /// the beginning of the array; unused slots are filled with .none.
-pub fn classifySystemV(ty: Type, mod: *Module) [8]Class {
+pub fn classifySystem(ty: Type, mod: *Module) [8]Class {
 var result = [1]Class{.none} ** 8;
 switch (ty.zigTypeTag(mod)) {
 .Pointer => switch (ty.ptrSize(mod)) {
@@ -109,18 +109,43 @@
 },
 .Optional => {
 if (ty.isPtrLikeOptional(mod)) {
+ result[0] = .integer;
 return result;
 }
 result[0] = .integer;
 result[1] = .integer;
 return result;
 },
- else => return result,
+ .Int, .Enum, .ErrorSet => {
+ const int_bits = ty.intInfo(mod).bits;
+ if (int_bits <= 64) {
+ result[0] = .integer;
+ return result;
+ }
+ if (int_bits <= 128) {
+ result[0] = .integer;
+ result[1] = .integer;
+ return result;
+ }
+ unreachable; // TODO: support > 128 bit int arguments
+ },
+ .ErrorUnion => {
+ const payload = ty.errorUnionPayload(mod);
+ const payload_bits = payload.bitSize(mod);
+ if (payload_bits <= 64) {
+ result[0] = .integer;
+ result[1] = .integer;
+ return result;
+ }
+ unreachable; // TODO: support > 64 bit error payloads
+ },
+ else => |bad_ty| std.debug.panic("classifySystem {s}", .{@tagName(bad_ty)}),
 }
}

pub const callee_preserved_regs = [_]Register{
- .s0, .s1, .s2, .s3, .s4, .s5, .s6, .s7, .s8, .s9, .s10, .s11,
+ // .s0 is omitted to be used as a frame pointer
+ .s1, .s2, .s3, .s4, .s5, .s6, .s7, .s8, .s9, .s10, .s11,
};

pub const function_arg_regs = [_]Register{
diff --git a/src/arch/riscv64/bits.zig b/src/arch/riscv64/bits.zig
index 0e87478025..83651432a6 100644
--- a/src/arch/riscv64/bits.zig
+++ b/src/arch/riscv64/bits.zig
@@ -2,391 +2,141 @@
 const std = @import("std");
 const DW = std.dwarf;
 const assert = std.debug.assert;
 const testing = std.testing;
+const Encoding = @import("Encoding.zig");
+const Mir = @import("Mir.zig");

-// TODO: this is only tagged to facilitate the monstrosity.
-// Once packed structs work make it packed.
-pub const Instruction = union(enum) { - R: packed struct { - opcode: u7, - rd: u5, - funct3: u3, - rs1: u5, - rs2: u5, - funct7: u7, - }, - I: packed struct { - opcode: u7, - rd: u5, - funct3: u3, - rs1: u5, - imm0_11: u12, - }, - S: packed struct { - opcode: u7, - imm0_4: u5, - funct3: u3, - rs1: u5, - rs2: u5, - imm5_11: u7, - }, - B: packed struct { - opcode: u7, - imm11: u1, - imm1_4: u4, - funct3: u3, - rs1: u5, - rs2: u5, - imm5_10: u6, - imm12: u1, - }, - U: packed struct { - opcode: u7, - rd: u5, - imm12_31: u20, - }, - J: packed struct { - opcode: u7, - rd: u5, - imm12_19: u8, - imm11: u1, - imm1_10: u10, - imm20: u1, - }, +pub const Memory = struct { + base: Base, + mod: Mod, - // TODO: once packed structs work we can remove this monstrosity. - pub fn toU32(self: Instruction) u32 { - return switch (self) { - .R => |v| @as(u32, @bitCast(v)), - .I => |v| @as(u32, @bitCast(v)), - .S => |v| @as(u32, @bitCast(v)), - .B => |v| @as(u32, @intCast(v.opcode)) + (@as(u32, @intCast(v.imm11)) << 7) + (@as(u32, @intCast(v.imm1_4)) << 8) + (@as(u32, @intCast(v.funct3)) << 12) + (@as(u32, @intCast(v.rs1)) << 15) + (@as(u32, @intCast(v.rs2)) << 20) + (@as(u32, @intCast(v.imm5_10)) << 25) + (@as(u32, @intCast(v.imm12)) << 31), - .U => |v| @as(u32, @bitCast(v)), - .J => |v| @as(u32, @bitCast(v)), - }; + pub const Base = union(enum) { + reg: Register, + frame: FrameIndex, + reloc: Symbol, + }; + + pub const Mod = union(enum(u1)) { + rm: struct { + size: Size, + disp: i32 = 0, + }, + off: u64, + }; + + pub const Size = enum(u4) { + /// Byte, 1 byte + byte, + /// Half word, 2 bytes + hword, + /// Word, 4 bytes + word, + /// Double word, 8 Bytes + dword, + + pub fn fromSize(size: u32) Size { + return switch (size) { + 1 => .byte, + 2 => .hword, + 4 => .word, + 8 => .dword, + else => unreachable, + }; + } + + pub fn fromBitSize(bit_size: u64) Size { + return switch (bit_size) { + 8 => .byte, + 16 => .hword, + 32 => .word, + 64 => .dword, + else => unreachable, + }; + } + + pub fn bitSize(s: Size) u64 { + return switch (s) { + .byte => 8, + .hword => 16, + .word => 32, + .dword => 64, + }; + } + }; + + /// Asserts `mem` can be represented as a `FrameLoc`. 
+ pub fn toFrameLoc(mem: Memory, mir: Mir) Mir.FrameLoc {
+ switch (mem.base) {
+ .reg => |reg| {
+ return .{
+ .base = reg,
+ .disp = switch (mem.mod) {
+ .off => unreachable, // TODO: toFrameLoc disp.off
+ .rm => |rm| rm.disp,
+ },
+ };
+ },
+ .frame => |index| return mir.frame_locs.get(@intFromEnum(index)),
+ .reloc => unreachable,
+ }
+ }
+};
+
+pub const Immediate = union(enum) {
+ signed: i32,
+ unsigned: u64,
+
+ pub fn u(x: u64) Immediate {
+ return .{ .unsigned = x };
 }

- fn rType(op: u7, fn3: u3, fn7: u7, rd: Register, r1: Register, r2: Register) Instruction {
- return Instruction{
- .R = .{
- .opcode = op,
- .funct3 = fn3,
- .funct7 = fn7,
- .rd = rd.id(),
- .rs1 = r1.id(),
- .rs2 = r2.id(),
+ pub fn s(x: i32) Immediate {
+ return .{ .signed = x };
+ }
+
+ pub fn asSigned(imm: Immediate, bit_size: u64) i64 {
+ return switch (imm) {
+ .signed => |x| switch (bit_size) {
+ 1, 8 => @as(i8, @intCast(x)),
+ 16 => @as(i16, @intCast(x)),
+ 32, 64 => x,
+ else => unreachable,
+ },
+ .unsigned => |x| switch (bit_size) {
+ 1, 8 => @as(i8, @bitCast(@as(u8, @intCast(x)))),
+ 16 => @as(i16, @bitCast(@as(u16, @intCast(x)))),
+ 32 => @as(i32, @bitCast(@as(u32, @intCast(x)))),
+ 64 => @bitCast(x),
+ else => unreachable,
 },
 };
 }

- // RISC-V is all signed all the time -- convert immediates to unsigned for processing
- fn iType(op: u7, fn3: u3, rd: Register, r1: Register, imm: i12) Instruction {
- const umm = @as(u12, @bitCast(imm));
-
- return Instruction{
- .I = .{
- .opcode = op,
- .funct3 = fn3,
- .rd = rd.id(),
- .rs1 = r1.id(),
- .imm0_11 = umm,
+ pub fn asUnsigned(imm: Immediate, bit_size: u64) u64 {
+ return switch (imm) {
+ .signed => |x| switch (bit_size) {
+ 1, 8 => @as(u8, @bitCast(@as(i8, @intCast(x)))),
+ 16 => @as(u16, @bitCast(@as(i16, @intCast(x)))),
+ 32, 64 => @as(u32, @bitCast(x)),
+ else => unreachable,
+ },
+ .unsigned => |x| switch (bit_size) {
+ 1, 8 => @as(u8, @intCast(x)),
+ 16 => @as(u16, @intCast(x)),
+ 32 => @as(u32, @intCast(x)),
+ 64 => x,
+ else => unreachable,
 },
 };
 }

- fn sType(op: u7, fn3: u3, r1: Register, r2: Register, imm: i12) Instruction {
- const umm = @as(u12, @bitCast(imm));
-
- return Instruction{
- .S = .{
- .opcode = op,
- .funct3 = fn3,
- .rs1 = r1.id(),
- .rs2 = r2.id(),
- .imm0_4 = @as(u5, @truncate(umm)),
- .imm5_11 = @as(u7, @truncate(umm >> 5)),
- },
+ pub fn asBits(imm: Immediate, comptime T: type) T {
+ const int_info = @typeInfo(T).Int;
+ if (int_info.signedness != .unsigned) @compileError("Immediate.asBits needs unsigned T");
+ return switch (imm) {
+ .signed => |x| @bitCast(@as(std.meta.Int(.signed, int_info.bits), @intCast(x))),
+ .unsigned => |x| @intCast(x),
 };
 }
-
- // Use significance value rather than bit value, same for J-type
- // -- less burden on callsite, bonus semantic checking
- fn bType(op: u7, fn3: u3, r1: Register, r2: Register, imm: i13) Instruction {
- const umm = @as(u13, @bitCast(imm));
- assert(umm % 4 == 0); // misaligned branch target
-
- return Instruction{
- .B = .{
- .opcode = op,
- .funct3 = fn3,
- .rs1 = r1.id(),
- .rs2 = r2.id(),
- .imm1_4 = @as(u4, @truncate(umm >> 1)),
- .imm5_10 = @as(u6, @truncate(umm >> 5)),
- .imm11 = @as(u1, @truncate(umm >> 11)),
- .imm12 = @as(u1, @truncate(umm >> 12)),
- },
- };
- }
-
- // We have to extract the 20 bits anyway -- let's not make it more painful
- fn uType(op: u7, rd: Register, imm: i20) Instruction {
- const umm = @as(u20, @bitCast(imm));
-
- return Instruction{
- .U = .{
- .opcode = op,
- .rd = rd.id(),
- .imm12_31 = umm,
- },
- };
- }
-
- fn jType(op: u7, rd: Register, 
imm: i21) Instruction { - const umm = @as(u21, @bitCast(imm)); - assert(umm % 2 == 0); // misaligned jump target - - return Instruction{ - .J = .{ - .opcode = op, - .rd = rd.id(), - .imm1_10 = @as(u10, @truncate(umm >> 1)), - .imm11 = @as(u1, @truncate(umm >> 11)), - .imm12_19 = @as(u8, @truncate(umm >> 12)), - .imm20 = @as(u1, @truncate(umm >> 20)), - }, - }; - } - - // The meat and potatoes. Arguments are in the order in which they would appear in assembly code. - - // Arithmetic/Logical, Register-Register - - pub fn add(rd: Register, r1: Register, r2: Register) Instruction { - return rType(0b0110011, 0b000, 0b0000000, rd, r1, r2); - } - - pub fn sub(rd: Register, r1: Register, r2: Register) Instruction { - return rType(0b0110011, 0b000, 0b0100000, rd, r1, r2); - } - - pub fn @"and"(rd: Register, r1: Register, r2: Register) Instruction { - return rType(0b0110011, 0b111, 0b0000000, rd, r1, r2); - } - - pub fn @"or"(rd: Register, r1: Register, r2: Register) Instruction { - return rType(0b0110011, 0b110, 0b0000000, rd, r1, r2); - } - - pub fn xor(rd: Register, r1: Register, r2: Register) Instruction { - return rType(0b0110011, 0b100, 0b0000000, rd, r1, r2); - } - - pub fn sll(rd: Register, r1: Register, r2: Register) Instruction { - return rType(0b0110011, 0b001, 0b0000000, rd, r1, r2); - } - - pub fn srl(rd: Register, r1: Register, r2: Register) Instruction { - return rType(0b0110011, 0b101, 0b0000000, rd, r1, r2); - } - - pub fn sra(rd: Register, r1: Register, r2: Register) Instruction { - return rType(0b0110011, 0b101, 0b0100000, rd, r1, r2); - } - - pub fn slt(rd: Register, r1: Register, r2: Register) Instruction { - return rType(0b0110011, 0b010, 0b0000000, rd, r1, r2); - } - - pub fn sltu(rd: Register, r1: Register, r2: Register) Instruction { - return rType(0b0110011, 0b011, 0b0000000, rd, r1, r2); - } - - // M extension operations - - pub fn mul(rd: Register, r1: Register, r2: Register) Instruction { - return rType(0b0110011, 0b000, 0b0000001, rd, r1, r2); - } - - // Arithmetic/Logical, Register-Register (32-bit) - - pub fn addw(rd: Register, r1: Register, r2: Register) Instruction { - return rType(0b0111011, 0b000, rd, r1, r2); - } - - pub fn subw(rd: Register, r1: Register, r2: Register) Instruction { - return rType(0b0111011, 0b000, 0b0100000, rd, r1, r2); - } - - pub fn sllw(rd: Register, r1: Register, r2: Register) Instruction { - return rType(0b0111011, 0b001, 0b0000000, rd, r1, r2); - } - - pub fn srlw(rd: Register, r1: Register, r2: Register) Instruction { - return rType(0b0111011, 0b101, 0b0000000, rd, r1, r2); - } - - pub fn sraw(rd: Register, r1: Register, r2: Register) Instruction { - return rType(0b0111011, 0b101, 0b0100000, rd, r1, r2); - } - - // Arithmetic/Logical, Register-Immediate - - pub fn addi(rd: Register, r1: Register, imm: i12) Instruction { - return iType(0b0010011, 0b000, rd, r1, imm); - } - - pub fn andi(rd: Register, r1: Register, imm: i12) Instruction { - return iType(0b0010011, 0b111, rd, r1, imm); - } - - pub fn ori(rd: Register, r1: Register, imm: i12) Instruction { - return iType(0b0010011, 0b110, rd, r1, imm); - } - - pub fn xori(rd: Register, r1: Register, imm: i12) Instruction { - return iType(0b0010011, 0b100, rd, r1, imm); - } - - pub fn slli(rd: Register, r1: Register, shamt: u6) Instruction { - return iType(0b0010011, 0b001, rd, r1, shamt); - } - - pub fn srli(rd: Register, r1: Register, shamt: u6) Instruction { - return iType(0b0010011, 0b101, rd, r1, shamt); - } - - pub fn srai(rd: Register, r1: Register, shamt: u6) Instruction { - return 
iType(0b0010011, 0b101, rd, r1, (@as(i12, 1) << 10) + shamt); - } - - pub fn slti(rd: Register, r1: Register, imm: i12) Instruction { - return iType(0b0010011, 0b010, rd, r1, imm); - } - - pub fn sltiu(rd: Register, r1: Register, imm: u12) Instruction { - return iType(0b0010011, 0b011, rd, r1, @as(i12, @bitCast(imm))); - } - - // Arithmetic/Logical, Register-Immediate (32-bit) - - pub fn addiw(rd: Register, r1: Register, imm: i12) Instruction { - return iType(0b0011011, 0b000, rd, r1, imm); - } - - pub fn slliw(rd: Register, r1: Register, shamt: u6) Instruction { - return iType(0b0011011, 0b001, rd, r1, shamt); - } - - pub fn srliw(rd: Register, r1: Register, shamt: u6) Instruction { - return iType(0b0011011, 0b101, rd, r1, shamt); - } - - pub fn sraiw(rd: Register, r1: Register, shamt: u6) Instruction { - return iType(0b0011011, 0b101, rd, r1, (@as(i12, 1) << 10) + shamt); - } - - // Upper Immediate - - pub fn lui(rd: Register, imm: i20) Instruction { - return uType(0b0110111, rd, imm); - } - - pub fn auipc(rd: Register, imm: i20) Instruction { - return uType(0b0010111, rd, imm); - } - - // Load - - pub fn ld(rd: Register, offset: i12, base: Register) Instruction { - return iType(0b0000011, 0b011, rd, base, offset); - } - - pub fn lw(rd: Register, offset: i12, base: Register) Instruction { - return iType(0b0000011, 0b010, rd, base, offset); - } - - pub fn lwu(rd: Register, offset: i12, base: Register) Instruction { - return iType(0b0000011, 0b110, rd, base, offset); - } - - pub fn lh(rd: Register, offset: i12, base: Register) Instruction { - return iType(0b0000011, 0b001, rd, base, offset); - } - - pub fn lhu(rd: Register, offset: i12, base: Register) Instruction { - return iType(0b0000011, 0b101, rd, base, offset); - } - - pub fn lb(rd: Register, offset: i12, base: Register) Instruction { - return iType(0b0000011, 0b000, rd, base, offset); - } - - pub fn lbu(rd: Register, offset: i12, base: Register) Instruction { - return iType(0b0000011, 0b100, rd, base, offset); - } - - // Store - - pub fn sd(rs: Register, offset: i12, base: Register) Instruction { - return sType(0b0100011, 0b011, base, rs, offset); - } - - pub fn sw(rs: Register, offset: i12, base: Register) Instruction { - return sType(0b0100011, 0b010, base, rs, offset); - } - - pub fn sh(rs: Register, offset: i12, base: Register) Instruction { - return sType(0b0100011, 0b001, base, rs, offset); - } - - pub fn sb(rs: Register, offset: i12, base: Register) Instruction { - return sType(0b0100011, 0b000, base, rs, offset); - } - - // Fence - // TODO: implement fence - - // Branch - - pub fn beq(r1: Register, r2: Register, offset: i13) Instruction { - return bType(0b1100011, 0b000, r1, r2, offset); - } - - pub fn bne(r1: Register, r2: Register, offset: i13) Instruction { - return bType(0b1100011, 0b001, r1, r2, offset); - } - - pub fn blt(r1: Register, r2: Register, offset: i13) Instruction { - return bType(0b1100011, 0b100, r1, r2, offset); - } - - pub fn bge(r1: Register, r2: Register, offset: i13) Instruction { - return bType(0b1100011, 0b101, r1, r2, offset); - } - - pub fn bltu(r1: Register, r2: Register, offset: i13) Instruction { - return bType(0b1100011, 0b110, r1, r2, offset); - } - - pub fn bgeu(r1: Register, r2: Register, offset: i13) Instruction { - return bType(0b1100011, 0b111, r1, r2, offset); - } - - // Jump - - pub fn jal(link: Register, offset: i21) Instruction { - return jType(0b1101111, link, offset); - } - - pub fn jalr(link: Register, offset: i12, base: Register) Instruction { - return iType(0b1100111, 0b000, 
link, base, offset);
- }
-
- // System
-
- pub const ecall = iType(0b1110011, 0b000, .zero, .zero, 0x000);
- pub const ebreak = iType(0b1110011, 0b000, .zero, .zero, 0x001);
- pub const unimp = iType(0, 0, .zero, .zero, 0);
};

pub const Register = enum(u6) {
@@ -421,39 +171,52 @@
 }
};

-// zig fmt: on
+pub const FrameIndex = enum(u32) {
+ /// This index refers to the return address.
+ ret_addr,
+ /// This index refers to the frame pointer.
+ base_ptr,
+ /// This index refers to the entire stack frame.
+ stack_frame,
+ /// This index refers to where in the stack frame the args are spilled to.
+ args_frame,
+ /// This index refers to a frame dedicated to setting up args for functions called
+ /// in this function. Useful for aligning args separately.
+ call_frame,
+ /// This index refers to the frame where callee saved registers are spilled to and
+ /// restored from.
+ spill_frame,
+ /// Other indices are used for local variable stack slots
+ _,

-test "serialize instructions" {
- const Testcase = struct {
- inst: Instruction,
- expected: u32,
- };
+ pub const named_count = @typeInfo(FrameIndex).Enum.fields.len;

- const testcases = [_]Testcase{
- .{ // add t6, zero, zero
- .inst = Instruction.add(.t6, .zero, .zero),
- .expected = 0b0000000_00000_00000_000_11111_0110011,
- },
- .{ // sd s0, 0x7f(s0)
- .inst = Instruction.sd(.s0, 0x7f, .s0),
- .expected = 0b0000011_01000_01000_011_11111_0100011,
- },
- .{ // bne s0, s1, 0x42
- .inst = Instruction.bne(.s0, .s1, 0x42),
- .expected = 0b0_000010_01001_01000_001_0001_0_1100011,
- },
- .{ // j 0x1a
- .inst = Instruction.jal(.zero, 0x1a),
- .expected = 0b0_0000001101_0_00000000_00000_1101111,
- },
- .{ // ebreak
- .inst = Instruction.ebreak,
- .expected = 0b000000000001_00000_000_00000_1110011,
- },
- };
-
- for (testcases) |case| {
- const actual = case.inst.toU32();
- try testing.expectEqual(case.expected, actual);
+ pub fn isNamed(fi: FrameIndex) bool {
+ return @intFromEnum(fi) < named_count;
 }
-}
+
+ pub fn format(
+ fi: FrameIndex,
+ comptime fmt: []const u8,
+ options: std.fmt.FormatOptions,
+ writer: anytype,
+ ) @TypeOf(writer).Error!void {
+ try writer.writeAll("FrameIndex");
+ if (fi.isNamed()) {
+ try writer.writeByte('.');
+ try writer.writeAll(@tagName(fi));
+ } else {
+ try writer.writeByte('(');
+ try std.fmt.formatType(@intFromEnum(fi), fmt, options, writer, 0);
+ try writer.writeByte(')');
+ }
+ }
+};
+
+/// A linker symbol not yet allocated in VM.
+pub const Symbol = struct {
+ /// Index of the containing atom.
+ atom_index: u32,
+ /// Index into the linker's symbol table. 
+ sym_index: u32, +}; diff --git a/src/arch/riscv64/encoder.zig b/src/arch/riscv64/encoder.zig new file mode 100644 index 0000000000..4eadcb0e8c --- /dev/null +++ b/src/arch/riscv64/encoder.zig @@ -0,0 +1,49 @@ +pub const Instruction = struct { + encoding: Encoding, + ops: [4]Operand = .{.none} ** 4, + + pub const Operand = union(enum) { + none, + reg: Register, + mem: Memory, + imm: Immediate, + }; + + pub fn new(mnemonic: Encoding.Mnemonic, ops: []const Operand) !Instruction { + const encoding = (try Encoding.findByMnemonic(mnemonic, ops)) orelse { + log.err("no encoding found for: {s} {s} {s} {s} {s}", .{ + @tagName(mnemonic), + @tagName(if (ops.len > 0) ops[0] else .none), + @tagName(if (ops.len > 1) ops[1] else .none), + @tagName(if (ops.len > 2) ops[2] else .none), + @tagName(if (ops.len > 3) ops[3] else .none), + }); + return error.InvalidInstruction; + }; + + var result_ops: [4]Operand = .{.none} ** 4; + @memcpy(result_ops[0..ops.len], ops); + + return .{ + .encoding = encoding, + .ops = result_ops, + }; + } + + pub fn encode(inst: Instruction, writer: anytype) !void { + try writer.writeInt(u32, inst.encoding.data.toU32(), .little); + } +}; + +const std = @import("std"); + +const Lower = @import("Lower.zig"); +const Mir = @import("Mir.zig"); +const bits = @import("bits.zig"); +const Encoding = @import("Encoding.zig"); + +const Register = bits.Register; +const Memory = bits.Memory; +const Immediate = bits.Immediate; + +const log = std.log.scoped(.encode); diff --git a/src/link/riscv.zig b/src/link/riscv.zig index 85e4098200..0f87131367 100644 --- a/src/link/riscv.zig +++ b/src/link/riscv.zig @@ -25,38 +25,52 @@ pub fn writeAddend( } pub fn writeInstU(code: *[4]u8, value: u32) void { - var inst = Instruction{ + var data = Encoding.Data{ .U = mem.bytesToValue(std.meta.TagPayload( - Instruction, - Instruction.U, + Encoding.Data, + Encoding.Data.U, ), code), }; const compensated: u32 = @bitCast(@as(i32, @bitCast(value)) + 0x800); - inst.U.imm12_31 = bitSlice(compensated, 31, 12); - mem.writeInt(u32, code, inst.toU32(), .little); + data.U.imm12_31 = bitSlice(compensated, 31, 12); + mem.writeInt(u32, code, data.toU32(), .little); } pub fn writeInstI(code: *[4]u8, value: u32) void { - var inst = Instruction{ + var data = Encoding.Data{ .I = mem.bytesToValue(std.meta.TagPayload( - Instruction, - Instruction.I, + Encoding.Data, + Encoding.Data.I, ), code), }; - inst.I.imm0_11 = bitSlice(value, 11, 0); - mem.writeInt(u32, code, inst.toU32(), .little); + data.I.imm0_11 = bitSlice(value, 11, 0); + mem.writeInt(u32, code, data.toU32(), .little); } pub fn writeInstS(code: *[4]u8, value: u32) void { - var inst = Instruction{ + var data = Encoding.Data{ .S = mem.bytesToValue(std.meta.TagPayload( - Instruction, - Instruction.S, + Encoding.Data, + Encoding.Data.S, ), code), }; - inst.S.imm0_4 = bitSlice(value, 4, 0); - inst.S.imm5_11 = bitSlice(value, 11, 5); - mem.writeInt(u32, code, inst.toU32(), .little); + data.S.imm0_4 = bitSlice(value, 4, 0); + data.S.imm5_11 = bitSlice(value, 11, 5); + mem.writeInt(u32, code, data.toU32(), .little); +} + +pub fn writeInstJ(code: *[4]u8, value: u32) void { + var data = Encoding.Data{ + .J = mem.bytesToValue(std.meta.TagPayload( + Encoding.Data, + Encoding.Data.J, + ), code), + }; + data.J.imm1_10 = bitSlice(value, 10, 1); + data.J.imm11 = bitSlice(value, 11, 11); + data.J.imm12_19 = bitSlice(value, 19, 12); + data.J.imm20 = bitSlice(value, 20, 20); + mem.writeInt(u32, code, data.toU32(), .little); } fn bitSlice( @@ -67,8 +81,9 @@ fn bitSlice( return 
@truncate((value >> low) & (1 << (high - low + 1)) - 1); } -const bits = @import("../arch/riscv64/bits.zig"); +const encoder = @import("../arch/riscv64/encoder.zig"); +const Encoding = @import("../arch/riscv64/Encoding.zig"); const mem = std.mem; const std = @import("std"); -pub const Instruction = bits.Instruction; +pub const Instruction = encoder.Instruction; diff --git a/src/register_manager.zig b/src/register_manager.zig index f2539e0dbe..bd596f5658 100644 --- a/src/register_manager.zig +++ b/src/register_manager.zig @@ -360,6 +360,7 @@ pub fn RegisterManager( } else self.getRegIndexAssumeFree(tracked_index, inst); } pub fn getReg(self: *Self, reg: Register, inst: ?Air.Inst.Index) AllocateRegistersError!void { + log.debug("getting reg: {}", .{reg}); return self.getRegIndex(indexOfRegIntoTracked(reg) orelse return, inst); } pub fn getKnownReg( diff --git a/src/target.zig b/src/target.zig index 8f61b2ba03..ea58111bc1 100644 --- a/src/target.zig +++ b/src/target.zig @@ -526,7 +526,7 @@ pub fn backendSupportsFeature( feature: Feature, ) bool { return switch (feature) { - .panic_fn => ofmt == .c or use_llvm or cpu_arch == .x86_64 or cpu_arch == .riscv64, + .panic_fn => ofmt == .c or use_llvm or cpu_arch == .x86_64, .panic_unwrap_error => ofmt == .c or use_llvm, .safety_check_formatted => ofmt == .c or use_llvm, .error_return_trace => use_llvm, diff --git a/test/behavior/align.zig b/test/behavior/align.zig index 34d76fb4ba..6eddb15db7 100644 --- a/test/behavior/align.zig +++ b/test/behavior/align.zig @@ -16,6 +16,7 @@ test "global variable alignment" { } test "large alignment of local constant" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // flaky @@ -25,6 +26,7 @@ test "large alignment of local constant" { } test "slicing array of length 1 can not assume runtime index is always zero" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // flaky @@ -42,6 +44,7 @@ test "default alignment allows unspecified in type syntax" { } test "implicitly decreasing pointer alignment" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO const a: u32 align(4) = 3; const b: u32 align(8) = 4; try expect(addUnaligned(&a, &b) == 7); @@ -52,6 +55,7 @@ fn addUnaligned(a: *align(1) const u32, b: *align(1) const u32) u32 { } test "@alignCast pointers" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO var x: u32 align(4) = 1; expectsOnly1(&x); try expect(x == 2); @@ -223,6 +227,7 @@ fn fnWithAlignedStack() i32 { } test "implicitly decreasing slice alignment" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -235,6 +240,7 @@ fn addUnalignedSlice(a: []align(1) const u32, b: []align(1) const u32) u32 { } test "specifying alignment allows pointer cast" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return 
error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -247,6 +253,7 @@ fn testBytesAlign(b: u8) !void { } test "@alignCast slices" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -265,6 +272,7 @@ fn sliceExpects4(slice: []align(4) u32) void { } test "return error union with 128-bit integer" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -277,6 +285,7 @@ fn give() anyerror!u128 { } test "page aligned array on stack" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -418,6 +427,7 @@ test "function callconv expression depends on generic parameter" { } test "runtime-known array index has best alignment possible" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO // take full advantage of over-alignment @@ -478,6 +488,7 @@ const DefaultAligned = struct { }; test "read 128-bit field from default aligned struct in stack memory" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -497,6 +508,7 @@ var default_aligned_global = DefaultAligned{ }; test "read 128-bit field from default aligned struct in global memory" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -506,6 +518,7 @@ test "read 128-bit field from default aligned struct in global memory" { } test "struct field explicit alignment" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -550,6 +563,7 @@ test "align(@alignOf(T)) T does not force resolution of T" { } test "align(N) on functions" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -595,6 +609,7 @@ test "comptime alloc alignment" { } test "@alignCast null" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return 
error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -610,6 +625,7 @@ test "alignment of slice element" { } test "sub-aligned pointer field access" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; @@ -658,6 +674,7 @@ test "alignment of zero-bit types is respected" { } test "zero-bit fields in extern struct pad fields appropriately" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; diff --git a/test/behavior/array.zig b/test/behavior/array.zig index 6397094398..c4421e8e8b 100644 --- a/test/behavior/array.zig +++ b/test/behavior/array.zig @@ -7,6 +7,7 @@ const expect = testing.expect; const expectEqual = testing.expectEqual; test "array to slice" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO const a: u32 align(4) = 3; const b: u32 align(8) = 4; const a_slice: []align(1) const u32 = @as(*const [1]u32, &a)[0..]; @@ -19,6 +20,7 @@ } test "arrays" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -47,6 +50,7 @@ fn getArrayLen(a: []const u32) usize { } test "array concat with undefined" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -70,6 +75,7 @@ test "array concat with undefined" { } test "array concat with tuple" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -86,6 +93,7 @@ } test "array init with concat" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO const a = 'a'; @@ -94,6 +103,7 @@ } test "array init with mult" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -106,6 +117,7 @@ } test "array literal with explicit type" { + if (builtin.zig_backend
== .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -116,6 +128,7 @@ } test "array literal with inferred length" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO const hex_mult = [_]u16{ 4096, 256, 16, 1 }; try expect(hex_mult.len == 4); @@ -123,6 +136,7 @@ } test "array dot len const expr" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO try expect(comptime x: { break :x some_array.len == 4; }); @@ -134,6 +148,7 @@ const ArrayDotLenConstExpr = struct { const some_array = [_]u8{ 0, 1, 2, 3 }; test "array literal with specified size" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -145,6 +160,7 @@ } test "array len field" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO var arr = [4]u8{ 0, 0, 0, 0 }; @@ -157,6 +173,7 @@ } test "array with sentinels" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -186,6 +204,7 @@ } test "void arrays" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO var array: [4]void = undefined; array[0] = void{}; array[1] = array[2]; @@ -194,6 +213,7 @@ } test "nested arrays of strings" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -209,6 +230,7 @@ } test "nested arrays of integers" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -224,6 +246,7 @@ } test "implicit comptime in array type size" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -237,6 +261,7 @@ fn plusOne(x: u32) u32 { } test "single-item pointer to array indexing and slicing" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -263,6 +289,7 @@ fn doSomeMangling(array: *[4]u8) void { } test "implicit cast zero sized array ptr to slice" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO { @@ -278,6 +306,7 @@ test "implicit cast zero sized array ptr to slice" { } test "anonymous list literal syntax" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -300,6 +329,7 @@ var s_array: [8]Sub = undefined; const Sub = struct { b: u8 }; const Str = struct { a: []Sub }; test "set global var array via slice embedded in struct" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -315,6 +346,7 @@ } test "read/write through global variable array of struct fields initialized via array mult" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -336,6 +369,7 @@ } test "implicit cast single-item pointer" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -355,6 +390,7 @@ fn testArrayByValAtComptime(b: [2]u8) u8 { } test "comptime evaluating function that takes array by value" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -366,6 +402,7 @@ test "comptime evaluating function that takes array by value" { } test "runtime initialize array elem and then implicit cast to slice" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -376,6 +414,7 @@ test "runtime initialize array elem and then implicit cast to slice" { } test "array literal as argument to function" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend ==
.stage2_sparc64) return error.SkipZigTest; // TODO @@ -403,6 +443,7 @@ test "array literal as argument to function" { } test "double nested array to const slice cast in array literal" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -464,6 +506,7 @@ test "double nested array to const slice cast in array literal" { } test "anonymous literal in array" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -490,6 +533,7 @@ test "anonymous literal in array" { } test "access the null element of a null terminated array" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -508,6 +553,7 @@ test "access the null element of a null terminated array" { } test "type deduction for array subscript expression" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -527,6 +574,7 @@ test "type deduction for array subscript expression" { } test "sentinel element count towards the ABI size calculation" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -551,6 +600,7 @@ test "sentinel element count towards the ABI size calculation" { } test "zero-sized array with recursive type definition" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; @@ -574,6 +625,7 @@ test "zero-sized array with recursive type definition" { } test "type coercion of anon struct literal to array" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO @@ -608,6 +661,7 @@ test
"type coercion of anon struct literal to array" { } test "type coercion of pointer to anon struct literal to pointer to array" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -642,12 +697,16 @@ test "type coercion of pointer to anon struct literal to pointer to array" { } test "array with comptime-only element type" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO const a = [_]type{ u32, i32 }; try testing.expect(a[0] == u32); try testing.expect(a[1] == i32); } test "tuple to array handles sentinel" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -660,6 +719,8 @@ test "tuple to array handles sentinel" { } test "array init of container level array variable" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -683,6 +744,8 @@ test "array init of container level array variable" { } test "runtime initialized sentinel-terminated array literal" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO var c: u16 = 300; _ = &c; const f = &[_:0x9999]u16{c}; @@ -692,6 +755,8 @@ test "runtime initialized sentinel-terminated array literal" { } test "array of array agregate init" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -703,6 +768,8 @@ test "array of array agregate init" { } test "pointer to array has ptr field" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO const arr: *const [5]u32 = &.{ 10, 20, 30, 40, 50 }; try std.testing.expect(arr.ptr == @as([*]const u32, arr)); try std.testing.expect(arr.ptr[0] == 10); @@ -713,6 +780,8 @@ test "pointer to array has ptr field" { } test "discarded array init preserves result location" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO const S = struct { fn f(p: *u32) u16 { p.* += 1; @@ -731,6 +800,8 @@ test "discarded array init preserves result location" { } test 
"array init with no result location has result type" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO const x = .{ .foo = [2]u16{ @intCast(10), @intCast(20), @@ -742,6 +813,8 @@ test "array init with no result location has result type" { } test "slicing array of zero-sized values" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; @@ -754,6 +827,8 @@ test "slicing array of zero-sized values" { } test "array init with no result pointer sets field result types" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO const S = struct { // A function parameter has a result type, but no result pointer. fn f(arr: [1]u32) u32 { @@ -768,6 +843,8 @@ test "array init with no result pointer sets field result types" { } test "runtime side-effects in comptime-known array init" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO var side_effects: u4 = 0; const init = [4]u4{ blk: { @@ -792,6 +869,8 @@ test "runtime side-effects in comptime-known array init" { } test "slice initialized through reference to anonymous array init provides result types" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO var my_u32: u32 = 123; var my_u64: u64 = 456; _ = .{ &my_u32, &my_u64 }; @@ -851,6 +930,8 @@ test "many-item sentinel-terminated pointer initialized through reference to ano } test "pointer to array initialized through reference to anonymous array init provides result types" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO var my_u32: u32 = 123; var my_u64: u64 = 456; _ = .{ &my_u32, &my_u64 }; @@ -877,6 +958,8 @@ test "pointer to sentinel-terminated array initialized through reference to anon } test "tuple initialized through reference to anonymous array init provides result types" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO const Tuple = struct { u64, *const u32 }; const foo: *const Tuple = &.{ @intCast(12345), @@ -887,6 +970,8 @@ test "tuple initialized through reference to anonymous array init provides resul } test "copied array element doesn't alias source" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -901,6 +986,8 @@ test "copied array element doesn't alias source" { } test "array initialized with string literal" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // 
TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO const S = struct { @@ -921,6 +1008,7 @@ } test "array initialized with array with sentinel" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO const S = struct { @@ -941,6 +1030,7 @@ } test "store array of array of structs at comptime" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -966,6 +1057,7 @@ } test "accessing multidimensional global array at comptime" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO @@ -982,6 +1075,7 @@ } test "union that needs padding bytes inside an array" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO From cc204e2365547c19f7c9e1a836a3ea8c18e3a6ea Mon Sep 17 00:00:00 2001 From: David Rubin Date: Sat, 13 Apr 2024 11:52:17 -0700 Subject: [PATCH 35/44] riscv: spill args immediately to prevent clobbers --- src/arch/riscv64/CodeGen.zig | 30 ++++++++++++++---------------- 1 file changed, 14 insertions(+), 16 deletions(-) diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 36014d64ba..285aa88095 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -1696,17 +1696,8 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { const dst_ty = self.typeOfIndex(inst); const result: MCValue = result: { - const dst_abi_size: u32 = @intCast(dst_ty.abiSize(zcu)); - const src_int_info = src_ty.intInfo(zcu); const dst_int_info = dst_ty.intInfo(zcu); - const extend = switch (src_int_info.signedness) { .signed => dst_int_info, .unsigned => src_int_info, }.signedness; - - _ = dst_abi_size; - _ = extend; const min_ty = if (dst_int_info.bits < src_int_info.bits) dst_ty else src_ty; @@ -1727,13 +1718,11 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { break :dst dst_mcv; }; - if (dst_int_info.bits <= src_int_info.bits) { + if (dst_int_info.bits <= src_int_info.bits) break :result dst_mcv; - } - if (dst_int_info.bits > 64 or src_int_info.bits > 64) { + if (dst_int_info.bits > 64 or src_int_info.bits > 64) break :result null; // TODO - } break :result dst_mcv; } orelse
return self.fail("TODO implement airIntCast from {} to {}", .{ @@ -3435,6 +3424,7 @@ fn genArgDbgInfo(self: Self, inst: Air.Inst.Index, mcv: MCValue) !void { } fn airArg(self: *Self, inst: Air.Inst.Index) !void { + const zcu = self.bin_file.comp.module.?; var arg_index = self.arg_index; // we skip over args that have no bits @@ -3445,10 +3435,18 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void { const src_mcv = self.args[arg_index]; const dst_mcv = switch (src_mcv) { - .register, .register_pair, .load_frame => dst: { - for (src_mcv.getRegs()) |reg| self.register_manager.getRegAssumeFree(reg, inst); - break :dst src_mcv; + .register => dst: { + const frame = try self.allocFrameIndex(FrameAlloc.init(.{ + .size = Type.usize.abiSize(zcu), + .alignment = Type.usize.abiAlignment(zcu), + })); + const dst_mcv: MCValue = .{ .load_frame = .{ .index = frame } }; + + try self.genCopy(Type.usize, dst_mcv, src_mcv); + + break :dst dst_mcv; }, + .load_frame => src_mcv, else => return self.fail("TODO: airArg {s}", .{@tagName(src_mcv)}), }; From d19b77d63f0d02ab9c0a0928391891ae4a77744c Mon Sep 17 00:00:00 2001 From: David Rubin Date: Sat, 13 Apr 2024 19:30:10 -0700 Subject: [PATCH 36/44] riscv: back to hello world panics --- lib/std/builtin.zig | 10 +++++++- src/arch/riscv64/CodeGen.zig | 30 ++++++++++++++++++---- src/arch/riscv64/Emit.zig | 32 ++++++++++++++++++++++- src/arch/riscv64/Encoding.zig | 48 ++++++++++++++++++++++++++++------- src/arch/riscv64/Lower.zig | 47 +++++++++++++++++++++++++++++++--- src/arch/riscv64/bits.zig | 18 +++++++++---- src/arch/riscv64/encoder.zig | 6 ++--- src/link/riscv.zig | 14 ++++++++++ src/target.zig | 2 +- 9 files changed, 179 insertions(+), 28 deletions(-) diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig index c5ddf02188..3026911d3f 100644 --- a/lib/std/builtin.zig +++ b/lib/std/builtin.zig @@ -775,7 +775,15 @@ pub fn default_panic(msg: []const u8, error_return_trace: ?*StackTrace, ret_addr } if (builtin.zig_backend == .stage2_riscv64) { - unreachable; + asm volatile ("ecall" + : + : [number] "{a7}" (64), + [arg1] "{a0}" (1), + [arg2] "{a1}" (@intFromPtr(msg.ptr)), + [arg3] "{a2}" (msg.len), + : "memory" + ); + std.posix.exit(127); } switch (builtin.os.tag) { diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 285aa88095..035ed1b611 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -1513,7 +1513,7 @@ fn splitType(self: *Self, ty: Type) ![2]Type { }, else => unreachable, }, - else => break, + else => return self.fail("TODO: splitType class {}", .{class}), }; } else if (parts[0].abiSize(zcu) + parts[1].abiSize(zcu) == ty.abiSize(zcu)) return parts; return self.fail("TODO implement splitType for {}", .{ty.fmt(zcu)}); @@ -3434,6 +3434,8 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { const src_mcv = self.args[arg_index]; + const arg_ty = self.typeOfIndex(inst); + const dst_mcv = switch (src_mcv) { .register => dst: { const frame = try self.allocFrameIndex(FrameAlloc.init(.{ @@ -3441,9 +3443,16 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void { .alignment = Type.usize.abiAlignment(zcu), })); const dst_mcv: MCValue = .{ .load_frame = .{ .index = frame } }; - try self.genCopy(Type.usize, dst_mcv, src_mcv); - + break :dst dst_mcv; + }, + .register_pair => dst: { + const frame = try self.allocFrameIndex(FrameAlloc.init(.{ + .size = Type.usize.abiSize(zcu) * 2, + .alignment = Type.usize.abiAlignment(zcu), + })); + const 
dst_mcv: MCValue = .{ .load_frame = .{ .index = frame } }; + try self.genCopy(arg_ty, dst_mcv, src_mcv); break :dst dst_mcv; }, .load_frame => src_mcv, @@ -4506,6 +4515,17 @@ fn genSetStack( else => unreachable, // register can hold a max of 8 bytes } }, + .register_pair => |pair| { + var part_disp: i32 = frame.off; + for (try self.splitType(ty), pair) |src_ty, src_reg| { + try self.genSetStack( + src_ty, + .{ .index = frame.index, .off = part_disp }, + .{ .register = src_reg }, + ); + part_disp += @intCast(src_ty.abiSize(zcu)); + } + }, .load_frame, .indirect, .load_symbol, @@ -4564,8 +4584,8 @@ fn genInlineMemcpy( .ops = .rri, .data = .{ .i_type = .{ - .rd = tmp, - .rs1 = dst, + .rd = dst, + .rs1 = tmp, .imm12 = Immediate.s(0), }, }, diff --git a/src/arch/riscv64/Emit.zig b/src/arch/riscv64/Emit.zig index 0e1decd42f..ec256fefb3 100644 --- a/src/arch/riscv64/Emit.zig +++ b/src/arch/riscv64/Emit.zig @@ -41,7 +41,35 @@ pub fn emitMir(emit: *Emit) Error!void { .offset = 0, .enc = std.meta.activeTag(lowered_inst.encoding.data), }), - else => |x| return emit.fail("TODO: emitMir {s}", .{@tagName(x)}), + .load_symbol_reloc => |symbol| { + if (emit.lower.bin_file.cast(link.File.Elf)) |elf_file| { + const atom_ptr = elf_file.symbol(symbol.atom_index).atom(elf_file).?; + const sym_index = elf_file.zigObjectPtr().?.symbol(symbol.sym_index); + const sym = elf_file.symbol(sym_index); + + var hi_r_type: u32 = @intFromEnum(std.elf.R_RISCV.HI20); + var lo_r_type: u32 = @intFromEnum(std.elf.R_RISCV.LO12_I); + + if (sym.flags.needs_zig_got) { + _ = try sym.getOrCreateZigGotEntry(sym_index, elf_file); + + hi_r_type = Elf.R_ZIG_GOT_HI20; + lo_r_type = Elf.R_ZIG_GOT_LO12; + } + + try atom_ptr.addReloc(elf_file, .{ + .r_offset = start_offset, + .r_info = (@as(u64, @intCast(symbol.sym_index)) << 32) | hi_r_type, + .r_addend = 0, + }); + + try atom_ptr.addReloc(elf_file, .{ + .r_offset = start_offset + 4, + .r_info = (@as(u64, @intCast(symbol.sym_index)) << 32) | lo_r_type, + .r_addend = 0, + }); + } else return emit.fail("TODO: load_symbol_reloc non-ELF", .{}); + }, }; } std.debug.assert(lowered_relocs.len == 0); @@ -120,6 +148,7 @@ fn fixupRelocs(emit: *Emit) Error!void { switch (reloc.enc) { .J => riscv_util.writeInstJ(code, @bitCast(disp)), + .B => riscv_util.writeInstB(code, @bitCast(disp)), else => return emit.fail("tried to reloc encoding type {s}", .{@tagName(reloc.enc)}), } } @@ -161,3 +190,4 @@ const Lower = @import("Lower.zig"); const Mir = @import("Mir.zig"); const riscv_util = @import("../../link/riscv.zig"); const Encoding = @import("Encoding.zig"); +const Elf = @import("../../link/Elf.zig"); diff --git a/src/arch/riscv64/Encoding.zig b/src/arch/riscv64/Encoding.zig index 1510185944..ec113d9b91 100644 --- a/src/arch/riscv64/Encoding.zig +++ b/src/arch/riscv64/Encoding.zig @@ -29,6 +29,9 @@ pub const Mnemonic = enum { // J Type jal, + // B Type + beq, + // System ecall, ebreak, @@ -58,7 +61,9 @@ pub const Mnemonic = enum { .sh => .{ .opcode = 0b0100011, .funct3 = 0b001, .funct7 = null }, .sb => .{ .opcode = 0b0100011, .funct3 = 0b000, .funct7 = null }, - .jal => .{ .opcode = 0b1101111, .funct3 = null, .funct7 = null }, + .jal => .{ .opcode = 0b1101111, .funct3 = null, .funct7 = null }, + + .beq => .{ .opcode = 0b1100011, .funct3 = 0b000, .funct7 = null }, .ecall => .{ .opcode = 0b1110011, .funct3 = 0b000, .funct7 = null }, .ebreak => .{ .opcode = 0b1110011, .funct3 = 0b000, .funct7 = null }, @@ -107,6 +112,9 @@ pub const InstEnc = enum { .jal, => .J, + .beq, + => .B, + .ecall, .ebreak, .unimp, @@ 
-114,15 +122,17 @@ }; } - pub fn opsList(enc: InstEnc) [4]std.meta.FieldEnum(Operand) { + pub fn opsList(enc: InstEnc) [3]std.meta.FieldEnum(Operand) { return switch (enc) { - .R => .{ .reg, .reg, .reg, .none }, - .I => .{ .reg, .reg, .imm, .none }, - .S => .{ .reg, .reg, .imm, .none }, - .B => .{ .imm, .reg, .reg, .imm }, - .U => .{ .reg, .imm, .none, .none }, - .J => .{ .reg, .imm, .none, .none }, - .system => .{ .none, .none, .none, .none }, + // zig fmt: off + .R => .{ .reg, .reg, .reg, }, + .I => .{ .reg, .reg, .imm, }, + .S => .{ .reg, .reg, .imm, }, + .B => .{ .reg, .reg, .imm, }, + .U => .{ .reg, .imm, .none, }, + .J => .{ .reg, .imm, .none, }, + .system => .{ .none, .none, .none, }, + // zig fmt: on }; } }; @@ -292,6 +302,26 @@ pub const Data = union(InstEnc) { }, }; }, + .B => { + assert(ops.len == 3); + + const umm = ops[2].imm.asBits(u13); + assert(umm % 4 == 0); // misaligned branch target + + return .{ + .B = .{ + .rs1 = ops[0].reg.id(), + .rs2 = ops[1].reg.id(), + .imm1_4 = @truncate(umm >> 1), + .imm5_10 = @truncate(umm >> 5), + .imm11 = @truncate(umm >> 11), + .imm12 = @truncate(umm >> 12), + + .opcode = enc.opcode, + .funct3 = enc.funct3.?, + }, + }; + }, else => std.debug.panic("TODO: construct {s}", .{@tagName(inst_enc)}), } diff --git a/src/arch/riscv64/Lower.zig b/src/arch/riscv64/Lower.zig index 5a3e375e05..714f3a43ad 100644 --- a/src/arch/riscv64/Lower.zig +++ b/src/arch/riscv64/Lower.zig @@ -31,7 +31,9 @@ pub const Reloc = struct { const Target = union(enum) { inst: Mir.Inst.Index, - linker_reloc: bits.Symbol, + + /// Relocs the lowered_inst_index and the next one. + load_symbol_reloc: bits.Symbol, }; }; @@ -59,6 +61,7 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { .pseudo_dbg_prologue_end, .pseudo_dead, => {}, + .pseudo_load_rm, .pseudo_store_rm => { const rm = inst.data.rm; @@ -106,6 +109,7 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { .{ .imm = Immediate.s(0) }, }); }, + .pseudo_ret => { try lower.emit(.jalr, &.{ .{ .reg = .zero }, @@ -113,6 +117,7 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { .{ .imm = Immediate.s(0) }, }); }, + .pseudo_j => { try lower.emit(.jal, &.{ .{ .reg = .zero }, @@ -123,7 +128,38 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { .pseudo_spill_regs => try lower.pushPopRegList(true, inst.data.reg_list), .pseudo_restore_regs => try lower.pushPopRegList(false, inst.data.reg_list), - else => return lower.fail("TODO: psuedo {s}", .{@tagName(inst.ops)}), + .pseudo_load_symbol => { + const payload = inst.data.payload; + const data = lower.mir.extraData(Mir.LoadSymbolPayload, payload).data; + + try lower.emit(.lui, &.{ + .{ .reg = @enumFromInt(data.register) }, + .{ .imm = lower.reloc(.{ .load_symbol_reloc = .{ + .atom_index = data.atom_index, + .sym_index = data.sym_index, + } }) }, + }); + + // the reloc above also patches this addi (the LO12 half of the HI20/LO12 pair) + try lower.emit(.addi, &.{ + .{ .reg = @enumFromInt(data.register) }, + .{ .reg = @enumFromInt(data.register) }, + .{ .imm = Immediate.s(0) }, + }); + }, + + .pseudo_lea_rm => { + const rm = inst.data.rm; + const frame = rm.m.toFrameLoc(lower.mir); + + try lower.emit(.addi, &.{ + .{ .reg = rm.r }, + .{ .reg = frame.base }, + .{ .imm = Immediate.s(frame.disp) }, + }); + }, + + else => return lower.fail("TODO Lower: pseudo {s}", .{@tagName(inst.ops)}), }, } @@ -135,7 +171,7 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { fn generic(lower: *Lower, inst: Mir.Inst) Error!void {
const mnemonic = std.meta.stringToEnum(Encoding.Mnemonic, @tagName(inst.tag)) orelse { - return lower.fail("generic inst name {s}-{s} doesn't match with a mnemonic", .{ + return lower.fail("generic inst name '{s}' with op {s} doesn't match with a mnemonic", .{ @tagName(inst.tag), @tagName(inst.ops), }); @@ -151,6 +187,11 @@ fn generic(lower: *Lower, inst: Mir.Inst) Error!void { .{ .reg = inst.data.i_type.rs1 }, .{ .imm = inst.data.i_type.imm12 }, }, + .rr_inst => &.{ + .{ .reg = inst.data.b_type.rs1 }, + .{ .reg = inst.data.b_type.rs2 }, + .{ .imm = lower.reloc(.{ .inst = inst.data.b_type.inst }) }, + }, else => return lower.fail("TODO: generic lower ops {s}", .{@tagName(inst.ops)}), }); } diff --git a/src/arch/riscv64/bits.zig b/src/arch/riscv64/bits.zig index 83651432a6..d3dd78cf3e 100644 --- a/src/arch/riscv64/bits.zig +++ b/src/arch/riscv64/bits.zig @@ -65,17 +65,25 @@ pub const Memory = struct { /// Asserts `mem` can be represented as a `FrameLoc`. pub fn toFrameLoc(mem: Memory, mir: Mir) Mir.FrameLoc { + const offset: i32 = switch (mem.mod) { + .off => |off| @intCast(off), + .rm => |rm| rm.disp, + }; + switch (mem.base) { .reg => |reg| { return .{ .base = reg, - .disp = switch (mem.mod) { - .off => unreachable, // TODO: toFrameLoc disp.off - .rm => |rm| rm.disp, - }, + .disp = offset, + }; + }, + .frame => |index| { + const base_loc = mir.frame_locs.get(@intFromEnum(index)); + return .{ + .base = base_loc.base, + .disp = base_loc.disp + offset, }; }, - .frame => |index| return mir.frame_locs.get(@intFromEnum(index)), .reloc => unreachable, } } diff --git a/src/arch/riscv64/encoder.zig b/src/arch/riscv64/encoder.zig index 4eadcb0e8c..ddd4f5f437 100644 --- a/src/arch/riscv64/encoder.zig +++ b/src/arch/riscv64/encoder.zig @@ -1,6 +1,6 @@ pub const Instruction = struct { encoding: Encoding, - ops: [4]Operand = .{.none} ** 4, + ops: [3]Operand = .{.none} ** 3, pub const Operand = union(enum) { none, @@ -11,7 +11,7 @@ pub const Instruction = struct { pub fn new(mnemonic: Encoding.Mnemonic, ops: []const Operand) !Instruction { const encoding = (try Encoding.findByMnemonic(mnemonic, ops)) orelse { - log.err("no encoding found for: {s} {s} {s} {s} {s}", .{ + std.log.err("no encoding found for: {s} {s} {s} {s} {s}", .{ @tagName(mnemonic), @tagName(if (ops.len > 0) ops[0] else .none), @tagName(if (ops.len > 1) ops[1] else .none), @@ -21,7 +21,7 @@ pub const Instruction = struct { return error.InvalidInstruction; }; - var result_ops: [4]Operand = .{.none} ** 4; + var result_ops: [3]Operand = .{.none} ** 3; @memcpy(result_ops[0..ops.len], ops); return .{ diff --git a/src/link/riscv.zig b/src/link/riscv.zig index 0f87131367..e78cb84cdf 100644 --- a/src/link/riscv.zig +++ b/src/link/riscv.zig @@ -73,6 +73,20 @@ pub fn writeInstJ(code: *[4]u8, value: u32) void { mem.writeInt(u32, code, data.toU32(), .little); } +pub fn writeInstB(code: *[4]u8, value: u32) void { + var data = Encoding.Data{ + .B = mem.bytesToValue(std.meta.TagPayload( + Encoding.Data, + Encoding.Data.B, + ), code), + }; + data.B.imm1_4 = bitSlice(value, 4, 1); + data.B.imm5_10 = bitSlice(value, 10, 5); + data.B.imm11 = bitSlice(value, 11, 11); + data.B.imm12 = bitSlice(value, 12, 12); + mem.writeInt(u32, code, data.toU32(), .little); +} + fn bitSlice( value: anytype, comptime high: comptime_int, diff --git a/src/target.zig b/src/target.zig index ea58111bc1..8f61b2ba03 100644 --- a/src/target.zig +++ b/src/target.zig @@ -526,7 +526,7 @@ pub fn backendSupportsFeature( feature: Feature, ) bool { return switch (feature) { - .panic_fn => 
ofmt == .c or use_llvm or cpu_arch == .x86_64, + .panic_fn => ofmt == .c or use_llvm or cpu_arch == .x86_64 or cpu_arch == .riscv64, .panic_unwrap_error => ofmt == .c or use_llvm, .safety_check_formatted => ofmt == .c or use_llvm, .error_return_trace => use_llvm, From e622485df8d162fd2696b6ab1149262aa6b74407 Mon Sep 17 00:00:00 2001 From: David Rubin Date: Sat, 13 Apr 2024 21:12:26 -0700 Subject: [PATCH 37/44] riscv: actually working test runner --- lib/compiler/test_runner.zig | 2 + src/arch/riscv64/CodeGen.zig | 314 +++++++++++----------------------- src/arch/riscv64/Encoding.zig | 32 +++- src/arch/riscv64/Lower.zig | 83 ++++++++- src/arch/riscv64/Mir.zig | 47 ++--- src/arch/riscv64/abi.zig | 17 +- 6 files changed, 239 insertions(+), 256 deletions(-) diff --git a/lib/compiler/test_runner.zig b/lib/compiler/test_runner.zig index 5c674cecce..08633e54ca 100644 --- a/lib/compiler/test_runner.zig +++ b/lib/compiler/test_runner.zig @@ -266,4 +266,6 @@ pub fn mainExtraSimple() !void { }; pass_count += 1; } + + std.posix.exit(pass_count); } diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 035ed1b611..f36613473b 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -1601,7 +1601,6 @@ fn allocReg(self: *Self) !struct { Register, RegisterLock } { } fn elemOffset(self: *Self, index_ty: Type, index: MCValue, elem_size: u64) !Register { - log.debug("elemOffset: {}", .{index}); const reg: Register = blk: { switch (index) { .immediate => |imm| { @@ -1616,14 +1615,14 @@ fn elemOffset(self: *Self, index_ty: Type, index: MCValue, elem_size: u64) !Regi const lock = self.register_manager.lockRegAssumeUnused(reg); defer self.register_manager.unlockReg(lock); - try self.binOpMir( + const result = try self.binOp( .mul, - null, - index_ty, .{ .register = reg }, + index_ty, .{ .immediate = elem_size }, + index_ty, ); - break :blk reg; + break :blk result.register; }, } }; @@ -1817,24 +1816,10 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { const lhs_ty = self.typeOf(bin_op.lhs); const rhs_ty = self.typeOf(bin_op.rhs); - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else try self.binOp(tag, inst, lhs, rhs, lhs_ty, rhs_ty); - return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); -} - -fn supportImmediate(tag: Air.Inst.Tag) bool { - return switch (tag) { - .add, - .sub, - .cmp_eq, - .cmp_neq, - .cmp_gt, - .cmp_gte, - .cmp_lt, - .cmp_lte, - => true, - - else => false, + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { + break :result try self.binOp(tag, lhs, lhs_ty, rhs, rhs_ty); }; + return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } /// For all your binary operation needs, this function will generate @@ -1854,10 +1839,9 @@ fn supportImmediate(tag: Air.Inst.Tag) bool { fn binOp( self: *Self, tag: Air.Inst.Tag, - maybe_inst: ?Air.Inst.Index, lhs: MCValue, - rhs: MCValue, lhs_ty: Type, + rhs: MCValue, rhs_ty: Type, ) InnerError!MCValue { const zcu = self.bin_file.comp.module.?; @@ -1881,15 +1865,12 @@ fn binOp( assert(lhs_ty.eql(rhs_ty, zcu)); const int_info = lhs_ty.intInfo(zcu); if (int_info.bits <= 64) { - if (rhs == .immediate and supportImmediate(tag)) { - return self.binOpImm(tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty); - } - return self.binOpRegister(tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty); + return self.binOpRegister(tag, lhs, lhs_ty, rhs, rhs_ty); } else { return self.fail("TODO binary operations on int with bits > 64", .{}); } 
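// A minimal sketch of the refactored binOp call shape (lhs_mcv/rhs_mcv are assumed
// names for MCValues the caller has already resolved; this is an illustration, not a
// hunk from the patch). Each operand now travels with its own type, and the trailing
// maybe_inst parameter is gone:
//
//     const sum_mcv = try self.binOp(.add, lhs_mcv, Type.usize, rhs_mcv, Type.usize);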
}, - else => |x| return self.fail("TOOD: binOp {s}", .{@tagName(x)}), + else => |x| return std.debug.panic("TODO: binOp {s}", .{@tagName(x)}), } }, @@ -1912,23 +1893,21 @@ fn binOp( else => unreachable, }; - return try self.binOpRegister(base_tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty); + return try self.binOpRegister(base_tag, lhs, lhs_ty, rhs, rhs_ty); } else { const offset = try self.binOp( .mul, - null, rhs, - .{ .immediate = elem_size }, Type.usize, + .{ .immediate = elem_size }, Type.usize, ); const addr = try self.binOp( tag, - null, lhs, - offset, Type.manyptr_u8, + offset, Type.usize, ); return addr; @@ -1948,10 +1927,7 @@ .Int => { const int_info = lhs_ty.intInfo(zcu); if (int_info.bits <= 64) { - if (rhs == .immediate) { - return self.binOpImm(tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty); - } - return self.binOpRegister(tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty); + return self.binOpRegister(tag, lhs, lhs_ty, rhs, rhs_ty); } else { return self.fail("TODO binary operations on int with bits > 64", .{}); } @@ -1973,14 +1949,11 @@ fn binOpRegister( self: *Self, tag: Air.Inst.Tag, - maybe_inst: ?Air.Inst.Index, lhs: MCValue, - rhs: MCValue, lhs_ty: Type, + rhs: MCValue, rhs_ty: Type, ) !MCValue { - _ = maybe_inst; - const lhs_reg, const lhs_lock = blk: { if (lhs == .register) break :blk .{ lhs.register, null }; @@ -2006,164 +1979,79 @@ .add => .add, .sub => .sub, .mul => .mul, - .cmp_eq => .cmp_eq, - .cmp_neq => .cmp_neq, - .cmp_gt => .cmp_gt, - .cmp_gte => .cmp_gte, - .cmp_lt => .cmp_lt, + .shl => .sllw, .shr => .srlw, + + .cmp_eq, + .cmp_neq, + .cmp_gt, + .cmp_gte, + .cmp_lt, + => .pseudo, + else => return self.fail("TODO: binOpRegister {s}", .{@tagName(tag)}), }; - _ = try self.addInst(.{ - .tag = mir_tag, - .ops = .rrr, - .data = .{ - .r_type = .{ - .rd = dest_reg, - .rs1 = lhs_reg, - .rs2 = rhs_reg, - }, - }, - }); - - // generate the struct for OF checks - - return MCValue{ .register = dest_reg }; -} - -/// Don't call this function directly. Use binOp instead. -/// -/// Call this function if rhs is an immediate. Generates I version of binops. -/// -/// Asserts that rhs is an immediate MCValue -fn binOpImm( - self: *Self, - tag: Air.Inst.Tag, - maybe_inst: ?Air.Inst.Index, - lhs: MCValue, - rhs: MCValue, - lhs_ty: Type, - rhs_ty: Type, -) !MCValue { - assert(rhs == .immediate); - _ = maybe_inst; - - // TODO: use `maybe_inst` to track instead of forcing a lock.
- - const lhs_reg, const lhs_lock = blk: { - if (lhs == .register) break :blk .{ lhs.register, null }; - - const lhs_reg, const lhs_lock = try self.allocReg(); - try self.genSetReg(lhs_ty, lhs_reg, lhs); - break :blk .{ lhs_reg, lhs_lock }; - }; - defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock); - - const dest_reg, const dest_lock = try self.allocReg(); - defer self.register_manager.unlockReg(dest_lock); - - const mir_tag: Mir.Inst.Tag = switch (tag) { - .shl => .slli, - .shr => .srli, - .cmp_gte => .cmp_imm_gte, - .cmp_eq => .cmp_imm_eq, - .cmp_neq => .cmp_imm_neq, - .cmp_lte => .cmp_imm_lte, - .cmp_lt => .cmp_imm_lt, - .add => .addi, - .sub => .addiw, - else => return self.fail("TODO: binOpImm {s}", .{@tagName(tag)}), - }; - - // apply some special operations needed switch (mir_tag) { - .slli, - .srli, - .addi, - .cmp_imm_eq, - .cmp_imm_neq, - .cmp_imm_lte, - .cmp_imm_lt, + .add, + .sub, + .mul, + .sllw, + .srlw, => { - _ = try self.addInst(.{ - .tag = mir_tag, - .ops = .rri, - .data = .{ .i_type = .{ - .rd = dest_reg, - .rs1 = lhs_reg, - .imm12 = Immediate.s(math.cast(i12, rhs.immediate) orelse { - return self.fail("TODO: binOpImm larger than i12 i_type payload", .{}); - }), - } }, - }); - }, - .addiw => { - _ = try self.addInst(.{ - .tag = mir_tag, - .ops = .rri, - .data = .{ .i_type = .{ - .rd = dest_reg, - .rs1 = lhs_reg, - .imm12 = Immediate.s(-(math.cast(i12, rhs.immediate) orelse { - return self.fail("TODO: binOpImm larger than i12 i_type payload", .{}); - })), - } }, - }); - }, - .cmp_imm_gte => { - const imm_reg = try self.copyToTmpRegister(rhs_ty, .{ .immediate = rhs.immediate - 1 }); - - _ = try self.addInst(.{ - .tag = mir_tag, - .ops = .rrr, - .data = .{ .r_type = .{ - .rd = dest_reg, - .rs1 = imm_reg, - .rs2 = lhs_reg, - } }, - }); - }, - else => unreachable, - } - - return MCValue{ .register = dest_reg }; -} - -fn binOpMir( - self: *Self, - mir_tag: Mir.Inst.Tag, - maybe_inst: ?Air.Inst.Index, - ty: Type, - dst_mcv: MCValue, - src_mcv: MCValue, -) !void { - const zcu = self.bin_file.comp.module.?; - const abi_size: u32 = @intCast(ty.abiSize(zcu)); - - _ = abi_size; - _ = maybe_inst; - - switch (dst_mcv) { - .register => |dst_reg| { - const src_reg = try self.copyToTmpRegister(ty, src_mcv); - _ = try self.addInst(.{ .tag = mir_tag, .ops = .rrr, .data = .{ .r_type = .{ - .rd = dst_reg, - .rs1 = dst_reg, - .rs2 = src_reg, + .rd = dest_reg, + .rs1 = lhs_reg, + .rs2 = rhs_reg, }, }, }); }, - else => return self.fail("TODO: binOpMir {s}", .{@tagName(dst_mcv)}), + .pseudo => { + const pseudo_op = switch (tag) { + .cmp_eq, + .cmp_neq, + .cmp_gt, + .cmp_gte, + .cmp_lt, + => .pseudo_compare, + else => unreachable, + }; + + _ = try self.addInst(.{ + .tag = .pseudo, + .ops = pseudo_op, + .data = .{ + .compare = .{ + .rd = dest_reg, + .rs1 = lhs_reg, + .rs2 = rhs_reg, + .op = switch (tag) { + .cmp_eq => .eq, + .cmp_neq => .neq, + .cmp_gt => .gt, + .cmp_gte => .gte, + .cmp_lt => .lt, + .cmp_lte => .lte, + else => unreachable, + }, + }, + }, + }); + }, + + else => unreachable, } + + // generate the struct for OF checks + + return MCValue{ .register = dest_reg }; } fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { @@ -2174,7 +2062,9 @@ fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void const lhs_ty = self.typeOf(bin_op.lhs); const rhs_ty = self.typeOf(bin_op.rhs); - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else try self.binOp(tag, inst, lhs, rhs, lhs_ty, rhs_ty); + const result: MCValue 
= if (self.liveness.isUnused(inst)) .unreach else result: { + break :result try self.binOp(tag, lhs, lhs_ty, rhs, rhs_ty); + }; return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } @@ -2200,7 +2090,7 @@ fn airSubWrap(self: *Self, inst: Air.Inst.Index) !void { const lhs_ty = self.typeOf(bin_op.lhs); const rhs_ty = self.typeOf(bin_op.rhs); - break :result try self.binOp(.sub, inst, lhs, rhs, lhs_ty, rhs_ty); + break :result try self.binOp(.sub, lhs, lhs_ty, rhs, rhs_ty); }; return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } @@ -2240,7 +2130,7 @@ fn airAddWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const lhs_ty = self.typeOf(extra.lhs); const rhs_ty = self.typeOf(extra.rhs); - const add_result_mcv = try self.binOp(.add, null, lhs, rhs, lhs_ty, rhs_ty); + const add_result_mcv = try self.binOp(.add, lhs, lhs_ty, rhs, rhs_ty); const add_result_lock = self.register_manager.lockRegAssumeUnused(add_result_mcv.register); defer self.register_manager.unlockReg(add_result_lock); @@ -2291,10 +2181,9 @@ fn airAddWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const overflow_mcv = try self.binOp( .cmp_neq, - null, .{ .register = overflow_reg }, - .{ .register = add_reg }, lhs_ty, + .{ .register = add_reg }, lhs_ty, ); @@ -2347,7 +2236,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { switch (int_info.bits) { 1...32 => { if (self.hasFeature(.m)) { - const dest = try self.binOp(.mul, null, lhs, rhs, lhs_ty, rhs_ty); + const dest = try self.binOp(.mul, lhs, lhs_ty, rhs, rhs_ty); const add_result_lock = self.register_manager.lockRegAssumeUnused(dest.register); defer self.register_manager.unlockReg(add_result_lock); @@ -2393,10 +2282,9 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const overflow_mcv = try self.binOp( .cmp_neq, - null, .{ .register = overflow_reg }, - .{ .register = add_reg }, lhs_ty, + .{ .register = add_reg }, lhs_ty, ); @@ -2479,7 +2367,7 @@ fn airShl(self: *Self, inst: Air.Inst.Index) !void { const lhs_ty = self.typeOf(bin_op.lhs); const rhs_ty = self.typeOf(bin_op.rhs); - break :result try self.binOp(.shl, inst, lhs, rhs, lhs_ty, rhs_ty); + break :result try self.binOp(.shl, lhs, lhs_ty, rhs, rhs_ty); }; return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } @@ -2543,10 +2431,9 @@ fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void { if (err_off > 0) { result = try self.binOp( .shr, - null, result, - .{ .immediate = @as(u6, @intCast(err_off * 8)) }, err_union_ty, + .{ .immediate = @as(u6, @intCast(err_off * 8)) }, Type.u8, ); } @@ -2593,10 +2480,9 @@ fn genUnwrapErrUnionPayloadMir( if (payload_off > 0) { result = try self.binOp( .shr, - null, result, - .{ .immediate = @as(u6, @intCast(payload_off * 8)) }, err_union_ty, + .{ .immediate = @as(u6, @intCast(payload_off * 8)) }, Type.u8, ); } @@ -2837,7 +2723,7 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { }; const dest = try self.allocRegOrMem(inst, true); - const addr = try self.binOp(.ptr_add, null, base_mcv, index_mcv, slice_ptr_field_type, Type.usize); + const addr = try self.binOp(.ptr_add, base_mcv, slice_ptr_field_type, index_mcv, Type.usize); try self.load(dest, addr, slice_ptr_field_type); break :result dest; @@ -2885,13 +2771,14 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { defer self.register_manager.unlockReg(offset_lock); const dst_mcv = try self.allocRegOrMem(inst, false); - try self.binOpMir( - .add, - null, - Type.usize, - .{ .register = addr_reg }, - .{ 
.register = offset_reg }, - ); + _ = try self.addInst(.{ + .tag = .add, + .ops = .rr, + .data = .{ .rr = .{ + .rd = addr_reg, + .rs = offset_reg, + } }, + }); try self.genCopy(elem_ty, dst_mcv, .{ .indirect = .{ .reg = addr_reg } }); break :result dst_mcv; }; @@ -3046,7 +2933,7 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void { switch (int_bits) { 16 => { - const temp = try self.binOp(.shr, null, dest_mcv, .{ .immediate = 8 }, ty, Type.u8); + const temp = try self.binOp(.shr, dest_mcv, ty, .{ .immediate = 8 }, Type.u8); assert(temp == .register); _ = try self.addInst(.{ .tag = .slli, @@ -3752,7 +3639,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index) !void { const int_info = int_ty.intInfo(zcu); if (int_info.bits <= 64) { - break :result try self.binOp(tag, null, lhs, rhs, int_ty, int_ty); + break :result try self.binOp(tag, lhs, int_ty, rhs, int_ty); } else { return self.fail("TODO riscv cmp for ints > 64 bits", .{}); } @@ -4033,20 +3920,19 @@ fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, eu_ty: Type, eu_mcv: MCValue) if (err_off > 0) { return_mcv = try self.binOp( .shr, - null, return_mcv, - .{ .immediate = @as(u6, @intCast(err_off * 8)) }, eu_ty, + .{ .immediate = @as(u6, @intCast(err_off * 8)) }, Type.u8, ); } - try self.binOpMir( + return_mcv = try self.binOp( .cmp_neq, - null, - Type.anyerror, return_mcv, + Type.u16, .{ .immediate = 0 }, + Type.u16, ); return return_mcv; @@ -4070,8 +3956,8 @@ fn isNonErr(self: *Self, inst: Air.Inst.Index, eu_ty: Type, eu_mcv: MCValue) !MC switch (is_err_res) { .register => |reg| { _ = try self.addInst(.{ - .tag = .not, - .ops = .rr, + .tag = .pseudo, + .ops = .pseudo_not, .data = .{ .rr = .{ .rd = reg, @@ -4440,9 +4326,7 @@ fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) !void { dst_mcv, try self.resolveInst(src_ref), ), - else => return self.fail("TODO implement genCopy for {s} of {}", .{ - @tagName(src_mcv), ty.fmt(zcu), - }), + else => unreachable, }; defer if (src_info) |info| self.register_manager.unlockReg(info.addr_lock); diff --git a/src/arch/riscv64/Encoding.zig b/src/arch/riscv64/Encoding.zig index ec113d9b91..d145e21603 100644 --- a/src/arch/riscv64/Encoding.zig +++ b/src/arch/riscv64/Encoding.zig @@ -2,9 +2,6 @@ mnemonic: Mnemonic, data: Data, pub const Mnemonic = enum { - // R Type - add, - // I Type ld, lw, @@ -13,6 +10,10 @@ pub const Mnemonic = enum { lhu, lb, lbu, + sltiu, + sltu, + xori, + andi, addi, jalr, @@ -32,6 +33,12 @@ pub const Mnemonic = enum { // B Type beq, + // R Type + add, + slt, + mul, + xor, + // System ecall, ebreak, @@ -50,8 +57,11 @@ pub const Mnemonic = enum { .lb => .{ .opcode = 0b0000011, .funct3 = 0b000, .funct7 = null }, .lbu => .{ .opcode = 0b0000011, .funct3 = 0b100, .funct7 = null }, + .sltiu => .{ .opcode = 0b0010011, .funct3 = 0b011, .funct7 = null }, .addi => .{ .opcode = 0b0010011, .funct3 = 0b000, .funct7 = null }, + .andi => .{ .opcode = 0b0010011, .funct3 = 0b111, .funct7 = null }, + .xori => .{ .opcode = 0b0010011, .funct3 = 0b100, .funct7 = null }, .jalr => .{ .opcode = 0b1100111, .funct3 = 0b000, .funct7 = null }, .lui => .{ .opcode = 0b0110111, .funct3 = null, .funct7 = null }, @@ -65,6 +75,13 @@ pub const Mnemonic = enum { .beq => .{ .opcode = 0b1100011, .funct3 = 0b000, .funct7 = null }, + .slt => .{ .opcode = 0b0110011, .funct3 = 0b010, .funct7 = 0b0000000 }, + .sltu => .{ .opcode = 0b0110011, .funct3 = 0b011, .funct7 = 0b0000000 }, + + .xor => .{ .opcode = 0b0110011, .funct3 = 0b100, .funct7 = 0b0000000 }, + + .mul => .{ .opcode = 0b0110011, .funct3 = 
0b000, .funct7 = 0b0000001 },
+
             .ecall => .{ .opcode = 0b1110011, .funct3 = 0b000, .funct7 = null },
             .ebreak => .{ .opcode = 0b1110011, .funct3 = 0b000, .funct7 = null },
             .unimp => .{ .opcode = 0b0000000, .funct3 = 0b000, .funct7 = null },
@@ -98,6 +115,9 @@ pub const InstEnc = enum {
             .lb,
             .lbu,
             .jalr,
+            .sltiu,
+            .xori,
+            .andi,
             => .I,
 
             .lui,
@@ -115,6 +135,12 @@ pub const InstEnc = enum {
             .beq,
             => .B,
 
+            .slt,
+            .sltu,
+            .mul,
+            .xor,
+            => .R,
+
             .ecall,
             .ebreak,
             .unimp,
diff --git a/src/arch/riscv64/Lower.zig b/src/arch/riscv64/Lower.zig
index 714f3a43ad..41bb5c6599 100644
--- a/src/arch/riscv64/Lower.zig
+++ b/src/arch/riscv64/Lower.zig
@@ -159,7 +159,87 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct {
                 });
             },
 
-            else => return lower.fail("TODO Lower: psuedo {s}", .{@tagName(inst.ops)}),
+            .pseudo_compare => {
+                const compare = inst.data.compare;
+                const op = compare.op;
+
+                const rd = compare.rd;
+                const rs1 = compare.rs1;
+                const rs2 = compare.rs2;
+
+                switch (op) {
+                    .eq => {
+                        // rd = (rs1 ^ rs2) <u 1, i.e. 1 iff rs1 == rs2 (the "seqz" idiom).
+                        try lower.emit(.xor, &.{
+                            .{ .reg = rd },
+                            .{ .reg = rs1 },
+                            .{ .reg = rs2 },
+                        });
+
+                        try lower.emit(.sltiu, &.{
+                            .{ .reg = rd },
+                            .{ .reg = rd },
+                            .{ .imm = Immediate.s(1) },
+                        });
+                    },
+                    .neq => {
+                        // rd = 0 <u (rs1 ^ rs2), i.e. 1 iff rs1 != rs2 (the "snez" idiom).
+                        try lower.emit(.xor, &.{
+                            .{ .reg = rd },
+                            .{ .reg = rs1 },
+                            .{ .reg = rs2 },
+                        });
+
+                        try lower.emit(.sltu, &.{
+                            .{ .reg = rd },
+                            .{ .reg = .zero },
+                            .{ .reg = rd },
+                        });
+                    },
+                    .gt => {
+                        // rs1 > rs2 is computed as rs2 < rs1, so the source operands are swapped.
+                        try lower.emit(.sltu, &.{
+                            .{ .reg = rd },
+                            .{ .reg = rs2 },
+                            .{ .reg = rs1 },
+                        });
+                    },
+                    .gte => {
+                        try lower.emit(.sltu, &.{
+                            .{ .reg = rd },
+                            .{ .reg = rs1 },
+                            .{ .reg = rs2 },
+                        });
+
+                        try lower.emit(.xori, &.{
+                            .{ .reg = rd },
+                            .{ .reg = rd },
+                            .{ .imm = Immediate.s(1) },
+                        });
+                    },
+                    .lt => {
+                        try lower.emit(.slt, &.{
+                            .{ .reg = rd },
+                            .{ .reg = rs1 },
+                            .{ .reg = rs2 },
+                        });
+                    },
+                    else => return lower.fail("TODO lower: pseudo_compare {s}", .{@tagName(op)}),
+                }
+            },
+
+            .pseudo_not => {
+                // Boolean not: xori with 1 assumes rs holds 0 or 1.
+                const rr = inst.data.rr;
+
+                try lower.emit(.xori, &.{
+                    .{ .reg = rr.rd },
+                    .{ .reg = rr.rs },
+                    .{ .imm = Immediate.s(1) },
+                });
+            },
+
+            else => return lower.fail("TODO lower: pseudo {s}", .{@tagName(inst.ops)}),
         },
     }
@@ -192,6 +268,11 @@ fn generic(lower: *Lower, inst: Mir.Inst) Error!void {
             .{ .reg = inst.data.b_type.rs2 },
             .{ .imm = lower.reloc(.{ .inst = inst.data.b_type.inst }) },
         },
+        .rrr => &.{
+            .{ .reg = inst.data.r_type.rd },
+            .{ .reg = inst.data.r_type.rs1 },
+            .{ .reg = inst.data.r_type.rs2 },
+        },
         else => return lower.fail("TODO: generic lower ops {s}", .{@tagName(inst.ops)}),
     });
 }
diff --git a/src/arch/riscv64/Mir.zig b/src/arch/riscv64/Mir.zig
index 162aeb23c4..9ecca44bd8 100644
--- a/src/arch/riscv64/Mir.zig
+++ b/src/arch/riscv64/Mir.zig
@@ -67,36 +67,6 @@ pub const Inst = struct {
         /// Immediate AND, uses i_type payload
         andi,
-
-        // NOTE: Maybe create a special data for compares that includes the ops
-        /// Register `==`, uses r_type
-        cmp_eq,
-        /// Register `!=`, uses r_type
-        cmp_neq,
-        /// Register `>`, uses r_type
-        cmp_gt,
-        /// Register `<`, uses r_type
-        cmp_lt,
-        /// Register `>=`, uses r_type
-        cmp_gte,
-
-        /// Immediate `>=`, uses r_type
-        ///
-        /// Note: this uses r_type because RISC-V does not provide a good way
-        /// to do `>=` comparisons on immediates. Usually we would just subtract
-        /// 1 from the immediate and do a `>` comparison, however there is no `>`
-        /// register to immedate comparison in RISC-V. This leads us to need to
-        /// allocate a register for temporary use.
- cmp_imm_gte, - - /// Immediate `==`, uses i_type - cmp_imm_eq, - /// Immediate `!=`, uses i_type. - cmp_imm_neq, - /// Immediate `<=`, uses i_type - cmp_imm_lte, - /// Immediate `<`, uses i_type - cmp_imm_lt, - /// Branch if equal, Uses b_type beq, /// Branch if not equal, Uses b_type @@ -213,6 +183,20 @@ pub const Inst = struct { rd: Register, rs: Register, }, + + compare: struct { + rd: Register, + rs1: Register, + rs2: Register, + op: enum { + eq, + neq, + gt, + gte, + lt, + lte, + }, + }, }; pub const Ops = enum { @@ -291,6 +275,9 @@ pub const Inst = struct { pseudo_restore_regs, pseudo_spill_regs, + + pseudo_compare, + pseudo_not, }; // Make sure we don't accidentally make instructions bigger than expected. diff --git a/src/arch/riscv64/abi.zig b/src/arch/riscv64/abi.zig index d09baab761..98de968142 100644 --- a/src/arch/riscv64/abi.zig +++ b/src/arch/riscv64/abi.zig @@ -130,13 +130,16 @@ pub fn classifySystem(ty: Type, mod: *Module) [8]Class { unreachable; // support > 128 bit int arguments }, .ErrorUnion => { - const payload = ty.errorUnionPayload(mod); - const payload_bits = payload.bitSize(mod); - if (payload_bits <= 64) { - result[0] = .integer; - result[1] = .integer; - } - unreachable; // support > 64 bit error payloads + const payload_ty = ty.errorUnionPayload(mod); + const payload_bits = payload_ty.bitSize(mod); + + // the error union itself + result[0] = .integer; + + // anyerror!void can fit into one register + if (payload_bits == 0) return result; + + std.debug.panic("support ErrorUnion payload {}", .{payload_ty.fmt(mod)}); }, else => |bad_ty| std.debug.panic("classifySystem {s}", .{@tagName(bad_ty)}), } From d9e0cafe64dd7dc56fc2d46bc29c18630a108356 Mon Sep 17 00:00:00 2001 From: David Rubin Date: Sat, 13 Apr 2024 23:11:32 -0700 Subject: [PATCH 38/44] riscv: add stage2_riscv to test matrix and bypass failing tests --- lib/compiler/test_runner.zig | 6 +- lib/std/mem.zig | 2 + lib/std/testing.zig | 2 +- src/arch/riscv64/CodeGen.zig | 29 ++-- src/arch/riscv64/Encoding.zig | 13 +- src/arch/riscv64/Lower.zig | 20 ++- src/arch/riscv64/abi.zig | 4 + test/behavior/abs.zig | 11 +- test/behavior/align.zig | 2 + test/behavior/alignof.zig | 2 + test/behavior/array.zig | 145 ++++++------------ test/behavior/asm.zig | 4 + test/behavior/atomics.zig | 13 ++ test/behavior/basic.zig | 57 +++++++ test/behavior/bit_shifting.zig | 2 + test/behavior/bitcast.zig | 18 +++ test/behavior/bitreverse.zig | 4 + test/behavior/bool.zig | 2 + ...n_functions_returning_void_or_noreturn.zig | 2 +- test/behavior/byteswap.zig | 4 + test/behavior/byval_arg_var.zig | 1 + test/behavior/call.zig | 15 ++ test/behavior/call_tail.zig | 2 + test/behavior/cast.zig | 112 ++++++++++++++ test/behavior/cast_int.zig | 8 + test/behavior/comptime_memory.zig | 6 + test/behavior/const_slice_child.zig | 1 + test/behavior/decltest.zig | 2 + test/behavior/defer.zig | 10 ++ test/behavior/destructure.zig | 2 + test/behavior/duplicated_test_names.zig | 1 + test/behavior/empty_tuple_fields.zig | 2 + test/behavior/empty_union.zig | 4 + test/behavior/enum.zig | 23 +++ test/behavior/error.zig | 43 ++++++ test/behavior/eval.zig | 44 ++++++ test/behavior/export_builtin.zig | 2 + test/behavior/export_keyword.zig | 3 + test/behavior/extern.zig | 3 + test/behavior/field_parent_ptr.zig | 21 +++ test/behavior/floatop.zig | 78 ++++++++++ test/behavior/fn.zig | 33 ++++ test/behavior/fn_delegation.zig | 1 + test/behavior/for.zig | 25 +++ test/behavior/generics.zig | 24 +++ test/behavior/globals.zig | 4 + test/behavior/hasdecl.zig | 4 + 
test/behavior/if.zig | 10 ++ test/behavior/import.zig | 4 + test/behavior/import_c_keywords.zig | 1 + test/behavior/incomplete_struct_param_tld.zig | 1 + test/behavior/inline_switch.zig | 9 ++ test/behavior/int128.zig | 5 + test/behavior/int_comparison_elision.zig | 2 + test/behavior/int_div.zig | 2 + test/behavior/ir_block_deps.zig | 1 + test/behavior/lower_strlit_to_vector.zig | 1 + test/behavior/math.zig | 61 ++++++++ test/behavior/maximum_minimum.zig | 16 ++ test/behavior/member_func.zig | 2 + test/behavior/memcpy.zig | 4 + test/behavior/memset.zig | 9 ++ test/behavior/merge_error_sets.zig | 1 + test/behavior/muladd.zig | 10 ++ ...ultiple_externs_with_conflicting_types.zig | 1 + .../namespace_depends_on_compile_var.zig | 1 + test/behavior/nan.zig | 1 + test/behavior/null.zig | 8 + test/behavior/optional.zig | 26 ++++ test/behavior/packed-struct.zig | 33 ++++ test/behavior/packed-union.zig | 5 + .../packed_struct_explicit_backing_int.zig | 1 + test/behavior/pointers.zig | 21 +++ test/behavior/popcount.zig | 3 + test/behavior/prefetch.zig | 1 + test/behavior/ptrcast.zig | 4 + test/behavior/ptrfromint.zig | 5 + test/behavior/pub_enum.zig | 2 + ...ef_var_in_if_after_if_2nd_switch_prong.zig | 1 + test/behavior/reflection.zig | 1 + test/behavior/return_address.zig | 1 + test/behavior/saturating_arithmetic.zig | 7 + test/behavior/select.zig | 2 + test/behavior/shuffle.zig | 3 + test/behavior/sizeof_and_typeof.zig | 9 ++ test/behavior/slice.zig | 33 ++++ test/behavior/src.zig | 3 + test/behavior/string_literals.zig | 5 + test/behavior/struct.zig | 83 ++++++++++ .../struct_contains_null_ptr_itself.zig | 1 + .../struct_contains_slice_of_itself.zig | 2 + test/behavior/switch.zig | 43 ++++++ test/behavior/switch_on_captured_error.zig | 2 + test/behavior/switch_prong_err_enum.zig | 1 + test/behavior/switch_prong_implicit_cast.zig | 1 + test/behavior/this.zig | 4 + test/behavior/threadlocal.zig | 3 + test/behavior/truncate.zig | 1 + test/behavior/try.zig | 2 + test/behavior/tuple.zig | 22 +++ test/behavior/tuple_declarations.zig | 2 + test/behavior/type.zig | 4 + test/behavior/type_info.zig | 12 ++ test/behavior/typename.zig | 8 + test/behavior/undefined.zig | 5 + test/behavior/underscore.zig | 1 + test/behavior/union.zig | 72 +++++++++ test/behavior/union_with_members.zig | 1 + test/behavior/usingnamespace.zig | 13 ++ test/behavior/usingnamespace/file_1.zig | 3 + .../usingnamespace/import_segregation.zig | 1 + test/behavior/var_args.zig | 15 ++ test/behavior/vector.zig | 47 +++++- test/behavior/void.zig | 2 + test/behavior/wasm.zig | 2 + test/behavior/while.zig | 23 +++ test/behavior/widening.zig | 5 + test/behavior/wrapping_arithmetic.zig | 3 + .../compile_errors/capture_by_ref_while.zig | 2 +- .../switch_expression-missing_error_prong.zig | 2 +- ...switch_expression-multiple_else_prongs.zig | 4 +- test/cases/inherit_want_safety.zig | 2 +- test/tests.zig | 14 ++ 123 files changed, 1411 insertions(+), 129 deletions(-) diff --git a/lib/compiler/test_runner.zig b/lib/compiler/test_runner.zig index 08633e54ca..194e84b8ea 100644 --- a/lib/compiler/test_runner.zig +++ b/lib/compiler/test_runner.zig @@ -251,8 +251,6 @@ pub fn mainSimple() anyerror!void { } pub fn mainExtraSimple() !void { - var pass_count: u8 = 0; - var skip_count: u8 = 0; var fail_count: u8 = 0; for (builtin.test_functions) |test_fn| { @@ -261,11 +259,9 @@ pub fn mainExtraSimple() !void { fail_count += 1; continue; } - skip_count += 1; continue; }; - pass_count += 1; } - std.posix.exit(pass_count); + if (fail_count != 0) 
std.process.exit(1);
 }
diff --git a/lib/std/mem.zig b/lib/std/mem.zig
index fe54a0ce74..aff9532ecb 100644
--- a/lib/std/mem.zig
+++ b/lib/std/mem.zig
@@ -638,6 +638,8 @@ test lessThan {
 const backend_can_use_eql_bytes = switch (builtin.zig_backend) {
     // The SPIR-V backend does not support the optimized path yet.
     .stage2_spirv64 => false,
+    // The RISC-V backend does not support vectors yet.
+    .stage2_riscv64 => false,
     else => true,
 };
diff --git a/lib/std/testing.zig b/lib/std/testing.zig
index 2d97580a22..662351f153 100644
--- a/lib/std/testing.zig
+++ b/lib/std/testing.zig
@@ -22,7 +22,7 @@ pub var base_allocator_instance = std.heap.FixedBufferAllocator.init("");
 pub var log_level = std.log.Level.warn;
 
 // Disable printing in tests for simple backends.
-pub const backend_can_print = builtin.zig_backend != .stage2_spirv64;
+pub const backend_can_print = builtin.zig_backend != .stage2_spirv64 and builtin.zig_backend != .stage2_riscv64;
 
 fn print(comptime fmt: []const u8, args: anytype) void {
     if (@inComptime()) {
diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index f36613473b..d9f31b4a14 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -1448,9 +1448,18 @@ fn computeFrameLayout(self: *Self) !FrameLayout {
     // The total frame size is calculated by the amount of s registers you need to save * 8, as each
     // register is 8 bytes, the total allocation sizes, and 16 more register for the spilled ra and s0
     // register. Finally we align the frame size to the align of the base pointer.
+    const args_frame_size = frame_size[@intFromEnum(FrameIndex.args_frame)];
+    const spill_frame_size = frame_size[@intFromEnum(FrameIndex.spill_frame)];
+    const call_frame_size = frame_size[@intFromEnum(FrameIndex.call_frame)];
+
+    // TODO: this 64 should be a 16, but we were clobbering the top and bottom of the frame.
+    // maybe everything can go from the bottom?
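+    // For example, with hypothetical sizes total_alloc_size = 40, args_frame_size = 16,
+    // spill_frame_size = 24, call_frame_size = 0, and a 16-byte base pointer alignment,
+    // the expression below yields alignForward(40 + 64 + 16 + 24 + 0, 16) = 144 bytes.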
     const acc_frame_size: i32 = std.mem.alignForward(
         i32,
-        total_alloc_size + 16 + frame_size[@intFromEnum(FrameIndex.args_frame)] + frame_size[@intFromEnum(FrameIndex.spill_frame)],
+        total_alloc_size + 64 + args_frame_size + spill_frame_size + call_frame_size,
         @intCast(frame_align[@intFromEnum(FrameIndex.base_ptr)].toByteUnits().?),
     );
     log.debug("frame size: {}", .{acc_frame_size});
@@ -1771,8 +1777,8 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
     try self.register_manager.allocReg(inst, gp);
 
     _ = try self.addInst(.{
-        .tag = .not,
-        .ops = .rr,
+        .tag = .pseudo,
+        .ops = .pseudo_not,
         .data = .{ .rr = .{
             .rs = operand_reg,
@@ -1870,7 +1876,7 @@ fn binOp(
                     return self.fail("TODO binary operations on int with bits > 64", .{});
                 }
             },
-            else => |x| return std.debug.panic("TOOD: binOp {s}", .{@tagName(x)}),
+            else => |x| return self.fail("TODO: binOp {s}", .{@tagName(x)}),
         }
     },
@@ -1988,6 +1994,7 @@ fn binOpRegister(
         .cmp_gt,
         .cmp_gte,
         .cmp_lt,
+        .cmp_lte,
         => .pseudo,
 
         else => return self.fail("TODO: binOpRegister {s}", .{@tagName(tag)}),
@@ -2020,6 +2027,7 @@ fn binOpRegister(
         .cmp_gt,
         .cmp_gte,
         .cmp_lt,
+        .cmp_lte,
         => .pseudo_compare,
 
         else => unreachable,
     };
@@ -2773,10 +2781,11 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
         const dst_mcv = try self.allocRegOrMem(inst, false);
         _ = try self.addInst(.{
             .tag = .add,
-            .ops = .rr,
-            .data = .{ .rr = .{
+            .ops = .rrr,
+            .data = .{ .r_type = .{
                 .rd = addr_reg,
-                .rs = offset_reg,
+                .rs1 = offset_reg,
+                .rs2 = addr_reg,
             } },
         });
         try self.genCopy(elem_ty, dst_mcv, .{ .indirect = .{ .reg = addr_reg } });
@@ -3723,7 +3732,7 @@ fn genVarDbgInfo(
         .undef => .undef,
         .none => .none,
         else => blk: {
-            log.warn("TODO generate debug info for {}", .{mcv});
+            // log.warn("TODO generate debug info for {}", .{mcv});
             break :blk .nop;
         },
     };
@@ -4289,7 +4298,7 @@ fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) !void {
     if (!dst_mcv.isMutable()) {
         // panic so we can see the trace
-        return std.debug.panic("tried to genCopy immutable: {s}", .{@tagName(dst_mcv)});
+        return self.fail("tried to genCopy immutable: {s}", .{@tagName(dst_mcv)});
     }
 
     switch (dst_mcv) {
@@ -4344,7 +4353,7 @@ fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) !void {
                 part_disp += @intCast(dst_ty.abiSize(zcu));
             }
         },
-        else => return std.debug.panic("TODO: genCopy {s} with {s}", .{ @tagName(dst_mcv), @tagName(src_mcv) }),
+        else => return self.fail("TODO: genCopy to {s} from {s}", .{ @tagName(dst_mcv), @tagName(src_mcv) }),
     }
 }
diff --git a/src/arch/riscv64/Encoding.zig b/src/arch/riscv64/Encoding.zig
index d145e21603..c23ba10d9b 100644
--- a/src/arch/riscv64/Encoding.zig
+++ b/src/arch/riscv64/Encoding.zig
@@ -14,6 +14,8 @@ pub const Mnemonic = enum {
     sltu,
     xori,
     andi,
+    slli,
+    srli,
 
     addi,
     jalr,
@@ -35,6 +37,7 @@ pub const Mnemonic = enum {
     // R Type
     add,
+    sub,
     slt,
     mul,
     xor,
@@ -48,6 +51,7 @@ pub const Mnemonic = enum {
         return switch (mnem) {
             // zig fmt: off
             .add => .{ .opcode = 0b0110011, .funct3 = 0b000, .funct7 = 0b0000000 },
+            .sub => .{ .opcode = 0b0110011, .funct3 = 0b000, .funct7 = 0b0100000 },
 
             .ld => .{ .opcode = 0b0000011, .funct3 = 0b011, .funct7 = null },
             .lw => .{ .opcode = 0b0000011, .funct3 = 0b010, .funct7 = null },
@@ -63,6 +67,8 @@ pub const Mnemonic = enum {
             .andi => .{ .opcode = 0b0010011, .funct3 = 0b111, .funct7 = null },
             .xori => .{ .opcode = 0b0010011, .funct3 = 0b100, .funct7 = null },
             .jalr => .{ .opcode = 0b1100111, .funct3 = 0b000, .funct7 = null },
+            .slli => .{ .opcode = 0b0010011, .funct3 = 0b001, .funct7
= null }, + .srli => .{ .opcode = 0b0010011, .funct3 = 0b101, .funct7 = null }, .lui => .{ .opcode = 0b0110111, .funct3 = null, .funct7 = null }, @@ -103,9 +109,6 @@ pub const InstEnc = enum { pub fn fromMnemonic(mnem: Mnemonic) InstEnc { return switch (mnem) { - .add, - => .R, - .addi, .ld, .lw, @@ -118,6 +121,8 @@ pub const InstEnc = enum { .sltiu, .xori, .andi, + .slli, + .srli, => .I, .lui, @@ -139,6 +144,8 @@ pub const InstEnc = enum { .sltu, .mul, .xor, + .add, + .sub, => .R, .ecall, diff --git a/src/arch/riscv64/Lower.zig b/src/arch/riscv64/Lower.zig index 41bb5c6599..4b77f9cdee 100644 --- a/src/arch/riscv64/Lower.zig +++ b/src/arch/riscv64/Lower.zig @@ -221,7 +221,19 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { .{ .reg = rs2 }, }); }, - else => return lower.fail("TODO lower: pseudo_compare {s}", .{@tagName(op)}), + .lte => { + try lower.emit(.slt, &.{ + .{ .reg = rd }, + .{ .reg = rs2 }, + .{ .reg = rs1 }, + }); + + try lower.emit(.xori, &.{ + .{ .reg = rd }, + .{ .reg = rd }, + .{ .imm = Immediate.s(1) }, + }); + }, } }, @@ -258,6 +270,10 @@ fn generic(lower: *Lower, inst: Mir.Inst) Error!void { .{ .reg = inst.data.u_type.rd }, .{ .imm = inst.data.u_type.imm20 }, }, + .rr => &.{ + .{ .reg = inst.data.rr.rd }, + .{ .reg = inst.data.rr.rs }, + }, .rri => &.{ .{ .reg = inst.data.i_type.rd }, .{ .reg = inst.data.i_type.rs1 }, @@ -293,7 +309,7 @@ fn reloc(lower: *Lower, target: Reloc.Target) Immediate { } fn pushPopRegList(lower: *Lower, comptime spilling: bool, reg_list: Mir.RegisterList) !void { - var it = reg_list.iterator(.{ .direction = if (spilling) .forward else .reverse }); + var it = reg_list.iterator(.{ .direction = .forward }); var reg_i: u31 = 0; while (it.next()) |i| { diff --git a/src/arch/riscv64/abi.zig b/src/arch/riscv64/abi.zig index 98de968142..468fede917 100644 --- a/src/arch/riscv64/abi.zig +++ b/src/arch/riscv64/abi.zig @@ -96,6 +96,10 @@ pub fn classifyType(ty: Type, mod: *Module) Class { pub fn classifySystem(ty: Type, mod: *Module) [8]Class { var result = [1]Class{.none} ** 8; switch (ty.zigTypeTag(mod)) { + .Bool, .Void, .NoReturn => { + result[0] = .integer; + return result; + }, .Pointer => switch (ty.ptrSize(mod)) { .Slice => { result[0] = .integer; diff --git a/test/behavior/abs.zig b/test/behavior/abs.zig index 7fc3fbbb4a..21f02b2a3d 100644 --- a/test/behavior/abs.zig +++ b/test/behavior/abs.zig @@ -5,8 +5,8 @@ const expect = std.testing.expect; test "@abs integers" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try comptime testAbsIntegers(); try testAbsIntegers(); @@ -51,7 +51,6 @@ fn testAbsIntegers() !void { test "@abs unsigned integers" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO try comptime testAbsUnsignedIntegers(); @@ -92,9 +91,9 @@ fn testAbsUnsignedIntegers() !void { test "@abs floats" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return 
error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try comptime testAbsFloats(f16); try testAbsFloats(f16); @@ -151,8 +150,8 @@ test "@abs int vectors" { if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try comptime testAbsIntVectors(1); try testAbsIntVectors(1); @@ -218,8 +217,8 @@ test "@abs unsigned int vectors" { if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try comptime testAbsUnsignedIntVectors(1); try testAbsUnsignedIntVectors(1); @@ -277,9 +276,9 @@ test "@abs float vectors" { if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/12827 if (builtin.zig_backend == .stage2_llvm and diff --git a/test/behavior/align.zig b/test/behavior/align.zig index 6eddb15db7..ed650b3ef4 100644 --- a/test/behavior/align.zig +++ b/test/behavior/align.zig @@ -314,6 +314,7 @@ test "function alignment" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // function alignment is a compile error on wasm32/wasm64 if (native_arch == .wasm32 or native_arch == .wasm64) return error.SkipZigTest; @@ -389,6 +390,7 @@ test "function align expression depends on generic parameter" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // function alignment is a compile error on wasm32/wasm64 if (native_arch == .wasm32 or native_arch == .wasm64) return error.SkipZigTest; diff --git a/test/behavior/alignof.zig b/test/behavior/alignof.zig index e08a42cf19..a3e71a254f 100644 --- 
a/test/behavior/alignof.zig +++ b/test/behavior/alignof.zig @@ -29,6 +29,8 @@ test "comparison of @alignOf(T) against zero" { } test "correct alignment for elements and slices of aligned array" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + var buf: [1024]u8 align(64) = undefined; var start: usize = 1; var end: usize = undefined; diff --git a/test/behavior/array.zig b/test/behavior/array.zig index c4421e8e8b..2cb7cfee4a 100644 --- a/test/behavior/array.zig +++ b/test/behavior/array.zig @@ -7,7 +7,6 @@ const expect = testing.expect; const expectEqual = testing.expectEqual; test "array to slice" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO const a: u32 align(4) = 3; const b: u32 align(8) = 4; const a_slice: []align(1) const u32 = @as(*const [1]u32, &a)[0..]; @@ -20,11 +19,10 @@ test "array to slice" { } test "arrays" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var array: [5]u32 = undefined; @@ -50,10 +48,9 @@ fn getArrayLen(a: []const u32) usize { } test "array concat with undefined" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -75,11 +72,10 @@ test "array concat with undefined" { } test "array concat with tuple" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const array: [2]u8 = .{ 1, 2 }; { @@ -93,9 +89,8 @@ test "array concat with tuple" { } test "array init with concat" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const a = 'a'; var i: [4]u8 = [2]u8{ a, 'b' } ++ [2]u8{ 'c', 'd' }; @@ -103,10 +98,9 @@ test "array init with concat" { } test "array init with mult" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const a = 'a'; var i: [8]u8 = [2]u8{ a, 'b' } ** 4; @@ -117,7 +111,6 @@ test "array init with mult" { } test "array literal with explicit 
type" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -128,7 +121,6 @@ test "array literal with explicit type" { } test "array literal with inferred length" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO const hex_mult = [_]u16{ 4096, 256, 16, 1 }; try expect(hex_mult.len == 4); @@ -136,7 +128,6 @@ test "array literal with inferred length" { } test "array dot len const expr" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO try expect(comptime x: { break :x some_array.len == 4; }); @@ -148,7 +139,6 @@ const ArrayDotLenConstExpr = struct { const some_array = [_]u8{ 0, 1, 2, 3 }; test "array literal with specified size" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -160,7 +150,6 @@ test "array literal with specified size" { } test "array len field" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO var arr = [4]u8{ 0, 0, 0, 0 }; @@ -173,10 +162,9 @@ test "array len field" { } test "array with sentinels" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest(is_ct: bool) !void { @@ -204,7 +192,6 @@ test "array with sentinels" { } test "void arrays" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO var array: [4]void = undefined; array[0] = void{}; array[1] = array[2]; @@ -213,11 +200,10 @@ test "void arrays" { } test "nested arrays of strings" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const array_of_strings = [_][]const u8{ "hello", "this", "is", "my", "thing" }; for (array_of_strings, 0..) 
|s, i| { @@ -230,7 +216,6 @@ test "nested arrays of strings" { } test "nested arrays of integers" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -246,10 +231,9 @@ test "nested arrays of integers" { } test "implicit comptime in array type size" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var arr: [plusOne(10)]bool = undefined; _ = &arr; @@ -261,10 +245,9 @@ fn plusOne(x: u32) u32 { } test "single-item pointer to array indexing and slicing" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testSingleItemPtrArrayIndexSlice(); try comptime testSingleItemPtrArrayIndexSlice(); @@ -289,9 +272,8 @@ fn doSomeMangling(array: *[4]u8) void { } test "implicit cast zero sized array ptr to slice" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; { var b = "".*; @@ -306,7 +288,6 @@ test "implicit cast zero sized array ptr to slice" { } test "anonymous list literal syntax" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -329,10 +310,9 @@ var s_array: [8]Sub = undefined; const Sub = struct { b: u8 }; const Str = struct { a: []Sub }; test "set global var array via slice embedded in struct" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var s = Str{ .a = s_array[0..] 
}; @@ -346,11 +326,10 @@ test "set global var array via slice embedded in struct" { } test "read/write through global variable array of struct fields initialized via array mult" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -369,10 +348,9 @@ test "read/write through global variable array of struct fields initialized via } test "implicit cast single-item pointer" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testImplicitCastSingleItemPtr(); try comptime testImplicitCastSingleItemPtr(); @@ -390,7 +368,6 @@ fn testArrayByValAtComptime(b: [2]u8) u8 { } test "comptime evaluating function that takes array by value" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -402,10 +379,9 @@ test "comptime evaluating function that takes array by value" { } test "runtime initialize array elem and then implicit cast to slice" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var two: i32 = 2; _ = &two; @@ -414,10 +390,9 @@ test "runtime initialize array elem and then implicit cast to slice" { } test "array literal as argument to function" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn entry(two: i32) !void { @@ -443,11 +418,10 @@ test "array literal as argument to function" { } test "double nested array to const slice cast in array literal" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn entry(two: i32) !void { @@ -506,7 +480,6 @@ test "double nested array to const slice cast in array literal" { } test "anonymous literal in array" { - if 
(builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -533,10 +506,9 @@ test "anonymous literal in array" { } test "access the null element of a null terminated array" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -553,11 +525,10 @@ test "access the null element of a null terminated array" { } test "type deduction for array subscript expression" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -574,11 +545,10 @@ test "type deduction for array subscript expression" { } test "sentinel element count towards the ABI size calculation" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -600,8 +570,6 @@ test "sentinel element count towards the ABI size calculation" { } test "zero-sized array with recursive type definition" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; @@ -625,11 +593,10 @@ test "zero-sized array with recursive type definition" { } test "type coercion of anon struct literal to array" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const U = union { @@ -661,11 +628,10 @@ test "type coercion of anon struct literal to array" { } test "type coercion of pointer to anon struct literal to pointer to array" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == 
.stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const U = union { @@ -697,16 +663,12 @@ test "type coercion of pointer to anon struct literal to pointer to array" { } test "array with comptime-only element type" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO const a = [_]type{ u32, i32 }; try testing.expect(a[0] == u32); try testing.expect(a[1] == i32); } test "tuple to array handles sentinel" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -719,11 +681,10 @@ test "tuple to array handles sentinel" { } test "array init of container level array variable" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { var pair: [2]usize = .{ 1, 2 }; @@ -744,8 +705,8 @@ test "array init of container level array variable" { } test "runtime initialized sentinel-terminated array literal" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + var c: u16 = 300; _ = &c; const f = &[_:0x9999]u16{c}; @@ -755,11 +716,10 @@ test "runtime initialized sentinel-terminated array literal" { } test "array of array agregate init" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var a = [1]u32{11} ** 10; var b = [1][10]u32{a} ** 2; @@ -768,8 +728,6 @@ test "array of array agregate init" { } test "pointer to array has ptr field" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO const arr: *const [5]u32 = &.{ 10, 20, 30, 40, 50 }; try std.testing.expect(arr.ptr == @as([*]const u32, arr)); try std.testing.expect(arr.ptr[0] == 10); @@ -780,8 +738,8 @@ test "pointer to array has ptr field" { } test "discarded array init preserves result location" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO - if 
(builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + const S = struct { fn f(p: *u32) u16 { p.* += 1; @@ -800,8 +758,6 @@ test "discarded array init preserves result location" { } test "array init with no result location has result type" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO const x = .{ .foo = [2]u16{ @intCast(10), @intCast(20), @@ -813,11 +769,10 @@ test "array init with no result location has result type" { } test "slicing array of zero-sized values" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var arr: [32]u0 = undefined; for (arr[0..]) |*zero| @@ -827,8 +782,8 @@ test "slicing array of zero-sized values" { } test "array init with no result pointer sets field result types" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + const S = struct { // A function parameter has a result type, but no result pointer. fn f(arr: [1]u32) u32 { @@ -843,8 +798,8 @@ test "array init with no result pointer sets field result types" { } test "runtime side-effects in comptime-known array init" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + var side_effects: u4 = 0; const init = [4]u4{ blk: { @@ -869,8 +824,8 @@ test "runtime side-effects in comptime-known array init" { } test "slice initialized through reference to anonymous array init provides result types" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + var my_u32: u32 = 123; var my_u64: u64 = 456; _ = .{ &my_u32, &my_u64 }; @@ -884,6 +839,8 @@ test "slice initialized through reference to anonymous array init provides resul } test "sentinel-terminated slice initialized through reference to anonymous array init provides result types" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + var my_u32: u32 = 123; var my_u64: u64 = 456; _ = .{ &my_u32, &my_u64 }; @@ -897,6 +854,8 @@ test "sentinel-terminated slice initialized through reference to anonymous array } test "many-item pointer initialized through reference to anonymous array init provides result types" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + var my_u32: u32 = 123; var my_u64: u64 = 456; _ = .{ &my_u32, &my_u64 }; @@ -913,6 +872,8 @@ test "many-item pointer initialized through reference to anonymous array init pr } test "many-item sentinel-terminated pointer initialized through reference to anonymous array init provides result types" { + if (builtin.zig_backend == .stage2_riscv64) return 
error.SkipZigTest; + var my_u32: u32 = 123; var my_u64: u64 = 456; _ = .{ &my_u32, &my_u64 }; @@ -930,8 +891,8 @@ test "many-item sentinel-terminated pointer initialized through reference to ano } test "pointer to array initialized through reference to anonymous array init provides result types" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + var my_u32: u32 = 123; var my_u64: u64 = 456; _ = .{ &my_u32, &my_u64 }; @@ -945,6 +906,8 @@ test "pointer to array initialized through reference to anonymous array init pro } test "pointer to sentinel-terminated array initialized through reference to anonymous array init provides result types" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + var my_u32: u32 = 123; var my_u64: u64 = 456; _ = .{ &my_u32, &my_u64 }; @@ -958,8 +921,6 @@ test "pointer to sentinel-terminated array initialized through reference to anon } test "tuple initialized through reference to anonymous array init provides result types" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO const Tuple = struct { u64, *const u32 }; const foo: *const Tuple = &.{ @intCast(12345), @@ -970,11 +931,10 @@ test "tuple initialized through reference to anonymous array init provides resul } test "copied array element doesn't alias source" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var x: [10][10]u32 = undefined; @@ -986,9 +946,8 @@ test "copied array element doesn't alias source" { } test "array initialized with string literal" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { a: u32, @@ -1008,9 +967,8 @@ test "array initialized with string literal" { } test "array initialized with array with sentinel" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { a: u32, @@ -1030,8 +988,6 @@ test "array initialized with array with sentinel" { } test "store array of array of structs at comptime" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -1057,11 +1013,10 @@ test "store array of 
array of structs at comptime" { } test "accessing multidimensional global array at comptime" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const array = [_][]const []const u8{ @@ -1075,11 +1030,10 @@ test "accessing multidimensional global array at comptime" { } test "union that needs padding bytes inside an array" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const B = union(enum) { D: u8, @@ -1101,6 +1055,7 @@ test "union that needs padding bytes inside an array" { test "runtime index of array of zero-bit values" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var runtime: struct { array: [1]void, index: usize } = undefined; runtime = .{ .array = .{{}}, .index = 0 }; diff --git a/test/behavior/asm.zig b/test/behavior/asm.zig index acb17ea004..e82242f425 100644 --- a/test/behavior/asm.zig +++ b/test/behavior/asm.zig @@ -46,6 +46,7 @@ test "output constraint modifiers" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) return error.SkipZigTest; // MSVC doesn't support inline assembly @@ -69,6 +70,7 @@ test "alternative constraints" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) return error.SkipZigTest; // MSVC doesn't support inline assembly @@ -87,6 +89,7 @@ test "sized integer/float in asm input" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) return error.SkipZigTest; // MSVC doesn't support inline assembly @@ -137,6 +140,7 @@ test "struct/array/union types as input values" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == 
.stage2_riscv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) return error.SkipZigTest; // MSVC doesn't support inline assembly
diff --git a/test/behavior/atomics.zig b/test/behavior/atomics.zig
index d3f19d396e..830c8a951d 100644
--- a/test/behavior/atomics.zig
+++ b/test/behavior/atomics.zig
@@ -15,6 +15,7 @@ test "cmpxchg" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try testCmpxchg();
     try comptime testCmpxchg();
@@ -41,6 +42,7 @@ test "fence" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var x: i32 = 1234;
     @fence(.seq_cst);
@@ -52,6 +54,7 @@ test "atomicrmw and atomicload" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var data: u8 = 200;
     try testAtomicRmw(&data);
@@ -80,6 +83,7 @@ test "cmpxchg with ptr" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var data1: i32 = 1234;
     var data2: i32 = 5678;
@@ -105,6 +109,7 @@ test "cmpxchg with ignored result" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var x: i32 = 1234;
 
@@ -149,6 +154,7 @@ test "cmpxchg on a global variable" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64) {
         // https://github.com/ziglang/zig/issues/10627
@@ -164,6 +170,7 @@ test "atomic load and rmw with enum" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const Value = enum(u8) { a, b, c };
     var x = Value.a;
@@ -181,6 +188,7 @@ test "atomic store" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var x: u32 = 0;
     @atomicStore(u32, &x, 1, .seq_cst);
@@ -194,6 +202,7 @@ test "atomic store comptime" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try comptime testAtomicStore();
     try testAtomicStore();
@@ -212,6 +221,7 @@ test "atomicrmw with floats" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64) {
         // https://github.com/ziglang/zig/issues/10627
@@ -241,6 +251,7 @@ test "atomicrmw with ints" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch.isMIPS()) {
         // https://github.com/ziglang/zig/issues/16846
@@ -390,6 +401,7 @@ test "atomics with different types" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try testAtomicsWithType(bool, true, false);
 
@@ -419,6 +431,7 @@ test "return @atomicStore, using it as a void value" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         const A = struct {
diff --git a/test/behavior/basic.zig b/test/behavior/basic.zig
index c9abaf9c8b..883540d31c 100644
--- a/test/behavior/basic.zig
+++ b/test/behavior/basic.zig
@@ -16,6 +16,8 @@ test "empty function with comments" {
 }
 
 test "truncate" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     try expect(testTruncate(0x10fd) == 0xfd);
     comptime assert(testTruncate(0x10fd) == 0xfd);
 }
@@ -25,6 +27,7 @@ fn testTruncate(x: u32) u8 {
 
 test "truncate to non-power-of-two integers" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try testTrunc(u32, u1, 0b10101, 0b1);
     try testTrunc(u32, u1, 0b10110, 0b0);
@@ -42,6 +45,7 @@ test "truncate to non-power-of-two integers from 128-bit" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try testTrunc(u128, u1, 0xffffffff_ffffffff_ffffffff_01010101, 0x01);
     try testTrunc(u128, u1, 0xffffffff_ffffffff_ffffffff_01010110, 0x00);
@@ -63,6 +67,7 @@ var g2: i32 = 0;
 
 test "global variables" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try expect(g2 == 0);
     g2 = g1;
@@ -81,6 +86,8 @@ test "type equality" { } test "pointer dereferencing" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + var x = @as(i32, 3); const y = &x; @@ -132,18 +139,21 @@ fn first4KeysOfHomeRow() []const u8 { test "return string from function" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect(mem.eql(u8, first4KeysOfHomeRow(), "aoeu")); } test "hex escape" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect(mem.eql(u8, "\x68\x65\x6c\x6c\x6f", "hello")); } test "multiline string" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const s1 = \\one @@ -156,6 +166,7 @@ test "multiline string" { test "multiline string comments at start" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const s1 = //\\one @@ -168,6 +179,7 @@ test "multiline string comments at start" { test "multiline string comments at end" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const s1 = \\one @@ -180,6 +192,7 @@ test "multiline string comments at end" { test "multiline string comments in middle" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const s1 = \\one @@ -192,6 +205,7 @@ test "multiline string comments in middle" { test "multiline string comments at multiple places" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const s1 = \\one @@ -205,11 +219,14 @@ test "multiline string comments at multiple places" { } test "string concatenation simple" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + try expect(mem.eql(u8, "OK" ++ " IT " ++ "WORKED", "OK IT WORKED")); } test "array mult operator" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect(mem.eql(u8, "ab" ** 5, "ababababab")); } @@ -220,6 +237,7 @@ const OpaqueB = opaque {}; test "opaque types" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect(*OpaqueA != *OpaqueB); @@ -291,6 +309,8 @@ test "function closes over local const" { } test "volatile load and store" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + var number: i32 = 1234; const ptr = @as(*volatile i32, &number); ptr.* += 1; @@ -307,6 +327,7 @@ fn fB() []const u8 { test "call function pointer in struct" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect(mem.eql(u8, f3(true), "a")); try expect(mem.eql(u8, f3(false), "b")); @@ -330,6 +351,7 @@ const FnPtrWrapper = struct { test "const ptr from var variable" { if 
(builtin.zig_backend == .stage2_arm) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var x: u64 = undefined; var y: u64 = undefined; @@ -349,6 +371,7 @@ test "call result of if else expression" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect(mem.eql(u8, f2(true), "a")); try expect(mem.eql(u8, f2(false), "b")); @@ -371,6 +394,7 @@ test "take address of parameter" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testTakeAddressOfParameter(12.34); } @@ -395,6 +419,7 @@ test "array 2D const double ptr" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const rect_2d_vertexes = [_][1]f32{ [_]f32{1.0}, @@ -407,6 +432,7 @@ test "array 2D const double ptr with offset" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const rect_2d_vertexes = [_][2]f32{ [_]f32{ 3.0, 4.239 }, @@ -419,6 +445,7 @@ test "array 3D const double ptr with offset" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const rect_3d_vertexes = [_][2][2]f32{ [_][2]f32{ @@ -453,6 +480,7 @@ fn nine() u8 { test "struct inside function" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testStructInFn(); try comptime testStructInFn(); @@ -474,6 +502,7 @@ fn testStructInFn() !void { test "fn call returning scalar optional in equality expression" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect(getNull() == null); } @@ -484,6 +513,7 @@ fn getNull() ?*i32 { test "global variable assignment with optional unwrapping with var initialized to undefined" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { var data: i32 = 1234; @@ -502,6 +532,7 @@ var global_foo: *i32 = undefined; test "peer result location with typed parent, runtime condition, comptime prongs" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == 
.stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest(arg: i32) i32 { @@ -581,6 +612,7 @@ test "equality compare fn ptrs" { test "self reference through fn ptr field" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const A = struct { @@ -613,6 +645,7 @@ var global_ptr = &gdt[0]; test "global constant is loaded with a runtime-known index" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -631,6 +664,7 @@ test "global constant is loaded with a runtime-known index" { test "multiline string literal is null terminated" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const s1 = \\one @@ -645,6 +679,7 @@ test "string escapes" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expectEqualStrings("\"", "\x22"); try expectEqualStrings("\'", "\x27"); @@ -674,6 +709,7 @@ test "string concatenation" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const a = "OK" ++ " IT " ++ "WORKED"; const b = "OK IT WORKED"; @@ -697,6 +733,7 @@ test "result location is optional inside error union" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const x = maybe(true) catch unreachable; try expect(x.? 
== 42); @@ -712,6 +749,7 @@ fn maybe(x: bool) anyerror!?u32 { test "auto created variables have correct alignment" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn foo(str: [*]const u8) u32 { @@ -733,6 +771,7 @@ test "extern variable with non-pointer opaque type" { if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; @export(var_to_export, .{ .name = "opaque_extern_var" }); try expect(@as(*align(1) u32, @ptrCast(&opaque_extern_var)).* == 42); @@ -776,6 +815,7 @@ test "discarding the result of various expressions" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn foo() !u32 { @@ -817,6 +857,7 @@ test "labeled block implicitly ends in a break" { test "catch in block has correct result location" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn open() error{A}!@This() { @@ -848,6 +889,7 @@ test "labeled block with runtime branch forwards its result location type to bre test "try in labeled block doesn't cast to wrong type" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { a: u32, @@ -874,6 +916,7 @@ test "weird array and tuple initializations" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const E = enum { a, b }; const S = struct { e: E }; @@ -992,6 +1035,7 @@ comptime { test "switch inside @as gets correct type" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var a: u32 = 0; _ = &a; @@ -1058,6 +1102,7 @@ test "returning an opaque type from a function" { test "orelse coercion as function argument" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const Loc = struct { start: i32 = -1 }; const Container = struct { @@ -1075,6 +1120,8 @@ test "orelse coercion as function argument" { } test "runtime-known globals initialized with undefined" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + const S = struct { var array: [10]u32 = [_]u32{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 }; var vp: [*]u32 = undefined; @@ -1095,6 +1142,7 @@ test "arrays and vectors with big integers" { if 
(builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO: only aarch64-windows didn't pass in the PR that added this code. // figure out why if you can run this target. @@ -1119,6 +1167,8 @@ test "pointer to struct literal with runtime field is constant" { } test "integer compare" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + const S = struct { fn doTheTestSigned(comptime T: type) !void { var z: T = 0; @@ -1170,6 +1220,7 @@ test "integer compare" { test "reference to inferred local variable works as expected" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const Crasher = struct { lets_crash: u64 = 0, @@ -1215,6 +1266,8 @@ test "pointer to tuple field can be dereferenced at comptime" { } test "proper value is returned from labeled block" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + const S = struct { fn hash(v: *u32, key: anytype) void { const Key = @TypeOf(key); @@ -1281,6 +1334,7 @@ test "break out of block based on comptime known values" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const source = "A-"; @@ -1317,6 +1371,7 @@ test "allocation and looping over 3-byte integer" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_llvm and builtin.os.tag == .macos) { return error.SkipZigTest; // TODO @@ -1350,6 +1405,8 @@ test "allocation and looping over 3-byte integer" { } test "loading array from struct is not optimized away" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + const S = struct { arr: [1]u32 = .{0}, fn doTheTest(self: *@This()) !void { diff --git a/test/behavior/bit_shifting.zig b/test/behavior/bit_shifting.zig index 216d9eeaa5..9d203dcfe3 100644 --- a/test/behavior/bit_shifting.zig +++ b/test/behavior/bit_shifting.zig @@ -65,6 +65,7 @@ test "sharded table" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // realistic 16-way sharding try testShardedTable(u32, 4, 8); @@ -116,6 +117,7 @@ test "Saturating Shift Left where lhs is of a computed type" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn getIntShiftType(comptime T: type) type { diff --git a/test/behavior/bitcast.zig b/test/behavior/bitcast.zig index a274553665..2c7e069b02 100644 
--- a/test/behavior/bitcast.zig
+++ b/test/behavior/bitcast.zig
@@ -10,6 +10,7 @@ const native_endian = builtin.target.cpu.arch.endian();
 
 test "@bitCast iX -> uX (32, 64)" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const bit_values = [_]usize{ 32, 64 };
@@ -24,6 +25,7 @@ test "@bitCast iX -> uX (8, 16, 128)" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const bit_values = [_]usize{ 8, 16, 128 };
@@ -39,6 +41,7 @@ test "@bitCast iX -> uX exotic integers" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const bit_values = [_]usize{ 1, 48, 27, 512, 493, 293, 125, 204, 112 };
@@ -83,6 +86,7 @@ test "bitcast uX to bytes" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const bit_values = [_]usize{ 1, 48, 27, 512, 493, 293, 125, 204, 112 };
     inline for (bit_values) |bits| {
@@ -161,6 +165,7 @@ test "@bitCast packed structs at runtime and comptime" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const Full = packed struct {
         number: u16,
@@ -187,6 +192,7 @@ test "@bitCast packed structs at runtime and comptime" {
 
 test "@bitCast extern structs at runtime and comptime" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const Full = extern struct {
         number: u16,
@@ -221,6 +227,7 @@ test "bitcast packed struct to integer and back" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const LevelUpMove = packed struct {
         move_id: u9,
@@ -243,6 +250,7 @@ test "bitcast packed struct to integer and back" {
 
 test "implicit cast to error union by returning" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn entry() !void {
@@ -272,6 +280,8 @@ test "comptime bitcast used in expression has the correct type" {
 }
 
 test "bitcast passed as tuple element" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     const S = struct {
         fn foo(args: anytype) !void {
             comptime assert(@TypeOf(args[0]) == f32);
@@ -282,6 +292,8 @@ test "triple level result location with bitcast sandwich passed as tuple element" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     const S = struct {
         fn foo(args: anytype) !void {
             comptime assert(@TypeOf(args[0]) == f64);
@@ -299,6 +311,7 @@ test "@bitCast packed struct of floats" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const Foo = packed struct {
         a: f16 = 0,
@@ -337,6 +350,7 @@ test "comptime @bitCast packed struct to int and back" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     if (builtin.zig_backend == .stage2_llvm and native_endian == .big) {
         // https://github.com/ziglang/zig/issues/13782
@@ -401,6 +415,7 @@ test "bitcast vector to integer and back" {
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const arr: [16]bool = [_]bool{ true, false } ++ [_]bool{true} ** 14;
     var x: @Vector(16, bool) = @splat(true);
@@ -426,6 +441,7 @@ test "bitcast nan float does not modify signaling bit" {
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     // TODO: https://github.com/ziglang/zig/issues/14366
     if (builtin.zig_backend == .stage2_llvm and comptime builtin.cpu.arch.isArmOrThumb()) return error.SkipZigTest;
@@ -482,6 +498,7 @@ test "@bitCast of packed struct of bools all true" {
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const P = packed struct {
         b0: bool,
@@ -503,6 +520,7 @@ test "@bitCast of packed struct of bools all false" {
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const P = packed struct {
         b0: bool,
diff --git a/test/behavior/bitreverse.zig b/test/behavior/bitreverse.zig
index 259e6b29ec..55a3d580ff 100644
--- a/test/behavior/bitreverse.zig
+++ b/test/behavior/bitreverse.zig
@@ -16,6 +16,7 @@ test "@bitReverse" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try comptime testBitReverse();
     try testBitReverse();
@@ -121,6 +122,7 @@ test "bitReverse vectors u8" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try comptime vector8();
     try vector8();
@@ -141,6 +143,7 @@ test "bitReverse vectors u16" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try comptime vector16();
     try vector16();
@@ -161,6 +164,7 @@ test "bitReverse vectors u24" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try comptime vector24();
     try vector24();
diff --git a/test/behavior/bool.zig b/test/behavior/bool.zig
index 608fb20ca7..72c1dff336 100644
--- a/test/behavior/bool.zig
+++ b/test/behavior/bool.zig
@@ -9,6 +9,8 @@ test "bool literals" {
 }
 
 test "cast bool to int" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     const t = true;
     const f = false;
     try expectEqual(@as(u32, 1), @intFromBool(t));
diff --git a/test/behavior/builtin_functions_returning_void_or_noreturn.zig b/test/behavior/builtin_functions_returning_void_or_noreturn.zig
index 7b8f23c262..712a24b950 100644
--- a/test/behavior/builtin_functions_returning_void_or_noreturn.zig
+++ b/test/behavior/builtin_functions_returning_void_or_noreturn.zig
@@ -10,8 +10,8 @@ test {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var val: u8 = undefined;
     try testing.expectEqual({}, @atomicStore(u8, &val, 0, .unordered));
diff --git a/test/behavior/byteswap.zig b/test/behavior/byteswap.zig
index 182948416c..fd7e2af850 100644
--- a/test/behavior/byteswap.zig
+++ b/test/behavior/byteswap.zig
@@ -3,6 +3,8 @@ const builtin = @import("builtin");
 const expect = std.testing.expect;
 
 test "@byteSwap integers" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     if (builtin.zig_backend == .stage2_wasm) {
         // TODO: Remove when self-hosted wasm supports more types for byteswap
         const ByteSwapIntTest = struct {
@@ -118,6 +120,7 @@ test "@byteSwap vectors u16" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try comptime vector16();
     try vector16();
@@ -138,6 +141,7 @@ test "@byteSwap vectors u24" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try comptime vector24();
     try vector24();
diff --git a/test/behavior/byval_arg_var.zig b/test/behavior/byval_arg_var.zig
index 01b5f90ef7..ed0fde991f 100644
--- a/test/behavior/byval_arg_var.zig
+++ b/test/behavior/byval_arg_var.zig
@@ -5,6 +5,7 @@ var result: []const u8 = "wrong";
 
 test "pass string literal byvalue to a generic var param" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     start();
     blowUpStack(10);
diff --git a/test/behavior/call.zig b/test/behavior/call.zig
index c1ae3b66c6..7d7325721c 100644
--- a/test/behavior/call.zig
+++ b/test/behavior/call.zig
@@ -60,6 +60,7 @@ test "tuple parameters" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const add = struct {
         fn add(a: i32, b: i32) i32 {
@@ -93,6 +94,7 @@ test "result location of function call argument through runtime condition and st
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const E = enum { a, b };
     const S = struct {
@@ -112,6 +114,7 @@ test "result location of function call argument through runtime condition and st
 
 test "function call with 40 arguments" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest(thirty_nine: i32) !void {
@@ -271,6 +274,7 @@ test "forced tail call" {
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     if (builtin.zig_backend == .stage2_llvm) {
         // Only attempt this test on targets we know have tail call support in LLVM.
@@ -306,6 +310,7 @@ test "inline call preserves tail call" {
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     if (builtin.zig_backend == .stage2_llvm) {
         // Only attempt this test on targets we know have tail call support in LLVM.
@@ -339,6 +344,7 @@ test "inline call preserves tail call" { test "inline call doesn't re-evaluate non generic struct" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn foo(f: struct { a: u8, b: u8 }) !void { @@ -405,6 +411,7 @@ test "recursive inline call with comptime known argument" { test "inline while with @call" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn inc(a: *u32) void { @@ -420,6 +427,8 @@ test "inline while with @call" { } test "method call as parameter type" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + const S = struct { fn foo(x: anytype, y: @TypeOf(x).Inner()) @TypeOf(y) { return y; @@ -437,6 +446,7 @@ test "non-anytype generic parameters provide result type" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn f(comptime T: type, y: T) !void { @@ -467,6 +477,7 @@ test "argument to generic function has correct result type" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn foo(_: anytype, e: enum { a, b }) bool { @@ -502,6 +513,8 @@ test "call inline fn through pointer" { } test "call coerced function" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + const T = struct { x: f64, const T = @This(); @@ -559,6 +572,8 @@ test "call function pointer in comptime field" { } test "generic function pointer can be called" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + const S = struct { var ok = false; fn foo(x: anytype) void { diff --git a/test/behavior/call_tail.zig b/test/behavior/call_tail.zig index 24aab2a01e..3cb858a10b 100644 --- a/test/behavior/call_tail.zig +++ b/test/behavior/call_tail.zig @@ -31,6 +31,8 @@ noinline fn insertionSort(data: []u64) void { } test "arguments pointed to on stack into tailcall" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + switch (builtin.cpu.arch) { .wasm32, .mips, diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig index 2f47155cc4..1c4041f33d 100644 --- a/test/behavior/cast.zig +++ b/test/behavior/cast.zig @@ -24,6 +24,7 @@ test "peer type resolution: ?T and T" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect(peerTypeTAndOptionalT(true, false).? == 0); try expect(peerTypeTAndOptionalT(false, false).? 
== 3); @@ -56,6 +57,8 @@ test "@intCast to comptime_int" { } test "implicit cast comptime numbers to any type when the value fits" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + const a: u64 = 255; var b: u8 = a; _ = &b; @@ -103,6 +106,7 @@ test "@floatFromInt" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -126,6 +130,7 @@ test "@floatFromInt(f80)" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArmOrThumb()) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest(comptime Int: type) !void { @@ -160,6 +165,7 @@ test "@intFromFloat" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testIntFromFloats(); try comptime testIntFromFloats(); @@ -182,6 +188,7 @@ fn expectIntFromFloat(comptime F: type, f: F, comptime I: type, i: I) !void { test "implicitly cast indirect pointer to maybe-indirect pointer" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const Self = @This(); @@ -242,6 +249,7 @@ test "coerce undefined to optional" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect(MakeType(void).getNull() == null); try expect(MakeType(void).getNonNull() != null); @@ -262,6 +270,7 @@ fn MakeType(comptime T: type) type { test "implicit cast from *[N]T to [*c]T" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var x: [4]u16 = [4]u16{ 0, 1, 2, 3 }; var y: [*c]u16 = &x; @@ -299,6 +308,7 @@ test "peer result null and comptime_int" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn blah(n: i32) ?i32 { @@ -323,6 +333,7 @@ test "peer result null and comptime_int" { test "*const ?[*]const T to [*c]const [*c]const T" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var array = [_]u8{ 'o', 'k' }; const 
opt_array_ptr: ?[*]const u8 = &array; @@ -336,6 +347,7 @@ test "array coercion to undefined at runtime" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; @setRuntimeSafety(true); @@ -366,6 +378,7 @@ test "return u8 coercing into ?u32 return type" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -387,6 +400,7 @@ test "cast from ?[*]T to ??[*]T" { test "peer type unsigned int to signed" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var w: u31 = 5; var x: u8 = 7; @@ -400,6 +414,7 @@ test "peer type unsigned int to signed" { test "expected [*c]const u8, found [*:0]const u8" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var a: [*:0]const u8 = "hello"; _ = &a; @@ -413,6 +428,7 @@ test "explicit cast from integer to error type" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testCastIntToErr(error.ItBroke); try comptime testCastIntToErr(error.ItBroke); @@ -427,6 +443,7 @@ test "peer resolve array and const slice" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testPeerResolveArrayConstSlice(true); try comptime testPeerResolveArrayConstSlice(true); @@ -442,6 +459,7 @@ test "implicitly cast from T to anyerror!?T" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try castToOptionalTypeError(1); try comptime castToOptionalTypeError(1); @@ -467,6 +485,7 @@ fn castToOptionalTypeError(z: i32) !void { test "implicitly cast from [0]T to anyerror![]T" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testCastZeroArrayToErrSliceMut(); try comptime testCastZeroArrayToErrSliceMut(); @@ -484,6 +503,7 @@ test "peer type resolution: [0]u8, []const u8, and anyerror![]u8" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == 
.stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() anyerror!void { @@ -515,6 +535,7 @@ fn peerTypeEmptyArrayAndSliceAndError(a: bool, slice: []u8) anyerror![]u8 { test "implicit cast from *const [N]T to []const T" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testCastConstArrayRefToConstSlice(); try comptime testCastConstArrayRefToConstSlice(); @@ -540,6 +561,7 @@ fn testCastConstArrayRefToConstSlice() !void { test "peer type resolution: error and [N]T" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect(mem.eql(u8, try testPeerErrorAndArray(0), "OK")); comptime assert(mem.eql(u8, try testPeerErrorAndArray(0), "OK")); @@ -564,6 +586,7 @@ fn testPeerErrorAndArray2(x: u8) anyerror![]const u8 { test "single-item pointer of array to slice to unknown length pointer" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testCastPtrOfArrayToSliceAndPtr(); try comptime testCastPtrOfArrayToSliceAndPtr(); @@ -593,6 +616,7 @@ fn testCastPtrOfArrayToSliceAndPtr() !void { test "cast *[1][*]const u8 to [*]const ?[*]const u8" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const window_name = [1][*]const u8{"window name"}; const x: [*]const ?[*]const u8 = &window_name; @@ -605,6 +629,7 @@ test "@intCast on vector" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -639,6 +664,7 @@ test "@floatCast cast down" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; { var double: f64 = 0.001534; @@ -656,6 +682,7 @@ test "@floatCast cast down" { test "peer type resolution: unreachable, error set, unreachable" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const Error = error{ FileDescriptorAlreadyPresentInSet, @@ -691,6 +718,7 @@ test "peer type resolution: error set supersets" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const a: error{ One, 
Two } = undefined; const b: error{One} = undefined; @@ -720,6 +748,7 @@ test "peer type resolution: disjoint error sets" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const a: error{ One, Two } = undefined; const b: error{Three} = undefined; @@ -750,6 +779,7 @@ test "peer type resolution: error union and error set" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const a: error{Three} = undefined; const b: error{ One, Two }!u32 = undefined; @@ -783,6 +813,7 @@ test "peer type resolution: error union after non-error" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const a: u32 = undefined; const b: error{ One, Two }!u32 = undefined; @@ -816,6 +847,7 @@ test "peer cast *[0]T to E![]const T" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var buffer: [5]u8 = "abcde".*; const buf: anyerror![]const u8 = buffer[0..]; @@ -831,6 +863,7 @@ test "peer cast *[0]T to []const T" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var buffer: [5]u8 = "abcde".*; const buf: []const u8 = buffer[0..]; @@ -854,6 +887,7 @@ test "peer resolution of string literals" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const E = enum { a, b, c, d }; @@ -875,6 +909,7 @@ test "peer resolution of string literals" { test "peer cast [:x]T to []T" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -891,6 +926,7 @@ test "peer cast [:x]T to []T" { test "peer cast [N:x]T to [N]T" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -907,6 +943,7 @@ test "peer cast [N:x]T to [N]T" { test "peer cast *[N:x]T to *[N]T" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == 
.stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -922,6 +959,7 @@ test "peer cast *[N:x]T to *[N]T" { test "peer cast [*:x]T to [*]T" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -942,6 +980,7 @@ test "peer cast [:x]T to [*:x]T" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -962,6 +1001,7 @@ test "peer cast [:x]T to [*:x]T" { test "peer type resolution implicit cast to return type" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -982,6 +1022,7 @@ test "peer type resolution implicit cast to return type" { test "peer type resolution implicit cast to variable type" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -1007,6 +1048,7 @@ test "variable initialization uses result locations properly with regards to the test "cast between C pointer with different but compatible types" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn foo(arg: [*]c_ushort) u16 { @@ -1024,6 +1066,7 @@ test "cast between C pointer with different but compatible types" { test "peer type resolve string lit with sentinel-terminated mutable slice" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var array: [4:0]u8 = undefined; array[4] = 0; // TODO remove this when #4372 is solved @@ -1090,6 +1133,7 @@ test "implicit cast from [*]T to ?*anyopaque" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var a = [_]u8{ 3, 2, 1 }; var runtime_zero: usize = 0; @@ -1120,6 +1164,7 @@ fn foobar(func: PFN_void) !void { test "cast function with an opaque parameter" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) { // https://github.com/ziglang/zig/issues/16845 @@ -1152,6 +1197,7 @@ test "implicit ptr to *anyopaque" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return 
error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var a: u32 = 1; const ptr: *align(@alignOf(u32)) anyopaque = &a; @@ -1165,6 +1211,7 @@ test "implicit ptr to *anyopaque" { test "return null from fn () anyerror!?&T" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const a = returnNullFromOptionalTypeErrorRef(); const b = returnNullLitFromOptionalTypeErrorRef(); @@ -1181,6 +1228,7 @@ fn returnNullLitFromOptionalTypeErrorRef() anyerror!?*A { test "peer type resolution: [0]u8 and []const u8" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect(peerTypeEmptyArrayAndSlice(true, "hi").len == 0); try expect(peerTypeEmptyArrayAndSlice(false, "hi").len == 1); @@ -1201,6 +1249,7 @@ test "implicitly cast from [N]T to ?[]const T" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect(mem.eql(u8, castToOptionalSlice().?, "hi")); comptime assert(mem.eql(u8, castToOptionalSlice().?, "hi")); @@ -1215,6 +1264,7 @@ test "cast u128 to f128 and back" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try comptime testCast128(); try testCast128(); @@ -1236,6 +1286,7 @@ test "implicit cast from *[N]T to ?[*]T" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var x: ?[*]u16 = null; var y: [4]u16 = [4]u16{ 0, 1, 2, 3 }; @@ -1251,6 +1302,7 @@ test "implicit cast from *T to ?*anyopaque" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var a: u8 = 1; incrementVoidPtrValue(&a); @@ -1264,6 +1316,7 @@ fn incrementVoidPtrValue(value: ?*anyopaque) void { test "implicit cast *[0]T to E![]const u8" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var x = @as(anyerror![]const u8, &[0]u8{}); _ = &x; @@ -1285,6 +1338,7 @@ test "*const [N]null u8 to ?[]const u8" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == 
 
     const S = struct {
         fn doTheTest() !void {
@@ -1321,6 +1375,7 @@ test "assignment to optional pointer result loc" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var foo: struct { ptr: ?*anyopaque } = .{ .ptr = &global_struct };
     _ = &foo;
@@ -1328,6 +1383,8 @@
 }
 
 test "cast between *[N]void and []void" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     var a: [4]void = undefined;
     const b: []void = &a;
     try expect(b.len == 4);
@@ -1336,6 +1393,7 @@
 test "peer resolve arrays of different size to const slice" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try expect(mem.eql(u8, boolToStr(true), "true"));
     try expect(mem.eql(u8, boolToStr(false), "false"));
@@ -1353,6 +1411,7 @@ test "cast f16 to wider types" {
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArmOrThumb()) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -1373,6 +1432,7 @@ test "cast f128 to narrower types" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -1391,6 +1451,7 @@ test "peer type resolution: unreachable, null, slice" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest(num: usize, word: []const u8) !void {
@@ -1431,6 +1492,7 @@ test "cast compatible optional types" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var a: ?[:0]const u8 = null;
     _ = &a;
@@ -1441,6 +1503,7 @@ test "cast compatible optional types" {
 test "coerce undefined single-item pointer of array to error union of slice" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const a = @as([*]u8, undefined)[0..0];
     var b: error{a}![]const u8 = a;
@@ -1464,6 +1527,7 @@ test "coerce between pointers of compatible differently-named floats" {
floats" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_llvm and builtin.os.tag == .windows) { // https://github.com/ziglang/zig/issues/12396 @@ -1518,6 +1582,7 @@ test "cast typed undefined to int" { test "implicit cast from [:0]T to [*c]T" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var a: [:0]const u8 = "foo"; _ = &a; @@ -1541,6 +1606,7 @@ test "bitcast packed struct with u0" { test "optional pointer coerced to optional allowzero pointer" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var p: ?*u32 = undefined; var q: ?*allowzero u32 = undefined; @@ -1557,6 +1623,8 @@ test "optional slice coerced to allowzero many pointer" { } test "optional slice passed as parameter coerced to allowzero many pointer" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + const ns = struct { const Color = struct { r: u8, @@ -1576,6 +1644,8 @@ test "optional slice passed as parameter coerced to allowzero many pointer" { } test "single item pointer to pointer to array to slice" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + var x: i32 = 1234; try expect(@as([]const i32, @as(*[1]i32, &x))[0] == 1234); const z1 = @as([]const i32, @as(*[1]i32, &x)); @@ -1583,6 +1653,8 @@ test "single item pointer to pointer to array to slice" { } test "peer type resolution forms error union" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + var foo: i32 = 123; _ = &foo; const result = if (foo < 0) switch (-foo) { @@ -1616,6 +1688,8 @@ test "@volatileCast without a result location" { } test "coercion from single-item pointer to @as to slice" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + var x: u32 = 1; // Why the following line gets a compile error? 
@@ -1628,6 +1702,7 @@ test "peer type resolution: const sentinel slice and mutable non-sentinel slice"
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest(comptime T: type, comptime s: T) !void {
@@ -1658,6 +1733,7 @@ test "peer type resolution: float and comptime-known fixed-width integer" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const i: u8 = 100;
     var f: f32 = 1.234;
@@ -1680,6 +1756,7 @@ test "peer type resolution: same array type with sentinel" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var a: [2:0]u32 = .{ 0, 1 };
     var b: [2:0]u32 = .{ 2, 3 };
@@ -1702,6 +1779,7 @@ test "peer type resolution: array with sentinel and array without sentinel" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var a: [2:0]u32 = .{ 0, 1 };
     var b: [2]u32 = .{ 2, 3 };
@@ -1724,6 +1802,7 @@ test "peer type resolution: array and vector with same child type" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var arr: [2]u32 = .{ 0, 1 };
     var vec: @Vector(2, u32) = .{ 2, 3 };
@@ -1747,6 +1826,7 @@ test "peer type resolution: array with smaller child type and vector with larger
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var arr: [2]u8 = .{ 0, 1 };
     var vec: @Vector(2, u64) = .{ 2, 3 };
@@ -1769,6 +1849,7 @@ test "peer type resolution: error union and optional of same type" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const E = error{Foo};
     var a: E!*u8 = error.Foo;
@@ -1792,6 +1873,7 @@ test "peer type resolution: C pointer and @TypeOf(null)" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var a: [*c]c_int = 0x1000;
     _ = &a;
@@ -1814,6 +1896,7 @@ test "peer type resolution: three-way resolution combines error set and optional
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const E = error{Foo};
     var a: E = error.Foo;
@@ -1858,6 +1941,7 @@ test "peer type resolution: vector and optional vector" {
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var a: ?@Vector(3, u32) = .{ 0, 1, 2 };
     var b: @Vector(3, u32) = .{ 3, 4, 5 };
@@ -1880,6 +1964,7 @@ test "peer type resolution: optional fixed-width int and comptime_int" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var a: ?i32 = 42;
     _ = &a;
@@ -1902,6 +1987,7 @@ test "peer type resolution: array and tuple" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var arr: [3]i32 = .{ 1, 2, 3 };
     _ = &arr;
@@ -1926,6 +2012,7 @@ test "peer type resolution: vector and tuple" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var vec: @Vector(3, i32) = .{ 1, 2, 3 };
     _ = &vec;
@@ -1950,6 +2037,7 @@ test "peer type resolution: vector and array and tuple" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var vec: @Vector(2, i8) = .{ 10, 20 };
     var arr: [2]i8 = .{ 30, 40 };
@@ -1992,6 +2080,7 @@ test "peer type resolution: empty tuple pointer and slice" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var a: [:0]const u8 = "Hello";
     var b = &.{};
@@ -2013,6 +2102,7 @@ test "peer type resolution: tuple pointer and slice" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var a: [:0]const u8 = "Hello";
"Hello"; var b = &.{ @as(u8, 'x'), @as(u8, 'y'), @as(u8, 'z') }; @@ -2034,6 +2124,7 @@ test "peer type resolution: tuple pointer and optional slice" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // Miscompilation on Intel's OpenCL CPU runtime. if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // flaky @@ -2058,6 +2149,7 @@ test "peer type resolution: many compatible pointers" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var buf = "foo-3".*; @@ -2125,6 +2217,7 @@ test "peer type resolution: tuples with comptime fields" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const a = .{ 1, 2 }; const b = .{ @as(u32, 3), @as(i16, 4) }; @@ -2156,6 +2249,7 @@ test "peer type resolution: C pointer and many pointer" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var buf = "hello".*; @@ -2179,6 +2273,7 @@ test "peer type resolution: pointer attributes are combined correctly" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var buf_a align(4) = "foo".*; var buf_b align(4) = "bar".*; @@ -2222,6 +2317,7 @@ test "peer type resolution: arrays of compatible types" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var e0: u8 = 3; var e1: u8 = 2; @@ -2239,6 +2335,7 @@ test "cast builtins can wrap result in optional" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const MyEnum = enum(u32) { _ }; @@ -2276,6 +2373,7 @@ test "cast builtins can wrap result in error union" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const MyEnum = enum(u32) { _ 
@@ -2314,6 +2412,7 @@ test "cast builtins can wrap result in error union and optional" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         const MyEnum = enum(u32) { _ };
@@ -2354,6 +2453,7 @@ test "@floatCast on vector" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -2394,6 +2494,7 @@ test "@ptrFromInt on vector" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -2417,6 +2518,7 @@ test "@intFromPtr on vector" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -2441,6 +2543,7 @@ test "@floatFromInt on vector" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -2460,6 +2563,7 @@ test "@intFromFloat on vector" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -2480,6 +2584,7 @@ test "@intFromBool on vector" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64 and
         builtin.os.tag == .windows)
@@ -2503,6 +2608,7 @@ test "@intFromBool on vector" {
 
 test "numeric coercions with undefined" {
     if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const from: i32 = undefined;
     var to: f32 = from;
@@ -2513,6 +2619,7 @@
 test "15-bit int to float" {
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var a: u15 = 42;
     _ = &a;
@@ -2525,6 +2632,7 @@ test "@as does not corrupt values with incompatible representations" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const x: f32 = @as(f16, blk: {
         if (false) {
@@ -2540,6 +2648,7 @@ test "result information is preserved through many nested structures" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -2566,6 +2675,7 @@ test "@intCast vector of signed integer" {
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var x: @Vector(4, i32) = .{ 1, 2, 3, 4 };
     _ = &x;
@@ -2586,6 +2696,7 @@ test "implicit cast from ptr to tuple to ptr to struct" {
     if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const ComptimeReason = union(enum) {
         c_import: struct {
@@ -2611,6 +2722,7 @@ test "bitcast vector" {
     if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const u8x32 = @Vector(32, u8);
     const u32x8 = @Vector(8, u32);
diff --git a/test/behavior/cast_int.zig b/test/behavior/cast_int.zig
index 065710c5c2..67834385d1 100644
--- a/test/behavior/cast_int.zig
+++ b/test/behavior/cast_int.zig
@@ -9,6 +9,7 @@ test "@intCast i32 to u7" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var x: u128 = maxInt(u128);
     var y: i32 = 120;
@@ -34,6 +35,8 @@ test "coerce i8 to i32 and @intCast back" {
 }
 
 test "coerce non byte-sized integers accross 32bits boundary" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     {
         var v: u21 = 6417;
         _ = &v;
@@ -163,6 +166,8 @@ const Piece = packed struct {
 };
 
 test "load non byte-sized optional value" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     // Originally reported at https://github.com/ziglang/zig/issues/14200
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
@@ -178,6 +183,8 @@
 test "load non byte-sized value in struct" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     if (builtin.cpu.arch.endian() != .little) return error.SkipZigTest; // packed struct TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
@@ -215,6 +222,7 @@ test "load non byte-sized value in union" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     // note: this bug is triggered by the == operator, expectEqual will hide it
     // using ptrCast not to depend on unitialised memory state
diff --git a/test/behavior/comptime_memory.zig b/test/behavior/comptime_memory.zig
index 968b7be79d..73b9ea60f2 100644
--- a/test/behavior/comptime_memory.zig
+++ b/test/behavior/comptime_memory.zig
@@ -408,6 +408,8 @@ test "mutate entire slice at comptime" {
 }
 
 test "dereference undefined pointer to zero-bit type" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     const p0: *void = undefined;
     try testing.expectEqual({}, p0.*);
@@ -416,6 +418,8 @@
 test "type pun extern struct" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     const S = extern struct { f: u8 };
     comptime var s = S{ .f = 123 };
     @as(*u8, @ptrCast(&s)).* = 72;
@@ -513,5 +517,7 @@ fn fieldPtrTest() u32 {
     return a.value;
 }
 
 test "pointer in aggregate field can mutate comptime state" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     try comptime std.testing.expect(fieldPtrTest() == 2);
 }
diff --git a/test/behavior/const_slice_child.zig b/test/behavior/const_slice_child.zig
index 35bc007d84..0f81fdc935 100644
--- a/test/behavior/const_slice_child.zig
+++ b/test/behavior/const_slice_child.zig
@@ -10,6 +10,7 @@ test "const slice child" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const strs = [_][*]const u8{ "one", "two", "three" };
     argv = &strs;
diff --git a/test/behavior/decltest.zig b/test/behavior/decltest.zig
index b01a431e28..57afc4eade 100644
--- a/test/behavior/decltest.zig
+++ b/test/behavior/decltest.zig
@@ -5,5 +5,7 @@ pub fn the_add_function(a: u32, b: u32) u32 {
 }
 
 test the_add_function {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     if (the_add_function(1, 2) != 3) unreachable;
 }
diff --git a/test/behavior/defer.zig b/test/behavior/defer.zig
index 593282ac59..8f8ba8647d 100644
--- a/test/behavior/defer.zig
+++ b/test/behavior/defer.zig
@@ -5,6 +5,8 @@ const expectEqual = std.testing.expectEqual;
 const expectError = std.testing.expectError;
 
 test "break and continue inside loop inside defer expression" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     testBreakContInDefer(10);
     comptime testBreakContInDefer(10);
 }
@@ -21,6 +23,8 @@ fn testBreakContInDefer(x: usize) void {
 }
 
 test "defer and labeled break" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     var i = @as(usize, 0);
 
     blk: {
@@ -34,6 +38,7 @@ test "errdefer does not apply to fn inside fn" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     if (testNestedFnErrDefer()) |_| @panic("expected error") else |e| try expect(e == error.Bad);
 }
@@ -53,6 +58,7 @@ test "return variable while defer expression in scope to modify it" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -94,6 +100,7 @@ test "mixing normal and error defers" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try expect(runSomeErrorDefers(true) catch unreachable);
     try expect(result[0] == 'c');
@@ -114,6 +121,7 @@ test "errdefer with payload" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn foo() !i32 {
@@ -136,6 +144,7 @@ test "reference to errdefer payload" {
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn foo() !i32 {
@@ -158,6 +167,7 @@ test "reference to errdefer payload" {
 test "simple else prong doesn't emit an error for unreachable else prong" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn foo() error{Foo}!void {
diff --git a/test/behavior/destructure.zig b/test/behavior/destructure.zig
index 43ddbb7a4d..3164d25187 100644
--- a/test/behavior/destructure.zig
+++ b/test/behavior/destructure.zig
@@ -23,6 +23,8 @@ test "simple destructure" {
 }
 
 test "destructure with comptime syntax" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     const S = struct {
         fn doTheTest() !void {
             {
diff --git a/test/behavior/duplicated_test_names.zig b/test/behavior/duplicated_test_names.zig
index 81b9ebdf50..9453f22ae1 100644
--- a/test/behavior/duplicated_test_names.zig
+++ b/test/behavior/duplicated_test_names.zig
@@ -16,6 +16,7 @@ test "thingy" {}
 
 test thingy {
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     if (thingy(1, 2) != 3) unreachable;
 }
diff --git a/test/behavior/empty_tuple_fields.zig b/test/behavior/empty_tuple_fields.zig
index a7a3d27e02..788ed19e5e 100644
--- a/test/behavior/empty_tuple_fields.zig
+++ b/test/behavior/empty_tuple_fields.zig
@@ -5,6 +5,7 @@ test "empty file level struct" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const T = @import("empty_file_level_struct.zig");
     const info = @typeInfo(T);
@@ -17,6 +18,7 @@ test "empty file level union" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const T = @import("empty_file_level_union.zig");
     const info = @typeInfo(T);
diff --git a/test/behavior/empty_union.zig b/test/behavior/empty_union.zig
index f05feacfaf..a42dfda7e1 100644
--- a/test/behavior/empty_union.zig
+++ b/test/behavior/empty_union.zig
@@ -48,6 +48,8 @@ test "empty extern union" {
 }
 
 test "empty union passed as argument" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     const U = union(enum) {
         fn f(u: @This()) void {
             switch (u) {}
@@ -57,6 +59,8 @@
 test "empty enum passed as argument" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     const E = enum {
         fn f(e: @This()) void {
             switch (e) {}
diff --git a/test/behavior/enum.zig b/test/behavior/enum.zig
index 77b22f82aa..8e93739687 100644
--- a/test/behavior/enum.zig
+++ b/test/behavior/enum.zig
@@ -610,6 +610,7 @@ fn testEnumWithSpecifiedTagValues(x: MultipleChoice) !void {
 test "enum with specified tag values" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try testEnumWithSpecifiedTagValues(MultipleChoice.C);
     try comptime testEnumWithSpecifiedTagValues(MultipleChoice.C);
@@ -618,6 +619,7 @@ test "non-exhaustive enum" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         const E = enum(u8) { a, b, _ };
@@ -682,6 +684,7 @@ test "empty non-exhaustive enum" {
 test "single field non-exhaustive enum" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         const E = enum(u8) { a, _ };
@@ -746,6 +749,7 @@ test "cast integer literal to enum" {
 test "enum with specified and unspecified tag values" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try testEnumWithSpecifiedAndUnspecifiedTagValues(MultipleChoice2.D);
     try comptime testEnumWithSpecifiedAndUnspecifiedTagValues(MultipleChoice2.D);
@@ -854,6 +858,8 @@ fn doALoopThing(id: EnumWithOneMember) void {
 }
 
 test "comparison operator on enum with one member is comptime-known" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     doALoopThing(EnumWithOneMember.Eof);
 }
@@ -907,6 +913,7 @@ test "enum literal casting to tagged union" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const Arch = union(enum) {
         x86_64,
@@ -933,6 +940,7 @@ const Bar = enum { A, B, C, D };
 test "enum literal casting to error union with payload enum" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var bar: error{B}!Bar = undefined;
     bar = .B; // should never cast to the error set
@@ -944,6 +952,7 @@ test "constant enum initialization with differing sizes" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try test3_1(test3_foo);
     try test3_2(test3_bar);
@@ -987,6 +996,7 @@ test "@tagName" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try expect(mem.eql(u8, testEnumTagNameBare(BareNumber.Three), "Three"));
     comptime assert(mem.eql(u8, testEnumTagNameBare(BareNumber.Three), "Three"));
@@ -1003,6 +1013,7 @@ test "@tagName non-exhaustive enum" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try expect(mem.eql(u8, testEnumTagNameBare(NonExhaustive.B), "B"));
     comptime assert(mem.eql(u8, testEnumTagNameBare(NonExhaustive.B), "B"));
@@ -1014,6 +1025,7 @@ test "@tagName is null-terminated" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest(n: BareNumber) !void {
@@ -1029,6 +1041,7 @@ test "tag name with assigned enum values" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const LocalFoo = enum(u8) {
         A = 1,
@@ -1042,6 +1055,7 @@ test "@tagName on enum literals" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try expect(mem.eql(u8, @tagName(.FooBar), "FooBar"));
     comptime assert(mem.eql(u8, @tagName(.FooBar), "FooBar"));
@@ -1052,6 +1066,7 @@ test "tag name with signed enum values" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const LocalFoo = enum(isize) {
         alfa = 62,
@@ -1068,6 +1083,7 @@ test "enum literal casting to optional" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var bar: ?Bar = undefined;
     bar = .B;
@@ -1096,6 +1112,7 @@ test "bit field access with enum fields" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var data = bit_field_1;
     try expect(getA(&data) == A.Two);
@@ -1136,6 +1153,7 @@ test "tag name functions are unique" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     {
         const E = enum { a, b };
@@ -1212,6 +1230,8 @@ test "enum tag from a local variable" {
 }
 
 test "auto-numbered enum with signed tag type" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     const E = enum(i32) { a, b };
 
     try std.testing.expectEqual(@as(i32, 0), @intFromEnum(E.a));
@@ -1227,6 +1247,8 @@
 test "lazy initialized field" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     try std.testing.expectEqual(@as(u8, @alignOf(struct {})), getLazyInitialized(.a));
 }
@@ -1266,6 +1288,7 @@ test "matching captures causes enum equivalence" {
 test "large enum field values" {
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     {
         const E = enum(u64) { min = std.math.minInt(u64), max = std.math.maxInt(u64) };
diff --git a/test/behavior/error.zig b/test/behavior/error.zig
index 7703a02f68..08f842d93b 100644
--- a/test/behavior/error.zig
+++ b/test/behavior/error.zig
@@ -31,6 +31,7 @@ fn shouldBeNotEqual(a: anyerror, b: anyerror) void {
 test "error binary operator" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const a = errBinaryOperatorG(true) catch 3;
     const b = errBinaryOperatorG(false) catch 3;
@@ -62,12 +63,14 @@ pub fn baz() anyerror!i32 {
 
 test "error wrapping" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try expect((baz() catch unreachable) == 15);
 }
 
 test "unwrap simple value from error" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const i = unwrapSimpleValueFromErrorDo() catch unreachable;
     try expect(i == 13);
@@ -78,6 +81,7 @@ fn unwrapSimpleValueFromErrorDo() anyerror!isize {
 test "error return in assignment" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     doErrReturnInAssignment() catch unreachable;
 }
@@ -100,6 +104,7 @@ test "syntax: optional operator in front of error union operator" {
 test "widen cast integer payload of error union function call" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn errorable() !u64 {
@@ -124,6 +129,7 @@ test "debug info for optional error set" {
 test "implicit cast to optional to error union to return result loc" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn entry() !void {
@@ -235,6 +241,8 @@ fn testExplicitErrorSetCast(set1: Set1) !void {
 }
 
 test "@errorCast on error unions" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     const S = struct {
         fn doTheTest() !void {
             {
@@ -262,6 +270,7 @@ test "comptime test error for empty error set" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try testComptimeTestErrorEmptySet(1234);
     try comptime testComptimeTestErrorEmptySet(1234);
@@ -297,6 +306,8 @@ test "inferred empty error set comptime catch" {
 }
 
 test "error inference with an empty set" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     const S = struct {
         const Struct = struct {
             pub fn func() (error{})!usize {
@@ -319,6 +330,7 @@ test "error union peer type resolution" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try testErrorUnionPeerTypeResolution(1);
 }
@@ -350,6 +362,7 @@ fn quux_1() !i32 {
 test "error: Zero sized error set returned with value payload crash" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     _ = try foo3(0);
     _ = try comptime foo3(0);
@@ -363,6 +376,7 @@ fn foo3(b: usize) Error!usize {
 test "error: Infer error set from literals" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     _ = nullLiteral("n") catch |err| handleErrors(err);
     _ = floatLiteral("n") catch |err| handleErrors(err);
@@ -402,6 +416,7 @@ test "nested error union function call in optional unwrap" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         const Foo = struct {
@@ -448,6 +463,7 @@ test "nested error union function call in optional unwrap" {
 test "return function call to error set from error union function" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn errorable() anyerror!i32 {
@@ -466,6 +482,7 @@ test "optional error set is the same size as error set" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     comptime assert(@sizeOf(?anyerror) == @sizeOf(anyerror));
     comptime assert(@alignOf(?anyerror) == @alignOf(anyerror));
@@ -481,6 +498,7 @@ test "nested catch" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn entry() !void {
@@ -506,6 +524,7 @@ test "nested catch" {
 test "function pointer with return type that is error union with payload which is pointer of parent struct" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         const Foo = struct {
@@ -531,6 +550,7 @@ test "return result loc as peer result loc in inferred error set function" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -562,6 +582,7 @@ test "error payload type is correctly resolved" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const MyIntWrapper = struct {
         const Self = @This();
@@ -592,6 +613,7 @@ test "@errorName" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try expect(mem.eql(u8, @errorName(error.AnError), "AnError"));
     try expect(mem.eql(u8, @errorName(error.ALongerErrorName), "ALongerErrorName"));
@@ -606,6 +628,7 @@ test "@errorName sentinel length matches slice length" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const name = testBuiltinErrorName(error.FooBar);
     const length: usize = 6;
@@ -700,6 +723,7 @@ test "error union payload is properly aligned" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         a: u128,
@@ -731,6 +755,7 @@ test "ret_ptr doesn't cause own inferred error set to be resolved" {
 test "simple else prong allowed even when all errors handled" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn foo() !u8 {
@@ -759,6 +784,7 @@ test "pointer to error union payload" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var err_union: anyerror!u8 = 15;
@@ -792,6 +818,7 @@ test "error union of noreturn used with if" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     NoReturn.a = 64;
     if (NoReturn.loop()) {
@@ -806,6 +833,7 @@ test "error union of noreturn used with try" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     NoReturn.a = 64;
     const err = NoReturn.testTry();
@@ -817,6 +845,7 @@ test "error union of noreturn used with catch" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     NoReturn.a = 64;
     const err = NoReturn.testCatch();
@@ -828,6 +857,7 @@ test "alignment of wrapping an error union payload" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         const I = extern struct { x: i128 };
@@ -843,6 +873,7 @@ test "compare error union and error set" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var a: anyerror = error.Foo;
     var b: anyerror!u32 = error.Bar;
@@ -881,6 +912,7 @@ test "error from comptime string" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const name = "Weird error name!";
     const S = struct {
@@ -904,6 +936,7 @@ test "field access of anyerror results in smaller error set" {
 test "optional error union return type" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn foo() ?anyerror!u32 {
@@ -918,6 +951,7 @@ test "optional error set return type" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const E = error{ A, B };
     const S = struct {
@@ -931,6 +965,8 @@
 }
 
 test "optional error set function parameter" {
parameter" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + const S = struct { fn doTheTest(a: ?anyerror) !void { try std.testing.expect(a.? == error.OutOfMemory); @@ -960,6 +996,7 @@ test "returning an error union containing a type with no runtime bits" { test "try used in recursive function with inferred error set" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const Value = union(enum) { values: []const @This(), @@ -1001,6 +1038,7 @@ test "function called at runtime is properly analyzed for inferred error set" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn foo() !void { @@ -1024,6 +1062,7 @@ test "generic type constructed from inferred error set of unresolved function" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn write(_: void, bytes: []const u8) !usize { @@ -1039,6 +1078,8 @@ test "generic type constructed from inferred error set of unresolved function" { } test "errorCast to adhoc inferred error set" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + const S = struct { inline fn baz() !i32 { return @errorCast(err()); @@ -1051,6 +1092,8 @@ test "errorCast to adhoc inferred error set" { } test "errorCast from error sets to error unions" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + const err_union: Set1!void = @errorCast(error.A); try expectError(error.A, err_union); } diff --git a/test/behavior/eval.zig b/test/behavior/eval.zig index 380afd49a5..ef4e182df2 100644 --- a/test/behavior/eval.zig +++ b/test/behavior/eval.zig @@ -73,6 +73,7 @@ fn constExprEvalOnSingleExprBlocksFn(x: i32, b: bool) i32 { test "constant expressions" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var array: [array_size]u8 = undefined; _ = &array; @@ -142,6 +143,7 @@ test "pointer to type" { test "a type constructed in a global expression" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var l: List = undefined; l.array[0] = 10; @@ -304,6 +306,8 @@ fn performFn(comptime prefix_char: u8, start_value: i32) i32 { } test "comptime iterate over fn ptr list" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + try expect(performFn('t', 1) == 6); try expect(performFn('o', 0) == 1); try expect(performFn('w', 99) == 99); @@ -394,6 +398,7 @@ test "return 0 from function that has u0 return type" { test "statically initialized struct" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) 
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var l: List = undefined;
     l.array[0] = 10;
@@ -304,6 +306,8 @@ fn performFn(comptime prefix_char: u8, start_value: i32) i32 {
 }
 
 test "comptime iterate over fn ptr list" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     try expect(performFn('t', 1) == 6);
     try expect(performFn('o', 0) == 1);
     try expect(performFn('w', 99) == 99);
@@ -394,6 +398,7 @@ test "return 0 from function that has u0 return type" {
 test "statically initialized struct" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     st_init_str_foo.x += 1;
     try expect(st_init_str_foo.x == 14);
@@ -408,6 +413,8 @@ var st_init_str_foo = StInitStrFoo{
 };
 
 test "inline for with same type but different values" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     var res: usize = 0;
     inline for ([_]type{ [2]u8, [1]u8, [2]u8 }) |T| {
         var a: T = undefined;
@@ -444,6 +451,7 @@ test "binary math operator in partially inlined function" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var s: [4]u32 = undefined;
     var b: [16]u8 = undefined;
@@ -489,6 +497,7 @@ test "comptime bitwise operators" {
 test "comptime shlWithOverflow" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const ct_shifted = @shlWithOverflow(~@as(u64, 0), 16)[0];
     var a = ~@as(u64, 0);
@@ -501,6 +510,7 @@ test "const ptr to variable data changes at runtime" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try expect(foo_ref.name[0] == 'a');
     foo_ref.name = "b";
@@ -522,6 +532,7 @@ test "runtime 128 bit integer division" {
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArmOrThumb()) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var a: u128 = 152313999999999991610955792383;
     var b: u128 = 10000000000000000000;
@@ -533,6 +544,7 @@ test "@tagName of @typeInfo" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const str = @tagName(@typeInfo(u8));
     try expect(std.mem.eql(u8, str, "Int"));
@@ -542,6 +554,7 @@ test "static eval list init" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try expect(static_vec3.data[2] == 1.0);
     try expect(vec3(0.0, 0.0, 3.0).data[2] == 3.0);
@@ -713,6 +726,8 @@ fn loopNTimes(comptime n: usize) void {
 }
 
 test "variable inside inline loop that has different types on different iterations" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     try testVarInsideInlineLoop(.{ true, @as(u32, 42) });
 }
@@ -736,6 +751,7 @@ test "array concatenation of function calls" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
.stage2_riscv64) return error.SkipZigTest; var a = oneItem(3) ++ oneItem(4); try expect(std.mem.eql(i32, &a, &[_]i32{ 3, 4 })); @@ -745,6 +761,7 @@ test "array multiplication of function calls" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var a = oneItem(3) ** scalar(2); try expect(std.mem.eql(i32, &a, &[_]i32{ 3, 3 })); @@ -762,6 +779,7 @@ test "array concatenation peer resolves element types - value" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var a = [2]u3{ 1, 7 }; var b = [3]u8{ 200, 225, 255 }; @@ -779,6 +797,7 @@ test "array concatenation peer resolves element types - pointer" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var a = [2]u3{ 1, 7 }; var b = [3]u8{ 200, 225, 255 }; @@ -795,6 +814,7 @@ test "array concatenation sets the sentinel - value" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var a = [2]u3{ 1, 7 }; var b = [3:69]u8{ 200, 225, 255 }; @@ -813,6 +833,7 @@ test "array concatenation sets the sentinel - value" { test "array concatenation sets the sentinel - pointer" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var a = [2]u3{ 1, 7 }; var b = [3:69]u8{ 200, 225, 255 }; @@ -831,6 +852,7 @@ test "array multiplication sets the sentinel - value" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var a = [2:7]u3{ 1, 6 }; _ = &a; @@ -848,6 +870,7 @@ test "array multiplication sets the sentinel - pointer" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var a = [2:7]u3{ 1, 6 }; const b = &a ** 2; @@ -984,6 +1007,7 @@ test "closure capture type of runtime-known var" { test "comptime break passing through runtime condition converted to runtime break" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -1018,6 +1042,7 @@ test "comptime break to outer loop passing through runtime condition converted t if (builtin.zig_backend == .stage2_aarch64) return 
error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -1070,6 +1095,7 @@ test "comptime break operand passing through runtime condition converted to runt test "comptime break operand passing through runtime switch converted to runtime break" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest(runtime: u8) !void { @@ -1090,6 +1116,7 @@ test "comptime break operand passing through runtime switch converted to runtime test "no dependency loop for alignment of self struct" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -1127,6 +1154,7 @@ test "no dependency loop for alignment of self struct" { test "no dependency loop for alignment of self bare union" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -1164,6 +1192,7 @@ test "no dependency loop for alignment of self bare union" { test "no dependency loop for alignment of self tagged union" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -1237,6 +1266,7 @@ test "pass pointer to field of comptime-only type as a runtime parameter" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const Mixed = struct { @@ -1354,6 +1384,7 @@ test "lazy value is resolved as slice operand" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const A = struct { a: u32 }; var a: [512]u64 = undefined; @@ -1477,6 +1508,7 @@ test "continue nested inline for loop in named block expr" { test "x and false is comptime-known false" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const T = struct { var x: u32 = 0; @@ -1504,6 +1536,7 @@ test "x and false is comptime-known false" { test "x or true is comptime-known true" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const T = struct { var x: u32 = 0; @@ -1533,6 +1566,7 @@ test "non-optional and optional 
array elements concatenated" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const array = [1]u8{'A'} ++ [1]?u8{null}; var index: usize = 0; @@ -1564,6 +1598,8 @@ test "comptime function turns function value to function pointer" { } test "container level const and var have unique addresses" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + const S = struct { x: i32, y: i32, @@ -1607,6 +1643,8 @@ test "struct in comptime false branch is not evaluated" { } test "result of nested switch assigned to variable" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + var zds: u32 = 0; zds = switch (zds) { 0 => switch (zds) { @@ -1621,6 +1659,8 @@ test "result of nested switch assigned to variable" { } test "inline for loop of functions returning error unions" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + const T1 = struct { fn v() error{}!usize { return 1; @@ -1639,6 +1679,8 @@ test "inline for loop of functions returning error unions" { } test "if inside a switch" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + var condition = true; var wave_type: u32 = 0; _ = .{ &condition, &wave_type }; @@ -1675,6 +1717,8 @@ test "early exit in container level const" { } test "@inComptime" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + const S = struct { fn inComptime() bool { return @inComptime(); diff --git a/test/behavior/export_builtin.zig b/test/behavior/export_builtin.zig index 25b6e2527e..547a9b990a 100644 --- a/test/behavior/export_builtin.zig +++ b/test/behavior/export_builtin.zig @@ -48,6 +48,7 @@ test "exporting using field access" { test "exporting comptime-known value" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and (builtin.target.ofmt != .elf and builtin.target.ofmt != .macho and @@ -67,6 +68,7 @@ test "exporting comptime-known value" { test "exporting comptime var" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and (builtin.target.ofmt != .elf and builtin.target.ofmt != .macho and diff --git a/test/behavior/export_keyword.zig b/test/behavior/export_keyword.zig index a6baf6d1a5..472418d9b2 100644 --- a/test/behavior/export_keyword.zig +++ b/test/behavior/export_keyword.zig @@ -9,6 +9,8 @@ const builtin = @import("builtin"); // and generates code const vram = @as([*]volatile u8, @ptrFromInt(0x20000000))[0..0x8000]; export fn writeToVRam() void { + if (builtin.zig_backend == .stage2_riscv64) return; + vram[0] = 'X'; } @@ -24,6 +26,7 @@ const PackedUnion = packed union { test "packed struct, enum, union parameters in extern function" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; testPackedStuff(&(PackedStruct{ .a = 1, diff --git 
a/test/behavior/extern.zig b/test/behavior/extern.zig index a85f300b10..135f5e5648 100644 --- a/test/behavior/extern.zig +++ b/test/behavior/extern.zig @@ -7,6 +7,7 @@ test "anyopaque extern symbol" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const a = @extern(*anyopaque, .{ .name = "a_mystery_symbol" }); const b: *i32 = @alignCast(@ptrCast(a)); @@ -19,6 +20,7 @@ test "function extern symbol" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const a = @extern(*const fn () callconv(.C) i32, .{ .name = "a_mystery_function" }); try expect(a() == 4567); @@ -32,6 +34,7 @@ test "function extern symbol matches extern decl" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { extern fn another_mystery_function() u32; diff --git a/test/behavior/field_parent_ptr.zig b/test/behavior/field_parent_ptr.zig index 51b7fc8cfd..0488d941c4 100644 --- a/test/behavior/field_parent_ptr.zig +++ b/test/behavior/field_parent_ptr.zig @@ -2,6 +2,8 @@ const expect = @import("std").testing.expect; const builtin = @import("builtin"); test "@fieldParentPtr struct" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + const C = struct { a: bool = true, b: f32 = 3.14, @@ -135,6 +137,8 @@ test "@fieldParentPtr struct" { } test "@fieldParentPtr extern struct" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + const C = extern struct { a: bool = true, b: f32 = 3.14, @@ -269,6 +273,7 @@ test "@fieldParentPtr extern struct" { test "@fieldParentPtr extern struct first zero-bit field" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const C = extern struct { a: u0 = 0, @@ -372,6 +377,7 @@ test "@fieldParentPtr extern struct first zero-bit field" { test "@fieldParentPtr extern struct middle zero-bit field" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const C = extern struct { a: f32 = 3.14, @@ -475,6 +481,7 @@ test "@fieldParentPtr extern struct middle zero-bit field" { test "@fieldParentPtr extern struct last zero-bit field" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const C = extern struct { a: f32 = 3.14, @@ -581,6 +588,7 @@ test "@fieldParentPtr unaligned packed struct" { if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend 
== .stage2_riscv64) return error.SkipZigTest; const C = packed struct { a: bool = true, @@ -719,6 +727,7 @@ test "@fieldParentPtr aligned packed struct" { if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const C = packed struct { a: f32 = 3.14, @@ -856,6 +865,7 @@ test "@fieldParentPtr nested packed struct" { if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; { const C = packed struct { @@ -1018,6 +1028,7 @@ test "@fieldParentPtr packed struct first zero-bit field" { if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const C = packed struct { a: u0 = 0, @@ -1123,6 +1134,7 @@ test "@fieldParentPtr packed struct middle zero-bit field" { if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const C = packed struct { a: f32 = 3.14, @@ -1228,6 +1240,7 @@ test "@fieldParentPtr packed struct last zero-bit field" { if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const C = packed struct { a: f32 = 3.14, @@ -1330,6 +1343,8 @@ test "@fieldParentPtr packed struct last zero-bit field" { } test "@fieldParentPtr tagged union" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + const C = union(enum) { a: bool, b: f32, @@ -1463,6 +1478,8 @@ test "@fieldParentPtr tagged union" { } test "@fieldParentPtr untagged union" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + const C = union { a: bool, b: f32, @@ -1596,6 +1613,8 @@ test "@fieldParentPtr untagged union" { } test "@fieldParentPtr extern union" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + const C = extern union { a: bool, b: f32, @@ -1731,6 +1750,7 @@ test "@fieldParentPtr extern union" { test "@fieldParentPtr packed union" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.target.cpu.arch.endian() == .big) return error.SkipZigTest; // TODO const C = packed union { @@ -1868,6 +1888,7 @@ test "@fieldParentPtr packed union" { test "@fieldParentPtr tagged union all zero-bit fields" { if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const C = union(enum) { a: u0, diff --git a/test/behavior/floatop.zig b/test/behavior/floatop.zig index 
a78f9b6f82..2e18b58d3c 100644 --- a/test/behavior/floatop.zig +++ b/test/behavior/floatop.zig @@ -15,12 +15,15 @@ fn epsForType(comptime T: type) T { test "add f16" { if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testAdd(f16); try comptime testAdd(f16); } test "add f32/f64" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + try testAdd(f32); try comptime testAdd(f32); try testAdd(f64); @@ -30,6 +33,7 @@ test "add f32/f64" { test "add f80/f128/c_longdouble" { if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testAdd(f80); try comptime testAdd(f80); @@ -49,12 +53,15 @@ fn testAdd(comptime T: type) !void { test "sub f16" { if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testSub(f16); try comptime testSub(f16); } test "sub f32/f64" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + try testSub(f32); try comptime testSub(f32); try testSub(f64); @@ -64,6 +71,7 @@ test "sub f32/f64" { test "sub f80/f128/c_longdouble" { if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testSub(f80); try comptime testSub(f80); @@ -83,12 +91,15 @@ fn testSub(comptime T: type) !void { test "mul f16" { if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testMul(f16); try comptime testMul(f16); } test "mul f32/f64" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + try testMul(f32); try comptime testMul(f32); try testMul(f64); @@ -98,6 +109,7 @@ test "mul f32/f64" { test "mul f80/f128/c_longdouble" { if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testMul(f80); try comptime testMul(f80); @@ -119,6 +131,7 @@ test "cmp f16" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testCmp(f16); try comptime testCmp(f16); @@ -127,6 +140,7 @@ test "cmp f16" { test "cmp f32/f64" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testCmp(f32); try comptime 
testCmp(f32); @@ -140,6 +154,7 @@ test "cmp f128" { if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArmOrThumb()) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testCmp(f128); try comptime testCmp(f128); @@ -213,6 +228,7 @@ test "different sized float comparisons" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testDifferentSizedFloatComparisons(); try comptime testDifferentSizedFloatComparisons(); @@ -261,6 +277,7 @@ test "@sqrt f16" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testSqrt(f16); try comptime testSqrt(f16); @@ -272,6 +289,7 @@ test "@sqrt f32/f64" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testSqrt(f32); try comptime testSqrt(f32); @@ -285,6 +303,7 @@ test "@sqrt f80/f128/c_longdouble" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.os.tag == .freebsd) { // TODO https://github.com/ziglang/zig/issues/10875 @@ -371,6 +390,7 @@ test "@sqrt with vectors" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testSqrtWithVectors(); try comptime testSqrtWithVectors(); @@ -392,6 +412,7 @@ test "@sin f16" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testSin(f16); try comptime testSin(f16); @@ -403,6 +424,7 @@ test "@sin f32/f64" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if 
(builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testSin(f32); comptime try testSin(f32); @@ -416,6 +438,7 @@ test "@sin f80/f128/c_longdouble" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testSin(f80); comptime try testSin(f80); @@ -443,6 +466,7 @@ test "@sin with vectors" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testSinWithVectors(); try comptime testSinWithVectors(); @@ -464,6 +488,7 @@ test "@cos f16" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testCos(f16); try comptime testCos(f16); @@ -475,6 +500,7 @@ test "@cos f32/f64" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testCos(f32); try comptime testCos(f32); @@ -488,6 +514,7 @@ test "@cos f80/f128/c_longdouble" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testCos(f80); try comptime testCos(f80); @@ -515,6 +542,7 @@ test "@cos with vectors" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testCosWithVectors(); try comptime testCosWithVectors(); @@ -536,6 +564,7 @@ test "@tan f16" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testTan(f16); try comptime testTan(f16); @@ -547,6 +576,7 @@ test "@tan f32/f64" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testTan(f32); try 
comptime testTan(f32); @@ -560,6 +590,7 @@ test "@tan f80/f128/c_longdouble" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testTan(f80); try comptime testTan(f80); @@ -587,6 +618,7 @@ test "@tan with vectors" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testTanWithVectors(); try comptime testTanWithVectors(); @@ -608,6 +640,7 @@ test "@exp f16" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testExp(f16); try comptime testExp(f16); @@ -619,6 +652,7 @@ test "@exp f32/f64" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testExp(f32); try comptime testExp(f32); @@ -632,6 +666,7 @@ test "@exp f80/f128/c_longdouble" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testExp(f80); try comptime testExp(f80); @@ -663,6 +698,7 @@ test "@exp with vectors" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testExpWithVectors(); try comptime testExpWithVectors(); @@ -684,6 +720,7 @@ test "@exp2 f16" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testExp2(f16); try comptime testExp2(f16); @@ -695,6 +732,7 @@ test "@exp2 f32/f64" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testExp2(f32); try comptime testExp2(f32); @@ -708,6 +746,7 @@ test "@exp2 f80/f128/c_longdouble" { if 
(builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testExp2(f80); try comptime testExp2(f80); @@ -734,6 +773,7 @@ test "@exp2 with @vectors" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testExp2WithVectors(); try comptime testExp2WithVectors(); @@ -755,6 +795,7 @@ test "@log f16" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testLog(f16); try comptime testLog(f16); @@ -766,6 +807,7 @@ test "@log f32/f64" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testLog(f32); try comptime testLog(f32); @@ -779,6 +821,7 @@ test "@log f80/f128/c_longdouble" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testLog(f80); try comptime testLog(f80); @@ -806,6 +849,7 @@ test "@log with @vectors" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; { var v: @Vector(4, f32) = [_]f32{ 1.1, 2.2, 0.3, 0.4 }; @@ -824,6 +868,7 @@ test "@log2 f16" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testLog2(f16); try comptime testLog2(f16); @@ -835,6 +880,7 @@ test "@log2 f32/f64" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testLog2(f32); try comptime testLog2(f32); @@ -848,6 +894,7 @@ test "@log2 f80/f128/c_longdouble" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if 
(builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testLog2(f80); try comptime testLog2(f80); @@ -873,6 +920,7 @@ test "@log2 with vectors" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/13681 if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64 and @@ -899,6 +947,7 @@ test "@log10 f16" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testLog10(f16); try comptime testLog10(f16); @@ -910,6 +959,7 @@ test "@log10 f32/f64" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testLog10(f32); try comptime testLog10(f32); @@ -923,6 +973,7 @@ test "@log10 f80/f128/c_longdouble" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testLog10(f80); try comptime testLog10(f80); @@ -949,6 +1000,7 @@ test "@log10 with vectors" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testLog10WithVectors(); try comptime testLog10WithVectors(); @@ -969,6 +1021,7 @@ test "@abs f16" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testFabs(f16); try comptime testFabs(f16); @@ -978,6 +1031,7 @@ test "@abs f32/f64" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testFabs(f32); try comptime testFabs(f32); @@ -992,6 +1046,7 @@ test "@abs f80/f128/c_longdouble" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == 
.stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testFabs(f80); try comptime testFabs(f80); @@ -1069,6 +1124,7 @@ test "@abs with vectors" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testFabsWithVectors(); try comptime testFabsWithVectors(); @@ -1089,6 +1145,7 @@ test "@floor f16" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testFloor(f16); try comptime testFloor(f16); @@ -1099,6 +1156,7 @@ test "@floor f32/f64" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testFloor(f32); try comptime testFloor(f32); @@ -1113,6 +1171,7 @@ test "@floor f80/f128/c_longdouble" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_llvm and builtin.os.tag == .windows) { // https://github.com/ziglang/zig/issues/12602 @@ -1161,6 +1220,7 @@ test "@floor with vectors" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sse4_1)) return error.SkipZigTest; @@ -1184,6 +1244,7 @@ test "@ceil f16" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testCeil(f16); try comptime testCeil(f16); @@ -1195,6 +1256,7 @@ test "@ceil f32/f64" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testCeil(f32); try comptime testCeil(f32); @@ -1209,6 +1271,7 @@ 
test "@ceil f80/f128/c_longdouble" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_llvm and builtin.os.tag == .windows) { // https://github.com/ziglang/zig/issues/12602 @@ -1258,6 +1321,7 @@ test "@ceil with vectors" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sse4_1)) return error.SkipZigTest; @@ -1281,6 +1345,7 @@ test "@trunc f16" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch.isMIPS()) { // https://github.com/ziglang/zig/issues/16846 @@ -1297,6 +1362,7 @@ test "@trunc f32/f64" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch.isMIPS()) { // https://github.com/ziglang/zig/issues/16846 @@ -1316,6 +1382,7 @@ test "@trunc f80/f128/c_longdouble" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_llvm and builtin.os.tag == .windows) { // https://github.com/ziglang/zig/issues/12602 @@ -1365,6 +1432,7 @@ test "@trunc with vectors" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sse4_1)) return error.SkipZigTest; @@ -1389,6 +1457,7 @@ test "neg f16" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.os.tag == .freebsd) { // TODO file issue to track this failure @@ -1405,6 +1474,7 @@ test "neg f32/f64" 
{ if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testNeg(f32); try comptime testNeg(f32); @@ -1419,6 +1489,7 @@ test "neg f80/f128/c_longdouble" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testNeg(f80); try comptime testNeg(f80); @@ -1525,6 +1596,7 @@ test "comptime fixed-width float zero divided by zero produces NaN" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; inline for (.{ f16, f32, f64, f80, f128 }) |F| { try expect(math.isNan(@as(F, 0) / @as(F, 0))); @@ -1584,6 +1656,7 @@ test "comptime inf >= runtime 1" { test "comptime isNan(nan * 1)" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const nan_times_one = comptime std.math.nan(f64) * 1; try std.testing.expect(std.math.isNan(nan_times_one)); @@ -1591,6 +1664,7 @@ test "comptime isNan(nan * 1)" { test "runtime isNan(nan * 1)" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const nan_times_one = std.math.nan(f64) * 1; try std.testing.expect(std.math.isNan(nan_times_one)); @@ -1598,6 +1672,7 @@ test "runtime isNan(nan * 1)" { test "comptime isNan(nan * 0)" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const nan_times_zero = comptime std.math.nan(f64) * 0; try std.testing.expect(std.math.isNan(nan_times_zero)); @@ -1607,6 +1682,7 @@ test "comptime isNan(nan * 0)" { test "runtime isNan(nan * 0)" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const nan_times_zero = std.math.nan(f64) * 0; try std.testing.expect(std.math.isNan(nan_times_zero)); @@ -1616,6 +1692,7 @@ test "runtime isNan(nan * 0)" { test "comptime isNan(inf * 0)" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const inf_times_zero = comptime std.math.inf(f64) * 0; try std.testing.expect(std.math.isNan(inf_times_zero)); @@ -1625,6 +1702,7 @@ test "comptime isNan(inf * 0)" { test "runtime isNan(inf * 0)" { if (builtin.zig_backend == .stage2_aarch64) return 
error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const inf_times_zero = std.math.inf(f64) * 0; try std.testing.expect(std.math.isNan(inf_times_zero)); diff --git a/test/behavior/fn.zig b/test/behavior/fn.zig index ab7aca6ed6..fc7b1605bf 100644 --- a/test/behavior/fn.zig +++ b/test/behavior/fn.zig @@ -6,6 +6,8 @@ const expect = testing.expect; const expectEqual = testing.expectEqual; test "params" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + try expect(testParamsAdd(22, 11) == 33); } fn testParamsAdd(a: i32, b: i32) i32 { @@ -13,6 +15,8 @@ fn testParamsAdd(a: i32, b: i32) i32 { } test "local variables" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + testLocVars(2); } fn testLocVars(b: i32) void { @@ -21,6 +25,8 @@ fn testLocVars(b: i32) void { } test "mutable local variables" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + var zero: i32 = 0; _ = &zero; try expect(zero == 0); @@ -71,6 +77,7 @@ fn outer(y: u32) *const fn (u32) u32 { test "return inner function which references comptime variable of outer function" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const func = outer(10); try expect(func(3) == 7); @@ -80,6 +87,7 @@ test "discard the result of a function that returns a struct" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn entry() void { @@ -104,6 +112,7 @@ test "inline function call that calls optional function pointer, return pointer if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { field: u32, @@ -181,12 +190,14 @@ test "function with complex callconv and return type expressions" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect(fComplexCallconvRet(3).x == 9); } test "pass by non-copying value" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect(addPointCoords(Point{ .x = 1, .y = 2 }) == 3); } @@ -202,6 +213,7 @@ fn addPointCoords(pt: Point) i32 { test "pass by non-copying value through var arg" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect((try addPointCoordsVar(Point{ .x = 1, .y = 2 })) == 3); } @@ -213,6 +225,7 @@ fn addPointCoordsVar(pt: anytype) !i32 { test "pass by non-copying value as method" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var pt = Point2{ .x = 1, .y = 2 }; try 
expect(pt.addPointCoords() == 3); @@ -229,6 +242,7 @@ const Point2 = struct { test "pass by non-copying value as method, which is generic" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var pt = Point3{ .x = 1, .y = 2 }; try expect(pt.addPointCoords(i32) == 3); @@ -257,6 +271,7 @@ test "implicit cast fn call result to optional in field result" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn entry() !void { @@ -283,6 +298,7 @@ test "implicit cast fn call result to optional in field result" { test "void parameters" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try voidFun(1, void{}, 2, {}); } @@ -309,6 +325,7 @@ test "function pointers" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const fns = [_]*const @TypeOf(fn1){ &fn1, @@ -346,6 +363,7 @@ test "function call with anon list literal" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -366,6 +384,7 @@ test "function call with anon list literal - 2D" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -384,6 +403,8 @@ test "function call with anon list literal - 2D" { } test "ability to give comptime types and non comptime types to same parameter" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + const S = struct { fn doTheTest() !void { var x: i32 = 1; @@ -402,6 +423,8 @@ test "ability to give comptime types and non comptime types to same parameter" { } test "function with inferred error set but returning no error" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + const S = struct { fn foo() !void {} }; @@ -412,6 +435,7 @@ test "function with inferred error set but returning no error" { test "import passed byref to function in return type" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn get() @import("std").ArrayListUnmanaged(i32) { @@ -429,6 +453,7 @@ test "implicit cast function to function ptr" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf 
and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S1 = struct { export fn someFunctionThatReturnsAValue() c_int { @@ -449,6 +474,7 @@ test "implicit cast function to function ptr" { test "method call with optional and error union first param" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { x: i32 = 1234, @@ -468,6 +494,7 @@ test "method call with optional and error union first param" { test "method call with optional pointer first param" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { x: i32 = 1234, @@ -487,6 +514,7 @@ test "using @ptrCast on function pointers" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const A = struct { data: [4]u8 }; @@ -524,6 +552,7 @@ test "function returns function returning type" { test "peer type resolution of inferred error set with non-void payload" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn openDataFile(mode: enum { read, write }) !u32 { @@ -566,6 +595,8 @@ test "lazy values passed to anytype parameter" { } test "pass and return comptime-only types" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + const S = struct { fn returnNull(comptime x: @Type(.Null)) @Type(.Null) { return x; @@ -605,6 +636,8 @@ test "comptime parameters don't have to be marked comptime if only called at com } test "inline function with comptime-known comptime-only return type called at runtime" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + const S = struct { inline fn foo(x: *i32, y: *const i32) type { x.* = y.*; diff --git a/test/behavior/fn_delegation.zig b/test/behavior/fn_delegation.zig index 95dbfeb4b2..6a3d46c15d 100644 --- a/test/behavior/fn_delegation.zig +++ b/test/behavior/fn_delegation.zig @@ -34,6 +34,7 @@ fn custom(comptime T: type, comptime num: u64) fn (T) u64 { test "fn delegation" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const foo = Foo{}; try expect(foo.one() == 11); diff --git a/test/behavior/for.zig b/test/behavior/for.zig index 7614fd4683..200bfd0ce2 100644 --- a/test/behavior/for.zig +++ b/test/behavior/for.zig @@ -7,6 +7,7 @@ const mem = std.mem; test "continue in for loop" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const array = [_]i32{ 1, 2, 3, 4, 5 }; var sum: i32 = 0; @@ -21,6 +22,8 @@ test "continue in for loop" { } test "break from outer for loop" { + if (builtin.zig_backend 
== .stage2_riscv64) return error.SkipZigTest; + try testBreakOuter(); try comptime testBreakOuter(); } @@ -38,6 +41,8 @@ fn testBreakOuter() !void { } test "continue outer for loop" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + try testContinueOuter(); try comptime testContinueOuter(); } @@ -69,6 +74,7 @@ test "basic for loop" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const expected_result = [_]u8{ 9, 8, 7, 6, 0, 1, 2, 3 } ** 3; @@ -112,6 +118,7 @@ test "for with null and T peer types and inferred result location type" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest(slice: []const u8) !void { @@ -132,6 +139,7 @@ test "for with null and T peer types and inferred result location type" { test "2 break statements and an else" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn entry(t: bool, f: bool) !void { @@ -153,6 +161,7 @@ test "for loop with pointer elem var" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const source = "abcdefg"; var target: [source.len]u8 = undefined; @@ -179,6 +188,7 @@ fn mangleString(s: []u8) void { test "for copies its payload" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -198,6 +208,7 @@ test "for on slice with allowzero ptr" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest(slice: []const u8) !void { @@ -213,6 +224,7 @@ test "for on slice with allowzero ptr" { test "else continue outer for" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var i: usize = 6; var buf: [5]u8 = undefined; @@ -226,6 +238,7 @@ test "else continue outer for" { test "for loop with else branch" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; { var x = [_]u32{ 1, 2 }; @@ -250,6 +263,7 @@ test "for loop with else branch" { test "count over fixed range" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // 
TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var sum: usize = 0; for (0..6) |i| { @@ -262,6 +276,7 @@ test "count over fixed range" { test "two counters" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var sum: usize = 0; for (0..10, 10..20) |i, j| { @@ -275,6 +290,7 @@ test "two counters" { test "1-based counter and ptr to array" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var ok: usize = 0; @@ -308,6 +324,7 @@ test "slice and two counters, one is offset and one is runtime" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const slice: []const u8 = "blah"; var start: usize = 0; @@ -337,6 +354,7 @@ test "two slices, one captured by-ref" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var buf: [10]u8 = undefined; const slice1: []const u8 = "blah"; @@ -356,6 +374,7 @@ test "raw pointer and slice" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var buf: [10]u8 = undefined; const slice: []const u8 = "blah"; @@ -375,6 +394,7 @@ test "raw pointer and counter" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var buf: [10]u8 = undefined; const ptr: [*]u8 = &buf; @@ -393,6 +413,7 @@ test "inline for with slice as the comptime-known" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const comptime_slice = "hello"; var runtime_i: usize = 3; @@ -424,6 +445,7 @@ test "inline for with counter as the comptime-known" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var runtime_slice = "hello"; var runtime_i: usize = 3; @@ -456,6 +478,7 @@ test "inline for on tuple pointer" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO 
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { u32, u32, u32 }; var s: S = .{ 100, 200, 300 }; @@ -471,6 +494,7 @@ test "ref counter that starts at zero" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; for ([_]usize{ 0, 1, 2 }, 0..) |i, j| { try expectEqual(i, j); @@ -486,6 +510,7 @@ test "inferred alloc ptr of for loop" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; { var cond = false; diff --git a/test/behavior/generics.zig b/test/behavior/generics.zig index 9786ea5d06..46c5babdf1 100644 --- a/test/behavior/generics.zig +++ b/test/behavior/generics.zig @@ -5,6 +5,8 @@ const expect = testing.expect; const expectEqual = testing.expectEqual; test "one param, explicit comptime" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + var x: usize = 0; x += checkSize(i32); x += checkSize(bool); @@ -19,6 +21,7 @@ fn checkSize(comptime T: type) usize { test "simple generic fn" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect(max(i32, 3, -1) == 3); try expect(max(u8, 1, 100) == 100); @@ -55,6 +58,7 @@ test "fn with comptime args" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect(gimmeTheBigOne(1234, 5678) == 5678); try expect(shouldCallSameInstance(34, 12) == 34); @@ -65,6 +69,7 @@ test "anytype params" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect(max_i32(12, 34) == 34); try expect(max_f64(1.2, 3.4) == 3.4); @@ -89,6 +94,7 @@ fn max_f64(a: f64, b: f64) f64 { test "type constructed by comptime function call" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var l: SimpleList(10) = undefined; l.array[0] = 10; @@ -113,6 +119,7 @@ test "function with return type type" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var list: List(i32) = undefined; var list2: List(i32) = undefined; @@ -144,6 +151,8 @@ fn 
GenericDataThing(comptime count: isize) type { } test "use generic param in generic param" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + try expect(aGenericFn(i32, 3, 4) == 7); } fn aGenericFn(comptime T: type, comptime a: T, b: T) T { @@ -154,6 +163,7 @@ test "generic fn with implicit cast" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect(getFirstByte(u8, &[_]u8{13}) == 13); try expect(getFirstByte(u16, &[_]u16{ @@ -172,6 +182,7 @@ test "generic fn keeps non-generic parameter types" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const A = 128; @@ -247,6 +258,7 @@ test "generic function instantiation turns into comptime call" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -280,6 +292,7 @@ test "generic function instantiation turns into comptime call" { test "generic function with void and comptime parameter" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { x: i32 }; const namespace = struct { @@ -296,6 +309,7 @@ test "generic function with void and comptime parameter" { test "anonymous struct return type referencing comptime parameter" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { pub fn extraData(comptime T: type, index: usize) struct { data: T, end: usize } { @@ -314,6 +328,7 @@ test "generic function instantiation non-duplicates" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.os.tag == .wasi) return error.SkipZigTest; const S = struct { @@ -385,6 +400,7 @@ test "extern function used as generic parameter" { test "generic struct as parameter type" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest(comptime Int: type, thing: struct { int: Int }) !void { @@ -399,6 +415,8 @@ test "generic struct as parameter type" { } test "slice as parameter type" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + const S = struct { fn internComptimeString(comptime str: []const u8) *const []const u8 { return &struct { @@ -423,6 +441,7 @@ test "null sentinel pointer passed as generic argument" { test "generic function passed as comptime argument" { if 
(builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doMath(comptime f: fn (type, i32, i32) error{Overflow}!i32, a: i32, b: i32) !void { @@ -435,6 +454,7 @@ test "generic function passed as comptime argument" { test "return type of generic function is function pointer" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn b(comptime T: type) ?*const fn () error{}!T { @@ -447,6 +467,7 @@ test "return type of generic function is function pointer" { test "coerced function body has inequal value with its uncoerced body" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const A = B(i32, c); @@ -496,6 +517,7 @@ test "union in struct captures argument" { test "function argument tuple used as struct field" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn DeleagateWithContext(comptime Function: type) type { @@ -530,6 +552,7 @@ test "call generic function with from function called by the generic function" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64 and builtin.os.tag == .windows) return error.SkipZigTest; @@ -572,6 +595,7 @@ fn StructCapture(comptime T: type) type { test "call generic function that uses capture from function declaration's scope" { if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = StructCapture(f64); const s = S.foo(123); diff --git a/test/behavior/globals.zig b/test/behavior/globals.zig index 89dc20c5c7..b832323323 100644 --- a/test/behavior/globals.zig +++ b/test/behavior/globals.zig @@ -7,6 +7,7 @@ test "store to global array" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect(pos[1] == 0.0); pos = [2]f32{ 0.0, 1.0 }; @@ -18,6 +19,7 @@ test "store to global vector" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect(vpos[1] == 0.0); vpos = @Vector(2, f32){ 0.0, 1.0 }; @@ -28,6 +30,7 @@ test "slices pointing at the same address as global array." 
{ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const a = [_]u8{ 1, 2, 3 }; @@ -47,6 +50,7 @@ test "global loads can affect liveness" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const ByRef = struct { diff --git a/test/behavior/hasdecl.zig b/test/behavior/hasdecl.zig index 7eeba80f3e..71f9200b27 100644 --- a/test/behavior/hasdecl.zig +++ b/test/behavior/hasdecl.zig @@ -12,6 +12,8 @@ const Bar = struct { }; test "@hasDecl" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + try expect(@hasDecl(Foo, "public_thing")); try expect(!@hasDecl(Foo, "private_thing")); try expect(!@hasDecl(Foo, "no_thing")); @@ -22,6 +24,8 @@ test "@hasDecl" { } test "@hasDecl using a sliced string literal" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + try expect(@hasDecl(@This(), "std") == true); try expect(@hasDecl(@This(), "std"[0..0]) == false); try expect(@hasDecl(@This(), "std"[0..1]) == false); diff --git a/test/behavior/if.zig b/test/behavior/if.zig index 69ad917e6a..61a5fc8f1b 100644 --- a/test/behavior/if.zig +++ b/test/behavior/if.zig @@ -45,6 +45,7 @@ var global_with_err: anyerror!u32 = error.SomeError; test "unwrap mutable global var" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (global_with_val) |v| { try expect(v == 0); @@ -82,6 +83,7 @@ test "const result loc, runtime if cond, else unreachable" { test "if copies its payload" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -118,6 +120,7 @@ test "if peer expressions inferred optional type" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var self: []const u8 = "abcdef"; var index: usize = 0; @@ -136,6 +139,7 @@ test "if-else expression with runtime condition result location is inferred opti if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const A = struct { b: u64, c: u64 }; var d: bool = true; @@ -145,6 +149,8 @@ test "if-else expression with runtime condition result location is inferred opti } test "result location with inferred type ends up being pointer to comptime_int" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + var a: ?u32 = 1234; var b: u32 = 2000; _ = .{ &a, &b }; @@ -173,6 +179,8 @@ fn returnTrue() bool { } test "if value shouldn't be load-elided if used later 
(structs)" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + const Foo = struct { x: i32 }; var a = Foo{ .x = 1 }; @@ -190,6 +198,8 @@ test "if value shouldn't be load-elided if used later (structs)" { } test "if value shouldn't be load-elided if used later (optionals)" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + var a: ?i32 = 1; var b: ?i32 = 1; diff --git a/test/behavior/import.zig b/test/behavior/import.zig index c2bb39983d..befaea9cae 100644 --- a/test/behavior/import.zig +++ b/test/behavior/import.zig @@ -6,18 +6,21 @@ const a_namespace = @import("import/a_namespace.zig"); test "call fn via namespace lookup" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect(@as(i32, 1234) == a_namespace.foo()); } test "importing the same thing gives the same import" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect(@import("std") == @import("std")); } test "import in non-toplevel scope" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { usingnamespace @import("import/a_namespace.zig"); @@ -27,6 +30,7 @@ test "import in non-toplevel scope" { test "import empty file" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; _ = @import("import/empty.zig"); } diff --git a/test/behavior/import_c_keywords.zig b/test/behavior/import_c_keywords.zig index 3ef952c9e6..9029dca31d 100644 --- a/test/behavior/import_c_keywords.zig +++ b/test/behavior/import_c_keywords.zig @@ -33,6 +33,7 @@ test "import c keywords" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try std.testing.expect(int == .c_keyword_variable); try std.testing.expect(long == .c_keyword_variable); diff --git a/test/behavior/incomplete_struct_param_tld.zig b/test/behavior/incomplete_struct_param_tld.zig index 4edf974dab..485156de04 100644 --- a/test/behavior/incomplete_struct_param_tld.zig +++ b/test/behavior/incomplete_struct_param_tld.zig @@ -23,6 +23,7 @@ fn foo(a: A) i32 { test "incomplete struct param top level declaration" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const a = A{ .b = B{ diff --git a/test/behavior/inline_switch.zig b/test/behavior/inline_switch.zig index 59dc7096b9..444697b091 100644 --- a/test/behavior/inline_switch.zig +++ b/test/behavior/inline_switch.zig @@ -5,6 +5,7 @@ const builtin = @import("builtin"); test "inline scalar prongs" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var x: usize = 0; switch (x) { @@ -20,6 +21,7 @@ test "inline scalar prongs" { test "inline prong ranges" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return 
error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var x: usize = 0; _ = &x; @@ -35,6 +37,7 @@ const E = enum { a, b, c, d }; test "inline switch enums" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var x: E = .a; _ = &x; @@ -49,6 +52,7 @@ test "inline switch unions" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var x: U = .a; _ = &x; @@ -75,6 +79,7 @@ test "inline switch unions" { test "inline else bool" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var a = true; _ = &a; @@ -87,6 +92,7 @@ test "inline else bool" { test "inline else error" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const Err = error{ a, b, c }; var a = Err.a; @@ -100,6 +106,7 @@ test "inline else error" { test "inline else enum" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const E2 = enum(u8) { a = 2, b = 3, c = 4, d = 5 }; var a: E2 = .a; @@ -113,6 +120,7 @@ test "inline else enum" { test "inline else int with gaps" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var a: u8 = 0; _ = &a; @@ -131,6 +139,7 @@ test "inline else int with gaps" { test "inline else int all values" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var a: u2 = 0; _ = &a; diff --git a/test/behavior/int128.zig b/test/behavior/int128.zig index 6d7b54ea31..544b38fca6 100644 --- a/test/behavior/int128.zig +++ b/test/behavior/int128.zig @@ -9,6 +9,7 @@ test "uint128" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var buff: u128 = maxInt(u128); try expect(buff == maxInt(u128)); @@ -28,6 +29,7 @@ test "undefined 128 bit int" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; @setRuntimeSafety(true); @@ -47,6 +49,7 @@ test "int128" { if (builtin.zig_backend == .stage2_arm) return 
error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var buff: i128 = -1; try expect(buff < 0 and (buff + 1) == 0); @@ -70,6 +73,7 @@ test "truncate int128" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; { var buff: u128 = maxInt(u128); @@ -93,6 +97,7 @@ test "shift int128" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const types = .{ u128, i128 }; inline for (types) |t| { diff --git a/test/behavior/int_comparison_elision.zig b/test/behavior/int_comparison_elision.zig index 28102ef295..2e25cef8f8 100644 --- a/test/behavior/int_comparison_elision.zig +++ b/test/behavior/int_comparison_elision.zig @@ -4,6 +4,8 @@ const maxInt = std.math.maxInt; const builtin = @import("builtin"); test "int comparison elision" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + testIntEdges(u0); testIntEdges(i0); testIntEdges(u1); diff --git a/test/behavior/int_div.zig b/test/behavior/int_div.zig index c3c32f1e9a..e9aac82240 100644 --- a/test/behavior/int_div.zig +++ b/test/behavior/int_div.zig @@ -6,6 +6,7 @@ test "integer division" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testDivision(); try comptime testDivision(); @@ -97,6 +98,7 @@ test "large integer division" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; { var numerator: u256 = 99999999999999999997315645440; diff --git a/test/behavior/ir_block_deps.zig b/test/behavior/ir_block_deps.zig index a46ad2d8a8..e3bb57cf89 100644 --- a/test/behavior/ir_block_deps.zig +++ b/test/behavior/ir_block_deps.zig @@ -21,6 +21,7 @@ test "ir block deps" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect((foo(1) catch unreachable) == 0); try expect((foo(2) catch unreachable) == 0); diff --git a/test/behavior/lower_strlit_to_vector.zig b/test/behavior/lower_strlit_to_vector.zig index 948d708aa7..79315e7a53 100644 --- a/test/behavior/lower_strlit_to_vector.zig +++ b/test/behavior/lower_strlit_to_vector.zig @@ -6,6 +6,7 @@ test "strlit to vector" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) 
return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const strlit = "0123456789abcdef0123456789ABCDEF"; const vec_from_strlit: @Vector(32, u8) = strlit.*; diff --git a/test/behavior/math.zig b/test/behavior/math.zig index efc698c128..eaef26b804 100644 --- a/test/behavior/math.zig +++ b/test/behavior/math.zig @@ -12,6 +12,7 @@ const math = std.math; test "assignment operators" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var i: u32 = 0; i += 5; @@ -64,6 +65,7 @@ test "@clz" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testClz(); try comptime testClz(); @@ -82,6 +84,7 @@ test "@clz big ints" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testClzBigInts(); try comptime testClzBigInts(); @@ -103,6 +106,7 @@ test "@clz vectors" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testClzVectors(); try comptime testClzVectors(); @@ -146,6 +150,7 @@ test "@ctz" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testCtz(); try comptime testCtz(); @@ -169,6 +174,7 @@ test "@ctz 128-bit integers" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testCtz128(); try comptime testCtz128(); @@ -187,6 +193,7 @@ test "@ctz vectors" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64) { // This regressed with LLVM 14: @@ -229,6 +236,7 @@ test "float equality" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const x: f64 = 0.012; const y: f64 = x + 1.0; @@ -343,6 +351,8 @@ test "comptime_int multi-limb 
partial shift right" { } test "xor" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + try test_xor(); try comptime test_xor(); } @@ -385,6 +395,8 @@ fn comptimeAdd(comptime a: comptime_int, comptime b: comptime_int) comptime_int } test "binary not" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + try expect(comptime x: { break :x ~@as(u16, 0b1010101010101010) == 0b0101010101010101; }); @@ -407,6 +419,7 @@ test "binary not 128-bit" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect(comptime x: { break :x ~@as(u128, 0x55555555_55555555_55555555_55555555) == 0xaaaaaaaa_aaaaaaaa_aaaaaaaa_aaaaaaaa; @@ -430,6 +443,7 @@ test "division" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch.isMIPS()) { // https://github.com/ziglang/zig/issues/16846 @@ -518,6 +532,7 @@ test "division half-precision floats" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testDivisionFP16(); try comptime testDivisionFP16(); @@ -554,6 +569,8 @@ fn mod(comptime T: type, a: T, b: T) T { } test "unsigned wrapping" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + try testUnsignedWrappingEval(maxInt(u32)); try comptime testUnsignedWrappingEval(maxInt(u32)); } @@ -565,6 +582,8 @@ fn testUnsignedWrappingEval(x: u32) !void { } test "signed wrapping" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + try testSignedWrappingEval(maxInt(i32)); try comptime testSignedWrappingEval(maxInt(i32)); } @@ -576,6 +595,8 @@ fn testSignedWrappingEval(x: i32) !void { } test "signed negation wrapping" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + try testSignedNegationWrappingEval(minInt(i16)); try comptime testSignedNegationWrappingEval(minInt(i16)); } @@ -586,6 +607,8 @@ fn testSignedNegationWrappingEval(x: i16) !void { } test "unsigned negation wrapping" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + try testUnsignedNegationWrappingEval(1); try comptime testUnsignedNegationWrappingEval(1); } @@ -598,6 +621,7 @@ fn testUnsignedNegationWrappingEval(x: u16) !void { test "negation wrapping" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expectEqual(@as(u1, 1), negateWrap(u1, 1)); } @@ -611,6 +635,7 @@ fn negateWrap(comptime T: type, x: T) T { test "unsigned 64-bit division" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return 
error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch.isMIPS()) { // https://github.com/ziglang/zig/issues/16846 @@ -644,6 +669,8 @@ test "bit shift a u1" { } test "truncating shift right" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + try testShrTrunc(maxInt(u16)); try comptime testShrTrunc(maxInt(u16)); } @@ -658,6 +685,7 @@ test "f128" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try test_f128(); try comptime test_f128(); @@ -689,6 +717,7 @@ test "128-bit multiplication" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArmOrThumb()) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; { var a: i128 = 3; @@ -715,6 +744,7 @@ test "@addWithOverflow" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; { var a: u8 = 250; @@ -765,6 +795,7 @@ test "@addWithOverflow" { test "small int addition" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var x: u2 = 0; try expect(x == 0); @@ -786,6 +817,7 @@ test "small int addition" { test "basic @mulWithOverflow" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; { var a: u8 = 86; @@ -818,6 +850,7 @@ test "basic @mulWithOverflow" { test "extensive @mulWithOverflow" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; { var a: u5 = 3; @@ -989,6 +1022,8 @@ test "extensive @mulWithOverflow" { } test "@mulWithOverflow bitsize > 32" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + // aarch64 fails on a release build of the compiler. 
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO @@ -1056,6 +1091,7 @@ test "@mulWithOverflow u256" { if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; { const const_lhs: u256 = 8035709466408580321693645878924206181189; @@ -1091,6 +1127,7 @@ test "@subWithOverflow" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; { var a: u8 = 1; @@ -1143,6 +1180,7 @@ test "@shlWithOverflow" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; { var a: u4 = 2; @@ -1250,6 +1288,7 @@ test "quad hex float literal parsing accurate" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const a: f128 = 0x1.1111222233334444555566667777p+0; @@ -1345,6 +1384,8 @@ test "quad hex float literal parsing accurate" { } test "truncating shift left" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + try testShlTrunc(maxInt(u16)); try comptime testShlTrunc(maxInt(u16)); } @@ -1354,6 +1395,8 @@ fn testShlTrunc(x: u16) !void { } test "exact shift left" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + try testShlExact(0b00110101); try comptime testShlExact(0b00110101); @@ -1365,6 +1408,8 @@ fn testShlExact(x: u8) !void { } test "exact shift right" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + try testShrExact(0b10110100); try comptime testShrExact(0b10110100); } @@ -1374,6 +1419,8 @@ fn testShrExact(x: u8) !void { } test "shift left/right on u0 operand" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + const S = struct { fn doTheTest() !void { var x: u0 = 0; @@ -1408,6 +1455,7 @@ test "remainder division" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArmOrThumb()) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_llvm and builtin.os.tag == .windows) { // https://github.com/ziglang/zig/issues/12602 @@ -1446,6 +1494,7 @@ test "float remainder division using @rem" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == 
.stage2_riscv64) return error.SkipZigTest; try comptime frem(f16); try comptime frem(f32); @@ -1489,6 +1538,7 @@ test "float modulo division using @mod" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try comptime fmod(f16); try comptime fmod(f32); @@ -1531,6 +1581,7 @@ test "@round f16" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testRound(f16, 12.0); try comptime testRound(f16, 12.0); @@ -1542,6 +1593,7 @@ test "@round f32/f64" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testRound(f64, 12.0); try comptime testRound(f64, 12.0); @@ -1561,6 +1613,7 @@ test "@round f80" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArmOrThumb()) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testRound(f80, 12.0); try comptime testRound(f80, 12.0); @@ -1573,6 +1626,7 @@ test "@round f128" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArmOrThumb()) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testRound(f128, 12.0); try comptime testRound(f128, 12.0); @@ -1590,6 +1644,7 @@ test "vector integer addition" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -1612,6 +1667,7 @@ test "NaN comparison" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testNanEqNan(f16); try testNanEqNan(f32); @@ -1629,6 +1685,7 @@ test "NaN comparison f80" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and 
builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testNanEqNan(f80); try comptime testNanEqNan(f80); @@ -1651,6 +1708,7 @@ test "vector comparison" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .avx2)) return error.SkipZigTest; @@ -1683,6 +1741,7 @@ test "signed zeros are represented properly" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -1712,6 +1771,7 @@ test "absFloat" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testAbsFloat(); try comptime testAbsFloat(); @@ -1745,6 +1805,7 @@ test "@clz works on both vector and scalar inputs" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var x: u32 = 0x1; _ = &x; diff --git a/test/behavior/maximum_minimum.zig b/test/behavior/maximum_minimum.zig index a6a2e3b8e8..54973a8b3f 100644 --- a/test/behavior/maximum_minimum.zig +++ b/test/behavior/maximum_minimum.zig @@ -9,6 +9,7 @@ test "@max" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -31,6 +32,7 @@ test "@max on vectors" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sse4_1)) return error.SkipZigTest; @@ -63,6 +65,7 @@ test "@min" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -85,6 +88,7 @@ test "@min for vectors" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if 
(builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_x86_64 and
         !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sse4_1)) return error.SkipZigTest;
@@ -120,6 +124,7 @@ test "@min/max for floats" {
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArmOrThumb()) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest(comptime T: type) !void {
@@ -155,6 +160,8 @@ test "@min/@max on lazy values" {
 }
 
 test "@min/@max more than two arguments" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     const x: u32 = 30;
     const y: u32 = 10;
     const z: u32 = 20;
@@ -167,6 +174,7 @@ test "@min/@max more than two vector arguments" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const x: @Vector(2, u32) = .{ 3, 2 };
     const y: @Vector(2, u32) = .{ 4, 1 };
@@ -179,6 +187,7 @@ test "@min/@max notices bounds" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var x: u16 = 20;
     const y = 30;
@@ -198,6 +207,7 @@ test "@min/@max notices vector bounds" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var x: @Vector(2, u16) = .{ 140, 40 };
     const y: @Vector(2, u64) = .{ 5, 100 };
@@ -229,6 +239,7 @@ test "@min/@max notices bounds from types" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var x: u16 = 123;
     var y: u32 = 456;
@@ -251,6 +262,7 @@ test "@min/@max notices bounds from vector types" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var x: @Vector(2, u16) = .{ 30, 67 };
     var y: @Vector(2, u32) = .{ 20, 500 };
@@ -271,6 +283,7 @@ test "@min/@max notices bounds from types when comptime-known value is undef" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var x: u32 = 1_000_000;
     _ = &x;
@@ -291,6 +304,7 @@ test "@min/@max notices bounds from vector types when element of comptime-known
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_x86_64 and
         !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .avx)) return error.SkipZigTest;
@@ -311,6 +325,8 @@ test "@min/@max notices bounds from vector types when element of comptime-known
 }
 
 test "@min/@max of signed and unsigned runtime integers" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     var x: i32 = -1;
     var y: u31 = 1;
     _ = .{ &x, &y };
diff --git a/test/behavior/member_func.zig b/test/behavior/member_func.zig
index bb1e1e1769..1563ad7a4a 100644
--- a/test/behavior/member_func.zig
+++ b/test/behavior/member_func.zig
@@ -31,6 +31,7 @@ test "standard field calls" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try expect(HasFuncs.one(0) == 1);
     try expect(HasFuncs.two(0) == 2);
@@ -75,6 +76,7 @@ test "@field field calls" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try expect(@field(HasFuncs, "one")(0) == 1);
     try expect(@field(HasFuncs, "two")(0) == 2);
diff --git a/test/behavior/memcpy.zig b/test/behavior/memcpy.zig
index fa9203713d..a571b1e2f7 100644
--- a/test/behavior/memcpy.zig
+++ b/test/behavior/memcpy.zig
@@ -7,6 +7,7 @@ test "memcpy and memset intrinsics" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try testMemcpyMemset();
     try comptime testMemcpyMemset();
@@ -28,6 +29,7 @@ test "@memcpy with both operands single-ptr-to-array, one is null-terminated" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try testMemcpyBothSinglePtrArrayOneIsNullTerminated();
     try comptime testMemcpyBothSinglePtrArrayOneIsNullTerminated();
@@ -48,6 +50,7 @@ test "@memcpy dest many pointer" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try testMemcpyDestManyPtr();
     try comptime testMemcpyDestManyPtr();
@@ -70,6 +73,7 @@ test "@memcpy slice" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try testMemcpySlice();
     try comptime testMemcpySlice();
diff --git a/test/behavior/memset.zig b/test/behavior/memset.zig
index 69f890e146..185c6fafe1 100644
--- a/test/behavior/memset.zig
+++ b/test/behavior/memset.zig
@@ -7,6 +7,7 @@ test "@memset on array pointers" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try testMemsetArray();
     try comptime testMemsetArray();
@@ -36,6 +37,7 @@ test "@memset on slices" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try testMemsetSlice();
     try comptime testMemsetSlice();
@@ -71,6 +73,7 @@ test "memset with bool element" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var buf: [5]bool = undefined;
     @memset(&buf, true);
@@ -83,6 +86,7 @@ test "memset with 1-byte struct element" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct { x: bool };
     var buf: [5]S = undefined;
@@ -96,6 +100,7 @@ test "memset with 1-byte array element" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const A = [1]bool;
     var buf: [5]A = undefined;
@@ -109,6 +114,7 @@ test "memset with large array element, runtime known" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const A = [128]u64;
     var buf: [5]A = undefined;
@@ -127,6 +133,7 @@ test "memset with large array element, comptime known" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const A = [128]u64;
     var buf: [5]A = undefined;
@@ -144,6 +151,7 @@ test "@memset provides result type" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct { x: u32 };
@@ -162,6 +170,7 @@ test "zero keys with @memset" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const Keys = struct {
         up: bool,
diff --git a/test/behavior/merge_error_sets.zig b/test/behavior/merge_error_sets.zig
index 492cb27699..b1f7f69d56 100644
--- a/test/behavior/merge_error_sets.zig
+++ b/test/behavior/merge_error_sets.zig
@@ -13,6 +13,7 @@ fn foo() C!void {
 
 test "merge error sets" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     if (foo()) {
         @panic("unexpected");
diff --git a/test/behavior/muladd.zig b/test/behavior/muladd.zig
index 000f3fbd95..7b45cc9b72 100644
--- a/test/behavior/muladd.zig
+++ b/test/behavior/muladd.zig
@@ -6,6 +6,8 @@ const no_x86_64_hardware_fma_support = builtin.zig_backend == .stage2_x86_64 and
     !std.Target.x86.featureSetHas(builtin.cpu.features, .fma);
 
 test "@mulAdd" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     if (no_x86_64_hardware_fma_support) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@@ -37,6 +39,7 @@ test "@mulAdd f16" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try comptime testMulAdd16();
     try testMulAdd16();
@@ -57,6 +60,7 @@ test "@mulAdd f80" {
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArmOrThumb()) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try comptime testMulAdd80();
     try testMulAdd80();
@@ -77,6 +81,7 @@ test "@mulAdd f128" {
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArmOrThumb()) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try comptime testMulAdd128();
     try testMulAdd128();
@@ -109,6 +114,7 @@ test "vector f16" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try comptime vector16();
     try vector16();
@@ -129,6 +135,7 @@ fn vector32() !void {
 
 test "vector f32" {
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
     if (no_x86_64_hardware_fma_support) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@@ -153,6 +160,7 @@ fn vector64() !void {
 
 test "vector f64" {
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
     if (no_x86_64_hardware_fma_support) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@@ -182,6 +190,7 @@ test "vector f80" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArmOrThumb()) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try comptime vector80();
     try vector80();
@@ -208,6 +217,7 @@ test "vector f128" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArmOrThumb()) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try comptime vector128();
     try vector128();
diff --git a/test/behavior/multiple_externs_with_conflicting_types.zig b/test/behavior/multiple_externs_with_conflicting_types.zig
index bd9735aee9..ac13e2bf34 100644
--- a/test/behavior/multiple_externs_with_conflicting_types.zig
+++ b/test/behavior/multiple_externs_with_conflicting_types.zig
@@ -16,6 +16,7 @@ test "call extern function defined with conflicting type" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     @import("conflicting_externs/a.zig").issue529(null);
     issue529(null);
diff --git a/test/behavior/namespace_depends_on_compile_var.zig b/test/behavior/namespace_depends_on_compile_var.zig
index a115f557ab..cd8e42f20d 100644
--- a/test/behavior/namespace_depends_on_compile_var.zig
+++ b/test/behavior/namespace_depends_on_compile_var.zig
@@ -4,6 +4,7 @@ const expect = std.testing.expect;
 
 test "namespace depends on compile var" {
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     if (some_namespace.a_bool) {
         try expect(some_namespace.a_bool);
diff --git a/test/behavior/nan.zig b/test/behavior/nan.zig
index fc5ce4d0f9..e177afa9d0 100644
--- a/test/behavior/nan.zig
+++ b/test/behavior/nan.zig
@@ -26,6 +26,7 @@ test "nan memory equality" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     // signaled
     try testing.expect(mem.eql(u8, mem.asBytes(&snan_u16), mem.asBytes(&snan_f16)));
diff --git a/test/behavior/null.zig b/test/behavior/null.zig
index ffebff6d83..323f47c896 100644
--- a/test/behavior/null.zig
+++ b/test/behavior/null.zig
@@ -32,6 +32,7 @@ test "test maybe object and get a pointer to the inner value" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var maybe_bool: ?bool = true;
@@ -52,6 +53,7 @@ test "maybe return" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try maybeReturnImpl();
     try comptime maybeReturnImpl();
@@ -71,6 +73,7 @@ fn foo(x: ?i32) ?bool {
 test "test null runtime" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try testTestNullRuntime(null);
 }
@@ -82,6 +85,7 @@ fn testTestNullRuntime(x: ?i32) !void {
 test "optional void" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try optionalVoidImpl();
     try comptime optionalVoidImpl();
@@ -105,6 +109,7 @@ const Empty = struct {};
 test "optional struct{}" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     _ = try optionalEmptyStructImpl();
     _ = try comptime optionalEmptyStructImpl();
@@ -130,6 +135,7 @@ test "null with default unwrap" {
 
 test "optional pointer to 0 bit type null value at runtime" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const EmptyStruct = struct {};
     var x: ?*EmptyStruct = null;
@@ -141,6 +147,7 @@ test "if var maybe pointer" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try expect(shouldBeAPlus1(Particle{
         .a = 14,
@@ -184,6 +191,7 @@ test "unwrap optional which is field of global var" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     struct_with_optional.field = null;
     if (struct_with_optional.field) |payload| {
diff --git a/test/behavior/optional.zig b/test/behavior/optional.zig
index f370f324ea..c5fb888bc9 100644
--- a/test/behavior/optional.zig
+++ b/test/behavior/optional.zig
@@ -9,6 +9,7 @@ const expectEqualStrings = std.testing.expectEqualStrings;
 test "passing an optional integer as a parameter" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn entry() bool {
@@ -28,6 +29,7 @@ pub const EmptyStruct = struct {};
 
 test "optional pointer to size zero struct" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var e = EmptyStruct{};
     const o: ?*EmptyStruct = &e;
@@ -58,6 +60,7 @@ fn testNullPtrsEql() !void {
 
 test "optional with zero-bit type" {
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest(comptime ZeroBit: type, comptime zero_bit: ZeroBit) !void {
@@ -110,6 +113,7 @@ test "address of unwrap optional" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         const Foo = struct {
@@ -131,6 +135,7 @@ test "nested optional field in struct" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S2 = struct {
         y: u8,
@@ -149,6 +154,7 @@ test "equality compare optionals and non-optionals" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -206,6 +212,7 @@ test "equality compare optionals and non-optionals" {
 
 test "compare optionals with modified payloads" {
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var lhs: ?bool = false;
     const lhs_payload = &lhs.?;
@@ -233,6 +240,7 @@ test "compare optionals with modified payloads" {
 test "unwrap function call with optional pointer return value" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn entry() !void {
@@ -254,6 +262,7 @@ test "unwrap function call with optional pointer return value" {
 test "nested orelse" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn entry() !void {
@@ -280,6 +289,7 @@ test "nested orelse" {
 test "self-referential struct through a slice of optional" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         const Node = struct {
@@ -316,6 +326,7 @@ test "coerce an anon struct literal to optional struct" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         const Struct = struct {
@@ -335,6 +346,7 @@ test "0-bit child type coerced to optional return ptr result location" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -360,6 +372,7 @@ test "0-bit child type coerced to optional return ptr result location" {
 test "0-bit child type coerced to optional" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -387,6 +400,7 @@ test "array of optional unaligned types" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const Enum = enum { one, two, three };
@@ -423,6 +437,7 @@ test "optional pointer to zero bit optional payload" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const B = struct {
         fn foo(_: *@This()) void {}
@@ -442,6 +457,7 @@ test "optional pointer to zero bit error union payload" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const B = struct {
         fn foo(_: *@This()) void {}
@@ -475,6 +491,7 @@ const NoReturn = struct {
 
 test "optional of noreturn used with if" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     NoReturn.a = 64;
     if (NoReturn.loop()) |_| {
@@ -486,6 +503,7 @@ test "optional of noreturn used with if" {
 
 test "optional of noreturn used with orelse" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     NoReturn.a = 64;
     const val = NoReturn.testOrelse();
@@ -505,6 +523,7 @@ test "alignment of wrapping an optional payload" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         const I = extern struct { x: i128 };
@@ -522,6 +541,7 @@ test "Optional slice size is optimized" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try expect(@sizeOf(?[]u8) == @sizeOf([]u8));
     var a: ?[]const u8 = null;
@@ -535,6 +555,7 @@ test "Optional slice passed to function" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn foo(a: ?[]const u8) !void {
@@ -551,6 +572,7 @@ test "Optional slice passed to function" {
 test "peer type resolution in nested if expressions" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const Thing = struct { n: i32 };
     var a = false;
@@ -578,6 +600,7 @@ test "cast slice to const slice nested in error union and optional" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn inner() !?[]u8 {
@@ -591,6 +614,8 @@ test "cast slice to const slice nested in error union and optional" {
 }
 
 test "variable of optional of noreturn" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     var null_opv: ?noreturn = null;
     _ = &null_opv;
     try std.testing.expectEqual(@as(?noreturn, null), null_opv);
@@ -600,6 +625,7 @@ test "copied optional doesn't alias source" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var opt_x: ?[3]f32 = [_]f32{0.0} ** 3;
diff --git a/test/behavior/packed-struct.zig b/test/behavior/packed-struct.zig
index b194f7ac9e..fa78c023a6 100644
--- a/test/behavior/packed-struct.zig
+++ b/test/behavior/packed-struct.zig
@@ -124,6 +124,7 @@ test "correct sizeOf and offsets in packed structs" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const PStruct = packed struct {
         bool_a: bool,
@@ -192,6 +193,7 @@ test "nested packed structs" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S1 = packed struct { a: u8, b: u8, c: u8 };
@@ -238,6 +240,7 @@ test "regular in irregular packed struct" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const Irregular = packed struct {
         bar: Regular = Regular{},
@@ -258,6 +261,7 @@ test "nested packed struct unaligned" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
     if (native_endian != .little) return error.SkipZigTest; // Byte aligned packed struct field pointers have not been implemented yet
 
     const S1 = packed struct {
@@ -330,6 +334,7 @@ test "byte-aligned field pointer offsets" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         const A = packed struct {
@@ -432,6 +437,7 @@ test "nested packed struct field pointers" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // ubsan unaligned pointer access
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
     if (native_endian != .little) return error.SkipZigTest; // Byte aligned packed struct field pointers have not been implemented yet
 
     const S2 = packed struct {
@@ -469,6 +475,7 @@ test "load pointer from packed struct" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const A = struct {
         index: u16,
@@ -489,6 +496,7 @@ test "@intFromPtr on a packed struct field" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
     if (native_endian != .little) return error.SkipZigTest;
 
     const S = struct {
@@ -512,6 +520,7 @@ test "@intFromPtr on a packed struct field unaligned and nested" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
     if (native_endian != .little) return error.SkipZigTest; // Byte aligned packed struct field pointers have not been implemented yet
 
     const S1 = packed struct {
@@ -618,6 +627,8 @@ test "@intFromPtr on a packed struct field unaligned and nested" {
 }
 
 test "packed struct fields modification" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     // Originally reported at https://github.com/ziglang/zig/issues/16615
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
@@ -646,6 +657,7 @@ test "optional pointer in packed struct" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const T = packed struct { ptr: ?*const u8 };
     var n: u8 = 0;
@@ -661,6 +673,7 @@ test "nested packed struct field access test" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const Vec2 = packed struct {
         x: f32,
@@ -777,6 +790,7 @@ test "nested packed struct field access test" {
 test "nested packed struct at non-zero offset" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const Pair = packed struct(u24) {
         a: u16 = 0,
@@ -810,6 +824,7 @@ test "nested packed struct at non-zero offset 2" {
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO packed structs larger than 64 bits
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         const Pair = packed struct(u40) {
@@ -874,6 +889,7 @@ test "runtime init of unnamed packed struct type" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var z: u8 = 123;
     _ = &z;
@@ -890,6 +906,7 @@ test "packed struct passed to callconv(.C) function" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         const Packed = packed struct {
@@ -938,6 +955,7 @@ test "packed struct initialized in bitcast" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const T = packed struct { val: u8 };
     var val: u8 = 123;
@@ -951,6 +969,7 @@ test "pointer to container level packed struct field" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = packed struct(u32) {
         test_bit: bool,
@@ -975,6 +994,7 @@ test "store undefined to packed result location" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var x: u4 = 0;
     _ = &x;
@@ -995,6 +1015,8 @@ test "bitcast back and forth" {
 }
 
 test "field access of packed struct smaller than its abi size inside struct initialized with rls" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     // Originally reported at https://github.com/ziglang/zig/issues/14200
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
@@ -1013,6 +1035,8 @@ test "field access of packed struct smaller than its abi size inside struct init
 }
 
 test "modify nested packed struct aligned field" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     // Originally reported at https://github.com/ziglang/zig/issues/14632
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
@@ -1043,6 +1067,8 @@ test "modify nested packed struct aligned field" {
 }
 
 test "assigning packed struct inside another packed struct" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     // Originally reported at https://github.com/ziglang/zig/issues/9674
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
@@ -1077,6 +1103,7 @@ test "packed struct used as part of anon decl name" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = packed struct { a: u0 = 0 };
     var a: u8 = 0;
@@ -1099,6 +1126,8 @@ test "packed struct acts as a namespace" {
 }
 
 test "pointer loaded correctly from packed struct" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     const RAM = struct {
         data: [0xFFFF + 1]u8,
         fn new() !@This() {
@@ -1142,6 +1171,7 @@ test "assignment to non-byte-aligned field in packed struct" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const Frame = packed struct {
         num: u20,
@@ -1164,6 +1194,7 @@ test "packed struct field pointer aligned properly" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const Foo = packed struct {
         a: i32,
@@ -1183,6 +1214,7 @@ test "load flag from packed struct in union" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const A = packed struct {
         a: bool,
@@ -1251,6 +1283,7 @@ test "2-byte packed struct argument in C calling convention" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = packed struct(u16) {
         x: u15 = 0,
diff --git a/test/behavior/packed-union.zig b/test/behavior/packed-union.zig
index 55b76e2625..d76f28ae59 100644
--- a/test/behavior/packed-union.zig
+++ b/test/behavior/packed-union.zig
@@ -8,6 +8,7 @@ test "flags in packed union" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try testFlagsInPackedUnion();
     try comptime testFlagsInPackedUnion();
@@ -50,6 +51,7 @@ test "flags in packed union at offset" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try testFlagsInPackedUnionAtOffset();
     try comptime testFlagsInPackedUnionAtOffset();
@@ -98,6 +100,8 @@ fn testFlagsInPackedUnionAtOffset() !void {
 }
 
 test "packed union in packed struct" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     // Originally reported at https://github.com/ziglang/zig/issues/16581
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
@@ -137,6 +141,7 @@ test "packed union initialized with a runtime value" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const Fields = packed struct {
         timestamp: u50,
diff --git a/test/behavior/packed_struct_explicit_backing_int.zig b/test/behavior/packed_struct_explicit_backing_int.zig
index 29b8c4aa9b..35762a1b14 100644
--- a/test/behavior/packed_struct_explicit_backing_int.zig
+++ b/test/behavior/packed_struct_explicit_backing_int.zig
@@ -10,6 +10,7 @@ test "packed struct explicit backing integer" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S1 = packed struct { a: u8, b: u8, c: u8 };
diff --git a/test/behavior/pointers.zig b/test/behavior/pointers.zig
index 27c6403e77..bc45a978e6 100644
--- a/test/behavior/pointers.zig
+++ b/test/behavior/pointers.zig
@@ -6,6 +6,8 @@ const expect = testing.expect;
 const expectError = testing.expectError;
 
 test "dereference pointer" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     try comptime testDerefPtr();
     try testDerefPtr();
 }
@@ -20,6 +22,7 @@ fn testDerefPtr() !void {
 
 test "pointer arithmetic" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var ptr: [*]const u8 = "abcd";
@@ -52,6 +55,7 @@ fn PtrOf(comptime T: type) type {
 
 test "implicit cast single item pointer to C pointer and back" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var y: u8 = 11;
     const x: [*c]u8 = &y;
@@ -68,6 +72,7 @@ test "initialize const optional C pointer to null" {
 
 test "assigning integer to C pointer" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var x: i32 = 0;
     var y: i32 = 1;
@@ -85,6 +90,7 @@ test "assigning integer to C pointer" {
 
 test "C pointer comparison and arithmetic" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -154,6 +160,7 @@ test "implicit casting between C pointer and optional non-C pointer" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var slice: []const u8 = "aoeu";
     _ = &slice;
@@ -170,6 +177,7 @@ test "implicit cast error unions with non-optional to optional pointer" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -197,6 +205,7 @@ test "allowzero pointer and slice" {
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var ptr: [*]allowzero i32 = @ptrFromInt(0);
     const opt_ptr: ?[*]allowzero i32 = ptr;
@@ -216,6 +225,7 @@ test "assign null directly to C pointer and test null equality" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var x: [*c]i32 = null;
     _ = &x;
@@ -283,6 +293,7 @@ test "array initialization types" {
 
 test "null terminated pointer" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -300,6 +311,7 @@ test "null terminated pointer" {
 test "allow any sentinel" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -315,6 +327,7 @@ test "allow any sentinel" {
 test "pointer sentinel with enums" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         const Number = enum {
@@ -337,6 +350,7 @@ test "pointer sentinel with optional element" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -353,6 +367,7 @@ test "pointer sentinel with +inf" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -422,6 +437,7 @@ test "indexing array with sentinel returns correct type" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var s: [:0]const u8 = "abc";
     try testing.expectEqualSlices(u8, "*const u8", @typeName(@TypeOf(&s[0])));
@@ -430,6 +446,7 @@ test "element pointer to slice" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -452,6 +469,7 @@ test "element pointer to slice" {
 test "element pointer arithmetic to slice" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -476,6 +494,7 @@ test "element pointer arithmetic to slice" {
 
 test "array slicing to slice" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -506,6 +525,7 @@ test "ptrCast comptime known slice to C pointer" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const s: [:0]const u8 = "foo";
     var p: [*c]const u8 = @ptrCast(s);
@@ -525,6 +545,7 @@ test "pointer alignment and element type include call expression" {
 
 test "pointer to array has explicit alignment" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         const Base = extern struct { a: u8 };
diff --git a/test/behavior/popcount.zig b/test/behavior/popcount.zig
index 261019c65f..56a2171083 100644
--- a/test/behavior/popcount.zig
+++ b/test/behavior/popcount.zig
@@ -8,6 +8,7 @@ test "@popCount integers" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try comptime testPopCountIntegers();
     try testPopCountIntegers();
@@ -18,6 +19,7 @@ test "@popCount 128bit integer" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     comptime {
         try expect(@popCount(@as(u128, 0b11111111000110001100010000100001000011000011100101010001)) == 24);
@@ -81,6 +83,7 @@ test "@popCount vectors" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try comptime testPopCountVectors();
     try testPopCountVectors();
diff --git a/test/behavior/prefetch.zig b/test/behavior/prefetch.zig
index e98e848393..1f21d23001 100644
--- a/test/behavior/prefetch.zig
+++ b/test/behavior/prefetch.zig
@@ -3,6 +3,7 @@ const std = @import("std");
 
 test "@prefetch()" {
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var a: [2]u32 = .{ 42, 42 };
     var a_len = a.len;
diff --git a/test/behavior/ptrcast.zig b/test/behavior/ptrcast.zig
index 11afc9474a..4d191ce582 100644
--- a/test/behavior/ptrcast.zig
+++ b/test/behavior/ptrcast.zig
@@ -58,6 +58,7 @@ fn testReinterpretStructWrappedBytesAsInteger() !void {
 test "reinterpret bytes of an array into an extern struct" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try testReinterpretBytesAsExternStruct();
     try comptime testReinterpretBytesAsExternStruct();
@@ -175,6 +176,7 @@ test "reinterpret struct field at comptime" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const numNative = comptime Bytes.init(0x12345678);
     if (native_endian != .little) {
@@ -232,6 +234,7 @@ test "implicit optional pointer to optional anyopaque pointer" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var buf: [4]u8 = "aoeu".*;
     const x: ?[*]u8 = &buf;
@@ -244,6 +247,7 @@ test "@ptrCast slice to slice" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn foo(slice: []u32) []i32 {
diff --git a/test/behavior/ptrfromint.zig b/test/behavior/ptrfromint.zig
index 89706be891..5e4c6175c3 100644
--- a/test/behavior/ptrfromint.zig
+++ b/test/behavior/ptrfromint.zig
@@ -3,6 +3,8 @@ const builtin = @import("builtin");
 const expectEqual = std.testing.expectEqual;
 
 test "casting integer address to function pointer" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     addressToFunction();
     comptime addressToFunction();
 }
@@ -17,6 +19,7 @@ test "mutate through ptr initialized with constant ptrFromInt value" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     forceCompilerAnalyzeBranchHardCodedPtrDereference(false);
 }
@@ -34,6 +37,7 @@ test "@ptrFromInt creates null pointer" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const ptr = @as(?*u32, @ptrFromInt(0));
     try expectEqual(@as(?*u32, null), ptr);
@@ -43,6 +47,7 @@ test "@ptrFromInt creates allowzero zero pointer" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const ptr = @as(*allowzero u32, @ptrFromInt(0));
     try expectEqual(@as(usize, 0), @intFromPtr(ptr));
diff --git a/test/behavior/pub_enum.zig b/test/behavior/pub_enum.zig
index c0935b78be..c749c82efe 100644
--- a/test/behavior/pub_enum.zig
+++ b/test/behavior/pub_enum.zig
@@ -4,6 +4,7 @@ const expect = @import("std").testing.expect;
 
 test "pub enum" {
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try pubEnumTest(other.APubEnum.Two);
 }
@@ -13,6 +14,7 @@ fn pubEnumTest(foo: other.APubEnum) !void {
 
 test "cast with imported symbol" {
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try expect(@as(other.size_t, 42) == 42);
 }
diff --git a/test/behavior/ref_var_in_if_after_if_2nd_switch_prong.zig b/test/behavior/ref_var_in_if_after_if_2nd_switch_prong.zig
index bb6d5b1359..366730424a 100644
--- a/test/behavior/ref_var_in_if_after_if_2nd_switch_prong.zig
+++ b/test/behavior/ref_var_in_if_after_if_2nd_switch_prong.zig
@@ -8,6 +8,7 @@ test "reference a variable in an if after an if in the 2nd switch prong" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try foo(true, Num.Two, false, "aoeu");
     try expect(!ok);
diff --git a/test/behavior/reflection.zig b/test/behavior/reflection.zig
index aea84bc45a..f07b5a512e 100644
--- a/test/behavior/reflection.zig
+++ b/test/behavior/reflection.zig
@@ -28,6 +28,7 @@ fn dummy(a: bool, b: i32, c: f32) i32 {
 test "reflection: @field" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var f = Foo{
         .one = 42,
diff --git a/test/behavior/return_address.zig b/test/behavior/return_address.zig
index 3e8c18c04a..675e0e6191 100644
--- a/test/behavior/return_address.zig
+++ b/test/behavior/return_address.zig
@@ -10,6 +10,7 @@ test "return address" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     _ = retAddr();
     // TODO: #14938
diff --git a/test/behavior/saturating_arithmetic.zig b/test/behavior/saturating_arithmetic.zig
index 82d10d9540..843b3beaad 100644
--- a/test/behavior/saturating_arithmetic.zig
+++ b/test/behavior/saturating_arithmetic.zig
@@ -9,6 +9,7 @@ test "saturating add" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -57,6 +58,7 @@ test "saturating add 128bit" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -82,6 +84,7 @@ test "saturating subtraction" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -129,6 +132,7 @@ test "saturating subtraction 128bit" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -158,6 +162,7 @@ test "saturating multiplication" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArmOrThumb()) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .wasm32) {
         // https://github.com/ziglang/zig/issues/9660
@@ -203,6 +208,7 @@ test "saturating shift-left" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -243,6 +249,7 @@ test "saturating shl uses the LHS type" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const lhs_const: u8 = 1;
     var lhs_var: u8 = 1;
diff --git a/test/behavior/select.zig b/test/behavior/select.zig
index 2396d8bb11..90166dcfe5 100644
--- a/test/behavior/select.zig
+++ b/test/behavior/select.zig
@@ -9,6 +9,7 @@ test "@select vectors" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try comptime selectVectors();
     try selectVectors();
@@ -39,6 +40,7 @@ test "@select arrays" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_x86_64 and
         !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .avx2)) return error.SkipZigTest;
diff --git a/test/behavior/shuffle.zig b/test/behavior/shuffle.zig
index 58c925b8fd..fb16f3fbb3 100644
--- a/test/behavior/shuffle.zig
+++ b/test/behavior/shuffle.zig
@@ -7,6 +7,7 @@ test "@shuffle int" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_x86_64 and
         !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .ssse3)) return error.SkipZigTest;
@@ -54,6 +55,7 @@ test "@shuffle bool 1" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     if (builtin.zig_backend == .stage2_llvm and
         builtin.cpu.arch == .aarch64 and
         builtin.os.tag == .windows)
@@ -83,6 +85,7 @@ test "@shuffle bool 2" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     if (builtin.zig_backend == .stage2_llvm) {
         // https://github.com/ziglang/zig/issues/3246
diff --git a/test/behavior/sizeof_and_typeof.zig b/test/behavior/sizeof_and_typeof.zig
index b6206df491..c050487779 100644
--- a/test/behavior/sizeof_and_typeof.zig
+++ b/test/behavior/sizeof_and_typeof.zig
@@ -81,6 +81,7 @@ const P = packed struct {
 test "@offsetOf" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     // Packed structs have fixed memory layout
     try expect(@offsetOf(P, "a") == 0);
@@ -143,6 +144,8 @@ test "@sizeOf(T) == 0 doesn't force resolving struct size" {
 }
 
 test "@TypeOf() has no runtime side effects" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     const S = struct {
         fn foo(comptime T: type, ptr: *T) T {
             ptr.* += 1;
@@ -157,6 +160,7 @@ test "@TypeOf() has no runtime side effects" {
 
 test "branching logic inside @TypeOf" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         var data: i32 = 0;
@@ -271,6 +275,7 @@ test "runtime instructions inside typeof in comptime only scope" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     {
         var y: i8 = 2;
@@ -327,6 +332,7 @@ test "peer type resolution with @TypeOf doesn't trigger dependency loop check" {
     if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const T = struct {
         next: @TypeOf(null, @as(*const @This(), undefined)),
@@ -408,6 +414,7 @@ test "Extern function calls, dereferences and field access in @TypeOf" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const Test = struct {
         fn test_fn_1(a: c_long) @TypeOf(c_fopen("test", "r").*) {
@@ -431,6 +438,8 @@ test "Extern function calls, dereferences and field access in @TypeOf" {
 }
 
 test "@sizeOf struct is resolved when used as operand of slicing" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     const dummy = struct {};
     const S = struct {
         var buf: [1]u8 = undefined;
diff --git a/test/behavior/slice.zig b/test/behavior/slice.zig
index bf55f4f233..8453ffc451 100644
--- a/test/behavior/slice.zig
+++ b/test/behavior/slice.zig
@@ -30,6 +30,7 @@ comptime {
 
 test "slicing" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var array: [20]i32 = undefined;
@@ -66,6 +67,7 @@ test "comptime slice of undefined pointer of length 0" {
 
 test "implicitly cast array of size 0 to slice" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var msg = [_]u8{};
     try assertLenIsZero(&msg);
@@ -122,6 +124,7 @@ test "slice of type" {
 
 test "generic malloc free" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const a = memAlloc(u8, 10) catch unreachable;
     memFree(u8, a);
@@ -173,6 +176,7 @@ test "comptime pointer cast array and then slice" {
 
 test "slicing zero length array" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const s1 = ""[0..];
     const s2 = ([_]u32{})[0..];
@@ -183,6 +187,8 @@ test "slicing zero length array" {
 }
 
 test "slicing pointer by length" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     const array = [_]u8{ 1, 2, 3, 4, 5, 6, 7, 8 };
     const ptr: [*]const u8 = @as([*]const u8, @ptrCast(&array));
     const slice = ptr[1..][0..5];
@@ -231,6 +237,7 @@ test "runtime safety lets us slice from len..len" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var an_array = [_]u8{ 1, 2, 3 };
     try expect(mem.eql(u8, sliceFromLenToLen(an_array[0..], 3, 3), ""));
@@ -243,6 +250,7 @@ fn sliceFromLenToLen(a_slice: []u8, start: usize, end: usize) []u8 {
 
 test "C pointer" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var buf: [*c]const u8 = "kjdhfkjdhfdkjhfkfjhdfkjdhfkdjhfdkjhf";
     var len: u32 = 10;
@@ -255,6 +263,7 @@ test "C pointer slice access" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var buf: [10]u32 = [1]u32{42} ** 10; const c_ptr = @as([*c]const u32, @ptrCast(&buf)); @@ -285,6 +294,7 @@ fn sliceSum(comptime q: []const u8) i32 { test "slice type with custom alignment" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const LazilyResolvedType = struct { anything: i32, @@ -298,6 +308,7 @@ test "slice type with custom alignment" { test "obtaining a null terminated slice" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // here we have a normal array var buf: [50]u8 = undefined; @@ -342,6 +353,7 @@ test "empty array to slice" { test "@ptrCast slice to pointer" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -357,6 +369,8 @@ test "@ptrCast slice to pointer" { } test "slice multi-pointer without end" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + const S = struct { fn doTheTest() !void { try testPointer(); @@ -394,6 +408,7 @@ test "slice syntax resulting in pointer-to-array" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -612,6 +627,7 @@ test "slice syntax resulting in pointer-to-array" { test "slice pointer-to-array null terminated" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; comptime { var array = [5:0]u8{ 1, 2, 3, 4, 5 }; @@ -630,6 +646,7 @@ test "slice pointer-to-array null terminated" { test "slice pointer-to-array zero length" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; comptime { { @@ -664,6 +681,7 @@ test "type coercion of pointer to anon struct literal to pointer to slice" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const U = union { @@ -720,6 +738,7 @@ test "array mult of slice gives ptr to array" { test "slice bounds in comptime concatenation" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const bs = comptime blk: { const b = "........1........"; @@ -755,6 +774,7 @@ test "slice sentinel access at comptime" { test "slicing array with sentinel as end index" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if 
(builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn do() !void { @@ -773,6 +793,7 @@ test "slicing array with sentinel as end index" { test "slicing slice with sentinel as end index" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn do() !void { @@ -831,6 +852,7 @@ test "global slice field access" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { var slice: []const u8 = undefined; @@ -842,6 +864,8 @@ test "global slice field access" { } test "slice of void" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + var n: usize = 10; _ = &n; var arr: [12]void = undefined; @@ -850,6 +874,8 @@ test "slice of void" { } test "slice with dereferenced value" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + var a: usize = 0; const idx: *usize = &a; _ = blk: { @@ -884,6 +910,7 @@ test "empty slice ptr is non null" { test "slice decays to many pointer" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var buf: [8]u8 = "abcdefg\x00".*; const p: [*:0]const u8 = buf[0..7 :0]; @@ -894,6 +921,7 @@ test "write through pointer to optional slice arg" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn bar(foo: *?[]const u8) !void { @@ -913,6 +941,7 @@ test "modify slice length at comptime" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const arr: [2]u8 = .{ 10, 20 }; comptime var s: []const u8 = arr[0..0]; @@ -930,6 +959,7 @@ test "slicing zero length array field of struct" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { a: [0]usize, @@ -945,6 +975,7 @@ test "slicing slices gives correct result" { if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const foo = "1234"; const bar = foo[0..4]; @@ -959,6 +990,7 @@ test "get address of element of zero-sized slice" { if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == 
.stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn destroy(_: *void) void {} @@ -972,6 +1004,7 @@ test "sentinel-terminated 0-length slices" { if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const u32s: [4]u32 = [_]u32{ 0, 1, 2, 3 }; diff --git a/test/behavior/src.zig b/test/behavior/src.zig index ebf6ab06b0..7c2b377d5b 100644 --- a/test/behavior/src.zig +++ b/test/behavior/src.zig @@ -17,6 +17,7 @@ test "@src" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try doTheTest(); } @@ -37,6 +38,8 @@ test "@src used as a comptime parameter" { } test "@src in tuple passed to anytype function" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + const S = struct { fn Foo(a: anytype) u32 { return a[0].line; diff --git a/test/behavior/string_literals.zig b/test/behavior/string_literals.zig index 01f285bf0c..1dba4c1a7f 100644 --- a/test/behavior/string_literals.zig +++ b/test/behavior/string_literals.zig @@ -8,6 +8,7 @@ const ptr_tag_name: [*:0]const u8 = tag_name; test "@tagName() returns a string literal" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try std.testing.expect(*const [13:0]u8 == @TypeOf(tag_name)); try std.testing.expect(std.mem.eql(u8, "TestEnumValue", tag_name)); @@ -21,6 +22,7 @@ const ptr_error_name: [*:0]const u8 = error_name; test "@errorName() returns a string literal" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try std.testing.expect(*const [13:0]u8 == @TypeOf(error_name)); try std.testing.expect(std.mem.eql(u8, "TestErrorCode", error_name)); @@ -34,6 +36,7 @@ const ptr_type_name: [*:0]const u8 = type_name; test "@typeName() returns a string literal" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try std.testing.expect(*const [type_name.len:0]u8 == @TypeOf(type_name)); try std.testing.expect(std.mem.eql(u8, "behavior.string_literals.TestType", type_name)); @@ -47,6 +50,7 @@ const expected_contents = "hello zig\n"; test "@embedFile() returns a string literal" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try std.testing.expect(*const [expected_contents.len:0]u8 == @TypeOf(actual_contents)); try std.testing.expect(std.mem.eql(u8, expected_contents, actual_contents)); @@ -61,6 +65,7 @@ fn testFnForSrc() std.builtin.SourceLocation { test "@src() 
returns a struct containing 0-terminated string slices" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const src = testFnForSrc(); try std.testing.expect([:0]const u8 == @TypeOf(src.file)); diff --git a/test/behavior/struct.zig b/test/behavior/struct.zig index 5ab3b0d38d..4312612141 100644 --- a/test/behavior/struct.zig +++ b/test/behavior/struct.zig @@ -12,6 +12,7 @@ top_level_field: i32, test "top level fields" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var instance = @This(){ .top_level_field = 1234, @@ -68,6 +69,7 @@ const SmallStruct = struct { test "lower unnamed constants" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var foo = SmallStruct{ .a = 1, .b = 255 }; try expect(foo.first() == 1); @@ -91,6 +93,7 @@ test "structs" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var foo: StructFoo = undefined; @memset(@as([*]u8, @ptrCast(&foo))[0..@sizeOf(StructFoo)], 0); @@ -109,6 +112,7 @@ fn testMutation(foo: *StructFoo) void { test "struct byval assign" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var foo1: StructFoo = undefined; var foo2: StructFoo = undefined; @@ -121,6 +125,8 @@ test "struct byval assign" { } test "call struct static method" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + const result = StructWithNoFields.add(3, 4); try expect(result == 7); } @@ -173,6 +179,7 @@ const MemberFnTestFoo = struct { test "call member function directly" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const instance = MemberFnTestFoo{ .x = 1234 }; const result = MemberFnTestFoo.member(instance); @@ -181,6 +188,7 @@ test "call member function directly" { test "store member function in variable" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const instance = MemberFnTestFoo{ .x = 1234 }; const memberFn = MemberFnTestFoo.member; @@ -202,6 +210,7 @@ const MemberFnRand = struct { test "return struct byval from function" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const Bar = struct { x: i32, @@ -250,6 +259,7 @@ test "usingnamespace within struct scope" { test "struct field init with catch" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -293,6 +303,7 @@ const Val = struct { 
test "struct point to self" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var root: Node = undefined; root.val.x = 1; @@ -347,6 +358,7 @@ test "self-referencing struct via array member" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const T = struct { children: [1]*@This(), @@ -369,6 +381,7 @@ const EmptyStruct = struct { test "align 1 field before self referential align 8 field as slice return type" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const result = alloc(Expr); try expect(result.len == 0); @@ -393,6 +406,7 @@ test "packed struct" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var foo = APackedStruct{ .x = 1, @@ -417,6 +431,7 @@ test "packed struct 24bits" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.cpu.arch == .wasm32) return error.SkipZigTest; // TODO if (comptime builtin.cpu.arch.isArmOrThumb()) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -463,6 +478,7 @@ test "runtime struct initialization of bitfield" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const s1 = Nibbles{ .x = x1, @@ -502,6 +518,7 @@ test "packed struct fields are ordered from LSB to MSB" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var all: u64 = 0x7765443322221111; var bytes: [8]u8 align(@alignOf(Bitfields)) = undefined; @@ -522,6 +539,7 @@ test "implicit cast packed struct field to const ptr" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const LevelUpMove = packed struct { move_id: u9, @@ -557,6 +575,7 @@ test "packed struct with non-ABI-aligned field" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if 
(builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = packed struct { x: u9, @@ -586,6 +605,7 @@ test "bit field access" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var data = bit_field_1; try expect(getA(&data) == 1); @@ -616,6 +636,7 @@ fn getC(data: *const BitField1) u2 { test "default struct initialization fields" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { a: i32 = 1234, @@ -642,6 +663,7 @@ test "packed array 24bits" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; comptime { try expect(@sizeOf([9]Foo32Bits) == 9 * 4); @@ -709,6 +731,7 @@ test "pointer to packed struct member in a stack variable" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = packed struct { a: u2, @@ -736,6 +759,7 @@ test "packed struct with u0 field access" { test "access to global struct fields" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; g_foo.bar.value = 42; try expect(g_foo.bar.value == 42); @@ -761,6 +785,7 @@ test "packed struct with fp fields" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = packed struct { data0: f32, @@ -788,6 +813,7 @@ test "fn with C calling convention returns struct by value" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn entry() !void { @@ -815,6 +841,7 @@ test "non-packed struct with u128 entry in union" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArmOrThumb()) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const U = union(enum) { Num: u128, @@ -863,6 +890,7 @@ test "packed struct field passed to generic function" { test "anonymous struct literal syntax" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; 
// TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const Point = struct { @@ -885,6 +913,8 @@ test "anonymous struct literal syntax" { } test "fully anonymous struct" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + const S = struct { fn doTheTest() !void { try dump(.{ @@ -907,6 +937,8 @@ test "fully anonymous struct" { } test "fully anonymous list literal" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + const S = struct { fn doTheTest() !void { try dump(.{ @as(u32, 1234), @as(f64, 12.34), true, "hi" }); @@ -954,6 +986,7 @@ test "tuple element initialized with fn call" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -972,6 +1005,7 @@ test "struct with union field" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const Value = struct { ref: u32 = 2, @@ -993,6 +1027,7 @@ test "struct with 0-length union array field" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const U = union { a: u32, @@ -1013,6 +1048,7 @@ test "type coercion of anon struct literal to struct" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const S2 = struct { @@ -1052,6 +1088,7 @@ test "type coercion of pointer to anon struct literal to pointer to struct" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const S2 = struct { @@ -1091,6 +1128,7 @@ test "packed struct with undefined initializers" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const P = packed struct { @@ -1120,6 +1158,7 @@ test "packed struct with undefined initializers" { test "for loop over pointers to struct, getting field from struct pointer" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const Foo = struct { @@ -1160,6 +1199,7 @@ test "anon init through error unions 
and optionals" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { a: u32, @@ -1187,6 +1227,7 @@ test "anon init through optional" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { a: u32, @@ -1207,6 +1248,7 @@ test "anon init through error union" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { a: u32, @@ -1226,6 +1268,7 @@ test "typed init through error unions and optionals" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { a: u32, @@ -1260,6 +1303,7 @@ test "initialize struct with empty literal" { test "loading a struct pointer perfoms a copy" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { a: i32, @@ -1288,6 +1332,7 @@ test "packed struct aggregate init" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn foo(a: i2, b: i6) u8 { @@ -1347,6 +1392,7 @@ test "store to comptime field" { test "struct field init value is size of the struct" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const namespace = struct { const S = extern struct { @@ -1364,6 +1410,7 @@ test "under-aligned struct field" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const U = extern union { fd: i32, @@ -1386,6 +1433,7 @@ test "fieldParentPtr of a zero-bit field" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn testStruct(comptime A: type) !void { @@ -1444,6 +1492,8 @@ test "struct field has a pointer to an aligned version of itself" { } test "struct has only one reference" { + if (builtin.zig_backend == 
.stage2_riscv64) return error.SkipZigTest; + const S = struct { fn optionalStructParam(_: ?struct { x: u8 }) void {} fn errorUnionStructParam(_: error{}!struct { x: u8 }) void {} @@ -1493,6 +1543,7 @@ test "no dependency loop on pointer to optional struct" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const A = struct { b: B }; @@ -1514,6 +1565,7 @@ test "discarded struct initialization works as expected" { test "function pointer in struct returns the struct" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const A = struct { const A = @This(); @@ -1549,6 +1601,7 @@ test "no dependency loop on optional field wrapped in generic function" { test "optional field init with tuple" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { a: ?struct { b: u32 }, @@ -1563,6 +1616,7 @@ test "optional field init with tuple" { test "if inside struct init inside if" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const MyStruct = struct { x: u32 }; const b: u32 = 5; @@ -1652,6 +1706,7 @@ test "struct field pointer has correct alignment" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -1682,6 +1737,7 @@ test "extern struct field pointer has correct alignment" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -1719,6 +1775,8 @@ test "extern struct field pointer has correct alignment" { } test "packed struct field in anonymous struct" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + const T = packed struct { f1: bool = false, }; @@ -1730,6 +1788,8 @@ fn countFields(v: anytype) usize { } test "struct init with no result pointer sets field result types" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + const S = struct { // A function parameter has a result type, but no result pointer. 
fn f(s: struct { x: u32 }) u32 { @@ -1744,6 +1804,8 @@ test "struct init with no result pointer sets field result types" { } test "runtime side-effects in comptime-known struct init" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + var side_effects: u4 = 0; const S = struct { a: u4, b: u4, c: u4, d: u4 }; const init = S{ @@ -1769,6 +1831,8 @@ test "runtime side-effects in comptime-known struct init" { } test "pointer to struct initialized through reference to anonymous initializer provides result types" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + const S = struct { a: u8, b: u16, c: *const anyopaque }; var my_u16: u16 = 0xABCD; _ = &my_u16; @@ -1804,6 +1868,8 @@ test "comptimeness of optional and error union payload is analyzed properly" { } test "initializer uses own alignment" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + const S = struct { x: u32 = @alignOf(@This()) + 1, }; @@ -1815,6 +1881,8 @@ test "initializer uses own alignment" { } test "initializer uses own size" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + const S = struct { x: u32 = @sizeOf(@This()) + 1, }; @@ -1826,6 +1894,8 @@ test "initializer uses own size" { } test "initializer takes a pointer to a variable inside its struct" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + const namespace = struct { const S = struct { s: *S = &S.instance, @@ -1844,6 +1914,8 @@ test "initializer takes a pointer to a variable inside its struct" { } test "circular dependency through pointer field of a struct" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + const S = struct { const StructInner = extern struct { outer: StructOuter = std.mem.zeroes(StructOuter), @@ -1865,6 +1937,8 @@ test "circular dependency through pointer field of a struct" { } test "field calls do not force struct field init resolution" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + const S = struct { x: u32 = blk: { _ = @TypeOf(make().dummyFn()); // runtime field call - S not fully resolved - dummyFn call should not force field init resolution @@ -1895,6 +1969,7 @@ test "extern struct fields are aligned to 1" { if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const Foo = extern struct { a: u8 align(1), @@ -1914,6 +1989,7 @@ test "assign to slice.len of global variable" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const allocator = std.testing.allocator; @@ -1964,6 +2040,7 @@ test "runtime call in nested initializer" { if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const Holder = struct { array: []const u8, @@ -1996,6 +2073,7 @@ test "runtime value in nested initializer passed as pointer to function" { if 
(builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const Bar = struct { b: u32, @@ -2023,6 +2101,7 @@ test "struct field default value is a call" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const Z = packed struct { a: u32, @@ -2055,6 +2134,7 @@ test "struct field default value is a call" { test "aggregate initializers should allow initializing comptime fields, verifying equality" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var x: u32 = 15; _ = &x; @@ -2070,6 +2150,7 @@ test "assignment of field with padding" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const Mesh = extern struct { id: u32, @@ -2100,6 +2181,7 @@ test "initiate global variable with runtime value" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { field: i32, @@ -2118,6 +2200,7 @@ test "initiate global variable with runtime value" { test "struct containing optional pointer to array of @This()" { if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { x: ?*const [1]@This(), diff --git a/test/behavior/struct_contains_null_ptr_itself.zig b/test/behavior/struct_contains_null_ptr_itself.zig index d0cb3ef443..d3dacc50cd 100644 --- a/test/behavior/struct_contains_null_ptr_itself.zig +++ b/test/behavior/struct_contains_null_ptr_itself.zig @@ -5,6 +5,7 @@ const builtin = @import("builtin"); test "struct contains null pointer which contains original struct" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var x: ?*NodeLineComment = null; _ = &x; diff --git a/test/behavior/struct_contains_slice_of_itself.zig b/test/behavior/struct_contains_slice_of_itself.zig index adb1c31047..6f6d829567 100644 --- a/test/behavior/struct_contains_slice_of_itself.zig +++ b/test/behavior/struct_contains_slice_of_itself.zig @@ -13,6 +13,7 @@ const NodeAligned = struct { test "struct contains slice of itself" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var other_nodes = [_]Node{ Node{ @@ -53,6 +54,7 @@ test "struct contains slice of itself" { test "struct contains aligned slice of itself" { if (builtin.zig_backend == .stage2_aarch64) return 
error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var other_nodes = [_]NodeAligned{ NodeAligned{ diff --git a/test/behavior/switch.zig b/test/behavior/switch.zig index c36b4a520d..78365e8763 100644 --- a/test/behavior/switch.zig +++ b/test/behavior/switch.zig @@ -7,6 +7,7 @@ const expectEqual = std.testing.expectEqual; test "switch with numbers" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testSwitchWithNumbers(13); } @@ -22,6 +23,7 @@ fn testSwitchWithNumbers(x: u32) !void { test "switch with all ranges" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect(testSwitchWithAllRanges(50, 3) == 1); try expect(testSwitchWithAllRanges(101, 0) == 2); @@ -55,6 +57,7 @@ test "implicit comptime switch" { test "switch on enum" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const fruit = Fruit.Orange; nonConstSwitchOnEnum(fruit); @@ -74,6 +77,7 @@ fn nonConstSwitchOnEnum(fruit: Fruit) void { test "switch statement" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try nonConstSwitch(SwitchStatementFoo.C); } @@ -90,6 +94,7 @@ const SwitchStatementFoo = enum { A, B, C, D }; test "switch with multiple expressions" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const x = switch (returnsFive()) { 1, 2, 3 => 1, @@ -118,6 +123,7 @@ fn trueIfBoolFalseOtherwise(comptime T: type) bool { test "switching on booleans" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testSwitchOnBools(); try comptime testSwitchOnBools(); @@ -173,6 +179,7 @@ test "undefined.u0" { test "switch with disjoint range" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var q: u8 = 0; _ = &q; @@ -184,6 +191,8 @@ test "switch with disjoint range" { } test "switch variable for range and multiple prongs" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + const S = struct { fn doTheTest() !void { try doTheSwitch(16); @@ -215,6 +224,7 @@ fn poll() void { test "switch on global mutable var isn't constant-folded" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; while (state < 2) { poll(); @@ -231,6 +241,7 @@ test "switch prong with variable" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try switchProngWithVarFn(SwitchProngWithVarEnum{ .One = 13 }); try switchProngWithVarFn(SwitchProngWithVarEnum{ .Two = 13.0 }); @@ -255,6 +266,7 @@ test "switch on enum using pointer capture" { if (builtin.zig_backend == .stage2_arm) 
return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testSwitchEnumPtrCapture(); try comptime testSwitchEnumPtrCapture(); @@ -274,6 +286,7 @@ fn testSwitchEnumPtrCapture() !void { test "switch handles all cases of number" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testSwitchHandleAllCases(); try comptime testSwitchHandleAllCases(); @@ -315,6 +328,7 @@ test "switch on union with some prongs capturing" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const X = union(enum) { a, @@ -368,6 +382,7 @@ test "anon enum literal used in switch on union enum" { test "switch all prongs unreachable" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testAllProngsUnreachable(); try comptime testAllProngsUnreachable(); @@ -391,6 +406,7 @@ fn switchWithUnreachable(x: i32) i32 { test "capture value of switch with all unreachable prongs" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const x = return_a_number() catch |err| switch (err) { else => unreachable, @@ -404,6 +420,7 @@ fn return_a_number() anyerror!i32 { test "switch on integer with else capturing expr" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -425,6 +442,7 @@ test "else prong of switch on error set excludes other cases" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -460,6 +478,7 @@ test "switch prongs with error set cases make a new error set type for capture v if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -494,6 +513,7 @@ test "switch prongs with error set cases make a new error set type for capture v test "return result loc and then switch with range implicit casted to error union" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -514,6 +534,7 @@ test "switch with null and T peer types and inferred result location type" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if 
(builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest(c: u8) !void { @@ -534,6 +555,7 @@ test "switch prongs with cases with identical payload types" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const Union = union(enum) { A: usize, @@ -640,6 +662,7 @@ test "switch prong pointer capture alignment" { test "switch on pointer type" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const X = struct { @@ -688,6 +711,7 @@ test "switch capture copies its payload" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -711,6 +735,7 @@ test "switch capture copies its payload" { test "capture of integer forwards the switch condition directly" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn foo(x: u8) !void { @@ -732,6 +757,7 @@ test "capture of integer forwards the switch condition directly" { test "enum value without tag name used as switch item" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const E = enum(u32) { a = 1, @@ -749,6 +775,8 @@ test "enum value without tag name used as switch item" { } test "switch item sizeof" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + const S = struct { fn doTheTest() !void { var a: usize = 0; @@ -777,6 +805,8 @@ test "comptime inline switch" { } test "switch capture peer type resolution" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + const U = union(enum) { a: u32, b: u64, @@ -792,6 +822,8 @@ test "switch capture peer type resolution" { } test "switch capture peer type resolution for in-memory coercible payloads" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + const T1 = c_int; const T2 = @Type(@typeInfo(T1)); @@ -813,6 +845,7 @@ test "switch capture peer type resolution for in-memory coercible payloads" { test "switch pointer capture peer type resolution" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const T1 = c_int; const T2 = @Type(@typeInfo(T1)); @@ -840,6 +873,8 @@ test "switch pointer capture peer type resolution" { } test "inline switch range that includes the maximum value of the switched type" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + const inputs: [3]u8 = .{ 0, 254, 255 }; for (inputs) |input| { switch (input) { @@ -850,6 +885,8 @@ test "inline switch range that includes the maximum value of the switched type" } test "nested break ignores switch 
conditions and breaks instead" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + const S = struct { fn register_to_address(ident: []const u8) !u8 { const reg: u8 = if (std.mem.eql(u8, ident, "zero")) 0x00 else blk: { @@ -870,6 +907,7 @@ test "nested break ignores switch conditions and breaks instead" { test "peer type resolution on switch captures ignores unused payload bits" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const Foo = union(enum) { a: u32, @@ -895,6 +933,7 @@ test "peer type resolution on switch captures ignores unused payload bits" { test "switch prong captures range" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn a(b: []u3, c: u3) void { @@ -912,6 +951,8 @@ test "switch prong captures range" { } test "prong with inline call to unreachable" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + const U = union(enum) { void: void, bool: bool, @@ -929,6 +970,8 @@ test "prong with inline call to unreachable" { } test "block error return trace index is reset between prongs" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + const S = struct { fn returnError() error{TestFailed} { return error.TestFailed; diff --git a/test/behavior/switch_on_captured_error.zig b/test/behavior/switch_on_captured_error.zig index 6e70c851b1..a4bdc8755f 100644 --- a/test/behavior/switch_on_captured_error.zig +++ b/test/behavior/switch_on_captured_error.zig @@ -8,6 +8,7 @@ const builtin = @import("builtin"); test "switch on error union catch capture" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const Error = error{ A, B, C }; @@ -302,6 +303,7 @@ test "switch on error union catch capture" { test "switch on error union if else capture" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const Error = error{ A, B, C }; diff --git a/test/behavior/switch_prong_err_enum.zig b/test/behavior/switch_prong_err_enum.zig index 15d366d04f..8d622ed4d4 100644 --- a/test/behavior/switch_prong_err_enum.zig +++ b/test/behavior/switch_prong_err_enum.zig @@ -24,6 +24,7 @@ test "switch prong returns error enum" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; switch (doThing(17) catch unreachable) { FormValue.Address => |payload| { diff --git a/test/behavior/switch_prong_implicit_cast.zig b/test/behavior/switch_prong_implicit_cast.zig index 54107bb6bd..2281ddd448 100644 --- a/test/behavior/switch_prong_implicit_cast.zig +++ b/test/behavior/switch_prong_implicit_cast.zig @@ -18,6 +18,7 @@ test "switch prong implicit cast" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if 
(builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const result = switch (foo(2) catch unreachable) {
         FormValue.One => false,
diff --git a/test/behavior/this.zig b/test/behavior/this.zig
index 3f8fe13316..c8e1459ec8 100644
--- a/test/behavior/this.zig
+++ b/test/behavior/this.zig
@@ -21,12 +21,15 @@ fn add(x: i32, y: i32) i32 {
 }
 
 test "this refer to module call private fn" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     try expect(module.add(1, 2) == 3);
 }
 
 test "this refer to container" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var pt: Point(i32) = undefined;
     pt.x = 12;
@@ -50,6 +53,7 @@ test "this used as optional function parameter" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var global: State = undefined;
     global.enter = prev;
diff --git a/test/behavior/threadlocal.zig b/test/behavior/threadlocal.zig
index f91e10d12d..87daebda78 100644
--- a/test/behavior/threadlocal.zig
+++ b/test/behavior/threadlocal.zig
@@ -6,6 +6,7 @@ test "thread local variable" {
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_llvm) switch (builtin.cpu.arch) {
         .x86_64, .x86 => {},
         else => return error.SkipZigTest,
@@ -28,6 +29,7 @@ test "pointer to thread local array" {
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_llvm) switch (builtin.cpu.arch) {
         .x86_64, .x86 => {},
         else => return error.SkipZigTest,
@@ -45,6 +47,7 @@ test "reference a global threadlocal variable" {
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_llvm) switch (builtin.cpu.arch) {
         .x86_64, .x86 => {},
         else => return error.SkipZigTest,
diff --git a/test/behavior/truncate.zig b/test/behavior/truncate.zig
index 267d291d48..404b9f6d71 100644
--- a/test/behavior/truncate.zig
+++ b/test/behavior/truncate.zig
@@ -69,6 +69,7 @@ test "truncate on vectors" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
diff --git a/test/behavior/try.zig b/test/behavior/try.zig
index cc76658e93..e8ab96e5c9 100644
--- a/test/behavior/try.zig
+++ b/test/behavior/try.zig
@@ -4,6 +4,7 @@ const expect = std.testing.expect;
 
 test "try on error union" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try tryOnErrorUnionImpl();
     try comptime tryOnErrorUnionImpl();
@@ -51,6 +52,7 @@ test "`try`ing an if/else expression" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn getError() !void {
diff --git a/test/behavior/tuple.zig b/test/behavior/tuple.zig
index 2b52df45a1..ab407f8e60 100644
--- a/test/behavior/tuple.zig
+++ b/test/behavior/tuple.zig
@@ -10,6 +10,7 @@ test "tuple concatenation" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -55,6 +56,7 @@ test "more tuple concatenation" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const T = struct {
         fn consume_tuple(tuple: anytype, len: usize) !void {
@@ -131,6 +133,7 @@ test "tuple initializer for var" {
 test "array-like initializer for tuple types" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const T = @Type(.{
         .Struct = .{
@@ -218,6 +221,7 @@ test "fieldParentPtr of tuple" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var x: u32 = 0;
     _ = &x;
@@ -229,6 +233,7 @@ test "fieldParentPtr of anon struct" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var x: u32 = 0;
     _ = &x;
@@ -253,6 +258,7 @@ test "offsetOf anon struct" {
 test "initializing tuple with mixed comptime-runtime fields" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var x: u32 = 15;
     _ = &x;
@@ -265,6 +271,7 @@ test "initializing anon struct with mixed comptime-runtime fields" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var x: u32 = 15;
     _ = &x;
@@ -278,6 +285,7 @@ test "tuple in tuple passed to generic function" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn pair(x: f32, y: f32) std.meta.Tuple(&.{ f32, f32 }) {
@@ -297,6 +305,7 @@ test "coerce tuple to tuple" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const T = std.meta.Tuple(&.{u8});
     const S = struct {
@@ -311,6 +320,7 @@ test "tuple type with void field" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const T = std.meta.Tuple(&[_]type{void});
     const x = T{{}};
@@ -318,6 +328,8 @@ test "tuple type with void field" {
 }
 
 test "zero sized struct in tuple handled correctly" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     const State = struct {
         const Self = @This();
         data: @Type(.{
@@ -347,6 +359,7 @@ test "zero sized struct in tuple handled correctly" {
 test "tuple type with void field and a runtime field" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const T = std.meta.Tuple(&[_]type{ usize, void });
     var t: T = .{ 5, {} };
@@ -358,6 +371,7 @@ test "branching inside tuple literal" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn foo(a: anytype) !void {
@@ -373,6 +387,7 @@ test "tuple initialized with a runtime known value" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const E = union(enum) { e: []const u8 };
     const W = union(enum) { w: E };
@@ -387,6 +402,7 @@ test "tuple of struct concatenation and coercion to array" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const StructWithDefault = struct { value: f32 = 42 };
     const SomeStruct = struct { array: [4]StructWithDefault };
@@ -401,6 +417,7 @@ test "nested runtime conditionals in tuple initializer" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var data: u8 = 0;
     _ = &data;
@@ -436,6 +453,7 @@ test "tuple pointer is indexable" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct { u32, bool };
@@ -459,6 +477,7 @@ test "coerce anon tuple to tuple" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var x: u8 = 1;
     var y: u16 = 2;
@@ -493,6 +512,7 @@ test "tuple with runtime value coerced into a slice with a sentinel" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn f(a: [:null]const ?u8) !void {
@@ -562,6 +582,8 @@ test "comptime fields in tuple can be initialized" {
 }
 
 test "tuple default values" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     const T = struct {
         usize,
         usize = 123,
diff --git a/test/behavior/tuple_declarations.zig b/test/behavior/tuple_declarations.zig
index e6d5d76fc8..dc9214e7bb 100644
--- a/test/behavior/tuple_declarations.zig
+++ b/test/behavior/tuple_declarations.zig
@@ -7,6 +7,7 @@ const expectEqualStrings = testing.expectEqualStrings;
 test "tuple declaration type info" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     {
         const T = struct { comptime u32 align(2) = 1, []const u8 };
@@ -35,6 +36,7 @@ test "tuple declaration type info" {
 test "Tuple declaration usage" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const T = struct { u32, []const u8 };
     var t: T = .{ 1, "foo" };
diff --git a/test/behavior/type.zig b/test/behavior/type.zig
index ba29640774..bf1b8a76f4 100644
--- a/test/behavior/type.zig
+++ b/test/behavior/type.zig
@@ -203,6 +203,7 @@ test "Type.Opaque" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const Opaque = @Type(.{
         .Opaque = .{
@@ -260,6 +261,7 @@ test "Type.Struct" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const A = @Type(@typeInfo(struct { x: u8, y: u32 }));
     const infoA = @typeInfo(A).Struct;
@@ -347,6 +349,7 @@ test "Type.Struct" {
 test "Type.Enum" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const Foo = @Type(.{
         .Enum = .{
@@ -383,6 +386,7 @@ test "Type.Union" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const Untagged = @Type(.{
         .Union = .{
diff --git a/test/behavior/type_info.zig b/test/behavior/type_info.zig
index b650248e42..805f24bc18 100644
--- a/test/behavior/type_info.zig
+++ b/test/behavior/type_info.zig
@@ -161,6 +161,7 @@ test "type info: error set, error union info, anyerror" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try testErrorSet();
     try comptime testErrorSet();
@@ -192,6 +193,7 @@ test "type info: error set single value" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const TestSet = error.One;
@@ -205,6 +207,7 @@ test "type info: error set merged" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const TestSet = error{ One, Two } || error{Three};
@@ -220,6 +223,7 @@ test "type info: enum info" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try testEnum();
     try comptime testEnum();
@@ -282,6 +286,7 @@ fn testUnion() !void {
 test "type info: struct info" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try testStruct();
     try comptime testStruct();
@@ -530,6 +535,7 @@ test "type info for async frames" {
 test "Declarations are returned in declaration order" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         pub const a = 1;
@@ -552,6 +558,7 @@ test "Struct.is_tuple for anon list literal" {
 test "Struct.is_tuple for anon struct literal" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const info = @typeInfo(@TypeOf(.{ .a = 0 }));
     try expect(!info.Struct.is_tuple);
@@ -565,6 +572,8 @@ test "StructField.is_comptime" {
 }
 
 test "typeInfo resolves usingnamespace declarations" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     const A = struct {
         pub const f1 = 42;
     };
@@ -590,6 +599,7 @@ test "value from struct @typeInfo default_value can be loaded at comptime" {
 test "@typeInfo decls and usingnamespace" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const A = struct {
         pub const x = 5;
@@ -630,6 +640,8 @@ test "type info of tuple of string literal default value" {
 }
 
 test "@typeInfo only contains pub decls" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     const other = struct {
         const std = @import("std");
diff --git a/test/behavior/typename.zig b/test/behavior/typename.zig
index c3eefc8de7..e5ebbb6f47 100644
--- a/test/behavior/typename.zig
+++ b/test/behavior/typename.zig
@@ -16,6 +16,7 @@ test "anon fn param" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     // https://github.com/ziglang/zig/issues/9339
     try expectEqualStringsIgnoreDigits(
@@ -41,6 +42,7 @@ test "anon field init" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const Foo = .{
         .T1 = struct {},
@@ -66,6 +68,7 @@ test "basic" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try expectEqualStrings("i64", @typeName(i64));
     try expectEqualStrings("*usize", @typeName(*usize));
@@ -87,6 +90,7 @@ test "top level decl" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try expectEqualStrings(
         "behavior.typename.A_Struct",
@@ -136,6 +140,7 @@ test "fn param" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     // https://github.com/ziglang/zig/issues/675
     try expectEqualStrings(
@@ -215,6 +220,7 @@ test "local variable" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const Foo = struct { a: u32 };
     const Bar = union { a: u32 };
@@ -233,6 +239,7 @@ test "comptime parameters not converted to anytype in function type" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const T = fn (fn (type) void, void) void;
     try expectEqualStrings("fn (comptime fn (comptime type) void, void) void", @typeName(T));
@@ -242,6 +249,7 @@ test "anon name strategy used in sub expression" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn getTheName() []const u8 {
diff --git a/test/behavior/undefined.zig b/test/behavior/undefined.zig
index adda49cfe0..e8733778e2 100644
--- a/test/behavior/undefined.zig
+++ b/test/behavior/undefined.zig
@@ -48,6 +48,7 @@ test "assign undefined to struct" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     comptime {
         var foo: Foo = undefined;
@@ -65,6 +66,7 @@ test "assign undefined to struct with method" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     comptime {
         var foo: Foo = undefined;
@@ -80,6 +82,7 @@ test "assign undefined to struct with method" {
 test "type name of undefined" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const x = undefined;
     try expect(mem.eql(u8, @typeName(@TypeOf(x)), "@TypeOf(undefined)"));
@@ -91,6 +94,7 @@ test "reslice of undefined global var slice" {
     if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var stack_buf: [100]u8 = [_]u8{0} ** 100;
     buf = &stack_buf;
@@ -103,6 +107,7 @@ test "returned undef is 0xaa bytes when runtime safety is enabled" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const Rect = struct {
         x: f32,
diff --git a/test/behavior/underscore.zig b/test/behavior/underscore.zig
index 66b49e52d5..a53fec489b 100644
--- a/test/behavior/underscore.zig
+++ b/test/behavior/underscore.zig
@@ -8,6 +8,7 @@ test "ignore lval with underscore" {
 
 test "ignore lval with underscore (while loop)" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     while (optionalReturnError()) |_| {
         while (optionalReturnError()) |_| {
diff --git a/test/behavior/union.zig b/test/behavior/union.zig
index b652d52896..cafaeed953 100644
--- a/test/behavior/union.zig
+++ b/test/behavior/union.zig
@@ -15,6 +15,7 @@ test "basic unions with floats" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var foo = FooWithFloats{ .int = 1 };
     try expect(foo.int == 1);
@@ -30,6 +31,7 @@ test "init union with runtime value - floats" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var foo: FooWithFloats = undefined;
@@ -41,6 +43,7 @@ test "basic unions" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var foo = Foo{ .int = 1 };
     try expect(foo.int == 1);
@@ -59,6 +62,7 @@ test "init union with runtime value" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var foo: Foo = undefined;
@@ -97,6 +101,7 @@ const FooExtern = extern union {
 test "basic extern unions" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var foo = FooExtern{ .int = 1 };
     try expect(foo.int == 1);
@@ -168,6 +173,7 @@ test "constant tagged union with payload" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var empty = TaggedUnionWithPayload{ .Empty = {} };
     var full = TaggedUnionWithPayload{ .Full = 13 };
@@ -218,6 +224,7 @@ test "union with specified enum tag" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try doTest();
     try comptime doTest();
@@ -228,6 +235,7 @@ test "packed union generates correctly aligned type" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const U = packed union {
         f1: *const fn () error{TestUnexpectedResult}!void,
@@ -268,6 +276,7 @@ test "comparison between union and enum literal" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try testComparison();
     try comptime testComparison();
@@ -283,6 +292,7 @@ test "cast union to tag type of union" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try testCastUnionToTag();
     try comptime testCastUnionToTag();
@@ -304,6 +314,7 @@ test "cast tag type of union to union" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var x: Value2 = Letter2.B;
     _ = &x;
@@ -320,6 +331,7 @@ test "implicit cast union to its tag type" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var x: Value2 = Letter2.B;
     _ = &x;
@@ -341,6 +353,7 @@ test "constant packed union" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try testConstPackedUnion(&[_]PackThis{PackThis{ .StringLiteral = 1 }});
 }
@@ -405,6 +418,7 @@ test "tagged union initialization with runtime void" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try expect(testTaggedUnionInit({}));
 }
@@ -472,6 +486,7 @@ test "global union with single field is correctly initialized" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     glbl = Foo1{
         .f = @typeInfo(Foo1).Union.fields[0].type{ .x = 123 },
@@ -490,6 +505,7 @@ test "initialize global array of union" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     glbl_array[1] = FooUnion{ .U1 = 2 };
     glbl_array[0] = FooUnion{ .U0 = 1 };
@@ -501,6 +517,7 @@ test "update the tag value for zero-sized unions" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = union(enum) {
         U0: void,
@@ -529,6 +546,7 @@ test "union initializer generates padding only if needed" {
 test "runtime tag name with single field" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const U = union(enum) {
         A: i32,
@@ -605,6 +623,7 @@ test "tagged union as return value" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     switch (returnAnInt(13)) {
         TaggedFoo.One => |value| try expect(value == 13),
@@ -620,6 +639,7 @@ test "tagged union with all void fields but a meaningful tag" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         const B = union(enum) {
@@ -648,6 +668,7 @@ test "union(enum(u32)) with specified and unspecified tag values" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     comptime assert(Tag(Tag(MultipleChoice2)) == u32);
     try testEnumWithSpecifiedAndUnspecifiedTagValues(MultipleChoice2{ .C = 123 });
@@ -740,6 +761,7 @@ test "@intFromEnum works on unions" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const Bar = union(enum) {
         A: bool,
@@ -799,6 +821,7 @@ test "return union init with void payload" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn entry() !void {
@@ -823,6 +846,7 @@ test "@unionInit stored to a const" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         const U = union(enum) {
@@ -853,6 +877,7 @@ test "@unionInit can modify a union type" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const UnionInitEnum = union(enum) {
         Boolean: bool,
@@ -876,6 +901,7 @@ test "@unionInit can modify a pointer value" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const UnionInitEnum = union(enum) {
         Boolean: bool,
@@ -931,6 +957,7 @@ test "anonymous union literal syntax" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         const Number = union {
@@ -958,6 +985,7 @@ test "function call result coerces from tagged union to the tag" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         const Arch = union(enum) {
@@ -993,6 +1021,7 @@ test "cast from anonymous struct to union" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         const U = union(enum) {
@@ -1027,6 +1056,7 @@ test "cast from pointer to anonymous struct to pointer to union" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         const U = union(enum) {
@@ -1061,6 +1091,7 @@ test "switching on non exhaustive union" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         const E = enum(u8) {
@@ -1120,6 +1151,7 @@ test "@unionInit on union with tag but no fields" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         const Type = enum(u8) { no_op = 105 };
@@ -1169,6 +1201,7 @@ test "global variable struct contains union initialized to non-most-aligned fiel
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const T = struct {
         const U = union(enum) {
@@ -1195,6 +1228,7 @@ test "union with no result loc initiated with a runtime value" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const U = union {
         a: u32,
@@ -1212,6 +1246,7 @@ test "union with a large struct field" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         a: [8]usize,
@@ -1246,6 +1281,7 @@ test "union tag is set when initiated as a temporary value at runtime" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const U = union(enum) {
         a,
@@ -1266,6 +1302,7 @@ test "extern union most-aligned field is smaller" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const U = extern union {
         in6: extern struct {
@@ -1285,6 +1322,7 @@ test "return an extern union from C calling convention" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const namespace = struct {
         const S = extern struct {
@@ -1316,6 +1354,7 @@ test "noreturn field in union" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const U = union(enum) {
         a: u32,
@@ -1367,6 +1406,7 @@ test "@unionInit uses tag value instead of field index" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const E = enum(u8) {
         b = 255,
@@ -1396,6 +1436,7 @@ test "union field ptr - zero sized payload" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const U = union {
         foo: void,
@@ -1410,6 +1451,7 @@ test "union field ptr - zero sized field" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const U = union {
         foo: void,
@@ -1425,6 +1467,7 @@ test "packed union in packed struct" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = packed struct {
         nested: packed union {
@@ -1476,6 +1519,7 @@ test "no dependency loop when function pointer in union returns the union" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const U = union(enum) {
         const U = @This();
@@ -1496,6 +1540,7 @@ test "no dependency loop when function pointer in union returns the union" {
 test "union reassignment can use previous value" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const U = union {
         a: u32,
@@ -1511,6 +1556,7 @@ test "packed union with zero-bit field" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = packed struct {
         nested: packed union {
@@ -1547,6 +1593,7 @@ test "reinterpreting enum value inside packed union" {
 
 test "access the tag of a global tagged union" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const U = union(enum) {
         a,
@@ -1558,6 +1605,7 @@ test "access the tag of a global tagged union" {
 
 test "coerce enum literal to union in result loc" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const U = union(enum) {
         a,
@@ -1578,6 +1626,7 @@ test "defined-layout union field pointer has correct alignment" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest(comptime U: type) !void {
@@ -1613,6 +1662,7 @@ test "undefined-layout union field pointer has correct alignment" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest(comptime U: type) !void {
@@ -1648,6 +1698,7 @@ test "packed union field pointer has correct alignment" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const U = packed union { x: u20 };
     const S = packed struct(u24) { a: u2, u: U, b: u2 };
@@ -1675,6 +1726,7 @@ test "packed union field pointer has correct alignment" {
 
 test "union with 128 bit integer" {
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const ValueTag = enum { int, other };
@@ -1698,6 +1750,7 @@ test "union with 128 bit integer" {
 
 test "memset extern union" {
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const U = extern union {
         foo: u8,
@@ -1719,6 +1772,7 @@ test "memset extern union" {
 
 test "memset packed union" {
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const U = packed union {
         a: u32,
@@ -1819,6 +1873,7 @@ test "reinterpret extern union" {
 
 test "reinterpret packed union" {
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const U = packed union {
         foo: u8,
@@ -1891,6 +1946,7 @@ test "reinterpret packed union" {
 
 test "reinterpret packed union inside packed struct" {
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const U = packed union {
         a: u7,
@@ -1927,6 +1983,8 @@ test "reinterpret packed union inside packed struct" {
 }
 
 test "inner struct initializer uses union layout" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     const namespace = struct {
         const U = union {
             a: struct {
@@ -1952,6 +2010,7 @@ test "inner struct initializer uses union layout" {
 
 test "inner struct initializer uses packed union layout" {
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const namespace = struct {
         const U = packed union {
@@ -1978,6 +2037,7 @@ test "inner struct initializer uses packed union layout" {
 
 test "extern union initialized via reintepreted struct field initializer" {
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const bytes = [_]u8{ 0xaa, 0xbb, 0xcc, 0xdd };
@@ -1997,6 +2057,7 @@ test "extern union initialized via reintepreted struct field initializer" {
 
 test "packed union initialized via reintepreted struct field initializer" {
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const bytes = [_]u8{ 0xaa, 0xbb, 0xcc, 0xdd };
@@ -2017,6 +2078,7 @@ test "packed union initialized via reintepreted struct field initializer" {
 
 test "store of comptime reinterpreted memory to extern union" {
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const bytes = [_]u8{ 0xaa, 0xbb, 0xcc, 0xdd };
@@ -2039,6 +2101,7 @@ test "store of comptime reinterpreted memory to extern union" {
 
 test "store of comptime reinterpreted memory to packed union" {
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const bytes = [_]u8{ 0xaa, 0xbb, 0xcc, 0xdd };
@@ -2061,6 +2124,7 @@ test "store of comptime reinterpreted memory to packed union" {
 
 test "union field is a pointer to an aligned version of itself" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const E = union {
         next: *align(1) @This(),
@@ -2072,6 +2136,8 @@ test "union field is a pointer to an aligned version of itself" {
 }
 
 test "pass register-sized field as non-register-sized union" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     const S = struct {
         fn taggedUnion(u: union(enum) { x: usize, y: [2]usize }) !void {
             try expectEqual(@as(usize, 42), u.x);
@@ -2095,6 +2161,7 @@ test "pass register-sized field as non-register-sized union" {
 
 test "circular dependency through pointer field of a union" {
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         const UnionInner = extern struct {
@@ -2117,6 +2184,8 @@ test "circular dependency through pointer field of a union" {
 }
 
 test "pass nested union with rls" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     const Union = union(enum) {
         a: u32,
         b: union(enum) {
@@ -2139,6 +2208,7 @@ test "runtime union init, most-aligned field != largest" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const U = union(enum) {
         x: u128,
@@ -2163,6 +2233,7 @@ test "copied union field doesn't alias source" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const U = union(enum) {
         array: [10]u32,
@@ -2183,6 +2254,7 @@ test "create union(enum) from other union(enum)" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const string = "hello world";
     const TempRef = struct {
diff --git a/test/behavior/union_with_members.zig b/test/behavior/union_with_members.zig
index 186a30ad63..83ce38d5bc 100644
--- a/test/behavior/union_with_members.zig
+++ b/test/behavior/union_with_members.zig
@@ -21,6 +21,7 @@ test "enum with members" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const a = ET{ .SINT = -42 };
     const b = ET{ .UINT = 42 };
diff --git a/test/behavior/usingnamespace.zig b/test/behavior/usingnamespace.zig
index 61b4137cde..9be734dd32 100644
--- a/test/behavior/usingnamespace.zig
+++ b/test/behavior/usingnamespace.zig
@@ -11,6 +11,8 @@ const C = struct {
 };
 
 test "basic usingnamespace" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     try std.testing.expect(C.B == bool);
 }
@@ -21,6 +23,8 @@ fn Foo(comptime T: type) type {
 }
 
 test "usingnamespace inside a generic struct" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     const std2 = Foo(std);
     const testing2 = Foo(std.testing);
     try std2.testing.expect(true);
@@ -32,6 +36,8 @@ usingnamespace struct {
 };
 
 test "usingnamespace does not redeclare an imported variable" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     try comptime std.testing.expect(@This().foo == 42);
 }
@@ -39,6 +45,7 @@ usingnamespace @import("usingnamespace/foo.zig");
 test "usingnamespace omits mixing in private functions" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try expect(@This().privateFunction());
     try expect(!@This().printText());
@@ -48,6 +55,8 @@ fn privateFunction() bool {
 }
 
 test {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     _ = @import("usingnamespace/import_segregation.zig");
 }
@@ -55,6 +64,7 @@ usingnamespace @import("usingnamespace/a.zig");
 test "two files usingnamespace import each other" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try expect(@This().ok());
 }
@@ -62,6 +72,7 @@ test "two files usingnamespace import each other" {
 test {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const AA = struct {
         x: i32,
@@ -94,6 +105,8 @@ const Mixin = struct {
 };
 
 test "container member access usingnamespace decls" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     var foo = Bar{};
     foo.two();
 }
diff --git a/test/behavior/usingnamespace/file_1.zig b/test/behavior/usingnamespace/file_1.zig
index 971fb8958d..e16ae80e48 100644
--- a/test/behavior/usingnamespace/file_1.zig
+++ b/test/behavior/usingnamespace/file_1.zig
@@ -1,9 +1,12 @@
 const std = @import("std");
 const expect = std.testing.expect;
 const imports = @import("imports.zig");
+const builtin = @import("builtin");
 
 const A = 456;
 
 test {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     try expect(imports.A == 123);
 }
diff --git a/test/behavior/usingnamespace/import_segregation.zig b/test/behavior/usingnamespace/import_segregation.zig
index f1dffc6383..f06a5bb4f6 100644
--- a/test/behavior/usingnamespace/import_segregation.zig
+++ b/test/behavior/usingnamespace/import_segregation.zig
@@ -6,6 +6,7 @@ usingnamespace @import("bar.zig");
 
 test "no clobbering happened" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch.isMIPS()) {
         // https://github.com/ziglang/zig/issues/16846
diff --git a/test/behavior/var_args.zig b/test/behavior/var_args.zig
index b0dccc1383..a3d4f09d2e 100644
--- a/test/behavior/var_args.zig
+++ b/test/behavior/var_args.zig
@@ -14,6 +14,8 @@ fn add(args: anytype) i32 {
 }
 
 test "add arbitrary args" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     try expect(add(.{ @as(i32, 1), @as(i32, 2), @as(i32, 3), @as(i32, 4) }) == 10);
     try expect(add(.{@as(i32, 1234)}) == 1234);
     try expect(add(.{}) == 0);
@@ -24,12 +26,15 @@ fn readFirstVarArg(args: anytype) void {
 }
 
 test "send void arg to var args" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     readFirstVarArg(.{{}});
 }
 
 test "pass args directly" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try expect(addSomeStuff(.{ @as(i32, 1), @as(i32, 2), @as(i32, 3), @as(i32, 4) }) == 10);
     try expect(addSomeStuff(.{@as(i32, 1234)}) == 1234);
@@ -43,6 +48,7 @@ fn addSomeStuff(args: anytype) i32 {
 test "runtime parameter before var args" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try expect((try extraFn(10, .{})) == 0);
     try expect((try extraFn(10, .{false})) == 1);
@@ -81,11 +87,15 @@ fn foo2(args: anytype) bool {
 }
 
 test "array of var args functions" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     try expect(foos[0](.{}));
     try expect(!foos[1](.{}));
 }
 
 test "pass zero length array to var args param" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     doNothingWithFirstArg(.{""});
 }
@@ -99,6 +109,7 @@ test "simple variadic function" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
     if (builtin.os.tag != .macos and comptime builtin.cpu.arch.isAARCH64()) {
         // https://github.com/ziglang/zig/issues/14096
         return error.SkipZigTest;
@@ -158,6 +169,7 @@ test "coerce reference to var arg" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
     if (builtin.os.tag != .macos and comptime builtin.cpu.arch.isAARCH64()) {
         // https://github.com/ziglang/zig/issues/14096
         return error.SkipZigTest;
@@ -189,6 +201,7 @@ test "variadic functions" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
     if (builtin.os.tag != .macos and comptime builtin.cpu.arch.isAARCH64()) {
         // https://github.com/ziglang/zig/issues/14096
         return error.SkipZigTest;
@@ -232,6 +245,7 @@ test "copy VaList" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
     if (builtin.os.tag != .macos and comptime builtin.cpu.arch.isAARCH64()) {
         // https://github.com/ziglang/zig/issues/14096
         return error.SkipZigTest;
@@ -264,6 +278,7 @@ test "unused VaList arg" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
     if (builtin.os.tag != .macos and comptime builtin.cpu.arch.isAARCH64()) {
         // https://github.com/ziglang/zig/issues/14096
         return error.SkipZigTest;
diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig
index 8a23954c76..aeecc641d6 100644
--- a/test/behavior/vector.zig
+++ b/test/behavior/vector.zig
@@ -28,6 +28,7 @@ test "vector wrap operators" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     if (builtin.zig_backend == .stage2_x86_64 and !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sse4_1)) return error.SkipZigTest;
@@ -52,6 +53,7 @@ test "vector bin compares with mem.eql" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -76,6 +78,7 @@ test "vector int operators" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -100,6 +103,7 @@ test "vector float operators" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArmOrThumb()) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     inline for ([_]type{ f16, f32, f64, f80, f128 }) |T| {
         const S = struct {
@@ -123,6 +127,7 @@ test "vector bit operators" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -142,6 +147,7 @@ test "implicit cast vector to array" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -160,6 +166,7 @@ test "array to vector" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -180,6 +187,7 @@ test "array vector coercion - odd sizes" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -219,6 +227,7 @@ test "array to vector with element type coercion" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -258,6 +267,7 @@ test "tuple to vector" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64) {
         // Regressed with LLVM 14:
@@ -287,6 +297,7 @@ test "vector casts of sizes not divisible by 8" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -325,6 +336,7 @@ test "vector @splat" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     if (builtin.zig_backend == .stage2_llvm and builtin.os.tag == .macos)
@@ -371,6 +383,7 @@ test "load vector elements via comptime index" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -392,6 +405,7 @@ test "store vector elements via comptime index" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -419,6 +433,7 @@ test "load vector elements via runtime index" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -441,6 +456,7 @@ test "store vector elements via runtime index" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -485,6 +501,7 @@ test "vector comparison operators" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -532,6 +549,7 @@ test "vector division operators" {
     if (builtin.zig_backend == .stage2_llvm and comptime builtin.cpu.arch.isArmOrThumb()) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTestDiv(comptime T: type, x: @Vector(4, T), y: @Vector(4, T)) !void {
@@ -622,6 +640,7 @@ test "vector bitwise not operator" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTestNot(comptime T: type, x: @Vector(4, T)) !void {
@@ -653,6 +672,7 @@ test "vector shift operators" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTestShift(x: anytype, y: anytype) !void {
@@ -743,6 +763,7 @@ test "vector reduce operation" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArmOrThumb()) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn testReduce(comptime op: std.builtin.ReduceOp, x: anytype, expected: anytype) !void {
@@ -901,6 +922,7 @@ test "mask parameter of @shuffle is comptime scope" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     if (builtin.zig_backend == .stage2_x86_64 and !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .ssse3)) return error.SkipZigTest;
@@ -927,6 +949,7 @@ test "saturating add" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -961,6 +984,7 @@ test "saturating subtraction" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -985,6 +1009,7 @@ test "saturating multiplication" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     // TODO: once #9660 has been solved, remove this line
     if (builtin.target.cpu.arch == .wasm32) return error.SkipZigTest;
@@ -1013,6 +1038,7 @@ test "saturating shift-left" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if
(builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -1036,6 +1062,7 @@ test "multiplication-assignment operator with an array operand" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -1056,6 +1083,7 @@ test "@addWithOverflow" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -1103,6 +1131,7 @@ test "@subWithOverflow" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -1134,6 +1163,7 @@ test "@mulWithOverflow" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -1155,6 +1185,7 @@ test "@shlWithOverflow" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -1198,6 +1229,7 @@ test "loading the second vector from a slice of vectors" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; @setRuntimeSafety(false); var small_bases = [2]@Vector(2, u8){ @@ -1214,6 +1246,7 @@ test "array of vectors is copied" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const Vec3 = @Vector(3, i32); var points = [_]Vec3{ @@ -1237,6 +1270,7 @@ test "byte vector initialized in inline function" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (comptime builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .x86_64 and 
builtin.cpu.features.isEnabled(@intFromEnum(std.Target.x86.Feature.avx512f))) @@ -1306,6 +1340,7 @@ test "@intCast to u0" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var zeros = @Vector(2, u32){ 0, 0 }; _ = &zeros; @@ -1330,6 +1365,7 @@ test "array operands to shuffle are coerced to vectors" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const mask = [5]i32{ -1, 0, 1, 2, 3 }; @@ -1345,6 +1381,7 @@ test "load packed vector element" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var x: @Vector(2, u15) = .{ 1, 4 }; try expect((&x[0]).* == 1); @@ -1358,6 +1395,7 @@ test "store packed vector element" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var v = @Vector(4, u1){ 1, 1, 1, 1 }; try expectEqual(@Vector(4, u1){ 1, 1, 1, 1 }, v); @@ -1373,6 +1411,7 @@ test "store to vector in slice" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var v = [_]@Vector(3, f32){ .{ 1, 1, 1 }, @@ -1392,6 +1431,8 @@ test "store vector with memset" { if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_llvm) { switch (builtin.target.cpu.arch) { @@ -1437,6 +1478,7 @@ test "store vector with memset" { test "addition of vectors represented as strings" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const V = @Vector(3, u8); const foo: V = "foo".*; @@ -1450,6 +1492,7 @@ test "compare vectors with different element types" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var a: @Vector(2, u8) = .{ 1, 2 }; var b: @Vector(2, u9) = .{ 3, 0 }; 
@@ -1462,6 +1505,7 @@ test "vector pointer is indexable" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const V = @Vector(2, u32); @@ -1502,6 +1546,7 @@ test "bitcast to vector with different child type" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -1530,7 +1575,6 @@ test "arithmetic on zero-length vectors" { if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO { @@ -1565,6 +1609,7 @@ test "@reduce on bool vector" { test "bitcast vector to array of smaller vectors" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const u8x32 = @Vector(32, u8); const u8x64 = @Vector(64, u8); diff --git a/test/behavior/void.zig b/test/behavior/void.zig index 42c57ca3a6..5c4215b870 100644 --- a/test/behavior/void.zig +++ b/test/behavior/void.zig @@ -20,6 +20,7 @@ test "compare void with void compile time known" { test "iterate over a void slice" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var j: usize = 0; for (times(10), 0..) 
|_, i| { @@ -36,6 +37,7 @@ test "void optional" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var x: ?void = {}; _ = &x; diff --git a/test/behavior/wasm.zig b/test/behavior/wasm.zig index c7d12be29d..2db3847f87 100644 --- a/test/behavior/wasm.zig +++ b/test/behavior/wasm.zig @@ -3,6 +3,8 @@ const expect = std.testing.expect; const builtin = @import("builtin"); test "memory size and grow" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + var prev = @wasmMemorySize(0); _ = &prev; try expect(prev == @wasmMemoryGrow(0, 1)); diff --git a/test/behavior/while.zig b/test/behavior/while.zig index 51ae8c5d98..65e04f7bb2 100644 --- a/test/behavior/while.zig +++ b/test/behavior/while.zig @@ -5,6 +5,7 @@ const assert = std.debug.assert; test "while loop" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var i: i32 = 0; while (i < 4) { @@ -38,6 +39,8 @@ fn staticWhileLoop2() i32 { } test "while with continue expression" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + var sum: i32 = 0; { var i: i32 = 0; @@ -50,6 +53,8 @@ test "while with continue expression" { } test "while with else" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + var sum: i32 = 0; var i: i32 = 0; var got_else: i32 = 0; @@ -77,6 +82,8 @@ fn getNumberOrNull() ?i32 { } test "continue outer while loop" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + testContinueOuter(); comptime testContinueOuter(); } @@ -106,6 +113,7 @@ fn testBreakOuter() void { test "while copies its payload" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -123,6 +131,7 @@ test "while copies its payload" { test "continue and break" { if (builtin.zig_backend == .stage2_aarch64 and builtin.os.tag == .macos) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try runContinueAndBreakTest(); try expect(continue_and_break_counter == 8); @@ -144,6 +153,7 @@ fn runContinueAndBreakTest() !void { test "while with optional as condition" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; numbers_left = 10; var sum: i32 = 0; @@ -156,6 +166,7 @@ test "while with optional as condition" { test "while with optional as condition with else" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; numbers_left = 10; var sum: i32 = 0; @@ -172,6 +183,7 @@ test "while with optional as condition with else" { test "while with error union condition" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; numbers_left = 10; var sum: i32 = 0; @@ -204,6 +216,7 @@ test "while on 
optional with else result follow else prong" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const result = while (returnNull()) |value| { break value; @@ -215,6 +228,7 @@ test "while on optional with else result follow break prong" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const result = while (returnOptional(10)) |value| { break value; @@ -252,6 +266,7 @@ fn returnWithImplicitCastFromWhileLoopTest() anyerror!void { test "while on error union with else result follow else prong" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const result = while (returnError()) |value| { break value; @@ -261,6 +276,7 @@ test "while on error union with else result follow else prong" { test "while on error union with else result follow break prong" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const result = while (returnSuccess(10)) |value| { break value; @@ -287,6 +303,7 @@ test "while optional 2 break statements and an else" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn entry(opt_t: ?bool, f: bool) !void { @@ -306,6 +323,7 @@ test "while error 2 break statements and an else" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn entry(opt_t: anyerror!bool, f: bool) !void { @@ -331,6 +349,8 @@ test "continue inline while loop" { } test "else continue outer while" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + var i: usize = 0; while (true) { i += 1; @@ -344,6 +364,7 @@ test "try terminating an infinite loop" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // Test coverage for https://github.com/ziglang/zig/issues/13546 const Foo = struct { @@ -371,6 +392,7 @@ test "while loop with comptime true condition needs no else block to return valu test "int returned from switch in while" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var x: u32 = 3; const val: usize = while (true) switch (x) { @@ -384,6 +406,7 @@ test "breaking from a loop 
in an if statement" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn retOpt() ?u32 { diff --git a/test/behavior/widening.zig b/test/behavior/widening.zig index c60bb23e3b..16f97550b5 100644 --- a/test/behavior/widening.zig +++ b/test/behavior/widening.zig @@ -8,6 +8,7 @@ test "integer widening" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var a: u8 = 250; var b: u16 = a; @@ -31,6 +32,7 @@ test "implicit unsigned integer to signed integer" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var a: u8 = 250; var b: i16 = a; @@ -44,6 +46,7 @@ test "float widening" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var a: f16 = 12.34; var b: f32 = a; @@ -64,6 +67,7 @@ test "float widening f16 to f128" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var x: f16 = 12.34; var y: f128 = x; @@ -76,6 +80,7 @@ test "cast small unsigned to larger signed" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect(castSmallUnsignedToLargerSigned1(200) == @as(i16, 200)); try expect(castSmallUnsignedToLargerSigned2(9999) == @as(i64, 9999)); diff --git a/test/behavior/wrapping_arithmetic.zig b/test/behavior/wrapping_arithmetic.zig index 2733edad71..958be2f6f0 100644 --- a/test/behavior/wrapping_arithmetic.zig +++ b/test/behavior/wrapping_arithmetic.zig @@ -6,6 +6,7 @@ const expect = std.testing.expect; test "wrapping add" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -43,6 +44,7 @@ test "wrapping add" { test "wrapping subtraction" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -78,6 +80,7 @@ test "wrapping subtraction" { test "wrapping multiplication" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if 
(builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO: once #9660 has been solved, remove this line if (builtin.cpu.arch == .wasm32) return error.SkipZigTest; diff --git a/test/cases/compile_errors/capture_by_ref_while.zig b/test/cases/compile_errors/capture_by_ref_while.zig index 916662c180..dcb7c23548 100644 --- a/test/cases/compile_errors/capture_by_ref_while.zig +++ b/test/cases/compile_errors/capture_by_ref_while.zig @@ -7,4 +7,4 @@ test { // target=native // // :2:25: error: unused capture -// :2:39: error: unused capture \ No newline at end of file +// :2:39: error: unused capture diff --git a/test/cases/compile_errors/switch_expression-missing_error_prong.zig b/test/cases/compile_errors/switch_expression-missing_error_prong.zig index ee28057c43..ec0c0ceb2f 100644 --- a/test/cases/compile_errors/switch_expression-missing_error_prong.zig +++ b/test/cases/compile_errors/switch_expression-missing_error_prong.zig @@ -1,4 +1,4 @@ -const Error = error { +const Error = error{ One, Two, Three, diff --git a/test/cases/compile_errors/switch_expression-multiple_else_prongs.zig b/test/cases/compile_errors/switch_expression-multiple_else_prongs.zig index a6bb48db17..9a56c17081 100644 --- a/test/cases/compile_errors/switch_expression-multiple_else_prongs.zig +++ b/test/cases/compile_errors/switch_expression-multiple_else_prongs.zig @@ -5,14 +5,14 @@ fn f(x: u32) void { else => true, }; } -fn g(x: error{Foo, Bar, Baz}!u32) void { +fn g(x: error{ Foo, Bar, Baz }!u32) void { const value: bool = if (x) |_| true else |e| switch (e) { error.Foo => false, else => true, else => true, }; } -fn h(x: error{Foo, Bar, Baz}!u32) void { +fn h(x: error{ Foo, Bar, Baz }!u32) void { const value: u32 = x catch |e| switch (e) { error.Foo => 1, else => 2, diff --git a/test/cases/inherit_want_safety.zig b/test/cases/inherit_want_safety.zig index a0c79952b8..c9cd399df9 100644 --- a/test/cases/inherit_want_safety.zig +++ b/test/cases/inherit_want_safety.zig @@ -28,7 +28,7 @@ pub export fn entry() usize { } else |e| switch (e) { else => { u += 1; - } + }, } return u; } diff --git a/test/tests.zig b/test/tests.zig index 8affc41846..c0ef4a536a 100644 --- a/test/tests.zig +++ b/test/tests.zig @@ -435,6 +435,16 @@ const test_targets = blk: { // .use_llvm = true, //}, + .{ + .target = .{ + .cpu_arch = .riscv64, + .os_tag = .linux, + .abi = .musl, + }, + .use_llvm = false, + .use_lld = false, + }, + // https://github.com/ziglang/zig/issues/3340 //.{ // .target = .{ @@ -1019,6 +1029,10 @@ pub fn addModuleTests(b: *std.Build, options: ModuleTestOptions) *Step { test_target.use_llvm == false and mem.eql(u8, options.name, "std")) continue; + if (target.cpu.arch != .x86_64 and + test_target.use_llvm == false and mem.eql(u8, options.name, "c-import")) + continue; + if (target.cpu.arch == .x86_64 and target.os.tag == .windows and test_target.target.cpu_arch == null and test_target.optimize_mode != .Debug and mem.eql(u8, options.name, "std")) From a615fbc1f8330e455d02fdda5c6de257b0cde7f4 Mon Sep 17 00:00:00 2001 From: David Rubin Date: Sun, 14 Apr 2024 15:07:02 -0700 Subject: [PATCH 39/44] riscv: mutable globals --- src/arch/riscv64/CodeGen.zig | 19 +++++++++++++++++-- src/arch/riscv64/abi.zig | 32 ++++++++++++++++++++++++-------- src/arch/riscv64/bits.zig | 6 +++--- test/behavior/basic.zig | 1 - 4 files changed, 44 insertions(+), 14 deletions(-) diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index d9f31b4a14..de866b7964 100644 --- a/src/arch/riscv64/CodeGen.zig +++ 
b/src/arch/riscv64/CodeGen.zig @@ -1452,7 +1452,7 @@ fn computeFrameLayout(self: *Self) !FrameLayout { const spill_frame_size = frame_size[@intFromEnum(FrameIndex.spill_frame)]; const call_frame_size = frame_size[@intFromEnum(FrameIndex.call_frame)]; - // TODO: this 24 should be a 16, but we were clobbering the top and bottom of the frame. + // TODO: this 64 should be a 16, but we were clobbering the top and bottom of the frame. // maybe everything can go from the bottom? const acc_frame_size: i32 = std.mem.alignForward( i32, @@ -1497,7 +1497,7 @@ fn memSize(self: *Self, ty: Type) Memory.Size { const mod = self.bin_file.comp.module.?; return switch (ty.zigTypeTag(mod)) { .Float => Memory.Size.fromBitSize(ty.floatBits(self.target.*)), - else => Memory.Size.fromSize(@intCast(ty.abiSize(mod))), + else => Memory.Size.fromByteSize(ty.abiSize(mod)), }; } @@ -4318,6 +4318,21 @@ fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) !void { .off = -dst_reg_off.off, } }, }), + .indirect => |ro| { + const src_reg = try self.copyToTmpRegister(ty, src_mcv); + + _ = try self.addInst(.{ + .tag = .pseudo, + .ops = .pseudo_store_rm, + .data = .{ .rm = .{ + .r = src_reg, + .m = .{ + .base = .{ .reg = ro.reg }, + .mod = .{ .rm = .{ .disp = ro.off, .size = self.memSize(ty) } }, + }, + } }, + }); + }, .load_frame => |frame| return self.genSetStack(ty, frame, src_mcv), .memory => return self.fail("TODO: genCopy memory", .{}), .register_pair => |dst_regs| { diff --git a/src/arch/riscv64/abi.zig b/src/arch/riscv64/abi.zig index 468fede917..9fbb63638e 100644 --- a/src/arch/riscv64/abi.zig +++ b/src/arch/riscv64/abi.zig @@ -4,6 +4,7 @@ const Register = bits.Register; const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager; const Type = @import("../../type.zig").Type; const Module = @import("../../Module.zig"); +const assert = std.debug.assert; pub const Class = enum { memory, byval, integer, double_integer, fields, none }; @@ -93,14 +94,16 @@ pub fn classifyType(ty: Type, mod: *Module) Class { /// There are a maximum of 8 possible return slots. Returned values are in /// the beginning of the array; unused slots are filled with .none. 
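The `classifySystem` hunk that follows implements the slot rule sketched in the doc comment above: a scalar of up to 64 bits occupies one integer slot, anything up to 128 bits occupies two, and a slice spends one slot on the pointer and one on the length. A minimal standalone illustration of the slot counts (the `intSlots` helper below is hypothetical, not part of the backend):

```zig
const std = @import("std");

// Hypothetical helper, not backend code: how many of the 8 return slots an
// integer of the given bit width occupies under the classifySystem rules.
fn intSlots(bits: u16) u8 {
    if (bits <= 64) return 1; // result[0] = .integer
    if (bits <= 128) return 2; // result[0] and result[1] = .integer
    unreachable; // classifySystem likewise rejects > 128-bit integers
}

test intSlots {
    try std.testing.expectEqual(@as(u8, 1), intSlots(@bitSizeOf(u32)));
    try std.testing.expectEqual(@as(u8, 1), intSlots(@bitSizeOf(u64)));
    try std.testing.expectEqual(@as(u8, 2), intSlots(@bitSizeOf(u128)));
    // A []u8 slice likewise takes two slots: one for ptr, one for len.
}
```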
-pub fn classifySystem(ty: Type, mod: *Module) [8]Class { +pub fn classifySystem(ty: Type, zcu: *Module) [8]Class { + const ip = zcu.intern_pool; var result = [1]Class{.none} ** 8; - switch (ty.zigTypeTag(mod)) { + + switch (ty.zigTypeTag(zcu)) { .Bool, .Void, .NoReturn => { result[0] = .integer; return result; }, - .Pointer => switch (ty.ptrSize(mod)) { + .Pointer => switch (ty.ptrSize(zcu)) { .Slice => { result[0] = .integer; result[1] = .integer; @@ -112,7 +115,7 @@ pub fn classifySystem(ty: Type, mod: *Module) [8]Class { }, }, .Optional => { - if (ty.isPtrLikeOptional(mod)) { + if (ty.isPtrLikeOptional(zcu)) { result[0] = .integer; return result; } @@ -121,7 +124,7 @@ pub fn classifySystem(ty: Type, mod: *Module) [8]Class { return result; }, .Int, .Enum, .ErrorSet => { - const int_bits = ty.intInfo(mod).bits; + const int_bits = ty.intInfo(zcu).bits; if (int_bits <= 64) { result[0] = .integer; return result; @@ -134,8 +137,8 @@ pub fn classifySystem(ty: Type, mod: *Module) [8]Class { unreachable; // support > 128 bit int arguments }, .ErrorUnion => { - const payload_ty = ty.errorUnionPayload(mod); - const payload_bits = payload_ty.bitSize(mod); + const payload_ty = ty.errorUnionPayload(zcu); + const payload_bits = payload_ty.bitSize(zcu); // the error union itself result[0] = .integer; @@ -143,7 +146,20 @@ pub fn classifySystem(ty: Type, mod: *Module) [8]Class { // anyerror!void can fit into one register if (payload_bits == 0) return result; - std.debug.panic("support ErrorUnion payload {}", .{payload_ty.fmt(mod)}); + std.debug.panic("support ErrorUnion payload {}", .{payload_ty.fmt(zcu)}); + }, + .Struct => { + const loaded_struct = ip.loadStructType(ty.toIntern()); + const ty_size = ty.abiSize(zcu); + + if (loaded_struct.layout == .@"packed") { + assert(ty_size <= 16); + result[0] = .integer; + if (ty_size > 8) result[1] = .integer; + return result; + } + + std.debug.panic("support Struct in classifySystem", .{}); }, else => |bad_ty| std.debug.panic("classifySystem {s}", .{@tagName(bad_ty)}), } diff --git a/src/arch/riscv64/bits.zig b/src/arch/riscv64/bits.zig index d3dd78cf3e..eef0828cdb 100644 --- a/src/arch/riscv64/bits.zig +++ b/src/arch/riscv64/bits.zig @@ -20,7 +20,7 @@ pub const Memory = struct { size: Size, disp: i32 = 0, }, - off: u64, + off: i32, }; pub const Size = enum(u4) { @@ -33,7 +33,7 @@ pub const Memory = struct { /// Double word, 8 Bytes dword, - pub fn fromSize(size: u32) Size { + pub fn fromByteSize(size: u64) Size { return switch (size) { 1 => .byte, 2 => .hword, @@ -66,7 +66,7 @@ pub const Memory = struct { /// Asserts `mem` can be represented as a `FrameLoc`. 
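Earlier in this `bits.zig` hunk, `fromSize` becomes `fromByteSize` and takes the `u64` that `Type.abiSize` returns, so call sites such as `memSize` no longer need an `@intCast`. Only the 1- and 2-byte arms are visible above; restated as a standalone sketch (the `word` arm is assumed from the RISC-V naming scheme, and the enum here only mirrors the compiler's `Memory.Size`):

```zig
const std = @import("std");

const Size = enum { byte, hword, word, dword };

// Sketch of the byte-size -> access-width mapping; the 4-byte arm is assumed.
fn fromByteSize(size: u64) Size {
    return switch (size) {
        1 => .byte,
        2 => .hword,
        4 => .word,
        8 => .dword,
        else => std.debug.panic("unexpected size: {d}", .{size}),
    };
}

test fromByteSize {
    try std.testing.expectEqual(Size.dword, fromByteSize(@sizeOf(u64)));
    try std.testing.expectEqual(Size.hword, fromByteSize(@sizeOf(u16)));
}
```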
pub fn toFrameLoc(mem: Memory, mir: Mir) Mir.FrameLoc { const offset: i32 = switch (mem.mod) { - .off => |off| @intCast(off), + .off => |off| off, .rm => |rm| rm.disp, }; diff --git a/test/behavior/basic.zig b/test/behavior/basic.zig index 883540d31c..337cc5eb5a 100644 --- a/test/behavior/basic.zig +++ b/test/behavior/basic.zig @@ -67,7 +67,6 @@ var g2: i32 = 0; test "global variables" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect(g2 == 0); g2 = g1; From a30af172e8dc360cb0a71a5c4dfd904120555715 Mon Sep 17 00:00:00 2001 From: David Rubin Date: Tue, 16 Apr 2024 16:39:31 -0700 Subject: [PATCH 40/44] riscv: math progress --- src/arch/riscv64/CodeGen.zig | 131 ++++++++++++++-------------- src/arch/riscv64/Encoding.zig | 6 +- src/arch/riscv64/Mir.zig | 2 + test/behavior/align.zig | 2 - test/behavior/array.zig | 4 - test/behavior/basic.zig | 19 ---- test/behavior/bitcast.zig | 1 - test/behavior/call.zig | 2 - test/behavior/cast.zig | 7 -- test/behavior/decltest.zig | 2 - test/behavior/defer.zig | 5 -- test/behavior/enum.zig | 1 - test/behavior/eval.zig | 12 --- test/behavior/export_keyword.zig | 1 - test/behavior/fn.zig | 9 -- test/behavior/for.zig | 7 -- test/behavior/generics.zig | 5 -- test/behavior/globals.zig | 1 - test/behavior/packed-struct.zig | 1 - test/behavior/pointers.zig | 3 - test/behavior/ptrcast.zig | 1 - test/behavior/sizeof_and_typeof.zig | 4 - test/behavior/slice.zig | 2 - test/behavior/string_literals.zig | 2 - test/behavior/struct.zig | 5 -- test/behavior/this.zig | 2 - test/behavior/tuple.zig | 2 - test/behavior/type_info.zig | 7 -- test/behavior/undefined.zig | 3 - test/behavior/union.zig | 4 - test/behavior/vector.zig | 1 - test/behavior/while.zig | 10 --- 32 files changed, 73 insertions(+), 191 deletions(-) diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index de866b7964..7ed1174433 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -11,6 +11,7 @@ const Type = @import("../../type.zig").Type; const Value = @import("../../Value.zig"); const link = @import("../../link.zig"); const Module = @import("../../Module.zig"); +const Package = @import("../../Package.zig"); const InternPool = @import("../../InternPool.zig"); const Compilation = @import("../../Compilation.zig"); const ErrorMsg = Module.ErrorMsg; @@ -54,6 +55,7 @@ const RegisterView = enum(u1) { gpa: Allocator, air: Air, +mod: *Package.Module, liveness: Liveness, bin_file: *link.File, target: *const std.Target, @@ -724,6 +726,7 @@ pub fn generate( var function = Self{ .gpa = gpa, .air = air, + .mod = mod, .liveness = liveness, .target = target, .bin_file = bin_file, @@ -2138,82 +2141,78 @@ fn airAddWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const lhs_ty = self.typeOf(extra.lhs); const rhs_ty = self.typeOf(extra.rhs); - const add_result_mcv = try self.binOp(.add, lhs, lhs_ty, rhs, rhs_ty); - const add_result_lock = self.register_manager.lockRegAssumeUnused(add_result_mcv.register); - defer self.register_manager.unlockReg(add_result_lock); - - const tuple_ty = self.typeOfIndex(inst); const int_info = lhs_ty.intInfo(zcu); - // TODO: optimization, set this to true. needs the other struct access stuff to support - // accessing registers. 
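The rest of this `airAddWithOverflow` hunk, below, replaces the masked `andi` comparison with a shift pair: the full-width sum is shifted left so the small integer's top bit lands in bit 63 of the register, shifted back down (`srli` for unsigned values, the new `srai` for signed ones so the sign is re-extended), and compared against the unshifted sum with `cmp_neq`; the two differ exactly when the result does not fit the source type. A standalone sketch of the unsigned case (the `addOverflows` helper is illustrative, not the backend's code):

```zig
const std = @import("std");

// Illustrative only: detect overflow of a T-sized unsigned addition carried
// out in a 64-bit register, mirroring the slli/srli pair emitted below.
fn addOverflows(comptime T: type, a: u64, b: u64) bool {
    const shift = 64 - @bitSizeOf(T); // e.g. 56 for u8
    const sum = a +% b; // full-width result of the `add`
    const truncated = (sum << shift) >> shift; // slli then srli
    return truncated != sum; // cmp_neq: true iff the sum does not fit in T
}

test addOverflows {
    try std.testing.expect(!addOverflows(u8, 200, 55)); // 255 still fits in u8
    try std.testing.expect(addOverflows(u8, 200, 56)); // 256 does not
}
```

One nicety of this scheme is that it needs no mask constant: the same two shifts and one compare work for any width, which is what lets the new code handle every power-of-two size from 8 bits up in a single path.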
+ const tuple_ty = self.typeOfIndex(inst); const result_mcv = try self.allocRegOrMem(inst, false); const offset = result_mcv.load_frame; - try self.genSetStack( - lhs_ty, - .{ + if (int_info.bits >= 8 and math.isPowerOfTwo(int_info.bits)) { + const add_result = try self.binOp(.add, lhs, lhs_ty, rhs, rhs_ty); + const add_result_reg = try self.copyToTmpRegister(lhs_ty, add_result); + const add_result_reg_lock = self.register_manager.lockRegAssumeUnused(add_result_reg); + defer self.register_manager.unlockReg(add_result_reg_lock); + + const shift_amount: u6 = @intCast(Type.usize.bitSize(zcu) - int_info.bits); + + const shift_reg, const shift_lock = try self.allocReg(); + defer self.register_manager.unlockReg(shift_lock); + + _ = try self.addInst(.{ + .tag = .slli, + .ops = .rri, + .data = .{ + .i_type = .{ + .rd = shift_reg, + .rs1 = add_result_reg, + .imm12 = Immediate.s(shift_amount), + }, + }, + }); + + _ = try self.addInst(.{ + .tag = if (int_info.signedness == .unsigned) .srli else .srai, + .ops = .rri, + .data = .{ + .i_type = .{ + .rd = shift_reg, + .rs1 = shift_reg, + .imm12 = Immediate.s(shift_amount), + }, + }, + }); + + const add_result_frame: FrameAddr = .{ .index = offset.index, .off = offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(0, zcu))), - }, - add_result_mcv, - ); + }; + try self.genSetStack( + lhs_ty, + add_result_frame, + add_result, + ); - if (int_info.bits >= 8 and math.isPowerOfTwo(int_info.bits)) { - if (int_info.signedness == .unsigned) { - switch (int_info.bits) { - 1...8 => { - const max_val = std.math.pow(u16, 2, int_info.bits) - 1; + const overflow_mcv = try self.binOp( + .cmp_neq, + .{ .register = shift_reg }, + lhs_ty, + .{ .register = add_result_reg }, + lhs_ty, + ); - const overflow_reg, const overflow_lock = try self.allocReg(); - defer self.register_manager.unlockReg(overflow_lock); + const overflow_frame: FrameAddr = .{ + .index = offset.index, + .off = offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(1, zcu))), + }; + try self.genSetStack( + Type.u1, + overflow_frame, + overflow_mcv, + ); - const add_reg, const add_lock = blk: { - if (add_result_mcv == .register) break :blk .{ add_result_mcv.register, null }; - - const add_reg, const add_lock = try self.allocReg(); - try self.genSetReg(lhs_ty, add_reg, add_result_mcv); - break :blk .{ add_reg, add_lock }; - }; - defer if (add_lock) |lock| self.register_manager.unlockReg(lock); - - _ = try self.addInst(.{ - .tag = .andi, - .ops = .rri, - .data = .{ .i_type = .{ - .rd = overflow_reg, - .rs1 = add_reg, - .imm12 = Immediate.s(max_val), - } }, - }); - - const overflow_mcv = try self.binOp( - .cmp_neq, - .{ .register = overflow_reg }, - lhs_ty, - .{ .register = add_reg }, - lhs_ty, - ); - - try self.genSetStack( - Type.u1, - .{ - .index = offset.index, - .off = offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(1, zcu))), - }, - overflow_mcv, - ); - - break :result result_mcv; - }, - - else => return self.fail("TODO: addWithOverflow check for size {d}", .{int_info.bits}), - } - } else { - return self.fail("TODO: airAddWithOverFlow calculate carry for signed addition", .{}); - } + break :result result_mcv; } else { - return self.fail("TODO: airAddWithOverflow with < 8 bits or non-pow of 2", .{}); + return self.fail("TODO: less than 8 bit or non-pow 2 addition", .{}); } }; @@ -3500,9 +3499,11 @@ fn genCall( if (self.bin_file.cast(link.File.Elf)) |elf_file| { const sym_index = try elf_file.zigObjectPtr().?.getOrCreateMetadataForDecl(elf_file, func.owner_decl); const sym = 
elf_file.symbol(sym_index); + _ = try sym.getOrCreateZigGotEntry(sym_index, elf_file); const got_addr = sym.zigGotAddress(elf_file); try self.genSetReg(Type.usize, .ra, .{ .memory = got_addr }); + _ = try self.addInst(.{ .tag = .jalr, .ops = .rri, diff --git a/src/arch/riscv64/Encoding.zig index c23ba10d9b..7953bb0cca 100644 --- a/src/arch/riscv64/Encoding.zig +++ b/src/arch/riscv64/Encoding.zig @@ -16,6 +16,7 @@ pub const Mnemonic = enum { andi, slli, srli, + srai, addi, jalr, @@ -69,6 +70,7 @@ pub const Mnemonic = enum { .jalr => .{ .opcode = 0b1100111, .funct3 = 0b000, .funct7 = null }, .slli => .{ .opcode = 0b0010011, .funct3 = 0b001, .funct7 = null }, .srli => .{ .opcode = 0b0010011, .funct3 = 0b101, .funct7 = null }, + .srai => .{ .opcode = 0b0010011, .funct3 = 0b101, .funct7 = null, .offset = 1 << 10 }, .lui => .{ .opcode = 0b0110111, .funct3 = null, .funct7 = null }, @@ -123,6 +125,7 @@ pub const InstEnc = enum { .andi, .slli, .srli, + .srai, => .I, .lui, @@ -299,7 +302,7 @@ pub const Data = union(InstEnc) { .I = .{ .rd = ops[0].reg.id(), .rs1 = ops[1].reg.id(), - .imm0_11 = ops[2].imm.asBits(u12), + .imm0_11 = ops[2].imm.asBits(u12) + enc.offset, .opcode = enc.opcode, .funct3 = enc.funct3.?, @@ -374,6 +377,7 @@ const Enc = struct { opcode: u7, funct3: ?u3, funct7: ?u7, + offset: u12 = 0, }; fn verifyOps(mnem: Mnemonic, ops: []const Operand) bool { diff --git a/src/arch/riscv64/Mir.zig index 9ecca44bd8..08bda25de6 100644 --- a/src/arch/riscv64/Mir.zig +++ b/src/arch/riscv64/Mir.zig @@ -53,6 +53,8 @@ pub const Inst = struct { srli, /// Immediate Logical Left Shift, uses i_type payload slli, + /// Immediate Arithmetic Right Shift, uses i_type payload. + srai, /// Register Logical Left Shift, uses r_type payload sllw, /// Register Logical Right Shift, uses r_type payload diff --git a/test/behavior/align.zig index ed650b3ef4..ace8fe7866 100644 --- a/test/behavior/align.zig +++ b/test/behavior/align.zig @@ -44,7 +44,6 @@ test "default alignment allows unspecified in type syntax" { } test "implicitly decreasing pointer alignment" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO const a: u32 align(4) = 3; const b: u32 align(8) = 4; try expect(addUnaligned(&a, &b) == 7); @@ -227,7 +226,6 @@ fn fnWithAlignedStack() i32 { } test "implicitly decreasing slice alignment" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO diff --git a/test/behavior/array.zig index 2cb7cfee4a..1759e5c696 100644 --- a/test/behavior/array.zig +++ b/test/behavior/array.zig @@ -203,7 +203,6 @@ test "nested arrays of strings" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const array_of_strings = [_][]const u8{ "hello", "this", "is", "my", "thing" }; for (array_of_strings, 0..)
|s, i| { @@ -329,7 +328,6 @@ test "read/write through global variable array of struct fields initialized via if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -738,8 +736,6 @@ test "pointer to array has ptr field" { } test "discarded array init preserves result location" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - const S = struct { fn f(p: *u32) u16 { p.* += 1; diff --git a/test/behavior/basic.zig b/test/behavior/basic.zig index 337cc5eb5a..eabac35787 100644 --- a/test/behavior/basic.zig +++ b/test/behavior/basic.zig @@ -85,8 +85,6 @@ test "type equality" { } test "pointer dereferencing" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - var x = @as(i32, 3); const y = &x; @@ -138,21 +136,18 @@ fn first4KeysOfHomeRow() []const u8 { test "return string from function" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect(mem.eql(u8, first4KeysOfHomeRow(), "aoeu")); } test "hex escape" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect(mem.eql(u8, "\x68\x65\x6c\x6c\x6f", "hello")); } test "multiline string" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const s1 = \\one @@ -165,7 +160,6 @@ test "multiline string" { test "multiline string comments at start" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const s1 = //\\one @@ -178,7 +172,6 @@ test "multiline string comments at start" { test "multiline string comments at end" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const s1 = \\one @@ -191,7 +184,6 @@ test "multiline string comments at end" { test "multiline string comments in middle" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const s1 = \\one @@ -204,7 +196,6 @@ test "multiline string comments in middle" { test "multiline string comments at multiple places" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const s1 = \\one @@ -218,14 +209,11 @@ test "multiline string comments at multiple places" { } test "string concatenation simple" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - try expect(mem.eql(u8, "OK" ++ " IT " ++ "WORKED", "OK IT WORKED")); } test "array mult operator" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect(mem.eql(u8, "ab" ** 5, "ababababab")); } @@ -308,8 +296,6 @@ test "function closes over local const" { } test "volatile load and store" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - var number: i32 = 1234; const ptr = 
@as(*volatile i32, &number); ptr.* += 1; @@ -326,7 +312,6 @@ fn fB() []const u8 { test "call function pointer in struct" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect(mem.eql(u8, f3(true), "a")); try expect(mem.eql(u8, f3(false), "b")); @@ -350,7 +335,6 @@ const FnPtrWrapper = struct { test "const ptr from var variable" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var x: u64 = undefined; var y: u64 = undefined; @@ -370,7 +354,6 @@ test "call result of if else expression" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect(mem.eql(u8, f2(true), "a")); try expect(mem.eql(u8, f2(false), "b")); @@ -479,7 +462,6 @@ fn nine() u8 { test "struct inside function" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testStructInFn(); try comptime testStructInFn(); @@ -1219,7 +1201,6 @@ test "integer compare" { test "reference to inferred local variable works as expected" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const Crasher = struct { lets_crash: u64 = 0, diff --git a/test/behavior/bitcast.zig b/test/behavior/bitcast.zig index 2c7e069b02..28c797cef3 100644 --- a/test/behavior/bitcast.zig +++ b/test/behavior/bitcast.zig @@ -520,7 +520,6 @@ test "@bitCast of packed struct of bools all false" { if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const P = packed struct { b0: bool, diff --git a/test/behavior/call.zig b/test/behavior/call.zig index 7d7325721c..2f737f098c 100644 --- a/test/behavior/call.zig +++ b/test/behavior/call.zig @@ -572,8 +572,6 @@ test "call function pointer in comptime field" { } test "generic function pointer can be called" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - const S = struct { var ok = false; fn foo(x: anytype) void { diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig index 1c4041f33d..3ea0d800cc 100644 --- a/test/behavior/cast.zig +++ b/test/behavior/cast.zig @@ -400,7 +400,6 @@ test "cast from ?[*]T to ??[*]T" { test "peer type unsigned int to signed" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var w: u31 = 5; var x: u8 = 7; @@ -443,7 +442,6 @@ test "peer resolve array and const slice" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if 
(builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testPeerResolveArrayConstSlice(true); try comptime testPeerResolveArrayConstSlice(true); @@ -535,7 +533,6 @@ fn peerTypeEmptyArrayAndSliceAndError(a: bool, slice: []u8) anyerror![]u8 { test "implicit cast from *const [N]T to []const T" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testCastConstArrayRefToConstSlice(); try comptime testCastConstArrayRefToConstSlice(); @@ -718,7 +715,6 @@ test "peer type resolution: error set supersets" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const a: error{ One, Two } = undefined; const b: error{One} = undefined; @@ -748,7 +744,6 @@ test "peer type resolution: disjoint error sets" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const a: error{ One, Two } = undefined; const b: error{Three} = undefined; @@ -813,7 +808,6 @@ test "peer type resolution: error union after non-error" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const a: u32 = undefined; const b: error{ One, Two }!u32 = undefined; @@ -1393,7 +1387,6 @@ test "cast between *[N]void and []void" { test "peer resolve arrays of different size to const slice" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect(mem.eql(u8, boolToStr(true), "true")); try expect(mem.eql(u8, boolToStr(false), "false")); diff --git a/test/behavior/decltest.zig b/test/behavior/decltest.zig index 57afc4eade..b01a431e28 100644 --- a/test/behavior/decltest.zig +++ b/test/behavior/decltest.zig @@ -5,7 +5,5 @@ pub fn the_add_function(a: u32, b: u32) u32 { } test the_add_function { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - if (the_add_function(1, 2) != 3) unreachable; } diff --git a/test/behavior/defer.zig b/test/behavior/defer.zig index 8f8ba8647d..ba0d949a7d 100644 --- a/test/behavior/defer.zig +++ b/test/behavior/defer.zig @@ -5,8 +5,6 @@ const expectEqual = std.testing.expectEqual; const expectError = std.testing.expectError; test "break and continue inside loop inside defer expression" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - testBreakContInDefer(10); comptime testBreakContInDefer(10); } @@ -23,8 +21,6 @@ fn testBreakContInDefer(x: usize) void { } test "defer and labeled break" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - var i = @as(usize, 0); blk: { @@ -38,7 +34,6 @@ test "defer and labeled break" { test "errdefer does not apply to fn 
inside fn" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (testNestedFnErrDefer()) |_| @panic("expected error") else |e| try expect(e == error.Bad); } diff --git a/test/behavior/enum.zig b/test/behavior/enum.zig index 8e93739687..8b5890e231 100644 --- a/test/behavior/enum.zig +++ b/test/behavior/enum.zig @@ -1055,7 +1055,6 @@ test "tag name with assigned enum values" { test "@tagName on enum literals" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect(mem.eql(u8, @tagName(.FooBar), "FooBar")); comptime assert(mem.eql(u8, @tagName(.FooBar), "FooBar")); diff --git a/test/behavior/eval.zig b/test/behavior/eval.zig index ef4e182df2..75933944fd 100644 --- a/test/behavior/eval.zig +++ b/test/behavior/eval.zig @@ -306,8 +306,6 @@ fn performFn(comptime prefix_char: u8, start_value: i32) i32 { } test "comptime iterate over fn ptr list" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - try expect(performFn('t', 1) == 6); try expect(performFn('o', 0) == 1); try expect(performFn('w', 99) == 99); @@ -413,8 +411,6 @@ var st_init_str_foo = StInitStrFoo{ }; test "inline for with same type but different values" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - var res: usize = 0; inline for ([_]type{ [2]u8, [1]u8, [2]u8 }) |T| { var a: T = undefined; @@ -544,7 +540,6 @@ test "runtime 128 bit integer division" { test "@tagName of @typeInfo" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const str = @tagName(@typeInfo(u8)); try expect(std.mem.eql(u8, str, "Int")); @@ -1007,7 +1002,6 @@ test "closure capture type of runtime-known var" { test "comptime break passing through runtime condition converted to runtime break" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -1042,7 +1036,6 @@ test "comptime break to outer loop passing through runtime condition converted t if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -1266,7 +1259,6 @@ test "pass pointer to field of comptime-only type as a runtime parameter" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const Mixed = struct { @@ -1508,7 +1500,6 @@ test "continue nested inline for loop in named block expr" { test "x and false is comptime-known false" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) 
return error.SkipZigTest; const T = struct { var x: u32 = 0; @@ -1536,7 +1527,6 @@ test "x and false is comptime-known false" { test "x or true is comptime-known true" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const T = struct { var x: u32 = 0; @@ -1598,8 +1588,6 @@ test "comptime function turns function value to function pointer" { } test "container level const and var have unique addresses" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - const S = struct { x: i32, y: i32, diff --git a/test/behavior/export_keyword.zig b/test/behavior/export_keyword.zig index 472418d9b2..70839959d2 100644 --- a/test/behavior/export_keyword.zig +++ b/test/behavior/export_keyword.zig @@ -26,7 +26,6 @@ const PackedUnion = packed union { test "packed struct, enum, union parameters in extern function" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; testPackedStuff(&(PackedStruct{ .a = 1, diff --git a/test/behavior/fn.zig b/test/behavior/fn.zig index fc7b1605bf..0e14af68fa 100644 --- a/test/behavior/fn.zig +++ b/test/behavior/fn.zig @@ -6,8 +6,6 @@ const expect = testing.expect; const expectEqual = testing.expectEqual; test "params" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - try expect(testParamsAdd(22, 11) == 33); } fn testParamsAdd(a: i32, b: i32) i32 { @@ -15,8 +13,6 @@ fn testParamsAdd(a: i32, b: i32) i32 { } test "local variables" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - testLocVars(2); } fn testLocVars(b: i32) void { @@ -25,8 +21,6 @@ fn testLocVars(b: i32) void { } test "mutable local variables" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - var zero: i32 = 0; _ = &zero; try expect(zero == 0); @@ -325,7 +319,6 @@ test "function pointers" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const fns = [_]*const @TypeOf(fn1){ &fn1, @@ -403,8 +396,6 @@ test "function call with anon list literal - 2D" { } test "ability to give comptime types and non comptime types to same parameter" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - const S = struct { fn doTheTest() !void { var x: i32 = 1; diff --git a/test/behavior/for.zig b/test/behavior/for.zig index 200bfd0ce2..1eac03ec79 100644 --- a/test/behavior/for.zig +++ b/test/behavior/for.zig @@ -7,7 +7,6 @@ const mem = std.mem; test "continue in for loop" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const array = [_]i32{ 1, 2, 3, 4, 5 }; var sum: i32 = 0; @@ -22,8 +21,6 @@ test "continue in for loop" { } test "break from outer for loop" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - try testBreakOuter(); try comptime testBreakOuter(); } @@ -41,8 +38,6 @@ fn testBreakOuter() !void { } test "continue outer for loop" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - try 
testContinueOuter(); try comptime testContinueOuter(); } @@ -263,7 +258,6 @@ test "for loop with else branch" { test "count over fixed range" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var sum: usize = 0; for (0..6) |i| { @@ -276,7 +270,6 @@ test "count over fixed range" { test "two counters" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var sum: usize = 0; for (0..10, 10..20) |i, j| { diff --git a/test/behavior/generics.zig b/test/behavior/generics.zig index 46c5babdf1..7ed75f0ead 100644 --- a/test/behavior/generics.zig +++ b/test/behavior/generics.zig @@ -5,8 +5,6 @@ const expect = testing.expect; const expectEqual = testing.expectEqual; test "one param, explicit comptime" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - var x: usize = 0; x += checkSize(i32); x += checkSize(bool); @@ -151,8 +149,6 @@ fn GenericDataThing(comptime count: isize) type { } test "use generic param in generic param" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - try expect(aGenericFn(i32, 3, 4) == 7); } fn aGenericFn(comptime T: type, comptime a: T, b: T) T { @@ -258,7 +254,6 @@ test "generic function instantiation turns into comptime call" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { diff --git a/test/behavior/globals.zig b/test/behavior/globals.zig index b832323323..0c988450c0 100644 --- a/test/behavior/globals.zig +++ b/test/behavior/globals.zig @@ -50,7 +50,6 @@ test "global loads can affect liveness" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const ByRef = struct { diff --git a/test/behavior/packed-struct.zig b/test/behavior/packed-struct.zig index fa78c023a6..c13919d0f8 100644 --- a/test/behavior/packed-struct.zig +++ b/test/behavior/packed-struct.zig @@ -889,7 +889,6 @@ test "runtime init of unnamed packed struct type" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var z: u8 = 123; _ = &z; diff --git a/test/behavior/pointers.zig b/test/behavior/pointers.zig index bc45a978e6..331a5689c8 100644 --- a/test/behavior/pointers.zig +++ b/test/behavior/pointers.zig @@ -6,8 +6,6 @@ const expect = testing.expect; const expectError = testing.expectError; test "dereference pointer" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - try comptime testDerefPtr(); try testDerefPtr(); } @@ -55,7 +53,6 @@ fn PtrOf(comptime T: type) type { test "implicit cast single item pointer to C pointer and back" { 
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var y: u8 = 11; const x: [*c]u8 = &y; diff --git a/test/behavior/ptrcast.zig b/test/behavior/ptrcast.zig index 4d191ce582..fc8a8b7482 100644 --- a/test/behavior/ptrcast.zig +++ b/test/behavior/ptrcast.zig @@ -176,7 +176,6 @@ test "reinterpret struct field at comptime" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const numNative = comptime Bytes.init(0x12345678); if (native_endian != .little) { diff --git a/test/behavior/sizeof_and_typeof.zig b/test/behavior/sizeof_and_typeof.zig index c050487779..506baa2666 100644 --- a/test/behavior/sizeof_and_typeof.zig +++ b/test/behavior/sizeof_and_typeof.zig @@ -144,8 +144,6 @@ test "@sizeOf(T) == 0 doesn't force resolving struct size" { } test "@TypeOf() has no runtime side effects" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - const S = struct { fn foo(comptime T: type, ptr: *T) T { ptr.* += 1; @@ -438,8 +436,6 @@ test "Extern function calls, dereferences and field access in @TypeOf" { } test "@sizeOf struct is resolved when used as operand of slicing" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - const dummy = struct {}; const S = struct { var buf: [1]u8 = undefined; diff --git a/test/behavior/slice.zig b/test/behavior/slice.zig index 8453ffc451..1bcfc42dd5 100644 --- a/test/behavior/slice.zig +++ b/test/behavior/slice.zig @@ -176,7 +176,6 @@ test "comptime pointer cast array and then slice" { test "slicing zero length array" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const s1 = ""[0..]; const s2 = ([_]u32{})[0..]; @@ -738,7 +737,6 @@ test "array mult of slice gives ptr to array" { test "slice bounds in comptime concatenation" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const bs = comptime blk: { const b = "........1........"; diff --git a/test/behavior/string_literals.zig b/test/behavior/string_literals.zig index 1dba4c1a7f..b1bb508503 100644 --- a/test/behavior/string_literals.zig +++ b/test/behavior/string_literals.zig @@ -8,7 +8,6 @@ const ptr_tag_name: [*:0]const u8 = tag_name; test "@tagName() returns a string literal" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try std.testing.expect(*const [13:0]u8 == @TypeOf(tag_name)); try std.testing.expect(std.mem.eql(u8, "TestEnumValue", tag_name)); @@ -22,7 +21,6 @@ const ptr_error_name: [*:0]const u8 = error_name; test "@errorName() returns a string literal" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try std.testing.expect(*const [13:0]u8 == @TypeOf(error_name)); try 
std.testing.expect(std.mem.eql(u8, "TestErrorCode", error_name)); diff --git a/test/behavior/struct.zig b/test/behavior/struct.zig index 4312612141..dceac36c97 100644 --- a/test/behavior/struct.zig +++ b/test/behavior/struct.zig @@ -12,7 +12,6 @@ top_level_field: i32, test "top level fields" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var instance = @This(){ .top_level_field = 1234, @@ -125,8 +124,6 @@ test "struct byval assign" { } test "call struct static method" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - const result = StructWithNoFields.add(3, 4); try expect(result == 7); } @@ -759,7 +756,6 @@ test "packed struct with u0 field access" { test "access to global struct fields" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; g_foo.bar.value = 42; try expect(g_foo.bar.value == 42); @@ -2134,7 +2130,6 @@ test "struct field default value is a call" { test "aggregate initializers should allow initializing comptime fields, verifying equality" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var x: u32 = 15; _ = &x; diff --git a/test/behavior/this.zig b/test/behavior/this.zig index c8e1459ec8..330f9a714d 100644 --- a/test/behavior/this.zig +++ b/test/behavior/this.zig @@ -21,8 +21,6 @@ fn add(x: i32, y: i32) i32 { } test "this refer to module call private fn" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - try expect(module.add(1, 2) == 3); } diff --git a/test/behavior/tuple.zig b/test/behavior/tuple.zig index ab407f8e60..82e9dd02eb 100644 --- a/test/behavior/tuple.zig +++ b/test/behavior/tuple.zig @@ -258,7 +258,6 @@ test "offsetOf anon struct" { test "initializing tuple with mixed comptime-runtime fields" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var x: u32 = 15; _ = &x; @@ -271,7 +270,6 @@ test "initializing tuple with mixed comptime-runtime fields" { test "initializing anon struct with mixed comptime-runtime fields" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var x: u32 = 15; _ = &x; diff --git a/test/behavior/type_info.zig b/test/behavior/type_info.zig index 805f24bc18..9ac5e25e89 100644 --- a/test/behavior/type_info.zig +++ b/test/behavior/type_info.zig @@ -161,7 +161,6 @@ test "type info: error set, error union info, anyerror" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testErrorSet(); try comptime testErrorSet(); @@ -193,7 +192,6 @@ test "type info: error set single value" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == 
.stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const TestSet = error.One; @@ -207,7 +205,6 @@ test "type info: error set merged" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const TestSet = error{ One, Two } || error{Three}; @@ -223,7 +220,6 @@ test "type info: enum info" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testEnum(); try comptime testEnum(); @@ -286,7 +282,6 @@ fn testUnion() !void { test "type info: struct info" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testStruct(); try comptime testStruct(); @@ -535,7 +530,6 @@ test "type info for async frames" { test "Declarations are returned in declaration order" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { pub const a = 1; @@ -558,7 +552,6 @@ test "Struct.is_tuple for anon list literal" { test "Struct.is_tuple for anon struct literal" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const info = @typeInfo(@TypeOf(.{ .a = 0 })); try expect(!info.Struct.is_tuple); diff --git a/test/behavior/undefined.zig b/test/behavior/undefined.zig index e8733778e2..bc613585d3 100644 --- a/test/behavior/undefined.zig +++ b/test/behavior/undefined.zig @@ -48,7 +48,6 @@ test "assign undefined to struct" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; comptime { var foo: Foo = undefined; @@ -66,7 +65,6 @@ test "assign undefined to struct with method" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; comptime { var foo: Foo = undefined; @@ -82,7 +80,6 @@ test "assign undefined to struct with method" { test "type name of undefined" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const x = undefined; try expect(mem.eql(u8, @typeName(@TypeOf(x)), "@TypeOf(undefined)")); diff --git a/test/behavior/union.zig b/test/behavior/union.zig index cafaeed953..8c9eacf7ce 100644 --- a/test/behavior/union.zig +++ b/test/behavior/union.zig @@ -486,7 +486,6 @@ test "global union with single field is correctly initialized" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) 
return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; glbl = Foo1{ .f = @typeInfo(Foo1).Union.fields[0].type{ .x = 123 }, @@ -546,7 +545,6 @@ test "union initializer generates padding only if needed" { test "runtime tag name with single field" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const U = union(enum) { A: i32, @@ -1467,7 +1465,6 @@ test "packed union in packed struct" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = packed struct { nested: packed union { @@ -1556,7 +1553,6 @@ test "packed union with zero-bit field" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = packed struct { nested: packed union { diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig index aeecc641d6..688b36a911 100644 --- a/test/behavior/vector.zig +++ b/test/behavior/vector.zig @@ -433,7 +433,6 @@ test "load vector elements via runtime index" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { diff --git a/test/behavior/while.zig b/test/behavior/while.zig index 65e04f7bb2..e1e5ebbfb3 100644 --- a/test/behavior/while.zig +++ b/test/behavior/while.zig @@ -5,7 +5,6 @@ const assert = std.debug.assert; test "while loop" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var i: i32 = 0; while (i < 4) { @@ -39,8 +38,6 @@ fn staticWhileLoop2() i32 { } test "while with continue expression" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - var sum: i32 = 0; { var i: i32 = 0; @@ -53,8 +50,6 @@ test "while with continue expression" { } test "while with else" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - var sum: i32 = 0; var i: i32 = 0; var got_else: i32 = 0; @@ -82,8 +77,6 @@ fn getNumberOrNull() ?i32 { } test "continue outer while loop" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - testContinueOuter(); comptime testContinueOuter(); } @@ -131,7 +124,6 @@ test "while copies its payload" { test "continue and break" { if (builtin.zig_backend == .stage2_aarch64 and builtin.os.tag == .macos) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try runContinueAndBreakTest(); try expect(continue_and_break_counter == 8); @@ -349,8 +341,6 @@ test "continue inline while loop" { } test "else continue outer while" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - var i: usize = 0; 
while (true) { i += 1; From 2fd83d8c0a8dd28c2474b26ead8cb24d6bde0901 Mon Sep 17 00:00:00 2001 From: David Rubin Date: Tue, 16 Apr 2024 18:48:33 -0700 Subject: [PATCH 41/44] riscv: by-value structs + `@min` --- src/arch/riscv64/CodeGen.zig | 99 ++++++++++++++++++++++++++--- src/arch/riscv64/Encoding.zig | 7 +- src/arch/riscv64/Mir.zig | 6 ++ src/arch/riscv64/abi.zig | 77 +++++++++++++++++++++- test/behavior/array.zig | 1 - test/behavior/basic.zig | 1 - test/behavior/cast.zig | 2 - test/behavior/fn.zig | 3 - test/behavior/fn_delegation.zig | 1 - test/behavior/generics.zig | 1 - test/behavior/pointers.zig | 1 - test/behavior/sizeof_and_typeof.zig | 1 - test/behavior/slice.zig | 1 - test/behavior/struct.zig | 7 -- test/behavior/type.zig | 1 - 15 files changed, 174 insertions(+), 35 deletions(-) diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 7ed1174433..38d77c9c5f 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -1800,8 +1800,95 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void { } fn airMin(self: *Self, inst: Air.Inst.Index) !void { + const zcu = self.bin_file.comp.module.?; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement min for {}", .{self.target.cpu.arch}); + + const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: { + const lhs = try self.resolveInst(bin_op.lhs); + const rhs = try self.resolveInst(bin_op.rhs); + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); + + const int_info = lhs_ty.intInfo(zcu); + + if (int_info.bits > 64) return self.fail("TODO: > 64 bit @min", .{}); + + const lhs_reg, const lhs_lock = blk: { + if (lhs == .register) break :blk .{ lhs.register, null }; + + const lhs_reg, const lhs_lock = try self.allocReg(); + try self.genSetReg(lhs_ty, lhs_reg, lhs); + break :blk .{ lhs_reg, lhs_lock }; + }; + defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock); + + const rhs_reg, const rhs_lock = blk: { + if (rhs == .register) break :blk .{ rhs.register, null }; + + const rhs_reg, const rhs_lock = try self.allocReg(); + try self.genSetReg(rhs_ty, rhs_reg, rhs); + break :blk .{ rhs_reg, rhs_lock }; + }; + defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock); + + const mask_reg, const mask_lock = try self.allocReg(); + defer self.register_manager.unlockReg(mask_lock); + + const result_reg, const result_lock = try self.allocReg(); + defer self.register_manager.unlockReg(result_lock); + + _ = try self.addInst(.{ + .tag = if (int_info.signedness == .unsigned) .sltu else .slt, + .ops = .rrr, + .data = .{ .r_type = .{ + .rd = mask_reg, + .rs1 = lhs_reg, + .rs2 = rhs_reg, + } }, + }); + + _ = try self.addInst(.{ + .tag = .sub, + .ops = .rrr, + .data = .{ .r_type = .{ + .rd = mask_reg, + .rs1 = .zero, + .rs2 = mask_reg, + } }, + }); + + _ = try self.addInst(.{ + .tag = .xor, + .ops = .rrr, + .data = .{ .r_type = .{ + .rd = result_reg, + .rs1 = lhs_reg, + .rs2 = rhs_reg, + } }, + }); + + _ = try self.addInst(.{ + .tag = .@"and", + .ops = .rrr, + .data = .{ .r_type = .{ + .rd = mask_reg, + .rs1 = result_reg, + .rs2 = mask_reg, + } }, + }); + + _ = try self.addInst(.{ + .tag = .xor, + .ops = .rrr, + .data = .{ .r_type = .{ + .rd = result_reg, + .rs1 = rhs_reg, + .rs2 = mask_reg, + } }, + }); + + break :result .{ .register = result_reg }; + }; return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none 
}); } @@ -3513,17 +3600,9 @@ fn genCall( .imm12 = Immediate.s(0), } }, }); - } else if (self.bin_file.cast(link.File.Coff)) |_| { - return self.fail("TODO implement calling in COFF for {}", .{self.target.cpu.arch}); - } else if (self.bin_file.cast(link.File.MachO)) |_| { - unreachable; // unsupported architecture for MachO - } else if (self.bin_file.cast(link.File.Plan9)) |_| { - return self.fail("TODO implement call on plan9 for {}", .{self.target.cpu.arch}); } else unreachable; }, - .extern_func => { - return self.fail("TODO: extern func calls", .{}); - }, + .extern_func => return self.fail("TODO: extern func calls", .{}), else => return self.fail("TODO implement calling bitcasted functions", .{}), } } else { diff --git a/src/arch/riscv64/Encoding.zig b/src/arch/riscv64/Encoding.zig index 7953bb0cca..91f100993b 100644 --- a/src/arch/riscv64/Encoding.zig +++ b/src/arch/riscv64/Encoding.zig @@ -11,7 +11,6 @@ pub const Mnemonic = enum { lb, lbu, sltiu, - sltu, xori, andi, slli, @@ -38,9 +37,11 @@ pub const Mnemonic = enum { // R Type add, + @"and", sub, slt, mul, + sltu, xor, // System @@ -52,6 +53,8 @@ pub const Mnemonic = enum { return switch (mnem) { // zig fmt: off .add => .{ .opcode = 0b0110011, .funct3 = 0b000, .funct7 = 0b0000000 }, + .sltu => .{ .opcode = 0b0110011, .funct3 = 0b011, .funct7 = 0b0000000 }, + .@"and" => .{ .opcode = 0b0110011, .funct3 = 0b111, .funct7 = 0b0000000 }, .sub => .{ .opcode = 0b0110011, .funct3 = 0b000, .funct7 = 0b0100000 }, .ld => .{ .opcode = 0b0000011, .funct3 = 0b011, .funct7 = null }, @@ -84,7 +87,6 @@ pub const Mnemonic = enum { .beq => .{ .opcode = 0b1100011, .funct3 = 0b000, .funct7 = null }, .slt => .{ .opcode = 0b0110011, .funct3 = 0b010, .funct7 = 0b0000000 }, - .sltu => .{ .opcode = 0b0110011, .funct3 = 0b011, .funct7 = 0b0000000 }, .xor => .{ .opcode = 0b0110011, .funct3 = 0b100, .funct7 = 0b0000000 }, @@ -149,6 +151,7 @@ pub const InstEnc = enum { .xor, .add, .sub, + .@"and", => .R, .ecall, diff --git a/src/arch/riscv64/Mir.zig b/src/arch/riscv64/Mir.zig index 08bda25de6..0ce2185197 100644 --- a/src/arch/riscv64/Mir.zig +++ b/src/arch/riscv64/Mir.zig @@ -32,6 +32,9 @@ pub const Inst = struct { lui, mv, + @"and", + xor, + ebreak, ecall, unimp, @@ -49,6 +52,9 @@ pub const Inst = struct { /// Absolute Value, uses i_type payload. 
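The five-instruction sequence emitted above for `@min` is a branchless select: `slt`/`sltu` produces the compare bit, `sub mask, zero, mask` stretches it into an all-ones or all-zero mask, and the `xor`/`and`/`xor` triple picks one operand through that mask. A standalone sketch of the identity in ordinary Zig (the helper names are mine, not from the patch); the follow-up commit in this series reuses the same mask for `@max` by swapping the operand fed into the final `xor`:

```zig
const std = @import("std");

// mask = -(lhs < rhs): all ones when lhs < rhs, all zeroes otherwise,
// mirroring `slt` followed by `sub mask, zero, mask`.
fn branchlessMin(lhs: i64, rhs: i64) i64 {
    const mask: i64 = -%@as(i64, @intFromBool(lhs < rhs));
    // rhs ^ ((lhs ^ rhs) & mask) yields lhs when the mask is set, rhs otherwise.
    return rhs ^ ((lhs ^ rhs) & mask);
}

// Feeding lhs into the final xor instead selects the other operand.
fn branchlessMax(lhs: i64, rhs: i64) i64 {
    const mask: i64 = -%@as(i64, @intFromBool(lhs < rhs));
    return lhs ^ ((lhs ^ rhs) & mask);
}

test "branchless select agrees with @min/@max" {
    try std.testing.expectEqual(@as(i64, -3), branchlessMin(-3, 7));
    try std.testing.expectEqual(@as(i64, 7), branchlessMax(-3, 7));
    try std.testing.expectEqual(@as(i64, 9), branchlessMin(9, 9));
}
```

The new `Encoding.zig` rows for `sltu` and `@"and"` are plain `opcode`/`funct3`/`funct7` triples; they slot into the standard R-type layout (funct7, rs2, rs1, funct3, rd, opcode, high bits to low). A quick self-check of that layout with a hypothetical `encodeRType` helper, against the known encoding `add a0, a1, a2` = `0x00C58533`:

```zig
const std = @import("std");

fn encodeRType(opcode: u7, funct3: u3, funct7: u7, rd: u5, rs1: u5, rs2: u5) u32 {
    return @as(u32, opcode) |
        (@as(u32, rd) << 7) |
        (@as(u32, funct3) << 12) |
        (@as(u32, rs1) << 15) |
        (@as(u32, rs2) << 20) |
        (@as(u32, funct7) << 25);
}

test "R-type layout reproduces a known add encoding" {
    // add a0, a1, a2 => rd = x10, rs1 = x11, rs2 = x12.
    try std.testing.expectEqual(
        @as(u32, 0x00C58533),
        encodeRType(0b0110011, 0b000, 0b0000000, 10, 11, 12),
    );
}
```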
abs, + sltu, + slt, + /// Immediate Logical Right Shift, uses i_type payload srli, /// Immediate Logical Left Shift, uses i_type payload diff --git a/src/arch/riscv64/abi.zig b/src/arch/riscv64/abi.zig index 9fbb63638e..5c5b0b0acd 100644 --- a/src/arch/riscv64/abi.zig +++ b/src/arch/riscv64/abi.zig @@ -3,6 +3,7 @@ const bits = @import("bits.zig"); const Register = bits.Register; const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager; const Type = @import("../../type.zig").Type; +const InternPool = @import("../../InternPool.zig"); const Module = @import("../../Module.zig"); const assert = std.debug.assert; @@ -97,7 +98,10 @@ pub fn classifyType(ty: Type, mod: *Module) Class { pub fn classifySystem(ty: Type, zcu: *Module) [8]Class { const ip = zcu.intern_pool; var result = [1]Class{.none} ** 8; - + const memory_class = [_]Class{ + .memory, .none, .none, .none, + .none, .none, .none, .none, + }; switch (ty.zigTypeTag(zcu)) { .Bool, .Void, .NoReturn => { result[0] = .integer; @@ -146,7 +150,12 @@ pub fn classifySystem(ty: Type, zcu: *Module) [8]Class { // anyerror!void can fit into one register if (payload_bits == 0) return result; - std.debug.panic("support ErrorUnion payload {}", .{payload_ty.fmt(zcu)}); + if (payload_bits <= 64) { + result[1] = .integer; + return result; + } + + std.debug.panic("TODO: classifySystem ErrorUnion > 64 bit payload", .{}); }, .Struct => { const loaded_struct = ip.loadStructType(ty.toIntern()); @@ -158,13 +167,75 @@ pub fn classifySystem(ty: Type, zcu: *Module) [8]Class { if (ty_size > 8) result[1] = .integer; return result; } + if (ty_size > 64) + return memory_class; - std.debug.panic("support Struct in classifySystem", .{}); + var byte_offset: u64 = 0; + classifyStruct(&result, &byte_offset, loaded_struct, zcu); + + return result; }, else => |bad_ty| std.debug.panic("classifySystem {s}", .{@tagName(bad_ty)}), } } +fn classifyStruct( + result: *[8]Class, + byte_offset: *u64, + loaded_struct: InternPool.LoadedStructType, + zcu: *Module, +) void { + const ip = &zcu.intern_pool; + var field_it = loaded_struct.iterateRuntimeOrder(ip); + + while (field_it.next()) |field_index| { + const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]); + const field_align = loaded_struct.fieldAlign(ip, field_index); + byte_offset.* = std.mem.alignForward( + u64, + byte_offset.*, + field_align.toByteUnits() orelse field_ty.abiAlignment(zcu).toByteUnits().?, + ); + if (zcu.typeToStruct(field_ty)) |field_loaded_struct| { + if (field_loaded_struct.layout != .@"packed") { + classifyStruct(result, byte_offset, field_loaded_struct, zcu); + continue; + } + } + const field_class = std.mem.sliceTo(&classifySystem(field_ty, zcu), .none); + const field_size = field_ty.abiSize(zcu); + + combine: { + const result_class = &result[@intCast(byte_offset.* / 8)]; + if (result_class.* == field_class[0]) { + break :combine; + } + + if (result_class.* == .none) { + result_class.* = field_class[0]; + break :combine; + } + assert(field_class[0] != .none); + + // "If one of the classes is MEMORY, the result is the MEMORY class." + if (result_class.* == .memory or field_class[0] == .memory) { + result_class.* = .memory; + break :combine; + } + + // "If one of the classes is INTEGER, the result is the INTEGER." + if (result_class.* == .integer or field_class[0] == .integer) { + result_class.* = .integer; + break :combine; + } + + result_class.* = .integer; + } + @memcpy(result[@intCast(byte_offset.* / 8 + 1)..][0 .. 
field_class.len - 1], field_class[1..]); byte_offset.* += field_size; } } pub const callee_preserved_regs = [_]Register{ // .s0 is omitted so it can be used as the frame pointer .s1, .s2, .s3, .s4, .s5, .s6, .s7, .s8, .s9, .s10, .s11, diff --git a/test/behavior/array.zig b/test/behavior/array.zig index 1759e5c696..d524023c9b 100644 --- a/test/behavior/array.zig +++ b/test/behavior/array.zig @@ -75,7 +75,6 @@ test "array concat with tuple" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const array: [2]u8 = .{ 1, 2 }; { diff --git a/test/behavior/basic.zig b/test/behavior/basic.zig index eabac35787..ad955a8648 100644 --- a/test/behavior/basic.zig +++ b/test/behavior/basic.zig @@ -593,7 +593,6 @@ test "equality compare fn ptrs" { test "self reference through fn ptr field" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const A = struct { diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig index 3ea0d800cc..1113fcfeaa 100644 --- a/test/behavior/cast.zig +++ b/test/behavior/cast.zig @@ -2073,7 +2073,6 @@ test "peer type resolution: empty tuple pointer and slice" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var a: [:0]const u8 = "Hello"; var b = &.{}; @@ -2095,7 +2094,6 @@ test "peer type resolution: tuple pointer and slice" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var a: [:0]const u8 = "Hello"; var b = &.{ @as(u8, 'x'), @as(u8, 'y'), @as(u8, 'z') }; diff --git a/test/behavior/fn.zig b/test/behavior/fn.zig index 0e14af68fa..b242d29d83 100644 --- a/test/behavior/fn.zig +++ b/test/behavior/fn.zig @@ -191,7 +191,6 @@ test "function with complex callconv and return type expressions" { test "pass by non-copying value" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try expect(addPointCoords(Point{ .x = 1, .y = 2 }) == 3); } @@ -219,7 +218,6 @@ fn addPointCoordsVar(pt: anytype) !i32 { test "pass by non-copying value as method" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var pt = Point2{ .x = 1, .y = 2 }; try expect(pt.addPointCoords() == 3); @@ -236,7 +234,6 @@ const Point2 = struct { test "pass by non-copying value as method, which is generic" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var pt = Point3{ .x = 1, .y = 2 }; try expect(pt.addPointCoords(i32) == 3); diff --git a/test/behavior/fn_delegation.zig b/test/behavior/fn_delegation.zig index 6a3d46c15d..95dbfeb4b2 100644 ---
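The merge logic in `classifyStruct` follows the System V-style precedence the quoted comments describe: equal classes stay, `.none` gives way, MEMORY dominates everything, and INTEGER beats what remains. Distilled into a self-contained sketch (the `combine` helper is hypothetical, not part of the patch):

```zig
const std = @import("std");

const Class = enum { none, integer, memory };

// Hypothetical helper: merge a field's class into the class already
// recorded for its eightbyte slot, per the precedence rules above.
fn combine(current: Class, field: Class) Class {
    if (current == field) return current;
    if (current == .none) return field;
    if (field == .none) return current;
    if (current == .memory or field == .memory) return .memory;
    return .integer;
}

test "memory dominates, integer beats none" {
    try std.testing.expectEqual(Class.memory, combine(.integer, .memory));
    try std.testing.expectEqual(Class.integer, combine(.none, .integer));
    try std.testing.expectEqual(Class.integer, combine(.integer, .integer));
}
```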
a/test/behavior/fn_delegation.zig +++ b/test/behavior/fn_delegation.zig @@ -34,7 +34,6 @@ fn custom(comptime T: type, comptime num: u64) fn (T) u64 { test "fn delegation" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const foo = Foo{}; try expect(foo.one() == 11); diff --git a/test/behavior/generics.zig b/test/behavior/generics.zig index 7ed75f0ead..2c3dfaba01 100644 --- a/test/behavior/generics.zig +++ b/test/behavior/generics.zig @@ -395,7 +395,6 @@ test "extern function used as generic parameter" { test "generic struct as parameter type" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { fn doTheTest(comptime Int: type, thing: struct { int: Int }) !void { diff --git a/test/behavior/pointers.zig b/test/behavior/pointers.zig index 331a5689c8..ffeeca3986 100644 --- a/test/behavior/pointers.zig +++ b/test/behavior/pointers.zig @@ -434,7 +434,6 @@ test "indexing array with sentinel returns correct type" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var s: [:0]const u8 = "abc"; try testing.expectEqualSlices(u8, "*const u8", @typeName(@TypeOf(&s[0]))); diff --git a/test/behavior/sizeof_and_typeof.zig b/test/behavior/sizeof_and_typeof.zig index 506baa2666..1be9ab1c3a 100644 --- a/test/behavior/sizeof_and_typeof.zig +++ b/test/behavior/sizeof_and_typeof.zig @@ -412,7 +412,6 @@ test "Extern function calls, dereferences and field access in @TypeOf" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const Test = struct { fn test_fn_1(a: c_long) @TypeOf(c_fopen("test", "r").*) { diff --git a/test/behavior/slice.zig b/test/behavior/slice.zig index 1bcfc42dd5..437d248127 100644 --- a/test/behavior/slice.zig +++ b/test/behavior/slice.zig @@ -939,7 +939,6 @@ test "modify slice length at comptime" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const arr: [2]u8 = .{ 10, 20 }; comptime var s: []const u8 = arr[0..0]; diff --git a/test/behavior/struct.zig b/test/behavior/struct.zig index dceac36c97..d0509e308e 100644 --- a/test/behavior/struct.zig +++ b/test/behavior/struct.zig @@ -176,7 +176,6 @@ const MemberFnTestFoo = struct { test "call member function directly" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const instance = MemberFnTestFoo{ .x = 1234 }; const result = MemberFnTestFoo.member(instance); @@ -185,7 +184,6 @@ test "call member function directly" { test "store member function in variable" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if 
(builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const instance = MemberFnTestFoo{ .x = 1234 }; const memberFn = MemberFnTestFoo.member; @@ -1561,7 +1559,6 @@ test "discarded struct initialization works as expected" { test "function pointer in struct returns the struct" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const A = struct { const A = @This(); @@ -1784,8 +1781,6 @@ fn countFields(v: anytype) usize { } test "struct init with no result pointer sets field result types" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - const S = struct { // A function parameter has a result type, but no result pointer. fn f(s: struct { x: u32 }) u32 { @@ -1933,8 +1928,6 @@ test "circular dependency through pointer field of a struct" { } test "field calls do not force struct field init resolution" { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - const S = struct { x: u32 = blk: { _ = @TypeOf(make().dummyFn()); // runtime field call - S not fully resolved - dummyFn call should not force field init resolution diff --git a/test/behavior/type.zig b/test/behavior/type.zig index bf1b8a76f4..d3b8beb1c0 100644 --- a/test/behavior/type.zig +++ b/test/behavior/type.zig @@ -203,7 +203,6 @@ test "Type.Opaque" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const Opaque = @Type(.{ .Opaque = .{ From ffb63a05a3327e64bcf8ec7fd05c6aab8d304480 Mon Sep 17 00:00:00 2001 From: David Rubin Date: Tue, 16 Apr 2024 22:44:55 -0700 Subject: [PATCH 42/44] riscv: finally fix bug + `airAggregateInit` I just hadn't realized that I placed the `riscv_start` branch in the non-simplified starts --- lib/std/start.zig | 26 ++++-- lib/std/testing.zig | 2 +- src/arch/riscv64/CodeGen.zig | 86 ++++++++++++++----- src/arch/riscv64/abi.zig | 25 ++++-- test/behavior/align.zig | 1 - test/behavior/basic.zig | 1 + test/behavior/bitcast.zig | 2 + test/behavior/cast.zig | 1 - test/behavior/comptime_memory.zig | 8 +- test/behavior/enum.zig | 2 - test/behavior/error.zig | 1 + test/behavior/eval.zig | 2 - test/behavior/fn.zig | 5 +- test/behavior/fn_delegation.zig | 1 + test/behavior/generics.zig | 1 + test/behavior/if.zig | 2 - test/behavior/maximum_minimum.zig | 6 -- test/behavior/optional.zig | 1 + test/behavior/packed-struct.zig | 2 - test/behavior/packed-union.zig | 2 + .../packed_struct_explicit_backing_int.zig | 1 - test/behavior/pointers.zig | 4 + test/behavior/ptrcast.zig | 4 + test/behavior/sizeof_and_typeof.zig | 1 + test/behavior/struct.zig | 10 ++- test/behavior/tuple.zig | 1 - test/behavior/type.zig | 4 +- test/behavior/union.zig | 3 - 28 files changed, 139 insertions(+), 66 deletions(-) diff --git a/lib/std/start.zig b/lib/std/start.zig index 5fad443956..ff97e3c8ae 100644 --- a/lib/std/start.zig +++ b/lib/std/start.zig @@ -22,7 +22,8 @@ pub const simplified_logic = builtin.zig_backend == .stage2_arm or builtin.zig_backend == .stage2_sparc64 or builtin.cpu.arch == .spirv32 or - builtin.cpu.arch == .spirv64; + builtin.cpu.arch == .spirv64 or + builtin.zig_backend == .stage2_riscv64; comptime { // No matter what, we import the root file, so that
any export, test, comptime @@ -42,6 +43,10 @@ comptime { } else if (builtin.os.tag == .opencl) { if (@hasDecl(root, "main")) @export(spirvMain2, .{ .name = "main" }); + } else if (native_arch.isRISCV()) { + if (!@hasDecl(root, "_start")) { + @export(riscv_start, .{ .name = "_start" }); + } } else { if (!@hasDecl(root, "_start")) { @export(_start2, .{ .name = "_start" }); @@ -60,10 +65,6 @@ comptime { } else if (@typeInfo(@TypeOf(root.main)).Fn.calling_convention != .C) { @export(main, .{ .name = "main" }); } - } else if (native_arch.isRISCV()) { - if (!@hasDecl(root, "_start")) { - @export(riscv_start, .{ .name = "_start" }); - } } else if (native_os == .windows) { if (!@hasDecl(root, "WinMain") and !@hasDecl(root, "WinMainCRTStartup") and !@hasDecl(root, "wWinMain") and !@hasDecl(root, "wWinMainCRTStartup")) @@ -208,7 +209,20 @@ fn wasi_start() callconv(.C) void { } fn riscv_start() callconv(.C) noreturn { - std.process.exit(@call(.always_inline, callMain, .{})); + std.process.exit(switch (@typeInfo(@typeInfo(@TypeOf(root.main)).Fn.return_type.?)) { + .NoReturn => root.main(), + .Void => ret: { + root.main(); + break :ret 0; + }, + .Int => |info| ret: { + if (info.bits != 8 or info.signedness == .signed) { + @compileError(bad_main_ret); + } + break :ret root.main(); + }, + else => @compileError("expected return type of main to be 'void', 'noreturn', 'u8'"), + }); } fn EfiMain(handle: uefi.Handle, system_table: *uefi.tables.SystemTable) callconv(.C) usize { diff --git a/lib/std/testing.zig b/lib/std/testing.zig index 662351f153..4e895ef3a7 100644 --- a/lib/std/testing.zig +++ b/lib/std/testing.zig @@ -22,7 +22,7 @@ pub var base_allocator_instance = std.heap.FixedBufferAllocator.init(""); pub var log_level = std.log.Level.warn; // Disable printing in tests for simple backends. 
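The rewritten `riscv_start` selects the process exit code from `root.main`'s return type at comptime, enforcing the same contract as the other start paths: void, noreturn, or u8. A standalone sketch of that dispatch; `mainVoid`, `mainU8`, and `exitCode` are hypothetical stand-ins, not names from the patch:

```zig
const std = @import("std");

fn mainVoid() void {}
fn mainU8() u8 {
    return 7;
}

// Comptime dispatch on the return type, as in `riscv_start` above.
fn exitCode(comptime main_fn: anytype) u8 {
    return switch (@typeInfo(@typeInfo(@TypeOf(main_fn)).Fn.return_type.?)) {
        .NoReturn => main_fn(),
        .Void => blk: {
            main_fn();
            break :blk 0;
        },
        .Int => |info| blk: {
            if (info.bits != 8 or info.signedness == .signed)
                @compileError("expected a u8 return type");
            break :blk main_fn();
        },
        else => @compileError("expected return type of main to be 'void', 'noreturn', or 'u8'"),
    };
}

test "exit code follows main's return type" {
    try std.testing.expectEqual(@as(u8, 0), exitCode(mainVoid));
    try std.testing.expectEqual(@as(u8, 7), exitCode(mainU8));
}
```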
-pub const backend_can_print = builtin.zig_backend != .stage2_spirv64 and builtin.zig_backend != .stage2_riscv64; +pub const backend_can_print = !(builtin.zig_backend == .stage2_spirv64 or builtin.zig_backend == .stage2_riscv64); fn print(comptime fmt: []const u8, args: anytype) void { if (@inComptime()) { diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 38d77c9c5f..ed177ed1f1 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -43,6 +43,8 @@ const callee_preserved_regs = abi.callee_preserved_regs; const gp = abi.RegisterClass.gp; /// Function Args const fa = abi.RegisterClass.fa; +/// Function Returns +const fr = abi.RegisterClass.fr; /// Temporary Use const tp = abi.RegisterClass.tp; @@ -1083,8 +1085,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .mod => try self.airMod(inst), .shl, .shl_exact => try self.airShl(inst), .shl_sat => try self.airShlSat(inst), - .min => try self.airMin(inst), - .max => try self.airMax(inst), + .min => try self.airMinMax(inst, .min), + .max => try self.airMinMax(inst, .max), .slice => try self.airSlice(inst), .sqrt, @@ -1672,7 +1674,6 @@ fn airAlloc(self: *Self, inst: Air.Inst.Index) !void { fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = switch (self.ret_mcv.long) { - else => unreachable, .none => .{ .lea_frame = .{ .index = try self.allocMemPtr(inst) } }, .load_frame => .{ .register_offset = .{ .reg = (try self.copyToNewRegister( @@ -1681,6 +1682,7 @@ fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void { )).register, .off = self.ret_mcv.short.indirect.off, } }, + else => |t| return self.fail("TODO: airRetPtr {s}", .{@tagName(t)}), }; return self.finishAir(inst, result, .{ .none, .none, .none }); } @@ -1799,7 +1801,14 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airMin(self: *Self, inst: Air.Inst.Index) !void { +fn airMinMax( + self: *Self, + inst: Air.Inst.Index, + comptime tag: enum { + max, + min, + }, +) !void { const zcu = self.bin_file.comp.module.?; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; @@ -1882,7 +1891,7 @@ fn airMin(self: *Self, inst: Air.Inst.Index) !void { .ops = .rrr, .data = .{ .r_type = .{ .rd = result_reg, - .rs1 = rhs_reg, + .rs1 = if (tag == .min) rhs_reg else lhs_reg, .rs2 = mask_reg, } }, }); @@ -1892,12 +1901,6 @@ fn airMin(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } -fn airMax(self: *Self, inst: Air.Inst.Index) !void { - const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement max for {}", .{self.target.cpu.arch}); - return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); -} - fn airSlice(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; @@ -3577,10 +3580,10 @@ fn genCall( const func_key = zcu.intern_pool.indexToKey(func_value.ip_index); switch (switch (func_key) { else => func_key, - .ptr => |ptr| switch (ptr.addr) { + .ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) { .decl => |decl| zcu.intern_pool.indexToKey(zcu.declPtr(decl).val.toIntern()), else => func_key, - }, + } else func_key, }) { .func => |func| { if 
(self.bin_file.cast(link.File.Elf)) |elf_file| { @@ -4174,8 +4177,7 @@ fn performReloc(self: *Self, inst: Mir.Inst.Index) void { .bne, .beq, => self.mir_instructions.items(.data)[inst].b_type.inst = target, - .jal, - => self.mir_instructions.items(.data)[inst].j_type.inst = target, + .jal => self.mir_instructions.items(.data)[inst].j_type.inst = target, .pseudo => switch (ops) { .pseudo_j => self.mir_instructions.items(.data)[inst].inst = target, else => std.debug.panic("TODO: performReloc {s}", .{@tagName(ops)}), @@ -5021,13 +5023,36 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void { fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { const zcu = self.bin_file.comp.module.?; - const vector_ty = self.typeOfIndex(inst); - const len = vector_ty.vectorLen(zcu); + const result_ty = self.typeOfIndex(inst); + const len: usize = @intCast(result_ty.arrayLen(zcu)); const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const elements: []const Air.Inst.Ref = @ptrCast(self.air.extra[ty_pl.payload..][0..len]); - const result: MCValue = res: { - if (self.liveness.isUnused(inst)) break :res .unreach; - return self.fail("TODO implement airAggregateInit for riscv64", .{}); + const result: MCValue = result: { + switch (result_ty.zigTypeTag(zcu)) { + .Struct => { + const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(result_ty, zcu)); + + if (result_ty.containerLayout(zcu) == .@"packed") {} else for (elements, 0..) |elem, elem_i| { + if ((try result_ty.structFieldValueComptime(zcu, elem_i)) != null) continue; + + const elem_ty = result_ty.structFieldType(elem_i, zcu); + const elem_off: i32 = @intCast(result_ty.structFieldOffset(elem_i, zcu)); + const elem_mcv = try self.resolveInst(elem); + + const elem_frame: FrameAddr = .{ + .index = frame_index, + .off = elem_off, + }; + try self.genSetStack( + elem_ty, + elem_frame, + elem_mcv, + ); + } + }, + else => return self.fail("TODO: airAggregateInit {}", .{result_ty.fmt(zcu)}), + } + break :result .{ .register = .zero }; }; if (elements.len <= Liveness.bpi - 1) { @@ -5189,12 +5214,24 @@ fn resolveCallingConventionValues( for (classes) |class| switch (class) { .integer => { - const ret_int_reg = abi.function_arg_regs[ret_int_reg_i]; + const ret_int_reg = abi.function_ret_regs[ret_int_reg_i]; ret_int_reg_i += 1; ret_tracking[ret_tracking_i] = InstTracking.init(.{ .register = ret_int_reg }); ret_tracking_i += 1; }, + .memory => { + const ret_int_reg = abi.function_ret_regs[ret_int_reg_i]; + ret_int_reg_i += 1; + const ret_indirect_reg = abi.function_arg_regs[param_int_reg_i]; + param_int_reg_i += 1; + + ret_tracking[ret_tracking_i] = .{ + .short = .{ .indirect = .{ .reg = ret_int_reg } }, + .long = .{ .indirect = .{ .reg = ret_indirect_reg } }, + }; + ret_tracking_i += 1; + }, else => return self.fail("TODO: C calling convention return class {}", .{class}), }; @@ -5226,6 +5263,13 @@ fn resolveCallingConventionValues( arg_mcv[arg_mcv_i] = .{ .register = param_int_reg }; arg_mcv_i += 1; }, + .memory => { + const param_int_regs = abi.function_arg_regs; + const param_int_reg = param_int_regs[param_int_reg_i]; + + arg_mcv[arg_mcv_i] = .{ .indirect = .{ .reg = param_int_reg } }; + arg_mcv_i += 1; + }, else => return self.fail("TODO: C calling convention arg class {}", .{class}), } else { arg.* = switch (arg_mcv_i) { diff --git a/src/arch/riscv64/abi.zig b/src/arch/riscv64/abi.zig index 5c5b0b0acd..35f5659685 100644 --- a/src/arch/riscv64/abi.zig +++ b/src/arch/riscv64/abi.zig @@ -96,7 +96,6 @@ pub fn classifyType(ty: 
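`airAggregateInit` above materializes a by-value struct by allocating a spill frame and storing each runtime field at its `structFieldOffset`, skipping comptime-known fields. The layout fact it relies on can be checked from userland; a hypothetical illustration (names mine, using an extern struct so the field offsets are guaranteed):

```zig
const std = @import("std");

test "runtime struct init writes each field at its ABI offset" {
    const S = extern struct { a: u32, b: u64 };
    var frame: [@sizeOf(S)]u8 align(@alignOf(S)) = undefined;

    // Stand-ins for the genSetStack calls: one store per runtime field,
    // at the offset the type's layout dictates.
    const a_ptr: *u32 = @ptrCast(@alignCast(&frame[@offsetOf(S, "a")]));
    const b_ptr: *u64 = @ptrCast(@alignCast(&frame[@offsetOf(S, "b")]));
    a_ptr.* = 123;
    b_ptr.* = 456;

    // Reinterpreting the frame as S recovers the initialized value.
    const s: *const S = @ptrCast(&frame);
    try std.testing.expectEqual(@as(u32, 123), s.a);
    try std.testing.expectEqual(@as(u64, 456), s.b);
}
```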
Type, mod: *Module) Class {
 /// There are a maximum of 8 possible return slots. Returned values are in
 /// the beginning of the array; unused slots are filled with .none.
 pub fn classifySystem(ty: Type, zcu: *Module) [8]Class {
-    const ip = zcu.intern_pool;
     var result = [1]Class{.none} ** 8;
     const memory_class = [_]Class{
         .memory, .none, .none, .none,
@@ -158,22 +157,17 @@ pub fn classifySystem(ty: Type, zcu: *Module) [8]Class {
             std.debug.panic("TODO: classifySystem ErrorUnion > 64 bit payload", .{});
         },
         .Struct => {
-            const loaded_struct = ip.loadStructType(ty.toIntern());
+            const layout = ty.containerLayout(zcu);
             const ty_size = ty.abiSize(zcu);
-            if (loaded_struct.layout == .@"packed") {
+            if (layout == .@"packed") {
                 assert(ty_size <= 16);
                 result[0] = .integer;
                 if (ty_size > 8) result[1] = .integer;
                 return result;
             }
-            if (ty_size > 64)
-                return memory_class;
-
-            var byte_offset: u64 = 0;
-            classifyStruct(&result, &byte_offset, loaded_struct, zcu);
-
-            return result;
+            return memory_class;
         },
         else => |bad_ty| std.debug.panic("classifySystem {s}", .{@tagName(bad_ty)}),
     }
@@ -245,6 +239,10 @@ pub const function_arg_regs = [_]Register{
     .a0, .a1, .a2, .a3, .a4, .a5, .a6, .a7,
 };
 
+pub const function_ret_regs = [_]Register{
+    .a0, .a1,
+};
+
 pub const temporary_regs = [_]Register{
     .t0, .t1, .t2, .t3, .t4, .t5, .t6,
 };
@@ -273,6 +271,15 @@ pub const RegisterClass = struct {
         break :blk set;
     };
 
+    pub const fr: RegisterBitSet = blk: {
+        var set = RegisterBitSet.initEmpty();
+        set.setRangeValue(.{
+            .start = callee_preserved_regs.len,
+            .end = callee_preserved_regs.len + function_ret_regs.len,
+        }, true);
+        break :blk set;
+    };
+
     pub const tp: RegisterBitSet = blk: {
         var set = RegisterBitSet.initEmpty();
         set.setRangeValue(.{
diff --git a/test/behavior/align.zig b/test/behavior/align.zig
index ace8fe7866..659733962b 100644
--- a/test/behavior/align.zig
+++ b/test/behavior/align.zig
@@ -388,7 +388,6 @@ test "function align expression depends on generic parameter" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     // function alignment is a compile error on wasm32/wasm64
     if (native_arch == .wasm32 or native_arch == .wasm64) return error.SkipZigTest;
diff --git a/test/behavior/basic.zig b/test/behavior/basic.zig
index ad955a8648..eabac35787 100644
--- a/test/behavior/basic.zig
+++ b/test/behavior/basic.zig
@@ -593,6 +593,7 @@ test "equality compare fn ptrs" {
 
 test "self reference through fn ptr field" {
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         const A = struct {
diff --git a/test/behavior/bitcast.zig b/test/behavior/bitcast.zig
index 28c797cef3..b6f7862bd0 100644
--- a/test/behavior/bitcast.zig
+++ b/test/behavior/bitcast.zig
@@ -541,6 +541,7 @@ test "@bitCast of packed struct containing pointer" {
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         const A = packed struct {
@@ -570,6 +571,7 @@ test "@bitCast of extern struct containing pointer" {
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         const A = extern struct {
diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig
index 1113fcfeaa..46cf272e57 100644
--- a/test/behavior/cast.zig
+++ b/test/behavior/cast.zig
@@ -2713,7 +2713,6 @@ test "bitcast vector" {
     if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const u8x32 = @Vector(32, u8);
     const u32x8 = @Vector(8, u32);
diff --git a/test/behavior/comptime_memory.zig b/test/behavior/comptime_memory.zig
index 73b9ea60f2..502c44dc35 100644
--- a/test/behavior/comptime_memory.zig
+++ b/test/behavior/comptime_memory.zig
@@ -32,6 +32,8 @@ test "type pun signed and unsigned as array pointer" {
 }
 
 test "type pun signed and unsigned as offset many pointer" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     comptime {
         var x: [11]u32 = undefined;
         var y: [*]i32 = @ptrCast(&x[10]);
@@ -42,6 +44,8 @@ test "type pun signed and unsigned as offset many pointer" {
 }
 
 test "type pun signed and unsigned as array pointer with pointer arithemtic" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     comptime {
         var x: [11]u32 = undefined;
         const y = @as([*]i32, @ptrCast(&x[10])) - 10;
@@ -289,6 +293,8 @@ test "dance on linker values" {
 }
 
 test "offset array ptr by element size" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     comptime {
         const VirtualStruct = struct { x: u32 };
         var arr: [4]VirtualStruct = .{
@@ -418,8 +424,6 @@ test "dereference undefined pointer to zero-bit type" {
 }
 
 test "type pun extern struct" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     const S = extern struct { f: u8 };
     comptime var s = S{ .f = 123 };
     @as(*u8, @ptrCast(&s)).* = 72;
diff --git a/test/behavior/enum.zig b/test/behavior/enum.zig
index 8b5890e231..dd2d83a289 100644
--- a/test/behavior/enum.zig
+++ b/test/behavior/enum.zig
@@ -1246,8 +1246,6 @@ test "auto-numbered enum with signed tag type" {
 }
 
 test "lazy initialized field" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     try std.testing.expectEqual(@as(u8, @alignOf(struct {})), getLazyInitialized(.a));
 }
 
diff --git a/test/behavior/error.zig b/test/behavior/error.zig
index 08f842d93b..d5e8308309 100644
--- a/test/behavior/error.zig
+++ b/test/behavior/error.zig
@@ -1102,6 +1102,7 @@ test "result location initialization of error union with OPV payload" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         x: u0,
diff --git a/test/behavior/eval.zig b/test/behavior/eval.zig
index 75933944fd..c62e116a5f 100644
--- a/test/behavior/eval.zig
+++ b/test/behavior/eval.zig
@@ -1705,8 +1705,6 @@ test "early exit in container level const" {
 }
 
 test "@inComptime" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     const S = struct {
         fn inComptime() bool {
             return @inComptime();
diff --git a/test/behavior/fn.zig b/test/behavior/fn.zig
index b242d29d83..b6eafeefc1 100644
--- a/test/behavior/fn.zig
+++ b/test/behavior/fn.zig
@@ -191,6 +191,7 @@ test "function with complex callconv and return type expressions" {
 
 test "pass by non-copying value" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try expect(addPointCoords(Point{ .x = 1, .y = 2 }) == 3);
 }
@@ -218,6 +219,7 @@ fn addPointCoordsVar(pt: anytype) !i32 {
 
 test "pass by non-copying value as method" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var pt = Point2{ .x = 1, .y = 2 };
     try expect(pt.addPointCoords() == 3);
@@ -234,6 +236,7 @@ const Point2 = struct {
 
 test "pass by non-copying value as method, which is generic" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var pt = Point3{ .x = 1, .y = 2 };
     try expect(pt.addPointCoords(i32) == 3);
@@ -624,8 +627,6 @@ test "comptime parameters don't have to be marked comptime if only called at com
 }
 
 test "inline function with comptime-known comptime-only return type called at runtime" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     const S = struct {
         inline fn foo(x: *i32, y: *const i32) type {
             x.* = y.*;
diff --git a/test/behavior/fn_delegation.zig b/test/behavior/fn_delegation.zig
index 95dbfeb4b2..6a3d46c15d 100644
--- a/test/behavior/fn_delegation.zig
+++ b/test/behavior/fn_delegation.zig
@@ -34,6 +34,7 @@ fn custom(comptime T: type, comptime num: u64) fn (T) u64 {
 test "fn delegation" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const foo = Foo{};
     try expect(foo.one() == 11);
diff --git a/test/behavior/generics.zig b/test/behavior/generics.zig
index 2c3dfaba01..7ed75f0ead 100644
--- a/test/behavior/generics.zig
+++ b/test/behavior/generics.zig
@@ -395,6 +395,7 @@ test "extern function used as generic parameter" {
 
 test "generic struct as parameter type" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest(comptime Int: type, thing: struct { int: Int }) !void {
diff --git a/test/behavior/if.zig b/test/behavior/if.zig
index 61a5fc8f1b..8cb923dd43 100644
--- a/test/behavior/if.zig
+++ b/test/behavior/if.zig
@@ -179,8 +179,6 @@ fn returnTrue() bool {
 }
 
 test "if value shouldn't be load-elided if used later (structs)" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     const Foo = struct { x: i32 };
 
     var a = Foo{ .x = 1 };
diff --git a/test/behavior/maximum_minimum.zig b/test/behavior/maximum_minimum.zig
index 54973a8b3f..d08bc82828 100644
--- a/test/behavior/maximum_minimum.zig
+++ b/test/behavior/maximum_minimum.zig
@@ -160,8 +160,6 @@ test "@min/@max on lazy values" {
 }
 
 test "@min/@max more than two arguments" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     const x: u32 = 30;
     const y: u32 = 10;
     const z: u32 = 20;
@@ -187,7 +185,6 @@ test "@min/@max notices bounds" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var x: u16 = 20;
     const y = 30;
@@ -239,7 +236,6 @@ test "@min/@max notices bounds from types" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var x: u16 = 123;
     var y: u32 = 456;
@@ -325,8 +321,6 @@ test "@min/@max notices bounds from vector types when element of comptime-known
 }
 
 test "@min/@max of signed and unsigned runtime integers" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     var x: i32 = -1;
     var y: u31 = 1;
     _ = .{ &x, &y };
diff --git a/test/behavior/optional.zig b/test/behavior/optional.zig
index c5fb888bc9..02c329a7d5 100644
--- a/test/behavior/optional.zig
+++ b/test/behavior/optional.zig
@@ -640,6 +640,7 @@ test "result location initialization of optional with OPV payload" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         x: u0,
diff --git a/test/behavior/packed-struct.zig b/test/behavior/packed-struct.zig
index c13919d0f8..4870cd5984 100644
--- a/test/behavior/packed-struct.zig
+++ b/test/behavior/packed-struct.zig
@@ -124,7 +124,6 @@ test "correct sizeOf and offsets in packed structs" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const PStruct = packed struct {
         bool_a: bool,
@@ -193,7 +192,6 @@ test "nested packed structs" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S1 = packed struct { a: u8, b: u8, c: u8 };
 
diff --git a/test/behavior/packed-union.zig b/test/behavior/packed-union.zig
index d76f28ae59..5dd1641c5f 100644
--- a/test/behavior/packed-union.zig
+++ b/test/behavior/packed-union.zig
@@ -177,6 +177,8 @@ test "assigning to non-active field at comptime" {
 }
 
 test "comptime packed union of pointers" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     const U = packed union {
         a: *const u32,
         b: *const [1]u32,
diff --git a/test/behavior/packed_struct_explicit_backing_int.zig b/test/behavior/packed_struct_explicit_backing_int.zig
index 35762a1b14..29b8c4aa9b 100644
--- a/test/behavior/packed_struct_explicit_backing_int.zig
+++ b/test/behavior/packed_struct_explicit_backing_int.zig
@@ -10,7 +10,6 @@ test "packed struct explicit backing integer" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S1 = packed struct { a: u8, b: u8, c: u8 };
 
diff --git a/test/behavior/pointers.zig b/test/behavior/pointers.zig
index ffeeca3986..35c32041ff 100644
--- a/test/behavior/pointers.zig
+++ b/test/behavior/pointers.zig
@@ -640,6 +640,8 @@ test "cast pointers with zero sized elements" {
 }
 
 test "comptime pointer equality through distinct fields with well-defined layout" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     const A = extern struct {
         x: u32,
         z: u16,
@@ -664,6 +666,8 @@ test "comptime pointer equality through distinct fields with well-defined layout
 }
 
 test "comptime pointer equality through distinct elements with well-defined layout" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     const buf: [2]u32 = .{ 123, 456 };
     const ptr: *const [2]u32 = &buf;
 
diff --git a/test/behavior/ptrcast.zig b/test/behavior/ptrcast.zig
index fc8a8b7482..ddf842f2d4 100644
--- a/test/behavior/ptrcast.zig
+++ b/test/behavior/ptrcast.zig
@@ -298,6 +298,8 @@ test "comptime @ptrCast with packed struct leaves value unmodified" {
 }
 
 test "@ptrCast restructures comptime-only array" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     {
         const a3a2: [3][2]comptime_int = .{
             .{ 1, 2 },
@@ -340,6 +342,8 @@ test "@ptrCast restructures comptime-only array" {
 }
 
 test "@ptrCast restructures sliced comptime-only array" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     const a3a2: [4][2]comptime_int = .{
         .{ 1, 2 },
         .{ 3, 4 },
diff --git a/test/behavior/sizeof_and_typeof.zig b/test/behavior/sizeof_and_typeof.zig
index 1be9ab1c3a..506baa2666 100644
--- a/test/behavior/sizeof_and_typeof.zig
+++ b/test/behavior/sizeof_and_typeof.zig
@@ -412,6 +412,7 @@ test "Extern function calls, dereferences and field access in @TypeOf" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const Test = struct {
         fn test_fn_1(a: c_long) @TypeOf(c_fopen("test", "r").*) {
diff --git a/test/behavior/struct.zig b/test/behavior/struct.zig
index d0509e308e..602be7e95e 100644
--- a/test/behavior/struct.zig
+++ b/test/behavior/struct.zig
@@ -176,6 +176,7 @@ const MemberFnTestFoo = struct {
 
 test "call member function directly" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const instance = MemberFnTestFoo{ .x = 1234 };
     const result = MemberFnTestFoo.member(instance);
@@ -184,6 +185,7 @@ test "call member function directly" {
 
 test "store member function in variable" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const instance = MemberFnTestFoo{ .x = 1234 };
     const memberFn = MemberFnTestFoo.member;
@@ -1559,6 +1561,7 @@ test "discarded struct initialization works as expected" {
 test "function pointer in struct returns the struct" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const A = struct {
         const A = @This();
@@ -1699,7 +1702,6 @@ test "struct field pointer has correct alignment" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -1781,6 +1783,8 @@ fn countFields(v: anytype) usize {
 }
 
 test "struct init with no result pointer sets field result types" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     const S = struct {
         // A function parameter has a result type, but no result pointer.
         fn f(s: struct { x: u32 }) u32 {
@@ -1928,6 +1932,8 @@ test "circular dependency through pointer field of a struct" {
 }
 
 test "field calls do not force struct field init resolution" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     const S = struct {
         x: u32 = blk: {
             _ = @TypeOf(make().dummyFn()); // runtime field call - S not fully resolved - dummyFn call should not force field init resolution
@@ -1958,7 +1964,6 @@ test "extern struct fields are aligned to 1" {
     if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const Foo = extern struct {
         a: u8 align(1),
@@ -2090,7 +2095,6 @@ test "struct field default value is a call" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const Z = packed struct {
         a: u32,
diff --git a/test/behavior/tuple.zig b/test/behavior/tuple.zig
index 82e9dd02eb..736bbad806 100644
--- a/test/behavior/tuple.zig
+++ b/test/behavior/tuple.zig
@@ -451,7 +451,6 @@ test "tuple pointer is indexable" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct { u32, bool };
 
diff --git a/test/behavior/type.zig b/test/behavior/type.zig
index d3b8beb1c0..364460ff98 100644
--- a/test/behavior/type.zig
+++ b/test/behavior/type.zig
@@ -203,6 +203,7 @@ test "Type.Opaque" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const Opaque = @Type(.{
         .Opaque = .{
@@ -348,7 +349,6 @@ test "Type.Struct" {
 test "Type.Enum" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const Foo = @Type(.{
         .Enum = .{
@@ -763,6 +763,8 @@ test "matching captures causes opaque equivalence" {
 }
 
 test "reify enum where fields refers to part of array" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     const fields: [3]std.builtin.Type.EnumField = .{
         .{ .name = "foo", .value = 0 },
         .{ .name = "bar", .value = 1 },
diff --git a/test/behavior/union.zig b/test/behavior/union.zig
index 8c9eacf7ce..6ea092e13d 100644
--- a/test/behavior/union.zig
+++ b/test/behavior/union.zig
@@ -1622,7 +1622,6 @@ test "defined-layout union field pointer has correct alignment" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest(comptime U: type) !void {
@@ -1658,7 +1657,6 @@ test "undefined-layout union field pointer has correct alignment" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest(comptime U: type) !void {
@@ -1694,7 +1692,6 @@ test "packed union field pointer has correct alignment" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const U = packed union { x: u20 };
     const S = packed struct(u24) { a: u2, u: U, b: u2 };

From 1dfdc21c31a027a34213a2e1f27433d4e609d634 Mon Sep 17 00:00:00 2001
From: David Rubin
Date: Fri, 10 May 2024 21:40:53 -0700
Subject: [PATCH 43/44] riscv: intcast `got_addr`

The recent merge-strings PR made `got_addr` an `i64`, so it now requires
an `@intCast`.
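For illustration, a minimal standalone sketch of the cast (names here are
hypothetical; in the backend the `i64` comes from `sym.zigGotAddress(elf_file)`,
and the destination is assumed to be the `u64` payload of the `memory` operand):

    const std = @import("std");

    test "narrow an i64 GOT address into a u64 operand" {
        // Stand-in for sym.zigGotAddress(elf_file), which now returns i64.
        const got_addr: i64 = 0x1000;
        // @intCast infers the result type (u64 here) and asserts the value
        // fits; a negative address would trip a safety check in safe builds.
        const mem_operand: u64 = @intCast(got_addr);
        try std.testing.expectEqual(@as(u64, 0x1000), mem_operand);
    }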
---
 src/arch/riscv64/CodeGen.zig | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index ed177ed1f1..762251bc44 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -3592,7 +3592,7 @@ fn genCall(
             _ = try sym.getOrCreateZigGotEntry(sym_index, elf_file);
             const got_addr = sym.zigGotAddress(elf_file);
 
-            try self.genSetReg(Type.usize, .ra, .{ .memory = got_addr });
+            try self.genSetReg(Type.usize, .ra, .{ .memory = @intCast(got_addr) });
 
             _ = try self.addInst(.{
                 .tag = .jalr,

From 75372f12ef1301118eed33e429f7498e76836cb3 Mon Sep 17 00:00:00 2001
From: David Rubin
Date: Fri, 10 May 2024 23:19:23 -0700
Subject: [PATCH 44/44] riscv: update behaviour tests again

---
 test/behavior/bitcast.zig         | 2 --
 test/behavior/comptime_memory.zig | 6 ------
 test/behavior/error.zig           | 1 +
 test/behavior/packed-union.zig    | 2 --
 test/behavior/pointers.zig        | 4 ----
 test/behavior/ptrcast.zig         | 4 ----
 test/behavior/type.zig            | 2 --
 test/behavior/union.zig           | 1 +
 8 files changed, 2 insertions(+), 20 deletions(-)

diff --git a/test/behavior/bitcast.zig b/test/behavior/bitcast.zig
index b6f7862bd0..28c797cef3 100644
--- a/test/behavior/bitcast.zig
+++ b/test/behavior/bitcast.zig
@@ -541,7 +541,6 @@ test "@bitCast of packed struct containing pointer" {
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         const A = packed struct {
@@ -571,7 +570,6 @@ test "@bitCast of extern struct containing pointer" {
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         const A = extern struct {
diff --git a/test/behavior/comptime_memory.zig b/test/behavior/comptime_memory.zig
index 502c44dc35..597ba62dd4 100644
--- a/test/behavior/comptime_memory.zig
+++ b/test/behavior/comptime_memory.zig
@@ -32,8 +32,6 @@ test "type pun signed and unsigned as array pointer" {
 }
 
 test "type pun signed and unsigned as offset many pointer" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     comptime {
         var x: [11]u32 = undefined;
         var y: [*]i32 = @ptrCast(&x[10]);
@@ -44,8 +42,6 @@ test "type pun signed and unsigned as offset many pointer" {
 }
 
 test "type pun signed and unsigned as array pointer with pointer arithemtic" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     comptime {
         var x: [11]u32 = undefined;
         const y = @as([*]i32, @ptrCast(&x[10])) - 10;
@@ -293,8 +289,6 @@ test "dance on linker values" {
 }
 
 test "offset array ptr by element size" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     comptime {
         const VirtualStruct = struct { x: u32 };
         var arr: [4]VirtualStruct = .{
diff --git a/test/behavior/error.zig b/test/behavior/error.zig
index d5e8308309..b579f1478e 100644
--- a/test/behavior/error.zig
+++ b/test/behavior/error.zig
@@ -1124,6 +1124,7 @@ test "result location initialization of error union with OPV payload" {
 
 test "return error union with i65" {
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try expect(try add(1000, 234) == 1234);
 }
diff --git a/test/behavior/packed-union.zig b/test/behavior/packed-union.zig
index 5dd1641c5f..d76f28ae59 100644
--- a/test/behavior/packed-union.zig
+++ b/test/behavior/packed-union.zig
@@ -177,8 +177,6 @@ test "assigning to non-active field at comptime" {
 }
 
 test "comptime packed union of pointers" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     const U = packed union {
         a: *const u32,
         b: *const [1]u32,
diff --git a/test/behavior/pointers.zig b/test/behavior/pointers.zig
index 35c32041ff..ffeeca3986 100644
--- a/test/behavior/pointers.zig
+++ b/test/behavior/pointers.zig
@@ -640,8 +640,6 @@ test "cast pointers with zero sized elements" {
 }
 
 test "comptime pointer equality through distinct fields with well-defined layout" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     const A = extern struct {
         x: u32,
         z: u16,
@@ -666,8 +664,6 @@ test "comptime pointer equality through distinct fields with well-defined layout
 }
 
 test "comptime pointer equality through distinct elements with well-defined layout" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     const buf: [2]u32 = .{ 123, 456 };
     const ptr: *const [2]u32 = &buf;
 
diff --git a/test/behavior/ptrcast.zig b/test/behavior/ptrcast.zig
index ddf842f2d4..fc8a8b7482 100644
--- a/test/behavior/ptrcast.zig
+++ b/test/behavior/ptrcast.zig
@@ -298,8 +298,6 @@ test "comptime @ptrCast with packed struct leaves value unmodified" {
 }
 
 test "@ptrCast restructures comptime-only array" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     {
         const a3a2: [3][2]comptime_int = .{
             .{ 1, 2 },
@@ -342,8 +340,6 @@ test "@ptrCast restructures comptime-only array" {
 }
 
 test "@ptrCast restructures sliced comptime-only array" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     const a3a2: [4][2]comptime_int = .{
         .{ 1, 2 },
         .{ 3, 4 },
diff --git a/test/behavior/type.zig b/test/behavior/type.zig
index 364460ff98..6150a490cf 100644
--- a/test/behavior/type.zig
+++ b/test/behavior/type.zig
@@ -763,8 +763,6 @@ test "matching captures causes opaque equivalence" {
 }
 
 test "reify enum where fields refers to part of array" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     const fields: [3]std.builtin.Type.EnumField = .{
         .{ .name = "foo", .value = 0 },
         .{ .name = "bar", .value = 1 },
diff --git a/test/behavior/union.zig b/test/behavior/union.zig
index 6ea092e13d..62997d097a 100644
--- a/test/behavior/union.zig
+++ b/test/behavior/union.zig
@@ -2373,6 +2373,7 @@ test "signed enum tag with negative value" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const Enum = enum(i8) {
         a = -1,