riscv: implement more arithmetic instructions

David Rubin 2024-05-12 13:46:49 -07:00
parent 083b7b483e
commit c10d1c6a75
7 changed files with 363 additions and 125 deletions
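
For context, a minimal sketch (not part of this commit) of the kind of Zig code the new operations let the self-hosted riscv64 backend compile; the comments name the code paths added below:

const std = @import("std");

test "f64 multiply, divide, and compare" {
    var a: f64 = 1.5;
    var b: f64 = 0.5;
    _ = .{ &a, &b }; // keep the operands runtime-known
    const p = a * b; // airMul -> binOp(.mul) -> fmul.s / fmul.d
    const q = a / b; // airDiv -> binOp(.div_float) -> fdiv.s / fdiv.d
    try std.testing.expect(p < q); // airCmp -> binOpFloat -> pseudo_compare -> flt
    try std.testing.expect(a != b); // lowered as feq followed by xori rd, rd, 1
}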


@ -2075,6 +2075,7 @@ fn binOp(
.add,
.sub,
.mul,
.div_float,
.cmp_eq,
.cmp_neq,
.cmp_gt,
@ -2086,10 +2087,11 @@ fn binOp(
switch (lhs_ty.zigTypeTag(zcu)) {
.Float => {
const float_bits = lhs_ty.floatBits(zcu.getTarget());
if (float_bits <= 32) {
const float_reg_bits: u32 = if (self.hasFeature(.d)) 64 else 32;
if (float_bits <= float_reg_bits) {
return self.binOpFloat(tag, lhs, lhs_ty, rhs, rhs_ty);
} else {
return self.fail("TODO: binary operations for floats with bits > 32", .{});
return self.fail("TODO: binary operations for floats with bits > {d}", .{float_reg_bits});
}
},
.Vector => return self.fail("TODO binary operations on vectors", .{}),
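The hasFeature(.d) check above encodes the ISA's float register width: with the D extension the FP registers hold 64 bits (FLEN = 64), with only F they hold 32. A standalone restatement, purely illustrative and not part of the commit:

fn floatRegBits(has_d_extension: bool) u32 {
    // FLEN is 64 when the D extension is present, 32 with F alone.
    return if (has_d_extension) 64 else 32;
}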
@ -2255,6 +2257,7 @@ fn binOpRegister(
.cmp_lte => .lte,
else => unreachable,
},
.size = self.memSize(lhs_ty),
},
},
});
@ -2285,28 +2288,90 @@ fn binOpFloat(
const mir_tag: Mir.Inst.Tag = switch (tag) {
.add => if (float_bits == 32) .fadds else .faddd,
.cmp_eq => if (float_bits == 32) .feqs else .feqd,
.sub => if (float_bits == 32) .fsubs else .fsubd,
.mul => if (float_bits == 32) .fmuls else .fmuld,
.div_float => if (float_bits == 32) .fdivs else .fdivd,
.cmp_eq,
.cmp_neq,
.cmp_gt,
.cmp_gte,
.cmp_lt,
.cmp_lte,
=> .pseudo,
else => return self.fail("TODO: binOpFloat mir_tag {s}", .{@tagName(tag)}),
};
const return_class: abi.RegisterClass = switch (tag) {
.add => .float,
.cmp_eq => .int,
.add,
.sub,
.mul,
.div_float,
=> .float,
.cmp_eq,
.cmp_neq,
.cmp_gt,
.cmp_gte,
.cmp_lt,
.cmp_lte,
=> .int,
else => unreachable,
};
const dest_reg, const dest_lock = try self.allocReg(return_class);
defer self.register_manager.unlockReg(dest_lock);
_ = try self.addInst(.{
.tag = mir_tag,
.ops = .rrr,
.data = .{ .r_type = .{
.rd = dest_reg,
.rs1 = lhs_reg,
.rs2 = rhs_reg,
} },
});
switch (tag) {
.add,
.sub,
.mul,
.div_float,
=> {
_ = try self.addInst(.{
.tag = mir_tag,
.ops = .rrr,
.data = .{ .r_type = .{
.rd = dest_reg,
.rs1 = lhs_reg,
.rs2 = rhs_reg,
} },
});
},
.cmp_eq,
.cmp_neq,
.cmp_gt,
.cmp_gte,
.cmp_lt,
.cmp_lte,
=> {
_ = try self.addInst(.{
.tag = .pseudo,
.ops = .pseudo_compare,
.data = .{
.compare = .{
.rd = dest_reg,
.rs1 = lhs_reg,
.rs2 = rhs_reg,
.op = switch (tag) {
.cmp_eq => .eq,
.cmp_neq => .neq,
.cmp_gt => .gt,
.cmp_gte => .gte,
.cmp_lt => .lt,
.cmp_lte => .lte,
else => unreachable,
},
.size = self.memSize(lhs_ty),
},
},
});
},
else => unreachable,
}
return MCValue{ .register = dest_reg };
}
@ -2360,7 +2425,27 @@ fn airSubSat(self: *Self, inst: Air.Inst.Index) !void {
fn airMul(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement mul for {}", .{self.target.cpu.arch});
const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: {
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const lhs_ty = self.typeOf(bin_op.lhs);
const rhs_ty = self.typeOf(bin_op.rhs);
break :result try self.binOp(.mul, lhs, lhs_ty, rhs, rhs_ty);
};
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airDiv(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: {
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const lhs_ty = self.typeOf(bin_op.lhs);
const rhs_ty = self.typeOf(bin_op.rhs);
break :result try self.binOp(.div_float, lhs, lhs_ty, rhs, rhs_ty);
};
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
@ -2672,12 +2757,6 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
return self.fail("TODO implement airShlWithOverflow for {}", .{self.target.cpu.arch});
}
fn airDiv(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement div for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airRem(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else return self.fail("TODO implement rem for {}", .{self.target.cpu.arch});
@ -3742,6 +3821,9 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
const arg_ty = self.typeOfIndex(inst);
const dst_mcv = try self.allocRegOrMem(inst, false);
log.debug("airArg {} -> {}", .{ src_mcv, dst_mcv });
try self.genCopy(arg_ty, dst_mcv, src_mcv);
try self.genArgDbgInfo(inst, src_mcv);
@ -4135,10 +4217,10 @@ fn airCmp(self: *Self, inst: Air.Inst.Index) !void {
},
.Float => {
const float_bits = lhs_ty.floatBits(self.target.*);
if (float_bits > 32) {
return self.fail("TODO: airCmp float > 32 bits", .{});
const float_reg_size: u32 = if (self.hasFeature(.d)) 64 else 32;
if (float_bits > float_reg_size) {
return self.fail("TODO: airCmp float > 64/32 bits", .{});
}
break :result try self.binOpFloat(tag, lhs, lhs_ty, rhs, lhs_ty);
},
else => unreachable,
@ -6141,6 +6223,8 @@ fn resolveCallingConventionValues(
};
}
var param_float_reg_i: usize = 0;
for (param_types, result.args) |ty, *arg| {
if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) {
assert(cc == .Unspecified);
@ -6151,8 +6235,6 @@ fn resolveCallingConventionValues(
var arg_mcv: [2]MCValue = undefined;
var arg_mcv_i: usize = 0;
var param_float_reg_i: usize = 0;
const classes = mem.sliceTo(&abi.classifySystem(ty, zcu), .none);
for (classes) |class| switch (class) {
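Hoisting param_float_reg_i out of the per-parameter loop matters once a function takes more than one float argument: resetting the counter for every parameter would assign each float argument the same register. A minimal sketch (not from the commit) of a signature the fix affects, assuming the backend hands out the standard float argument registers fa0, fa1, ... in order:

// With the counter hoisted, x and y land in distinct float argument registers
// (fa0 and fa1 under the usual LP64D convention) instead of colliding.
fn hypot2(x: f64, y: f64) f64 {
    return x * x + y * y;
}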


@ -111,21 +111,46 @@ pub const Mnemonic = enum {
ebreak,
unimp,
// float mnemonics
// F extension (32-bit float)
fadds,
faddd,
fsubs,
fmuls,
fdivs,
feqs,
feqd,
fmins,
fmaxs,
fsqrts,
fld,
flw,
fsd,
fsw,
feqs,
flts,
fles,
fsgnjns,
// D extension (64-bit float)
faddd,
fsubd,
fmuld,
fdivd,
fmind,
fmaxd,
fsqrtd,
fld,
fsd,
feqd,
fltd,
fled,
fsgnjnd,
pub fn encoding(mnem: Mnemonic) Enc {
return switch (mnem) {
// zig fmt: off
@ -163,39 +188,66 @@ pub const Mnemonic = enum {
.fadds => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00000, .fmt = .S, .rm = 0b111 } } },
.faddd => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00000, .fmt = .D, .rm = 0b111 } } },
.fsubs => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00001, .fmt = .S, .rm = 0b111 } } },
.fsubd => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00001, .fmt = .D, .rm = 0b111 } } },
.fmuls => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00010, .fmt = .S, .rm = 0b111 } } },
.fmuld => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00010, .fmt = .D, .rm = 0b111 } } },
.fdivs => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00011, .fmt = .S, .rm = 0b111 } } },
.fdivd => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00011, .fmt = .D, .rm = 0b111 } } },
.fmins => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00101, .fmt = .S, .rm = 0b000 } } },
.fmind => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00101, .fmt = .D, .rm = 0b000 } } },
.fmaxs => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00101, .fmt = .S, .rm = 0b001 } } },
.fmaxd => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00101, .fmt = .D, .rm = 0b001 } } },
.fsqrts => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b01011, .fmt = .S, .rm = 0b111 } } },
.fsqrtd => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b01011, .fmt = .D, .rm = 0b111 } } },
.fles => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b10100, .fmt = .S, .rm = 0b000 } } },
.fled => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b10100, .fmt = .D, .rm = 0b000 } } },
.flts => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b10100, .fmt = .S, .rm = 0b001 } } },
.fltd => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b10100, .fmt = .D, .rm = 0b001 } } },
.feqs => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b10100, .fmt = .S, .rm = 0b010 } } },
.feqd => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b10100, .fmt = .D, .rm = 0b010 } } },
.fsgnjns => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00100, .fmt = .S, .rm = 0b000 } } },
.fsgnjnd => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00100, .fmt = .D, .rm = 0b000 } } },
// LOAD
.ld => .{ .opcode = .LOAD, .data = .{ .fo = .{ .funct3 = 0b011 } } },
.lw => .{ .opcode = .LOAD, .data = .{ .fo = .{ .funct3 = 0b010 } } },
.lwu => .{ .opcode = .LOAD, .data = .{ .fo = .{ .funct3 = 0b110 } } },
.lh => .{ .opcode = .LOAD, .data = .{ .fo = .{ .funct3 = 0b001 } } },
.lhu => .{ .opcode = .LOAD, .data = .{ .fo = .{ .funct3 = 0b101 } } },
.lb => .{ .opcode = .LOAD, .data = .{ .fo = .{ .funct3 = 0b000 } } },
.lh => .{ .opcode = .LOAD, .data = .{ .fo = .{ .funct3 = 0b001 } } },
.lw => .{ .opcode = .LOAD, .data = .{ .fo = .{ .funct3 = 0b010 } } },
.ld => .{ .opcode = .LOAD, .data = .{ .fo = .{ .funct3 = 0b011 } } },
.lbu => .{ .opcode = .LOAD, .data = .{ .fo = .{ .funct3 = 0b100 } } },
.lhu => .{ .opcode = .LOAD, .data = .{ .fo = .{ .funct3 = 0b101 } } },
.lwu => .{ .opcode = .LOAD, .data = .{ .fo = .{ .funct3 = 0b110 } } },
// STORE
.sd => .{ .opcode = .STORE, .data = .{ .fo = .{ .funct3 = 0b011 } } },
.sw => .{ .opcode = .STORE, .data = .{ .fo = .{ .funct3 = 0b010 } } },
.sh => .{ .opcode = .STORE, .data = .{ .fo = .{ .funct3 = 0b001 } } },
.sb => .{ .opcode = .STORE, .data = .{ .fo = .{ .funct3 = 0b000 } } },
.sh => .{ .opcode = .STORE, .data = .{ .fo = .{ .funct3 = 0b001 } } },
.sw => .{ .opcode = .STORE, .data = .{ .fo = .{ .funct3 = 0b010 } } },
.sd => .{ .opcode = .STORE, .data = .{ .fo = .{ .funct3 = 0b011 } } },
// LOAD_FP
.fld => .{ .opcode = .LOAD_FP, .data = .{ .fo = .{ .funct3 = 0b011 } } },
.flw => .{ .opcode = .LOAD_FP, .data = .{ .fo = .{ .funct3 = 0b010 } } },
.fld => .{ .opcode = .LOAD_FP, .data = .{ .fo = .{ .funct3 = 0b011 } } },
// STORE_FP
.fsd => .{ .opcode = .STORE_FP, .data = .{ .fo = .{ .funct3 = 0b011 } } },
.fsw => .{ .opcode = .STORE_FP, .data = .{ .fo = .{ .funct3 = 0b010 } } },
.fsd => .{ .opcode = .STORE_FP, .data = .{ .fo = .{ .funct3 = 0b011 } } },
// JALR
@ -310,9 +362,36 @@ pub const InstEnc = enum {
.fadds,
.faddd,
.fsubs,
.fsubd,
.fmuls,
.fmuld,
.fdivs,
.fdivd,
.fmins,
.fmind,
.fmaxs,
.fmaxd,
.fsqrts,
.fsqrtd,
.fles,
.fled,
.flts,
.fltd,
.feqs,
.feqd,
.fsgnjns,
.fsgnjnd,
=> .R,
.ecall,
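For reference, in the OP_FP encodings above, funct5 and the 2-bit fmt field together form the standard funct7 (fmt: S = 0b00, D = 0b01), and rm = 0b111 selects dynamic rounding from frm. A small illustrative check, not part of the commit:

const std = @import("std");

comptime {
    const funct5_fadd: u7 = 0b00000;
    const funct5_fcmp: u7 = 0b10100;
    const fmt_s: u7 = 0b00;
    const fmt_d: u7 = 0b01;
    // fadd.s -> funct7 = 0b0000000, fadd.d -> funct7 = 0b0000001
    std.debug.assert(((funct5_fadd << 2) | fmt_s) == 0b0000000);
    std.debug.assert(((funct5_fadd << 2) | fmt_d) == 0b0000001);
    // feq.s -> funct7 = 0b1010000, with funct3 (the rm slot) = 0b010
    std.debug.assert(((funct5_fcmp << 2) | fmt_s) == 0b1010000);
}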


@ -139,7 +139,7 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct {
switch (dst_class) {
.float => {
try lower.emit(.fsgnjns, &.{
try lower.emit(if (lower.hasFeature(.d)) .fsgnjnd else .fsgnjns, &.{
.{ .reg = rr.rd },
.{ .reg = rr.rs },
.{ .reg = rr.rs },
@ -176,9 +176,11 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct {
.pseudo_load_symbol => {
const payload = inst.data.payload;
const data = lower.mir.extraData(Mir.LoadSymbolPayload, payload).data;
const dst_reg: bits.Register = @enumFromInt(data.register);
assert(dst_reg.class() == .int);
try lower.emit(.lui, &.{
.{ .reg = @enumFromInt(data.register) },
.{ .reg = dst_reg },
.{ .imm = lower.reloc(.{ .load_symbol_reloc = .{
.atom_index = data.atom_index,
.sym_index = data.sym_index,
@ -187,14 +189,16 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct {
// the above reloc implies this one
try lower.emit(.addi, &.{
.{ .reg = @enumFromInt(data.register) },
.{ .reg = @enumFromInt(data.register) },
.{ .reg = dst_reg },
.{ .reg = dst_reg },
.{ .imm = Immediate.s(0) },
});
},
.pseudo_lea_rm => {
const rm = inst.data.rm;
assert(rm.r.class() == .int);
const frame = rm.m.toFrameLoc(lower.mir);
try lower.emit(.addi, &.{
@ -212,78 +216,135 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct {
const rs1 = compare.rs1;
const rs2 = compare.rs2;
switch (op) {
.eq => {
try lower.emit(.xor, &.{
.{ .reg = rd },
.{ .reg = rs1 },
.{ .reg = rs2 },
});
const class = rs1.class();
const size = compare.size.bitSize();
try lower.emit(.sltiu, &.{
.{ .reg = rd },
.{ .reg = rd },
.{ .imm = Immediate.s(1) },
});
},
.neq => {
try lower.emit(.xor, &.{
.{ .reg = rd },
.{ .reg = rs1 },
.{ .reg = rs2 },
});
switch (class) {
.int => switch (op) {
.eq => {
try lower.emit(.xor, &.{
.{ .reg = rd },
.{ .reg = rs1 },
.{ .reg = rs2 },
});
try lower.emit(.sltu, &.{
.{ .reg = rd },
.{ .reg = .zero },
.{ .reg = rd },
});
},
.gt => {
try lower.emit(.sltu, &.{
.{ .reg = rd },
.{ .reg = rs1 },
.{ .reg = rs2 },
});
},
.gte => {
try lower.emit(.sltu, &.{
.{ .reg = rd },
.{ .reg = rs1 },
.{ .reg = rs2 },
});
try lower.emit(.sltiu, &.{
.{ .reg = rd },
.{ .reg = rd },
.{ .imm = Immediate.s(1) },
});
},
.neq => {
try lower.emit(.xor, &.{
.{ .reg = rd },
.{ .reg = rs1 },
.{ .reg = rs2 },
});
try lower.emit(.xori, &.{
.{ .reg = rd },
.{ .reg = rd },
.{ .imm = Immediate.s(1) },
});
},
.lt => {
try lower.emit(.slt, &.{
.{ .reg = rd },
.{ .reg = rs1 },
.{ .reg = rs2 },
});
},
.lte => {
try lower.emit(.slt, &.{
.{ .reg = rd },
.{ .reg = rs2 },
.{ .reg = rs1 },
});
try lower.emit(.sltu, &.{
.{ .reg = rd },
.{ .reg = .zero },
.{ .reg = rd },
});
},
.gt => {
try lower.emit(.sltu, &.{
.{ .reg = rd },
.{ .reg = rs1 },
.{ .reg = rs2 },
});
},
.gte => {
try lower.emit(.sltu, &.{
.{ .reg = rd },
.{ .reg = rs1 },
.{ .reg = rs2 },
});
try lower.emit(.xori, &.{
.{ .reg = rd },
.{ .reg = rd },
.{ .imm = Immediate.s(1) },
});
try lower.emit(.xori, &.{
.{ .reg = rd },
.{ .reg = rd },
.{ .imm = Immediate.s(1) },
});
},
.lt => {
try lower.emit(.slt, &.{
.{ .reg = rd },
.{ .reg = rs1 },
.{ .reg = rs2 },
});
},
.lte => {
try lower.emit(.slt, &.{
.{ .reg = rd },
.{ .reg = rs2 },
.{ .reg = rs1 },
});
try lower.emit(.xori, &.{
.{ .reg = rd },
.{ .reg = rd },
.{ .imm = Immediate.s(1) },
});
},
},
.float => switch (op) {
// eq
.eq => {
try lower.emit(if (size == 64) .feqd else .feqs, &.{
.{ .reg = rd },
.{ .reg = rs1 },
.{ .reg = rs2 },
});
},
// !(eq)
.neq => {
try lower.emit(if (size == 64) .feqd else .feqs, &.{
.{ .reg = rd },
.{ .reg = rs1 },
.{ .reg = rs2 },
});
try lower.emit(.xori, &.{
.{ .reg = rd },
.{ .reg = rd },
.{ .imm = Immediate.s(1) },
});
},
.lt => {
try lower.emit(if (size == 64) .fltd else .flts, &.{
.{ .reg = rd },
.{ .reg = rs1 },
.{ .reg = rs2 },
});
},
.lte => {
try lower.emit(if (size == 64) .fled else .fles, &.{
.{ .reg = rd },
.{ .reg = rs1 },
.{ .reg = rs2 },
});
},
.gt => {
try lower.emit(if (size == 64) .fltd else .flts, &.{
.{ .reg = rd },
.{ .reg = rs2 },
.{ .reg = rs1 },
});
},
.gte => {
try lower.emit(if (size == 64) .fled else .fles, &.{
.{ .reg = rd },
.{ .reg = rs2 },
.{ .reg = rs1 },
});
},
},
}
},
.pseudo_not => {
const rr = inst.data.rr;
assert(rr.rs.class() == .int and rr.rd.class() == .int);
try lower.emit(.xori, &.{
.{ .reg = rr.rd },
@ -408,6 +469,12 @@ pub fn fail(lower: *Lower, comptime format: []const u8, args: anytype) Error {
return error.LowerFail;
}
fn hasFeature(lower: *Lower, feature: std.Target.riscv.Feature) bool {
const target = lower.bin_file.comp.module.?.getTarget();
const features = target.cpu.features;
return std.Target.riscv.featureSetHas(features, feature);
}
const Lower = @This();
const abi = @import("abi.zig");
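The float arm of pseudo_compare above covers all six operators with only three hardware comparisons: gt and gte reuse flt/fle with the operands swapped, and neq negates feq with an xori. A condensed restatement of that mapping, as a hypothetical helper that is not part of the commit:

const FloatCmpLowering = struct {
    mnemonic: enum { feq, flt, fle },
    swap_operands: bool, // gt/gte emit flt/fle with rs1 and rs2 exchanged
    negate_result: bool, // neq is feq followed by xori rd, rd, 1
};

fn floatCmpLowering(op: enum { eq, neq, lt, lte, gt, gte }) FloatCmpLowering {
    return switch (op) {
        .eq => .{ .mnemonic = .feq, .swap_operands = false, .negate_result = false },
        .neq => .{ .mnemonic = .feq, .swap_operands = false, .negate_result = true },
        .lt => .{ .mnemonic = .flt, .swap_operands = false, .negate_result = false },
        .lte => .{ .mnemonic = .fle, .swap_operands = false, .negate_result = false },
        .gt => .{ .mnemonic = .flt, .swap_operands = true, .negate_result = false },
        .gte => .{ .mnemonic = .fle, .swap_operands = true, .negate_result = false },
    };
}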


@ -72,15 +72,39 @@ pub const Inst = struct {
// F extension (32-bit float)
fadds,
fsubs,
fmuls,
fdivs,
fmins,
fmaxs,
fsqrts,
flw,
fsw,
feqs,
flts,
fles,
// D extension (64-bit float)
faddd,
fsubd,
fmuld,
fdivd,
fmind,
fmaxd,
fsqrtd,
fld,
fsd,
feqd,
fltd,
fled,
/// A pseudo-instruction. Used for anything that isn't 1:1 with an
/// assembly instruction.
@ -182,6 +206,7 @@ pub const Inst = struct {
lt,
lte,
},
size: Memory.Size,
},
reloc: struct {


@ -1717,7 +1717,6 @@ test "peer type resolution: float and comptime-known fixed-width integer" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const i: u8 = 100;
var f: f32 = 1.234;
@ -2586,7 +2585,6 @@ test "@intFromBool on vector" {
test "numeric coercions with undefined" {
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const from: i32 = undefined;
var to: f32 = from;


@ -22,8 +22,6 @@ test "add f16" {
}
test "add f32/f64" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testAdd(f32);
try comptime testAdd(f32);
try testAdd(f64);
@ -60,8 +58,6 @@ test "sub f16" {
}
test "sub f32/f64" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testSub(f32);
try comptime testSub(f32);
try testSub(f64);
@ -98,8 +94,6 @@ test "mul f16" {
}
test "mul f32/f64" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testMul(f32);
try comptime testMul(f32);
try testMul(f64);
@ -1622,7 +1616,6 @@ test "comptime inf >= runtime 1" {
test "comptime isNan(nan * 1)" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const nan_times_one = comptime std.math.nan(f64) * 1;
try std.testing.expect(std.math.isNan(nan_times_one));
@ -1630,7 +1623,6 @@ test "comptime isNan(nan * 1)" {
test "runtime isNan(nan * 1)" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const nan_times_one = std.math.nan(f64) * 1;
try std.testing.expect(std.math.isNan(nan_times_one));
@ -1638,7 +1630,6 @@ test "runtime isNan(nan * 1)" {
test "comptime isNan(nan * 0)" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const nan_times_zero = comptime std.math.nan(f64) * 0;
try std.testing.expect(std.math.isNan(nan_times_zero));
@ -1648,7 +1639,6 @@ test "comptime isNan(nan * 0)" {
test "runtime isNan(nan * 0)" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const nan_times_zero = std.math.nan(f64) * 0;
try std.testing.expect(std.math.isNan(nan_times_zero));
@ -1658,7 +1648,6 @@ test "runtime isNan(nan * 0)" {
test "comptime isNan(inf * 0)" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const inf_times_zero = comptime std.math.inf(f64) * 0;
try std.testing.expect(std.math.isNan(inf_times_zero));
@ -1668,7 +1657,6 @@ test "comptime isNan(inf * 0)" {
test "runtime isNan(inf * 0)" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const inf_times_zero = std.math.inf(f64) * 0;
try std.testing.expect(std.math.isNan(inf_times_zero));


@ -236,7 +236,6 @@ test "float equality" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const x: f64 = 0.012;
const y: f64 = x + 1.0;