Merge pull request #20474 from Rexicon226/riscv

more RISC-V backend progress
This commit is contained in:
Jakub Konka 2024-07-17 08:39:44 +02:00 committed by GitHub
commit 9d9b5a11e8
34 changed files with 1629 additions and 498 deletions

View File

@ -1,7 +1,7 @@
//! SIMD (Single Instruction; Multiple Data) convenience functions.
//!
//! May offer a potential boost in performance on some targets by performing
//! the same operations on multiple elements at once.
//! the same operation on multiple elements at once.
//!
//! Some functions are known to not work on MIPS.
@ -10,7 +10,6 @@ const builtin = @import("builtin");
pub fn suggestVectorLengthForCpu(comptime T: type, comptime cpu: std.Target.Cpu) ?comptime_int {
// This is guesswork; if you have better suggestions, add them or edit the current values here.
// This can only run at comptime, but stage 1 fails at it; stage 2 can understand it.
const element_bit_size = @max(8, std.math.ceilPowerOfTwo(u16, @bitSizeOf(T)) catch unreachable);
const vector_bit_size: u16 = blk: {
if (cpu.arch.isX86()) {
@ -37,8 +36,37 @@ pub fn suggestVectorLengthForCpu(comptime T: type, comptime cpu: std.Target.Cpu)
// the 2048 bits or using just 64 per vector or something in between
if (std.Target.mips.featureSetHas(cpu.features, std.Target.mips.Feature.mips3d)) break :blk 64;
} else if (cpu.arch.isRISCV()) {
// in risc-v the Vector Extension allows configurable vector sizes, but a standard size of 128 is a safe estimate
if (std.Target.riscv.featureSetHas(cpu.features, .v)) break :blk 128;
// In RISC-V, vector registers are length-agnostic, so there is no single best size to pick.
// The usual vector length in most RISC-V CPUs is 256 bits; however, it can reach multiple kilobytes.
if (std.Target.riscv.featureSetHas(cpu.features, .v)) {
var vec_bit_length: u32 = 256;
if (std.Target.riscv.featureSetHas(cpu.features, .zvl32b)) {
vec_bit_length = 32;
} else if (std.Target.riscv.featureSetHas(cpu.features, .zvl64b)) {
vec_bit_length = 64;
} else if (std.Target.riscv.featureSetHas(cpu.features, .zvl128b)) {
vec_bit_length = 128;
} else if (std.Target.riscv.featureSetHas(cpu.features, .zvl256b)) {
vec_bit_length = 256;
} else if (std.Target.riscv.featureSetHas(cpu.features, .zvl512b)) {
vec_bit_length = 512;
} else if (std.Target.riscv.featureSetHas(cpu.features, .zvl1024b)) {
vec_bit_length = 1024;
} else if (std.Target.riscv.featureSetHas(cpu.features, .zvl2048b)) {
vec_bit_length = 2048;
} else if (std.Target.riscv.featureSetHas(cpu.features, .zvl4096b)) {
vec_bit_length = 4096;
} else if (std.Target.riscv.featureSetHas(cpu.features, .zvl8192b)) {
vec_bit_length = 8192;
} else if (std.Target.riscv.featureSetHas(cpu.features, .zvl16384b)) {
vec_bit_length = 16384;
} else if (std.Target.riscv.featureSetHas(cpu.features, .zvl32768b)) {
vec_bit_length = 32768;
} else if (std.Target.riscv.featureSetHas(cpu.features, .zvl65536b)) {
vec_bit_length = 65536;
}
break :blk vec_bit_length;
}
} else if (cpu.arch.isSPARC()) {
// TODO: Test SPARC capability to handle bigger vectors
// In theory SPARC has 32 registers of 64 bits which can be used in parallel
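The element count ultimately suggested is essentially the chosen vector bit width divided by the power-of-two-rounded element bit size. A minimal usage sketch, assuming the public std.simd.suggestVectorLength wrapper and a scalar fallback when no width is suggested:

const std = @import("std");

// Hedged usage sketch: sum a slice of f32 with the suggested vector length,
// falling back to a scalar loop when the target offers no SIMD suggestion.
fn sumFloats(values: []const f32) f32 {
    const vec_len = comptime std.simd.suggestVectorLength(f32) orelse 1;
    const V = @Vector(vec_len, f32);
    var acc: V = @splat(0);
    var i: usize = 0;
    while (i + vec_len <= values.len) : (i += vec_len) {
        acc += @as(V, values[i..][0..vec_len].*);
    }
    var total: f32 = @reduce(.Add, acc);
    while (i < values.len) : (i += 1) total += values[i];
    return total;
}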

View File

@ -221,7 +221,26 @@ fn riscv_start() callconv(.C) noreturn {
}
break :ret root.main();
},
else => @compileError("expected return type of main to be 'void', 'noreturn', 'u8'"),
.ErrorUnion => ret: {
const result = root.main() catch {
const stderr = std.io.getStdErr().writer();
stderr.writeAll("failed with error\n") catch {
@panic("failed to print when main returned error");
};
break :ret 1;
};
switch (@typeInfo(@TypeOf(result))) {
.Void => break :ret 0,
.Int => |info| {
if (info.bits != 8 or info.signedness == .signed) {
@compileError(bad_main_ret);
}
return result;
},
else => @compileError(bad_main_ret),
}
},
else => @compileError(bad_main_ret),
});
}
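A hedged example of a program that exercises the new error-union branch above: on success the start code returns 0, on failure it prints "failed with error" and exits with status 1 (the example itself is illustrative, not part of the patch):

const std = @import("std");

pub fn main() !void {
    // Any error returned here is caught by the riscv start code,
    // which prints a message and exits with status 1.
    try std.io.getStdOut().writeAll("hello from riscv64\n");
}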

File diff suppressed because it is too large

View File

@ -26,7 +26,7 @@ pub fn emitMir(emit: *Emit) Error!void {
mir_index,
@intCast(emit.code.items.len),
);
const lowered = try emit.lower.lowerMir(mir_index);
const lowered = try emit.lower.lowerMir(mir_index, .{ .allow_frame_locs = true });
var lowered_relocs = lowered.relocs;
for (lowered.insts, 0..) |lowered_inst, lowered_index| {
const start_offset: u32 = @intCast(emit.code.items.len);
@ -75,7 +75,7 @@ pub fn emitMir(emit: *Emit) Error!void {
.r_info = (@as(u64, @intCast(symbol.sym_index)) << 32) | lo_r_type,
.r_addend = 0,
});
} else return emit.fail("TODO: load_symbol_reloc non-ELF", .{});
} else unreachable;
},
.call_extern_fn_reloc => |symbol| {
if (emit.bin_file.cast(link.File.Elf)) |elf_file| {

View File

@ -2,37 +2,55 @@ mnemonic: Mnemonic,
data: Data,
const OpCode = enum(u7) {
OP = 0b0110011,
OP_IMM = 0b0010011,
OP_IMM_32 = 0b0011011,
OP_32 = 0b0111011,
BRANCH = 0b1100011,
LOAD = 0b0000011,
STORE = 0b0100011,
SYSTEM = 0b1110011,
OP_FP = 0b1010011,
LOAD_FP = 0b0000111,
STORE_FP = 0b0100111,
JALR = 0b1100111,
MISC_MEM = 0b0001111,
OP_IMM = 0b0010011,
AUIPC = 0b0010111,
OP_IMM_32 = 0b0011011,
STORE = 0b0100011,
STORE_FP = 0b0100111,
AMO = 0b0101111,
OP_V = 0b1010111,
OP = 0b0110011,
OP_32 = 0b0111011,
LUI = 0b0110111,
MADD = 0b1000011,
MSUB = 0b1000111,
NMSUB = 0b1001011,
NMADD = 0b1001111,
OP_FP = 0b1010011,
OP_IMM_64 = 0b1011011,
BRANCH = 0b1100011,
JALR = 0b1100111,
JAL = 0b1101111,
NONE = 0b0000000,
SYSTEM = 0b1110011,
OP_64 = 0b1111011,
NONE = 0b00000000,
};
const Fmt = enum(u2) {
const FpFmt = enum(u2) {
/// 32-bit single-precision
S = 0b00,
/// 64-bit double-precision
D = 0b01,
_reserved = 0b10,
// H = 0b10, unused in the G extension
/// 128-bit quad-precision
Q = 0b11,
};
const AmoWidth = enum(u3) {
W = 0b010,
D = 0b011,
};
const FenceMode = enum(u4) {
none = 0b0000,
tso = 0b1000,
};
const Enc = struct {
opcode: OpCode,
@ -42,11 +60,19 @@ const Enc = struct {
funct3: u3,
funct7: u7,
},
amo: struct {
funct5: u5,
width: AmoWidth,
},
fence: struct {
funct3: u3,
fm: FenceMode,
},
/// funct5 + rm + fmt
fmt: struct {
funct5: u5,
rm: u3,
fmt: Fmt,
fmt: FpFmt,
},
/// funct3
f: struct {
@ -58,9 +84,55 @@ const Enc = struct {
funct3: u3,
has_5: bool,
},
vecls: struct {
width: VecWidth,
umop: Umop,
vm: bool,
mop: Mop,
mew: bool,
nf: u3,
},
vecmath: struct {
vm: bool,
funct6: u6,
funct3: VecType,
},
/// U-type
none,
},
const Mop = enum(u2) {
unit = 0b00,
unord = 0b01,
stride = 0b10,
ord = 0b11,
};
const Umop = enum(u5) {
unit = 0b00000,
whole = 0b01000,
mask = 0b01011,
fault = 0b10000,
};
const VecWidth = enum(u3) {
// zig fmt: off
@"8" = 0b000,
@"16" = 0b101,
@"32" = 0b110,
@"64" = 0b111,
// zig fmt: on
};
const VecType = enum(u3) {
OPIVV = 0b000,
OPFVV = 0b001,
OPMVV = 0b010,
OPIVI = 0b011,
OPIVX = 0b100,
OPFVF = 0b101,
OPMVX = 0b110,
};
};
pub const Mnemonic = enum {
@ -90,6 +162,9 @@ pub const Mnemonic = enum {
addi,
jalr,
vsetivli,
vsetvli,
// U Type
lui,
auipc,
@ -130,6 +205,8 @@ pub const Mnemonic = enum {
ebreak,
unimp,
csrrs,
// M extension
mul,
mulw,
@ -192,6 +269,58 @@ pub const Mnemonic = enum {
fsgnjnd,
fsgnjxd,
// V Extension
vle8v,
vle16v,
vle32v,
vle64v,
vse8v,
vse16v,
vse32v,
vse64v,
vsoxei8v,
vaddvv,
vsubvv,
vfaddvv,
vfsubvv,
vadcvv,
vmvvx,
vslidedownvx,
// MISC
fence,
fencetso,
// AMO
amoswapw,
amoaddw,
amoandw,
amoorw,
amoxorw,
amomaxw,
amominw,
amomaxuw,
amominuw,
amoswapd,
amoaddd,
amoandd,
amoord,
amoxord,
amomaxd,
amomind,
amomaxud,
amominud,
// TODO: Q extension
pub fn encoding(mnem: Mnemonic) Enc {
return switch (mnem) {
// zig fmt: off
@ -322,14 +451,25 @@ pub const Mnemonic = enum {
// LOAD_FP
.flw => .{ .opcode = .LOAD_FP, .data = .{ .f = .{ .funct3 = 0b010 } } },
.fld => .{ .opcode = .LOAD_FP, .data = .{ .f = .{ .funct3 = 0b011 } } },
.fld => .{ .opcode = .LOAD_FP, .data = .{ .f = .{ .funct3 = 0b011 } } },
.vle8v => .{ .opcode = .LOAD_FP, .data = .{ .vecls = .{ .width = .@"8", .umop = .unit, .vm = true, .mop = .unit, .mew = false, .nf = 0b000 } } },
.vle16v => .{ .opcode = .LOAD_FP, .data = .{ .vecls = .{ .width = .@"16", .umop = .unit, .vm = true, .mop = .unit, .mew = false, .nf = 0b000 } } },
.vle32v => .{ .opcode = .LOAD_FP, .data = .{ .vecls = .{ .width = .@"32", .umop = .unit, .vm = true, .mop = .unit, .mew = false, .nf = 0b000 } } },
.vle64v => .{ .opcode = .LOAD_FP, .data = .{ .vecls = .{ .width = .@"64", .umop = .unit, .vm = true, .mop = .unit, .mew = false, .nf = 0b000 } } },
// STORE_FP
.fsw => .{ .opcode = .STORE_FP, .data = .{ .f = .{ .funct3 = 0b010 } } },
.fsd => .{ .opcode = .STORE_FP, .data = .{ .f = .{ .funct3 = 0b011 } } },
.fsw => .{ .opcode = .STORE_FP, .data = .{ .f = .{ .funct3 = 0b010 } } },
.fsd => .{ .opcode = .STORE_FP, .data = .{ .f = .{ .funct3 = 0b011 } } },
.vse8v => .{ .opcode = .STORE_FP, .data = .{ .vecls = .{ .width = .@"8", .umop = .unit, .vm = true, .mop = .unit, .mew = false, .nf = 0b000 } } },
.vse16v => .{ .opcode = .STORE_FP, .data = .{ .vecls = .{ .width = .@"16", .umop = .unit, .vm = true, .mop = .unit, .mew = false, .nf = 0b000 } } },
.vse32v => .{ .opcode = .STORE_FP, .data = .{ .vecls = .{ .width = .@"32", .umop = .unit, .vm = true, .mop = .unit, .mew = false, .nf = 0b000 } } },
.vse64v => .{ .opcode = .STORE_FP, .data = .{ .vecls = .{ .width = .@"64", .umop = .unit, .vm = true, .mop = .unit, .mew = false, .nf = 0b000 } } },
.vsoxei8v => .{ .opcode = .STORE_FP, .data = .{ .vecls = .{ .width = .@"8", .umop = .unit, .vm = true, .mop = .ord, .mew = false, .nf = 0b000 } } },
// JALR
@ -360,6 +500,8 @@ pub const Mnemonic = enum {
.ecall => .{ .opcode = .SYSTEM, .data = .{ .f = .{ .funct3 = 0b000 } } },
.ebreak => .{ .opcode = .SYSTEM, .data = .{ .f = .{ .funct3 = 0b000 } } },
.csrrs => .{ .opcode = .SYSTEM, .data = .{ .f = .{ .funct3 = 0b010 } } },
// NONE
@ -367,6 +509,52 @@ pub const Mnemonic = enum {
.unimp => .{ .opcode = .NONE, .data = .{ .f = .{ .funct3 = 0b000 } } },
// MISC_MEM
.fence => .{ .opcode = .MISC_MEM, .data = .{ .fence = .{ .funct3 = 0b000, .fm = .none } } },
.fencetso => .{ .opcode = .MISC_MEM, .data = .{ .fence = .{ .funct3 = 0b000, .fm = .tso } } },
// AMO
.amoaddw => .{ .opcode = .AMO, .data = .{ .amo = .{ .width = .W, .funct5 = 0b00000 } } },
.amoswapw => .{ .opcode = .AMO, .data = .{ .amo = .{ .width = .W, .funct5 = 0b00001 } } },
// LR.W
// SC.W
.amoxorw => .{ .opcode = .AMO, .data = .{ .amo = .{ .width = .W, .funct5 = 0b00100 } } },
.amoandw => .{ .opcode = .AMO, .data = .{ .amo = .{ .width = .W, .funct5 = 0b01100 } } },
.amoorw => .{ .opcode = .AMO, .data = .{ .amo = .{ .width = .W, .funct5 = 0b01000 } } },
.amominw => .{ .opcode = .AMO, .data = .{ .amo = .{ .width = .W, .funct5 = 0b10000 } } },
.amomaxw => .{ .opcode = .AMO, .data = .{ .amo = .{ .width = .W, .funct5 = 0b10100 } } },
.amominuw => .{ .opcode = .AMO, .data = .{ .amo = .{ .width = .W, .funct5 = 0b11000 } } },
.amomaxuw => .{ .opcode = .AMO, .data = .{ .amo = .{ .width = .W, .funct5 = 0b11100 } } },
.amoaddd => .{ .opcode = .AMO, .data = .{ .amo = .{ .width = .D, .funct5 = 0b00000 } } },
.amoswapd => .{ .opcode = .AMO, .data = .{ .amo = .{ .width = .D, .funct5 = 0b00001 } } },
// LR.D
// SC.D
.amoxord => .{ .opcode = .AMO, .data = .{ .amo = .{ .width = .D, .funct5 = 0b00100 } } },
.amoandd => .{ .opcode = .AMO, .data = .{ .amo = .{ .width = .D, .funct5 = 0b01100 } } },
.amoord => .{ .opcode = .AMO, .data = .{ .amo = .{ .width = .D, .funct5 = 0b01000 } } },
.amomind => .{ .opcode = .AMO, .data = .{ .amo = .{ .width = .D, .funct5 = 0b10000 } } },
.amomaxd => .{ .opcode = .AMO, .data = .{ .amo = .{ .width = .D, .funct5 = 0b10100 } } },
.amominud => .{ .opcode = .AMO, .data = .{ .amo = .{ .width = .D, .funct5 = 0b11000 } } },
.amomaxud => .{ .opcode = .AMO, .data = .{ .amo = .{ .width = .D, .funct5 = 0b11100 } } },
// OP_V
.vsetivli => .{ .opcode = .OP_V, .data = .{ .f = .{ .funct3 = 0b111 } } },
.vsetvli => .{ .opcode = .OP_V, .data = .{ .f = .{ .funct3 = 0b111 } } },
.vaddvv => .{ .opcode = .OP_V, .data = .{ .vecmath = .{ .vm = true, .funct6 = 0b000000, .funct3 = .OPIVV } } },
.vsubvv => .{ .opcode = .OP_V, .data = .{ .vecmath = .{ .vm = true, .funct6 = 0b000010, .funct3 = .OPIVV } } },
.vfaddvv => .{ .opcode = .OP_V, .data = .{ .vecmath = .{ .vm = true, .funct6 = 0b000000, .funct3 = .OPFVV } } },
.vfsubvv => .{ .opcode = .OP_V, .data = .{ .vecmath = .{ .vm = true, .funct6 = 0b000010, .funct3 = .OPFVV } } },
.vadcvv => .{ .opcode = .OP_V, .data = .{ .vecmath = .{ .vm = true, .funct6 = 0b010000, .funct3 = .OPMVV } } },
.vmvvx => .{ .opcode = .OP_V, .data = .{ .vecmath = .{ .vm = true, .funct6 = 0b010111, .funct3 = .OPIVX } } },
.vslidedownvx => .{ .opcode = .OP_V, .data = .{ .vecmath = .{ .vm = true, .funct6 = 0b001111, .funct3 = .OPIVX } } },
// zig fmt: on
};
}
@ -380,8 +568,8 @@ pub const InstEnc = enum {
B,
U,
J,
/// extras that have unusual op counts
fence,
amo,
system,
pub fn fromMnemonic(mnem: Mnemonic) InstEnc {
@ -410,6 +598,10 @@ pub const InstEnc = enum {
.flw,
.fld,
.csrrs,
.vsetivli,
.vsetvli,
=> .I,
.lui,
@ -503,26 +695,73 @@ pub const InstEnc = enum {
.fsgnjxs,
.fsgnjxd,
.vle8v,
.vle16v,
.vle32v,
.vle64v,
.vse8v,
.vse16v,
.vse32v,
.vse64v,
.vsoxei8v,
.vaddvv,
.vsubvv,
.vfaddvv,
.vfsubvv,
.vadcvv,
.vmvvx,
.vslidedownvx,
=> .R,
.ecall,
.ebreak,
.unimp,
=> .system,
.fence,
.fencetso,
=> .fence,
.amoswapw,
.amoaddw,
.amoandw,
.amoorw,
.amoxorw,
.amomaxw,
.amominw,
.amomaxuw,
.amominuw,
.amoswapd,
.amoaddd,
.amoandd,
.amoord,
.amoxord,
.amomaxd,
.amomind,
.amomaxud,
.amominud,
=> .amo,
};
}
pub fn opsList(enc: InstEnc) [4]std.meta.FieldEnum(Operand) {
pub fn opsList(enc: InstEnc) [5]std.meta.FieldEnum(Operand) {
return switch (enc) {
// zig fmt: off
.R => .{ .reg, .reg, .reg, .none },
.R4 => .{ .reg, .reg, .reg, .reg },
.I => .{ .reg, .reg, .imm, .none },
.S => .{ .reg, .reg, .imm, .none },
.B => .{ .reg, .reg, .imm, .none },
.U => .{ .reg, .imm, .none, .none },
.J => .{ .reg, .imm, .none, .none },
.system => .{ .none, .none, .none, .none },
.R => .{ .reg, .reg, .reg, .none, .none, },
.R4 => .{ .reg, .reg, .reg, .reg, .none, },
.I => .{ .reg, .reg, .imm, .none, .none, },
.S => .{ .reg, .reg, .imm, .none, .none, },
.B => .{ .reg, .reg, .imm, .none, .none, },
.U => .{ .reg, .imm, .none, .none, .none, },
.J => .{ .reg, .imm, .none, .none, .none, },
.system => .{ .none, .none, .none, .none, .none, },
.fence => .{ .barrier, .barrier, .none, .none, .none, },
.amo => .{ .reg, .reg, .reg, .barrier, .barrier },
// zig fmt: on
};
}
@ -584,20 +823,38 @@ pub const Data = union(InstEnc) {
imm1_10: u10,
imm20: u1,
},
system: void,
fence: packed struct {
opcode: u7,
rd: u5 = 0,
funct3: u3,
rs1: u5 = 0,
succ: u4,
pred: u4,
fm: u4,
},
amo: packed struct {
opcode: u7,
rd: u5,
funct3: u3,
rs1: u5,
rs2: u5,
rl: bool,
aq: bool,
funct5: u5,
},
system: u32,
comptime {
for (std.meta.fields(Data)) |field| {
assert(@bitSizeOf(field.type) == 32);
}
}
pub fn toU32(self: Data) u32 {
return switch (self) {
// zig fmt: off
.R => |v| @bitCast(v),
.R4 => |v| @bitCast(v),
.I => |v| @bitCast(v),
.S => |v| @bitCast(v),
.B => |v| @as(u32, @intCast(v.opcode)) + (@as(u32, @intCast(v.imm11)) << 7) + (@as(u32, @intCast(v.imm1_4)) << 8) + (@as(u32, @intCast(v.funct3)) << 12) + (@as(u32, @intCast(v.rs1)) << 15) + (@as(u32, @intCast(v.rs2)) << 20) + (@as(u32, @intCast(v.imm5_10)) << 25) + (@as(u32, @intCast(v.imm12)) << 31),
.U => |v| @bitCast(v),
.J => |v| @bitCast(v),
.fence => |v| @as(u32, @intCast(v.opcode)) + (@as(u32, @intCast(v.rd)) << 7) + (@as(u32, @intCast(v.funct3)) << 12) + (@as(u32, @intCast(v.rs1)) << 15) + (@as(u32, @intCast(v.succ)) << 20) + (@as(u32, @intCast(v.pred)) << 24) + (@as(u32, @intCast(v.fm)) << 28),
inline else => |v| @bitCast(v),
.system => unreachable,
// zig fmt: on
};
}
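The fence layout above follows the base-ISA FENCE format (fm in the top nibble, then pred, succ, rs1, funct3, rd, opcode). A hedged sanity check, assuming the packed-struct field order shown; `fence rw, rw` should encode to 0x0330000f:

comptime {
    // Hedged check of the fence packing via toU32 (not part of the patch).
    const word = (Data{ .fence = .{
        .opcode = 0b0001111, // MISC_MEM
        .funct3 = 0b000,
        .succ = 0b0011, // rw
        .pred = 0b0011, // rw
        .fm = 0b0000, // normal fence
    } }).toU32();
    assert(word == 0x0330000f);
}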
@ -628,6 +885,25 @@ pub const Data = union(InstEnc) {
},
};
},
.csrrs => {
assert(ops.len == 3);
const csr = ops[0].csr;
const rs1 = ops[1].reg;
const rd = ops[2].reg;
return .{
.I = .{
.rd = rd.encodeId(),
.rs1 = rs1.encodeId(),
.imm0_11 = @intFromEnum(csr),
.opcode = @intFromEnum(enc.opcode),
.funct3 = enc.data.f.funct3,
},
};
},
else => {},
}
@ -654,6 +930,25 @@ pub const Data = union(InstEnc) {
.funct3 = fmt.rm,
.funct7 = (@as(u7, fmt.funct5) << 2) | @intFromEnum(fmt.fmt),
},
.vecls => |vec| .{
.rd = ops[0].reg.encodeId(),
.rs1 = ops[1].reg.encodeId(),
.rs2 = @intFromEnum(vec.umop),
.opcode = @intFromEnum(enc.opcode),
.funct3 = @intFromEnum(vec.width),
.funct7 = (@as(u7, vec.nf) << 4) | (@as(u7, @intFromBool(vec.mew)) << 3) | (@as(u7, @intFromEnum(vec.mop)) << 1) | @intFromBool(vec.vm),
},
.vecmath => |vec| .{
.rd = ops[0].reg.encodeId(),
.rs1 = ops[1].reg.encodeId(),
.rs2 = ops[2].reg.encodeId(),
.opcode = @intFromEnum(enc.opcode),
.funct3 = @intFromEnum(vec.funct3),
.funct7 = (@as(u7, vec.funct6) << 1) | @intFromBool(vec.vm),
},
else => unreachable,
},
};
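For reference, the vecls packing above mirrors the V-extension unit-stride load/store layout. A hedged worked example (values assumed for illustration, not taken from the patch):

// vle32.v v1, (a0):
//   funct7 = (nf << 4) | (mew << 3) | (mop << 1) | vm
//          = (0 << 4)  | (0 << 3)   | (0b00 << 1) | 1 = 0b0000001
//   word   = funct7 : rs2(umop = 0b00000) : rs1(a0 = 10) : funct3(width = 0b110) : rd(v1 = 1) : opcode(LOAD_FP)
//          = 0x02056087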
@ -748,7 +1043,48 @@ pub const Data = union(InstEnc) {
},
};
},
.fence => {
assert(ops.len == 2);
const succ = ops[0].barrier;
const pred = ops[1].barrier;
return .{
.fence = .{
.succ = @intFromEnum(succ),
.pred = @intFromEnum(pred),
.opcode = @intFromEnum(enc.opcode),
.funct3 = enc.data.fence.funct3,
.fm = @intFromEnum(enc.data.fence.fm),
},
};
},
.amo => {
assert(ops.len == 5);
const rd = ops[0].reg;
const rs1 = ops[1].reg;
const rs2 = ops[2].reg;
const rl = ops[3].barrier;
const aq = ops[4].barrier;
return .{
.amo = .{
.rd = rd.encodeId(),
.rs1 = rs1.encodeId(),
.rs2 = rs2.encodeId(),
// TODO: https://github.com/ziglang/zig/issues/20113
.rl = if (rl == .rl) true else false,
.aq = if (aq == .aq) true else false,
.opcode = @intFromEnum(enc.opcode),
.funct3 = @intFromEnum(enc.data.amo.width),
.funct5 = enc.data.amo.funct5,
},
};
},
else => std.debug.panic("TODO: construct {s}", .{@tagName(inst_enc)}),
}
}
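A hedged example of Zig source that should reach the new AMO encodings once lowered: a 32-bit atomic add is expected to select amoadd.w (assumption; the exact register and ordering operands depend on the surrounding codegen):

fn bumpCounter(counter: *u32) u32 {
    // Expected to lower to a single `amoadd.w` on riscv64 with the A extension.
    return @atomicRmw(u32, counter, .Add, 1, .seq_cst);
}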

View File

@ -40,7 +40,9 @@ pub const Reloc = struct {
};
/// The returned slice is overwritten by the next call to lowerMir.
pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct {
pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index, options: struct {
allow_frame_locs: bool,
}) Error!struct {
insts: []const Instruction,
relocs: []const Reloc,
} {
@ -69,64 +71,102 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct {
.pseudo_load_rm, .pseudo_store_rm => {
const rm = inst.data.rm;
const frame_loc = rm.m.toFrameLoc(lower.mir);
const frame_loc: Mir.FrameLoc = if (options.allow_frame_locs)
rm.m.toFrameLoc(lower.mir)
else
.{ .base = .s0, .disp = 0 };
switch (inst.ops) {
.pseudo_load_rm => {
const dest_reg = rm.r;
const dest_reg_class = dest_reg.class();
const float = dest_reg_class == .float;
const src_size = rm.m.mod.size;
const unsigned = rm.m.mod.unsigned;
const tag: Encoding.Mnemonic = if (!float)
switch (src_size) {
const tag: Encoding.Mnemonic = switch (dest_reg_class) {
.int => switch (src_size) {
.byte => if (unsigned) .lbu else .lb,
.hword => if (unsigned) .lhu else .lh,
.word => if (unsigned) .lwu else .lw,
.dword => .ld,
}
else switch (src_size) {
.byte => unreachable, // Zig does not support 8-bit floats
.hword => return lower.fail("TODO: lowerMir pseudo_load_rm support 16-bit floats", .{}),
.word => .flw,
.dword => .fld,
},
.float => switch (src_size) {
.byte => unreachable, // Zig does not support 8-bit floats
.hword => return lower.fail("TODO: lowerMir pseudo_load_rm support 16-bit floats", .{}),
.word => .flw,
.dword => .fld,
},
.vector => switch (src_size) {
.byte => .vle8v,
.hword => .vle16v,
.word => .vle32v,
.dword => .vle64v,
},
};
try lower.emit(tag, &.{
.{ .reg = rm.r },
.{ .reg = frame_loc.base },
.{ .imm = Immediate.s(frame_loc.disp) },
});
switch (dest_reg_class) {
.int, .float => {
try lower.emit(tag, &.{
.{ .reg = rm.r },
.{ .reg = frame_loc.base },
.{ .imm = Immediate.s(frame_loc.disp) },
});
},
.vector => {
assert(frame_loc.disp == 0);
try lower.emit(tag, &.{
.{ .reg = rm.r },
.{ .reg = frame_loc.base },
.{ .reg = .zero },
});
},
}
},
.pseudo_store_rm => {
const src_reg = rm.r;
const src_reg_class = src_reg.class();
const float = src_reg_class == .float;
// TODO: do we actually need this? are all stores not usize?
const dest_size = rm.m.mod.size;
const tag: Encoding.Mnemonic = if (!float)
switch (dest_size) {
const tag: Encoding.Mnemonic = switch (src_reg_class) {
.int => switch (dest_size) {
.byte => .sb,
.hword => .sh,
.word => .sw,
.dword => .sd,
}
else switch (dest_size) {
.byte => unreachable, // Zig does not support 8-bit floats
.hword => return lower.fail("TODO: lowerMir pseudo_load_rm support 16-bit floats", .{}),
.word => .fsw,
.dword => .fsd,
},
.float => switch (dest_size) {
.byte => unreachable, // Zig does not support 8-bit floats
.hword => return lower.fail("TODO: lowerMir pseudo_store_rm support 16-bit floats", .{}),
.word => .fsw,
.dword => .fsd,
},
.vector => switch (dest_size) {
.byte => .vse8v,
.hword => .vse16v,
.word => .vse32v,
.dword => .vse64v,
},
};
try lower.emit(tag, &.{
.{ .reg = frame_loc.base },
.{ .reg = rm.r },
.{ .imm = Immediate.s(frame_loc.disp) },
});
switch (src_reg_class) {
.int, .float => {
try lower.emit(tag, &.{
.{ .reg = frame_loc.base },
.{ .reg = rm.r },
.{ .imm = Immediate.s(frame_loc.disp) },
});
},
.vector => {
assert(frame_loc.disp == 0);
try lower.emit(tag, &.{
.{ .reg = rm.r },
.{ .reg = frame_loc.base },
.{ .reg = .zero },
});
},
}
},
else => unreachable,
}
@ -138,34 +178,47 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct {
const dst_class = rr.rd.class();
const src_class = rr.rs.class();
assert(dst_class == src_class);
switch (dst_class) {
.float => {
try lower.emit(if (lower.hasFeature(.d)) .fsgnjnd else .fsgnjns, &.{
.{ .reg = rr.rd },
.{ .reg = rr.rs },
.{ .reg = rr.rs },
});
switch (src_class) {
.float => switch (dst_class) {
.float => {
try lower.emit(if (lower.hasFeature(.d)) .fsgnjnd else .fsgnjns, &.{
.{ .reg = rr.rd },
.{ .reg = rr.rs },
.{ .reg = rr.rs },
});
},
.int, .vector => return lower.fail("TODO: lowerMir pseudo_mv float -> {s}", .{@tagName(dst_class)}),
},
.int => {
try lower.emit(.addi, &.{
.{ .reg = rr.rd },
.{ .reg = rr.rs },
.{ .imm = Immediate.s(0) },
});
.int => switch (dst_class) {
.int => {
try lower.emit(.addi, &.{
.{ .reg = rr.rd },
.{ .reg = rr.rs },
.{ .imm = Immediate.s(0) },
});
},
.vector => {
try lower.emit(.vmvvx, &.{
.{ .reg = rr.rd },
.{ .reg = rr.rs },
.{ .reg = .x0 },
});
},
.float => return lower.fail("TODO: lowerMir pseudo_mv int -> {s}", .{@tagName(dst_class)}),
},
.vector => switch (dst_class) {
.int => {
try lower.emit(.vadcvv, &.{
.{ .reg = rr.rd },
.{ .reg = .zero },
.{ .reg = rr.rs },
});
},
.float, .vector => return lower.fail("TODO: lowerMir pseudo_mv vector -> {s}", .{@tagName(dst_class)}),
},
}
},
.pseudo_ret => {
try lower.emit(.jalr, &.{
.{ .reg = .zero },
.{ .reg = .ra },
.{ .imm = Immediate.s(0) },
});
},
.pseudo_j => {
try lower.emit(.jal, &.{
.{ .reg = .zero },
@ -204,7 +257,10 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct {
const rm = inst.data.rm;
assert(rm.r.class() == .int);
const frame = rm.m.toFrameLoc(lower.mir);
const frame: Mir.FrameLoc = if (options.allow_frame_locs)
rm.m.toFrameLoc(lower.mir)
else
.{ .base = .s0, .disp = 0 };
try lower.emit(.addi, &.{
.{ .reg = rm.r },
@ -371,6 +427,7 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct {
});
},
},
.vector => return lower.fail("TODO: lowerMir pseudo_cmp vector", .{}),
}
},
@ -378,7 +435,14 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct {
const rr = inst.data.rr;
assert(rr.rs.class() == .int and rr.rd.class() == .int);
try lower.emit(.xori, &.{
// mask out any other bits that aren't the boolean
try lower.emit(.andi, &.{
.{ .reg = rr.rs },
.{ .reg = rr.rs },
.{ .imm = Immediate.s(1) },
});
try lower.emit(.sltiu, &.{
.{ .reg = rr.rd },
.{ .reg = rr.rs },
.{ .imm = Immediate.s(1) },
@ -405,6 +469,44 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct {
});
},
.pseudo_amo => {
const amo = inst.data.amo;
const is_d = amo.ty.abiSize(pt) == 8;
const is_un = amo.ty.isUnsignedInt(pt.zcu);
const mnem: Encoding.Mnemonic = switch (amo.op) {
// zig fmt: off
.SWAP => if (is_d) .amoswapd else .amoswapw,
.ADD => if (is_d) .amoaddd else .amoaddw,
.AND => if (is_d) .amoandd else .amoandw,
.OR => if (is_d) .amoord else .amoorw,
.XOR => if (is_d) .amoxord else .amoxorw,
.MAX => if (is_d) if (is_un) .amomaxud else .amomaxd else if (is_un) .amomaxuw else .amomaxw,
.MIN => if (is_d) if (is_un) .amominud else .amomind else if (is_un) .amominuw else .amominw,
// zig fmt: on
};
try lower.emit(mnem, &.{
.{ .reg = inst.data.amo.rd },
.{ .reg = inst.data.amo.rs1 },
.{ .reg = inst.data.amo.rs2 },
.{ .barrier = inst.data.amo.rl },
.{ .barrier = inst.data.amo.aq },
});
},
.pseudo_fence => {
const fence = inst.data.fence;
try lower.emit(switch (fence.fm) {
.tso => .fencetso,
.none => .fence,
}, &.{
.{ .barrier = fence.succ },
.{ .barrier = fence.pred },
});
},
else => return lower.fail("TODO lower: pseudo {s}", .{@tagName(inst.ops)}),
},
}
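A hedged source-level counterpart for the new pseudo_fence path: a sequentially consistent fence, which is expected to lower to `fence rw, rw` (the exact pred/succ sets chosen are an assumption):

fn publish(flag: *bool, data: *u32) void {
    data.* = 42;
    @fence(.seq_cst); // expected: `fence rw, rw` via pseudo_fence
    @atomicStore(bool, flag, true, .monotonic);
}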
@ -447,6 +549,11 @@ fn generic(lower: *Lower, inst: Mir.Inst) Error!void {
.{ .reg = inst.data.r_type.rs1 },
.{ .reg = inst.data.r_type.rs2 },
},
.csr => &.{
.{ .csr = inst.data.csr.csr },
.{ .reg = inst.data.csr.rs1 },
.{ .reg = inst.data.csr.rd },
},
else => return lower.fail("TODO: generic lower ops {s}", .{@tagName(inst.ops)}),
});
}
@ -473,17 +580,22 @@ fn pushPopRegList(lower: *Lower, comptime spilling: bool, reg_list: Mir.Register
while (it.next()) |i| {
const frame = lower.mir.frame_locs.get(@intFromEnum(bits.FrameIndex.spill_frame));
const reg = abi.Registers.all_preserved[i];
const reg_class = reg.class();
const is_float_reg = reg_class == .float;
const load_inst: Encoding.Mnemonic, const store_inst: Encoding.Mnemonic = switch (reg_class) {
.int => .{ .ld, .sd },
.float => .{ .fld, .fsd },
.vector => unreachable,
};
if (spilling) {
try lower.emit(if (is_float_reg) .fsd else .sd, &.{
try lower.emit(store_inst, &.{
.{ .reg = frame.base },
.{ .reg = abi.Registers.all_preserved[i] },
.{ .imm = Immediate.s(frame.disp + reg_i) },
});
} else {
try lower.emit(if (is_float_reg) .fld else .ld, &.{
try lower.emit(load_inst, &.{
.{ .reg = abi.Registers.all_preserved[i] },
.{ .reg = frame.base },
.{ .imm = Immediate.s(frame.disp + reg_i) },

View File

@ -31,6 +31,7 @@ pub const Inst = struct {
@"and",
andi,
xori,
xor,
@"or",
@ -133,6 +134,19 @@ pub const Inst = struct {
fltd,
fled,
// Zicsr Extension Instructions
csrrs,
// V Extension Instructions
vsetvli,
vsetivli,
vsetvl,
vaddvv,
vfaddvv,
vsubvv,
vfsubvv,
vslidedownvx,
/// A pseudo-instruction. Used for anything that isn't 1:1 with an
/// assembly instruction.
pseudo,
@ -142,91 +156,57 @@ pub const Inst = struct {
/// this union. `Ops` determines which union field is active, as well as
/// how to interpret the data within.
pub const Data = union {
/// No additional data
///
/// Used by e.g. ebreak
nop: void,
/// Another instruction.
///
/// Used by e.g. b
inst: Index,
/// Index into `extra`. Meaning of what can be found there is context-dependent.
///
/// Used by e.g. load_memory
payload: u32,
r_type: struct {
rd: Register,
rs1: Register,
rs2: Register,
},
i_type: struct {
rd: Register,
rs1: Register,
imm12: Immediate,
},
s_type: struct {
rs1: Register,
rs2: Register,
imm5: Immediate,
imm7: Immediate,
},
b_type: struct {
rs1: Register,
rs2: Register,
inst: Inst.Index,
},
u_type: struct {
rd: Register,
imm20: Immediate,
},
j_type: struct {
rd: Register,
inst: Inst.Index,
},
/// Debug info: line and column
///
/// Used by e.g. pseudo_dbg_line
pseudo_dbg_line_column: struct {
line: u32,
column: u32,
},
// Custom types to be lowered
/// Register + Memory
rm: struct {
r: Register,
m: Memory,
},
reg_list: Mir.RegisterList,
/// A register
///
/// Used by e.g. blr
reg: Register,
/// Two registers
///
/// Used by e.g. mv
rr: struct {
rd: Register,
rs: Register,
},
fabs: struct {
rd: Register,
rs: Register,
bits: u16,
},
compare: struct {
rd: Register,
rs1: Register,
@ -241,11 +221,32 @@ pub const Inst = struct {
},
ty: Type,
},
reloc: struct {
atom_index: u32,
sym_index: u32,
},
fence: struct {
pred: Barrier,
succ: Barrier,
fm: enum {
none,
tso,
},
},
amo: struct {
rd: Register,
rs1: Register,
rs2: Register,
aq: Barrier,
rl: Barrier,
op: AmoOp,
ty: Type,
},
csr: struct {
csr: CSR,
rs1: Register,
rd: Register,
},
};
pub const Ops = enum {
@ -270,6 +271,9 @@ pub const Inst = struct {
/// Another instruction.
inst,
/// Control and Status Register Instruction.
csr,
/// Pseudo-instruction that will generate a backpatched
/// function prologue.
pseudo_prologue,
@ -298,11 +302,6 @@ pub const Inst = struct {
/// Uses `rm` payload.
pseudo_lea_rm,
/// Shorthand for returning, aka jumping to ra register.
///
/// Uses nop payload.
pseudo_ret,
/// Jumps. Uses `inst` payload.
pseudo_j,
@ -326,19 +325,19 @@ pub const Inst = struct {
pseudo_spill_regs,
pseudo_compare,
/// NOT operation on booleans. Does an `andi reg, reg, 1` to mask out any other bits from the boolean, then an `sltiu` against 1 to invert it.
pseudo_not,
/// Generates an auipc + jalr pair, with a R_RISCV_CALL_PLT reloc
pseudo_extern_fn_reloc,
};
// Make sure we don't accidentally make instructions bigger than expected.
// Note that in Debug builds, Zig is allowed to insert a secret field for safety checks.
// comptime {
// if (builtin.mode != .Debug) {
// assert(@sizeOf(Inst) == 8);
// }
// }
/// IORW, IORW
pseudo_fence,
/// Ordering, Src, Addr, Dest
pseudo_amo,
};
pub fn format(
inst: Inst,
@ -365,6 +364,28 @@ pub const FrameLoc = struct {
disp: i32,
};
pub const Barrier = enum(u4) {
// Fence
w = 0b0001,
r = 0b0010,
rw = 0b0011,
// Amo
none,
aq,
rl,
};
pub const AmoOp = enum(u5) {
SWAP,
ADD,
AND,
OR,
XOR,
MAX,
MIN,
};
/// Returns the requested data, as well as the new index which is at the start of the
/// trailers for the object.
pub fn extraData(mir: Mir, comptime T: type, index: usize) struct { data: T, end: usize } {
@ -437,6 +458,7 @@ const assert = std.debug.assert;
const bits = @import("bits.zig");
const Register = bits.Register;
const CSR = bits.CSR;
const Immediate = bits.Immediate;
const Memory = bits.Memory;
const FrameIndex = bits.FrameIndex;

View File

@ -193,6 +193,15 @@ pub fn classifySystem(ty: Type, pt: Zcu.PerThread) [8]SystemClass {
}
return memory_class;
},
.Vector => {
// we pass vectors through integer registers if they are small enough to fit.
const vec_bits = ty.totalVectorBits(pt);
if (vec_bits <= 64) {
result[0] = .integer;
return result;
}
return memory_class;
},
else => |bad_ty| std.debug.panic("classifySystem {s}", .{@tagName(bad_ty)}),
}
}
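A hedged illustration of the new rule (the type names are made up for the example): a vector of 64 bits or fewer is classified as .integer and is passed in an integer register, anything larger falls back to the memory class:

const SmallVec = @Vector(8, u8); // 64 bits  -> result[0] = .integer
const BigVec = @Vector(4, u32); // 128 bits -> memory_class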
@ -254,15 +263,15 @@ fn classifyStruct(
}
}
const allocatable_registers = Registers.Integer.all_regs ++ Registers.Float.all_regs;
const allocatable_registers = Registers.Integer.all_regs ++ Registers.Float.all_regs ++ Registers.Vector.all_regs;
pub const RegisterManager = RegisterManagerFn(@import("CodeGen.zig"), Register, &allocatable_registers);
// Register classes
const RegisterBitSet = RegisterManager.RegisterBitSet;
pub const RegisterClass = enum {
int,
float,
vector,
};
pub const Registers = struct {
@ -322,6 +331,19 @@ pub const Registers = struct {
pub const all_regs = callee_preserved_regs ++ function_arg_regs ++ temporary_regs;
};
pub const Vector = struct {
pub const general_purpose = initRegBitSet(Integer.all_regs.len + Float.all_regs.len, all_regs.len);
// zig fmt: off
pub const all_regs = [_]Register{
.v0, .v1, .v2, .v3, .v4, .v5, .v6, .v7,
.v8, .v9, .v10, .v11, .v12, .v13, .v14, .v15,
.v16, .v17, .v18, .v19, .v20, .v21, .v22, .v23,
.v24, .v25, .v26, .v27, .v28, .v29, .v30, .v31,
};
// zig fmt: on
};
};
fn initRegBitSet(start: usize, length: usize) RegisterBitSet {

View File

@ -41,7 +41,7 @@ pub const Memory = struct {
2...2 => .hword,
3...4 => .word,
5...8 => .dword,
else => unreachable,
else => std.debug.panic("fromByteSize {}", .{size}),
};
}
@ -128,6 +128,12 @@ pub const Immediate = union(enum) {
}
};
pub const CSR = enum(u12) {
vl = 0xC20,
vtype = 0xC21,
vlenb = 0xC22,
};
pub const Register = enum(u8) {
// zig fmt: off
@ -169,6 +175,13 @@ pub const Register = enum(u8) {
f16, f17, f18, f19, f20, f21, f22, f23,
f24, f25, f26, f27, f28, f29, f30, f31,
// V extension registers
v0, v1, v2, v3, v4, v5, v6, v7,
v8, v9, v10, v11, v12, v13, v14, v15,
v16, v17, v18, v19, v20, v21, v22, v23,
v24, v25, v26, v27, v28, v29, v30, v31,
// zig fmt: on
/// in RISC-V registers are stored as 5 bit IDs and a register can have
@ -180,11 +193,12 @@ pub const Register = enum(u8) {
/// The goal of this function is to return the same ID for `zero` and `x0` but two
/// separate IDs for `x0` and `f0`. We will assume that each register set has 32 registers
/// and is repeated twice, once for the named version, once for the number version.
pub fn id(reg: Register) u7 {
pub fn id(reg: Register) u8 {
const base = switch (@intFromEnum(reg)) {
// zig fmt: off
@intFromEnum(Register.zero) ... @intFromEnum(Register.x31) => @intFromEnum(Register.zero),
@intFromEnum(Register.ft0) ... @intFromEnum(Register.f31) => @intFromEnum(Register.ft0),
@intFromEnum(Register.v0) ... @intFromEnum(Register.v31) => @intFromEnum(Register.v0),
else => unreachable,
// zig fmt: on
};
@ -207,6 +221,7 @@ pub const Register = enum(u8) {
// zig fmt: off
@intFromEnum(Register.zero) ... @intFromEnum(Register.x31) => 64,
@intFromEnum(Register.ft0) ... @intFromEnum(Register.f31) => if (Target.riscv.featureSetHas(features, .d)) 64 else 32,
@intFromEnum(Register.v0) ... @intFromEnum(Register.v31) => 256, // TODO: look at suggestVectorSize
else => unreachable,
// zig fmt: on
};
@ -217,6 +232,7 @@ pub const Register = enum(u8) {
// zig fmt: off
@intFromEnum(Register.zero) ... @intFromEnum(Register.x31) => .int,
@intFromEnum(Register.ft0) ... @intFromEnum(Register.f31) => .float,
@intFromEnum(Register.v0) ... @intFromEnum(Register.v31) => .vector,
else => unreachable,
// zig fmt: on
};
@ -272,3 +288,27 @@ pub const Symbol = struct {
/// Index into the linker's symbol table.
sym_index: u32,
};
pub const VType = packed struct(u8) {
vlmul: VlMul,
vsew: VSew,
vta: bool,
vma: bool,
};
const VSew = enum(u3) {
@"8" = 0b000,
@"16" = 0b001,
@"32" = 0b010,
@"64" = 0b011,
};
const VlMul = enum(u3) {
mf8 = 0b101,
mf4 = 0b110,
mf2 = 0b111,
m1 = 0b000,
m2 = 0b001,
m4 = 0b010,
m8 = 0b011,
};
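A hedged sanity check of the VType layout above (assuming `std` is imported in this file): the vtype word for e32, m1, ta, ma should come out as 0b1101_0000:

comptime {
    // Hedged check, not part of the patch: vlmul in bits 0..2, vsew in 3..5, vta bit 6, vma bit 7.
    const vt: VType = .{ .vlmul = .m1, .vsew = .@"32", .vta = true, .vma = true };
    std.debug.assert(@as(u8, @bitCast(vt)) == 0b1101_0000);
}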

View File

@ -1,26 +1,30 @@
pub const Instruction = struct {
encoding: Encoding,
ops: [3]Operand = .{.none} ** 3,
ops: [5]Operand = .{.none} ** 5,
pub const Operand = union(enum) {
none,
reg: Register,
csr: CSR,
mem: Memory,
imm: Immediate,
barrier: Mir.Barrier,
};
pub fn new(mnemonic: Encoding.Mnemonic, ops: []const Operand) !Instruction {
const encoding = (try Encoding.findByMnemonic(mnemonic, ops)) orelse {
std.log.err("no encoding found for: {s} [{s} {s} {s}]", .{
std.log.err("no encoding found for: {s} [{s} {s} {s} {s} {s}]", .{
@tagName(mnemonic),
@tagName(if (ops.len > 0) ops[0] else .none),
@tagName(if (ops.len > 1) ops[1] else .none),
@tagName(if (ops.len > 2) ops[2] else .none),
@tagName(if (ops.len > 3) ops[3] else .none),
@tagName(if (ops.len > 4) ops[4] else .none),
});
return error.InvalidInstruction;
};
var result_ops: [3]Operand = .{.none} ** 3;
var result_ops: [5]Operand = .{.none} ** 5;
@memcpy(result_ops[0..ops.len], ops);
return .{
@ -53,7 +57,9 @@ pub const Instruction = struct {
.none => unreachable, // it's sliced out above
.reg => |reg| try writer.writeAll(@tagName(reg)),
.imm => |imm| try writer.print("{d}", .{imm.asSigned(64)}),
.mem => unreachable, // there is no "mem" operand in the actual instructions
.mem => try writer.writeAll("mem"),
.barrier => |barrier| try writer.writeAll(@tagName(barrier)),
.csr => |csr| try writer.writeAll(@tagName(csr)),
}
}
}
@ -67,6 +73,7 @@ const bits = @import("bits.zig");
const Encoding = @import("Encoding.zig");
const Register = bits.Register;
const CSR = bits.CSR;
const Memory = bits.Memory;
const Immediate = bits.Immediate;

View File

@ -65,6 +65,8 @@ pub const Reloc = struct {
};
};
const Options = struct { allow_frame_locs: bool };
/// The returned slice is overwritten by the next call to lowerMir.
pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct {
insts: []const Instruction,

View File

@ -987,8 +987,9 @@ pub fn genTypedValue(
log.debug("genTypedValue: val = {}", .{val.fmtValue(pt)});
if (val.isUndef(zcu))
if (val.isUndef(zcu)) {
return GenResult.mcv(.undef);
}
const owner_decl = zcu.declPtr(owner_decl_index);
const namespace = zcu.namespacePtr(owner_decl.src_namespace);

View File

@ -540,8 +540,8 @@ inline fn isGlobal(index: Symbol.Index) bool {
pub fn symbol(self: ZigObject, index: Symbol.Index) Symbol.Index {
const actual_index = index & symbol_mask;
if (isGlobal(index)) return self.global_symbols.items[actual_index];
return self.local_symbols.items[actual_index];
if (isGlobal(index)) return self.globals()[actual_index];
return self.locals()[actual_index];
}
pub fn elfSym(self: *ZigObject, index: Symbol.Index) *elf.Elf64_Sym {
@ -1334,11 +1334,15 @@ fn lowerConst(
const sym_index = try self.addAtom(elf_file);
const res = try codegen.generateSymbol(&elf_file.base, pt, src_loc, val, &code_buffer, .{
.none = {},
}, .{
.parent_atom_index = sym_index,
});
const res = try codegen.generateSymbol(
&elf_file.base,
pt,
src_loc,
val,
&code_buffer,
.{ .none = {} },
.{ .parent_atom_index = sym_index },
);
const code = switch (res) {
.ok => code_buffer.items,
.fail => |em| return .{ .fail = em },

View File

@ -580,7 +580,6 @@ test "type coercion of anon struct literal to array" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
const U = union {
@ -1011,7 +1010,6 @@ test "union that needs padding bytes inside an array" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const B = union(enum) {
D: u8,

View File

@ -42,7 +42,6 @@ test "fence" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var x: i32 = 1234;
@fence(.seq_cst);
@ -188,21 +187,6 @@ test "atomic store" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var x: u32 = 0;
@atomicStore(u32, &x, 1, .seq_cst);
try expect(@atomicLoad(u32, &x, .seq_cst) == 1);
@atomicStore(u32, &x, 12345678, .seq_cst);
try expect(@atomicLoad(u32, &x, .seq_cst) == 12345678);
}
test "atomic store comptime" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try comptime testAtomicStore();
try testAtomicStore();
@ -451,7 +435,6 @@ test "return @atomicStore, using it as a void value" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
const A = struct {

View File

@ -192,7 +192,6 @@ test "@bitCast packed structs at runtime and comptime" {
test "@bitCast extern structs at runtime and comptime" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const Full = extern struct {
number: u16,
@ -227,7 +226,6 @@ test "bitcast packed struct to integer and back" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const LevelUpMove = packed struct {
move_id: u9,

View File

@ -11,7 +11,6 @@ test {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var val: u8 = undefined;
try testing.expectEqual({}, @atomicStore(u8, &val, 0, .unordered));

View File

@ -908,7 +908,6 @@ test "enum literal casting to tagged union" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const Arch = union(enum) {
x86_64,

View File

@ -535,7 +535,6 @@ test "return result loc as peer result loc in inferred error set function" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {

View File

@ -395,7 +395,6 @@ test "return 0 from function that has u0 return type" {
test "statically initialized struct" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
st_init_str_foo.x += 1;
try expect(st_init_str_foo.x == 14);
@ -446,7 +445,6 @@ test "binary math operator in partially inlined function" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var s: [4]u32 = undefined;
var b: [16]u8 = undefined;

View File

@ -281,7 +281,6 @@ test "@sqrt f32/f64" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testSqrt(f32);
try comptime testSqrt(f32);

View File

@ -49,7 +49,6 @@ test "inline switch unions" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var x: U = .a;
_ = &x;

View File

@ -1269,7 +1269,6 @@ test "@subWithOverflow" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
{
var a: u8 = 1;

View File

@ -397,7 +397,6 @@ test "array of optional unaligned types" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const Enum = enum { one, two, three };

View File

@ -785,7 +785,6 @@ test "nested packed struct field access test" {
test "nested packed struct at non-zero offset" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const Pair = packed struct(u24) {
a: u16 = 0,

View File

@ -28,7 +28,6 @@ fn dummy(a: bool, b: i32, c: f32) i32 {
test "reflection: @field" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var f = Foo{
.one = 42,

View File

@ -875,7 +875,6 @@ test "packed struct field passed to generic function" {
test "anonymous struct literal syntax" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
const Point = struct {
@ -985,7 +984,6 @@ test "struct with union field" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const Value = struct {
ref: u32 = 2,
@ -1368,7 +1366,6 @@ test "store to comptime field" {
test "struct field init value is size of the struct" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const namespace = struct {
const S = extern struct {

View File

@ -256,7 +256,6 @@ test "switch on enum using pointer capture" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testSwitchEnumPtrCapture();
try comptime testSwitchEnumPtrCapture();
@ -693,7 +692,6 @@ test "switch capture copies its payload" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {

View File

@ -27,7 +27,6 @@ test "this refer to module call private fn" {
test "this refer to container" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var pt: Point(i32) = undefined;
pt.x = 12;

View File

@ -131,7 +131,6 @@ test "tuple initializer for var" {
test "array-like initializer for tuple types" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const T = @Type(.{
.Struct = .{

View File

@ -383,7 +383,6 @@ test "Type.Union" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const Untagged = @Type(.{
.Union = .{

View File

@ -43,7 +43,6 @@ test "basic unions" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var foo = Foo{ .int = 1 };
try expect(foo.int == 1);
@ -276,7 +275,6 @@ test "comparison between union and enum literal" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testComparison();
try comptime testComparison();
@ -292,7 +290,6 @@ test "cast union to tag type of union" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testCastUnionToTag();
try comptime testCastUnionToTag();
@ -314,7 +311,6 @@ test "cast tag type of union to union" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var x: Value2 = Letter2.B;
_ = &x;
@ -331,7 +327,6 @@ test "implicit cast union to its tag type" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var x: Value2 = Letter2.B;
_ = &x;
@ -353,7 +348,6 @@ test "constant packed union" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testConstPackedUnion(&[_]PackThis{PackThis{ .StringLiteral = 1 }});
}
@ -503,7 +497,6 @@ test "initialize global array of union" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
glbl_array[1] = FooUnion{ .U1 = 2 };
glbl_array[0] = FooUnion{ .U0 = 1 };
@ -515,7 +508,6 @@ test "update the tag value for zero-sized unions" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = union(enum) {
U0: void,
@ -636,7 +628,6 @@ test "tagged union with all void fields but a meaningful tag" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
const B = union(enum) {
@ -758,7 +749,6 @@ test "@intFromEnum works on unions" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const Bar = union(enum) {
A: bool,
@ -874,7 +864,6 @@ test "@unionInit can modify a union type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const UnionInitEnum = union(enum) {
Boolean: bool,
@ -898,7 +887,6 @@ test "@unionInit can modify a pointer value" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const UnionInitEnum = union(enum) {
Boolean: bool,
@ -1089,7 +1077,6 @@ test "switching on non exhaustive union" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
const E = enum(u8) {
@ -1199,7 +1186,6 @@ test "global variable struct contains union initialized to non-most-aligned fiel
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const T = struct {
const U = union(enum) {
@ -1352,7 +1338,6 @@ test "noreturn field in union" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const U = union(enum) {
a: u32,
@ -1434,7 +1419,6 @@ test "union field ptr - zero sized payload" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const U = union {
foo: void,
@ -1449,7 +1433,6 @@ test "union field ptr - zero sized field" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const U = union {
foo: void,
@ -1589,7 +1572,6 @@ test "reinterpreting enum value inside packed union" {
test "access the tag of a global tagged union" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const U = union(enum) {
a,
@ -1601,7 +1583,6 @@ test "access the tag of a global tagged union" {
test "coerce enum literal to union in result loc" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const U = union(enum) {
a,
@ -1864,7 +1845,6 @@ test "reinterpret extern union" {
test "reinterpret packed union" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const U = packed union {
foo: u8,
@ -2044,7 +2024,6 @@ test "extern union initialized via reintepreted struct field initializer" {
test "packed union initialized via reintepreted struct field initializer" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const bytes = [_]u8{ 0xaa, 0xbb, 0xcc, 0xdd };
@ -2065,7 +2044,6 @@ test "packed union initialized via reintepreted struct field initializer" {
test "store of comptime reinterpreted memory to extern union" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const bytes = [_]u8{ 0xaa, 0xbb, 0xcc, 0xdd };
@ -2088,7 +2066,6 @@ test "store of comptime reinterpreted memory to extern union" {
test "store of comptime reinterpreted memory to packed union" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const bytes = [_]u8{ 0xaa, 0xbb, 0xcc, 0xdd };

View File

@ -97,29 +97,40 @@ test "vector int operators" {
test "vector float operators" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArmOrThumb()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
inline for ([_]type{ f16, f32, f64, f80, f128 }) |T| {
const S = struct {
fn doTheTest() !void {
var v: @Vector(4, T) = [4]T{ 10, 20, 30, 40 };
var x: @Vector(4, T) = [4]T{ 1, 2, 3, 4 };
_ = .{ &v, &x };
try expect(mem.eql(T, &@as([4]T, v + x), &[4]T{ 11, 22, 33, 44 }));
try expect(mem.eql(T, &@as([4]T, v - x), &[4]T{ 9, 18, 27, 36 }));
try expect(mem.eql(T, &@as([4]T, v * x), &[4]T{ 10, 40, 90, 160 }));
try expect(mem.eql(T, &@as([4]T, -x), &[4]T{ -1, -2, -3, -4 }));
}
};
try S.doTheTest();
try comptime S.doTheTest();
}
const S = struct {
fn doTheTest(T: type) !void {
var v: @Vector(4, T) = .{ 10, 20, 30, 40 };
var x: @Vector(4, T) = .{ 1, 2, 3, 4 };
_ = .{ &v, &x };
try expectEqual(v + x, .{ 11, 22, 33, 44 });
try expectEqual(v - x, .{ 9, 18, 27, 36 });
try expectEqual(v * x, .{ 10, 40, 90, 160 });
try expectEqual(-x, .{ -1, -2, -3, -4 });
}
};
try S.doTheTest(f32);
try comptime S.doTheTest(f32);
try S.doTheTest(f64);
try comptime S.doTheTest(f64);
try S.doTheTest(f16);
try comptime S.doTheTest(f16);
try S.doTheTest(f80);
try comptime S.doTheTest(f80);
try S.doTheTest(f128);
try comptime S.doTheTest(f128);
}
test "vector bit operators" {
@ -1245,7 +1256,6 @@ test "array of vectors is copied" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const Vec3 = @Vector(3, i32);
var points = [_]Vec3{
@ -1316,6 +1326,7 @@ test "zero multiplicand" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const zeros = @Vector(2, u32){ 0.0, 0.0 };
var ones = @Vector(2, u32){ 1.0, 1.0 };
@ -1410,7 +1421,6 @@ test "store to vector in slice" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var v = [_]@Vector(3, f32){
.{ 1, 1, 1 },
@ -1608,7 +1618,6 @@ test "@reduce on bool vector" {
test "bitcast vector to array of smaller vectors" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const u8x32 = @Vector(32, u8);
const u8x64 = @Vector(64, u8);

View File

@ -436,11 +436,12 @@ const test_targets = blk: {
//},
.{
.target = .{
.cpu_arch = .riscv64,
.os_tag = .linux,
.abi = .musl,
},
.target = std.Target.Query.parse(
.{
.arch_os_abi = "riscv64-linux-musl",
.cpu_features = "baseline+v",
},
) catch @panic("OOM"),
.use_llvm = false,
.use_lld = false,
},