Merge pull request #19431 from Rexicon226/revive-riscv

revive STAGE2 RISCV64 backend
This commit is contained in:
Jakub Konka 2024-05-11 21:35:18 +02:00 committed by GitHub
commit 86d8688c7f
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
135 changed files with 6607 additions and 1697 deletions

View File

@ -12,6 +12,8 @@ var cmdline_buffer: [4096]u8 = undefined;
var fba = std.heap.FixedBufferAllocator.init(&cmdline_buffer);
pub fn main() void {
if (builtin.zig_backend == .stage2_riscv64) return mainExtraSimple() catch @panic("test failure");
if (builtin.zig_backend == .stage2_aarch64) {
return mainSimple() catch @panic("test failure");
}
@ -247,3 +249,19 @@ pub fn mainSimple() anyerror!void {
if (failed != 0) std.process.exit(1);
}
}
/// Minimal test runner for backends that cannot handle the full test
/// harness: runs every registered test function, counts failures
/// (error.SkipZigTest is not a failure), and exits with status 1 if
/// anything failed. Returning normally means all tests passed or were
/// skipped.
pub fn mainExtraSimple() !void {
    var fail_count: u8 = 0;
    for (builtin.test_functions) |test_fn| {
        test_fn.func() catch |err| {
            // Skipped tests are not failures; everything else is.
            // Saturating add so more than 255 failures cannot trap; any
            // nonzero count produces the same exit status below.
            if (err != error.SkipZigTest) fail_count +|= 1;
        };
    }
    if (fail_count != 0) std.process.exit(1);
}

View File

@ -766,7 +766,6 @@ pub fn default_panic(msg: []const u8, error_return_trace: ?*StackTrace, ret_addr
builtin.zig_backend == .stage2_aarch64 or
builtin.zig_backend == .stage2_x86 or
(builtin.zig_backend == .stage2_x86_64 and (builtin.target.ofmt != .elf and builtin.target.ofmt != .macho)) or
builtin.zig_backend == .stage2_riscv64 or
builtin.zig_backend == .stage2_sparc64 or
builtin.zig_backend == .stage2_spirv64)
{
@ -774,6 +773,19 @@ pub fn default_panic(msg: []const u8, error_return_trace: ?*StackTrace, ret_addr
@breakpoint();
}
}
if (builtin.zig_backend == .stage2_riscv64) {
asm volatile ("ecall"
:
: [number] "{a7}" (64),
[arg1] "{a0}" (1),
[arg2] "{a1}" (@intFromPtr(msg.ptr)),
[arg3] "{a2}" (msg.len),
: "memory"
);
std.posix.exit(127);
}
switch (builtin.os.tag) {
.freestanding => {
while (true) {

View File

@ -1236,15 +1236,7 @@ pub const DwarfInfo = struct {
const opcode_base = try fbr.readByte();
const standard_opcode_lengths = try allocator.alloc(u8, opcode_base - 1);
defer allocator.free(standard_opcode_lengths);
{
var i: usize = 0;
while (i < opcode_base - 1) : (i += 1) {
standard_opcode_lengths[i] = try fbr.readByte();
}
}
const standard_opcode_lengths = try fbr.readBytes(opcode_base - 1);
var include_directories = std.ArrayList(FileEntry).init(allocator);
defer include_directories.deinit();

View File

@ -638,6 +638,8 @@ test lessThan {
const backend_can_use_eql_bytes = switch (builtin.zig_backend) {
// The SPIR-V backend does not support the optimized path yet.
.stage2_spirv64 => false,
// The RISC-V backend does not support vectors yet.
.stage2_riscv64 => false,
else => true,
};

View File

@ -20,10 +20,10 @@ pub const simplified_logic =
builtin.zig_backend == .stage2_x86 or
builtin.zig_backend == .stage2_aarch64 or
builtin.zig_backend == .stage2_arm or
builtin.zig_backend == .stage2_riscv64 or
builtin.zig_backend == .stage2_sparc64 or
builtin.cpu.arch == .spirv32 or
builtin.cpu.arch == .spirv64;
builtin.cpu.arch == .spirv64 or
builtin.zig_backend == .stage2_riscv64;
comptime {
// No matter what, we import the root file, so that any export, test, comptime
@ -43,6 +43,10 @@ comptime {
} else if (builtin.os.tag == .opencl) {
if (@hasDecl(root, "main"))
@export(spirvMain2, .{ .name = "main" });
} else if (native_arch.isRISCV()) {
if (!@hasDecl(root, "_start")) {
@export(riscv_start, .{ .name = "_start" });
}
} else {
if (!@hasDecl(root, "_start")) {
@export(_start2, .{ .name = "_start" });
@ -151,14 +155,6 @@ fn exit2(code: usize) noreturn {
: "memory", "cc"
);
},
.riscv64 => {
asm volatile ("ecall"
:
: [number] "{a7}" (94),
[arg1] "{a0}" (0),
: "rcx", "r11", "memory"
);
},
.sparc64 => {
asm volatile ("ta 0x6d"
:
@ -212,6 +208,23 @@ fn wasi_start() callconv(.C) void {
}
}
/// RISC-V entry point: calls `root.main` and converts its return value
/// into a process exit status, dispatching at comptime on main's return
/// type. Only `noreturn`, `void`, and `u8` returns are accepted.
/// NOTE(review): error-union return types (e.g. `!void`) are not handled
/// here — confirm that is intended for this simplified start path.
fn riscv_start() callconv(.C) noreturn {
    std.process.exit(switch (@typeInfo(@typeInfo(@TypeOf(root.main)).Fn.return_type.?)) {
        // main never returns; the call itself diverges.
        .NoReturn => root.main(),
        // void main: run it, then exit 0.
        .Void => ret: {
            root.main();
            break :ret 0;
        },
        // Integer main: only u8 is allowed; the returned byte is the status.
        .Int => |info| ret: {
            if (info.bits != 8 or info.signedness == .signed) {
                @compileError(bad_main_ret);
            }
            break :ret root.main();
        },
        else => @compileError("expected return type of main to be 'void', 'noreturn', 'u8'"),
    });
}
fn EfiMain(handle: uefi.Handle, system_table: *uefi.tables.SystemTable) callconv(.C) usize {
uefi.handle = handle;
uefi.system_table = system_table;

View File

@ -22,7 +22,7 @@ pub var base_allocator_instance = std.heap.FixedBufferAllocator.init("");
pub var log_level = std.log.Level.warn;
// Disable printing in tests for simple backends.
pub const backend_can_print = builtin.zig_backend != .stage2_spirv64;
pub const backend_can_print = !(builtin.zig_backend == .stage2_spirv64 or builtin.zig_backend == .stage2_riscv64);
fn print(comptime fmt: []const u8, args: anytype) void {
if (@inComptime()) {

View File

@ -10499,9 +10499,10 @@ fn intCast(
const dest_max_val_scalar = try dest_scalar_ty.maxIntScalar(mod, operand_scalar_ty);
const dest_max_val = try sema.splat(operand_ty, dest_max_val_scalar);
const dest_max = Air.internedToRef(dest_max_val.toIntern());
const diff = try block.addBinOp(.sub_wrap, dest_max, operand);
if (actual_info.signedness == .signed) {
const diff = try block.addBinOp(.sub_wrap, dest_max, operand);
// Reinterpret the sign-bit as part of the value. This will make
// negative differences (`operand` > `dest_max`) appear too big.
const unsigned_scalar_operand_ty = try mod.intType(.unsigned, actual_bits);
@ -10542,7 +10543,7 @@ fn intCast(
try sema.addSafetyCheck(block, src, ok, .cast_truncated_data);
} else {
const ok = if (is_vector) ok: {
const is_in_range = try block.addCmpVector(diff, dest_max, .lte);
const is_in_range = try block.addCmpVector(operand, dest_max, .lte);
const all_in_range = try block.addInst(.{
.tag = if (block.float_mode == .optimized) .reduce_optimized else .reduce,
.data = .{ .reduce = .{
@ -10552,7 +10553,7 @@ fn intCast(
});
break :ok all_in_range;
} else ok: {
const is_in_range = try block.addBinOp(.cmp_lte, diff, dest_max);
const is_in_range = try block.addBinOp(.cmp_lte, operand, dest_max);
break :ok is_in_range;
};
try sema.addSafetyCheck(block, src, ok, .cast_truncated_data);

File diff suppressed because it is too large Load Diff

View File

@ -1,25 +1,7 @@
//! This file contains the functionality for lowering RISCV64 MIR into
//! machine code
//! This file contains the functionality for emitting RISC-V MIR as machine code
const Emit = @This();
const std = @import("std");
const math = std.math;
const Mir = @import("Mir.zig");
const bits = @import("bits.zig");
const link = @import("../../link.zig");
const Module = @import("../../Module.zig");
const ErrorMsg = Module.ErrorMsg;
const assert = std.debug.assert;
const Instruction = bits.Instruction;
const Register = bits.Register;
const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput;
mir: Mir,
bin_file: *link.File,
lower: Lower,
debug_output: DebugInfoOutput,
target: *const std.Target,
err_msg: ?*ErrorMsg = null,
src_loc: Module.SrcLoc,
code: *std.ArrayList(u8),
prev_di_line: u32,
@ -27,195 +9,185 @@ prev_di_column: u32,
/// Relative to the beginning of `code`.
prev_di_pc: usize,
const InnerError = error{
OutOfMemory,
code_offset_mapping: std.AutoHashMapUnmanaged(Mir.Inst.Index, usize) = .{},
relocs: std.ArrayListUnmanaged(Reloc) = .{},
pub const Error = Lower.Error || error{
EmitFail,
};
pub fn emitMir(
emit: *Emit,
) InnerError!void {
const mir_tags = emit.mir.instructions.items(.tag);
pub fn emitMir(emit: *Emit) Error!void {
log.debug("mir instruction len: {}", .{emit.lower.mir.instructions.len});
for (0..emit.lower.mir.instructions.len) |mir_i| {
const mir_index: Mir.Inst.Index = @intCast(mir_i);
try emit.code_offset_mapping.putNoClobber(
emit.lower.allocator,
mir_index,
@intCast(emit.code.items.len),
);
const lowered = try emit.lower.lowerMir(mir_index);
var lowered_relocs = lowered.relocs;
for (lowered.insts, 0..) |lowered_inst, lowered_index| {
const start_offset: u32 = @intCast(emit.code.items.len);
try lowered_inst.encode(emit.code.writer());
// Emit machine code
for (mir_tags, 0..) |tag, index| {
const inst = @as(u32, @intCast(index));
switch (tag) {
.add => try emit.mirRType(inst),
.sub => try emit.mirRType(inst),
while (lowered_relocs.len > 0 and
lowered_relocs[0].lowered_inst_index == lowered_index) : ({
lowered_relocs = lowered_relocs[1..];
}) switch (lowered_relocs[0].target) {
.inst => |target| try emit.relocs.append(emit.lower.allocator, .{
.source = start_offset,
.target = target,
.offset = 0,
.enc = std.meta.activeTag(lowered_inst.encoding.data),
}),
.load_symbol_reloc => |symbol| {
if (emit.lower.bin_file.cast(link.File.Elf)) |elf_file| {
const atom_ptr = elf_file.symbol(symbol.atom_index).atom(elf_file).?;
const sym_index = elf_file.zigObjectPtr().?.symbol(symbol.sym_index);
const sym = elf_file.symbol(sym_index);
.addi => try emit.mirIType(inst),
.jalr => try emit.mirIType(inst),
.ld => try emit.mirIType(inst),
.sd => try emit.mirIType(inst),
var hi_r_type: u32 = @intFromEnum(std.elf.R_RISCV.HI20);
var lo_r_type: u32 = @intFromEnum(std.elf.R_RISCV.LO12_I);
.ebreak => try emit.mirSystem(inst),
.ecall => try emit.mirSystem(inst),
.unimp => try emit.mirSystem(inst),
if (sym.flags.needs_zig_got) {
_ = try sym.getOrCreateZigGotEntry(sym_index, elf_file);
.dbg_line => try emit.mirDbgLine(inst),
hi_r_type = Elf.R_ZIG_GOT_HI20;
lo_r_type = Elf.R_ZIG_GOT_LO12;
}
.dbg_prologue_end => try emit.mirDebugPrologueEnd(),
.dbg_epilogue_begin => try emit.mirDebugEpilogueBegin(),
try atom_ptr.addReloc(elf_file, .{
.r_offset = start_offset,
.r_info = (@as(u64, @intCast(symbol.sym_index)) << 32) | hi_r_type,
.r_addend = 0,
});
.mv => try emit.mirRR(inst),
try atom_ptr.addReloc(elf_file, .{
.r_offset = start_offset + 4,
.r_info = (@as(u64, @intCast(symbol.sym_index)) << 32) | lo_r_type,
.r_addend = 0,
});
} else return emit.fail("TODO: load_symbol_reloc non-ELF", .{});
},
};
}
std.debug.assert(lowered_relocs.len == 0);
.nop => try emit.mirNop(inst),
.ret => try emit.mirNop(inst),
if (lowered.insts.len == 0) {
const mir_inst = emit.lower.mir.instructions.get(mir_index);
switch (mir_inst.tag) {
else => unreachable,
.pseudo => switch (mir_inst.ops) {
else => unreachable,
.pseudo_dbg_prologue_end => {
switch (emit.debug_output) {
.dwarf => |dw| {
try dw.setPrologueEnd();
log.debug("mirDbgPrologueEnd (line={d}, col={d})", .{
emit.prev_di_line, emit.prev_di_column,
});
try emit.dbgAdvancePCAndLine(emit.prev_di_line, emit.prev_di_column);
},
.plan9 => {},
.none => {},
}
},
.pseudo_dbg_line_column => try emit.dbgAdvancePCAndLine(
mir_inst.data.pseudo_dbg_line_column.line,
mir_inst.data.pseudo_dbg_line_column.column,
),
.pseudo_dbg_epilogue_begin => {
switch (emit.debug_output) {
.dwarf => |dw| {
try dw.setEpilogueBegin();
log.debug("mirDbgEpilogueBegin (line={d}, col={d})", .{
emit.prev_di_line, emit.prev_di_column,
});
try emit.dbgAdvancePCAndLine(emit.prev_di_line, emit.prev_di_column);
},
.plan9 => {},
.none => {},
}
},
.pseudo_dead => {},
},
}
}
}
try emit.fixupRelocs();
}
.lui => try emit.mirUType(inst),
pub fn deinit(emit: *Emit) void {
emit.relocs.deinit(emit.lower.allocator);
emit.code_offset_mapping.deinit(emit.lower.allocator);
emit.* = undefined;
}
const Reloc = struct {
/// Offset of the instruction.
source: usize,
/// Target of the relocation.
target: Mir.Inst.Index,
/// Offset of the relocation within the instruction.
offset: u32,
/// Encoding of the instruction, used to determine how to modify it.
enc: Encoding.InstEnc,
};
fn fixupRelocs(emit: *Emit) Error!void {
for (emit.relocs.items) |reloc| {
log.debug("target inst: {}", .{emit.lower.mir.instructions.get(reloc.target)});
const target = emit.code_offset_mapping.get(reloc.target) orelse
return emit.fail("relocation target not found!", .{});
const disp = @as(i32, @intCast(target)) - @as(i32, @intCast(reloc.source));
const code: *[4]u8 = emit.code.items[reloc.source + reloc.offset ..][0..4];
log.debug("disp: {x}", .{disp});
switch (reloc.enc) {
.J => riscv_util.writeInstJ(code, @bitCast(disp)),
.B => riscv_util.writeInstB(code, @bitCast(disp)),
else => return emit.fail("tried to reloc encoding type {s}", .{@tagName(reloc.enc)}),
}
}
}
pub fn deinit(emit: *Emit) void {
emit.* = undefined;
}
/// Appends the 4-byte encoding of `instruction` to the output buffer,
/// honoring the target architecture's byte order.
fn writeInstruction(emit: *Emit, instruction: Instruction) !void {
    const endian = emit.target.cpu.arch.endian();
    std.mem.writeInt(u32, try emit.code.addManyAsArray(4), instruction.toU32(), endian);
}
fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError {
@setCold(true);
assert(emit.err_msg == null);
const comp = emit.bin_file.comp;
const gpa = comp.gpa;
emit.err_msg = try ErrorMsg.create(gpa, emit.src_loc, format, args);
return error.EmitFail;
}
fn dbgAdvancePCAndLine(self: *Emit, line: u32, column: u32) !void {
const delta_line = @as(i32, @intCast(line)) - @as(i32, @intCast(self.prev_di_line));
const delta_pc: usize = self.code.items.len - self.prev_di_pc;
switch (self.debug_output) {
fn dbgAdvancePCAndLine(emit: *Emit, line: u32, column: u32) Error!void {
const delta_line = @as(i33, line) - @as(i33, emit.prev_di_line);
const delta_pc: usize = emit.code.items.len - emit.prev_di_pc;
log.debug(" (advance pc={d} and line={d})", .{ delta_pc, delta_line });
switch (emit.debug_output) {
.dwarf => |dw| {
if (column != emit.prev_di_column) try dw.setColumn(column);
if (delta_line == 0) return; // TODO: fix these edge cases.
try dw.advancePCAndLine(delta_line, delta_pc);
self.prev_di_line = line;
self.prev_di_column = column;
self.prev_di_pc = self.code.items.len;
},
.plan9 => |dbg_out| {
if (delta_pc <= 0) return; // only do this when the pc changes
// increasing the line number
try link.File.Plan9.changeLine(&dbg_out.dbg_line, delta_line);
// increasing the pc
const d_pc_p9 = @as(i64, @intCast(delta_pc)) - dbg_out.pc_quanta;
if (d_pc_p9 > 0) {
// minus one because if it's the last one, we want to leave space to change the line, which is one pc quanta
try dbg_out.dbg_line.append(@as(u8, @intCast(@divExact(d_pc_p9, dbg_out.pc_quanta) + 128)) - dbg_out.pc_quanta);
if (dbg_out.pcop_change_index) |pci|
dbg_out.dbg_line.items[pci] += 1;
dbg_out.pcop_change_index = @as(u32, @intCast(dbg_out.dbg_line.items.len - 1));
} else if (d_pc_p9 == 0) {
// we don't need to do anything, because adding the pc quanta does it for us
} else unreachable;
if (dbg_out.start_line == null)
dbg_out.start_line = self.prev_di_line;
dbg_out.end_line = line;
// only do this if the pc changed
self.prev_di_line = line;
self.prev_di_column = column;
self.prev_di_pc = self.code.items.len;
},
.none => {},
}
}
/// Emits an R-type (register-register) instruction for the MIR inst at
/// `inst`, using its `r_type` data payload.
fn mirRType(emit: *Emit, inst: Mir.Inst.Index) !void {
    const tag = emit.mir.instructions.items(.tag)[inst];
    const r_type = emit.mir.instructions.items(.data)[inst].r_type;

    switch (tag) {
        .add => try emit.writeInstruction(Instruction.add(r_type.rd, r_type.rs1, r_type.rs2)),
        .sub => try emit.writeInstruction(Instruction.sub(r_type.rd, r_type.rs1, r_type.rs2)),
        else => unreachable, // caller dispatched on tag; only R-type tags reach here
    }
}
/// Emits an I-type (register + 12-bit immediate) instruction for the MIR
/// inst at `inst`, using its `i_type` data payload.
/// Note the differing helper argument orders: `addi` takes (rd, rs1, imm)
/// while `jalr`/`ld`/`sd` take (rd, imm, rs1).
fn mirIType(emit: *Emit, inst: Mir.Inst.Index) !void {
    const tag = emit.mir.instructions.items(.tag)[inst];
    const i_type = emit.mir.instructions.items(.data)[inst].i_type;

    switch (tag) {
        .addi => try emit.writeInstruction(Instruction.addi(i_type.rd, i_type.rs1, i_type.imm12)),
        .jalr => try emit.writeInstruction(Instruction.jalr(i_type.rd, i_type.imm12, i_type.rs1)),
        .ld => try emit.writeInstruction(Instruction.ld(i_type.rd, i_type.imm12, i_type.rs1)),
        .sd => try emit.writeInstruction(Instruction.sd(i_type.rd, i_type.imm12, i_type.rs1)),
        else => unreachable, // caller dispatched on tag; only I-type tags reach here
    }
}
/// Emits a system instruction (no operands) for the MIR inst at `inst`.
fn mirSystem(emit: *Emit, inst: Mir.Inst.Index) !void {
    const tag = emit.mir.instructions.items(.tag)[inst];

    switch (tag) {
        .ebreak => try emit.writeInstruction(Instruction.ebreak),
        .ecall => try emit.writeInstruction(Instruction.ecall),
        .unimp => try emit.writeInstruction(Instruction.unimp),
        else => unreachable, // caller dispatched on tag; only system tags reach here
    }
}
/// Handles the `.dbg_line` pseudo-instruction: advances the debug-info PC
/// and line/column state; emits no machine code.
fn mirDbgLine(emit: *Emit, inst: Mir.Inst.Index) !void {
    const tag = emit.mir.instructions.items(.tag)[inst];
    const dbg_line_column = emit.mir.instructions.items(.data)[inst].dbg_line_column;

    switch (tag) {
        .dbg_line => try emit.dbgAdvancePCAndLine(dbg_line_column.line, dbg_line_column.column),
        else => unreachable, // caller dispatched on tag
    }
}
fn mirDebugPrologueEnd(self: *Emit) !void {
switch (self.debug_output) {
.dwarf => |dw| {
try dw.setPrologueEnd();
try self.dbgAdvancePCAndLine(self.prev_di_line, self.prev_di_column);
emit.prev_di_line = line;
emit.prev_di_column = column;
emit.prev_di_pc = emit.code.items.len;
},
.plan9 => {},
.none => {},
}
}
fn mirDebugEpilogueBegin(self: *Emit) !void {
switch (self.debug_output) {
.dwarf => |dw| {
try dw.setEpilogueBegin();
try self.dbgAdvancePCAndLine(self.prev_di_line, self.prev_di_column);
},
.plan9 => {},
.none => {},
}
fn fail(emit: *Emit, comptime format: []const u8, args: anytype) Error {
return switch (emit.lower.fail(format, args)) {
error.LowerFail => error.EmitFail,
else => |e| e,
};
}
fn mirRR(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const rr = emit.mir.instructions.items(.data)[inst].rr;
const link = @import("../../link.zig");
const log = std.log.scoped(.emit);
const mem = std.mem;
const std = @import("std");
switch (tag) {
.mv => try emit.writeInstruction(Instruction.addi(rr.rd, rr.rs, 0)),
else => unreachable,
}
}
/// Emits a U-type (20-bit upper-immediate) instruction for the MIR inst at
/// `inst`, using its `u_type` data payload.
fn mirUType(emit: *Emit, inst: Mir.Inst.Index) !void {
    const tag = emit.mir.instructions.items(.tag)[inst];
    const u_type = emit.mir.instructions.items(.data)[inst].u_type;

    switch (tag) {
        .lui => try emit.writeInstruction(Instruction.lui(u_type.rd, u_type.imm20)),
        else => unreachable, // caller dispatched on tag; only U-type tags reach here
    }
}
/// Emits canonical no-operand instructions:
/// `nop` as `addi zero, zero, 0` and `ret` as `jalr zero, 0(ra)` — the
/// standard RISC-V pseudo-instruction expansions.
fn mirNop(emit: *Emit, inst: Mir.Inst.Index) !void {
    const tag = emit.mir.instructions.items(.tag)[inst];

    switch (tag) {
        .nop => try emit.writeInstruction(Instruction.addi(.zero, .zero, 0)),
        .ret => try emit.writeInstruction(Instruction.jalr(.zero, 0, .ra)),
        else => unreachable, // caller dispatched on tag
    }
}
const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput;
const Emit = @This();
const Lower = @import("Lower.zig");
const Mir = @import("Mir.zig");
const riscv_util = @import("../../link/riscv.zig");
const Encoding = @import("Encoding.zig");
const Elf = @import("../../link/Elf.zig");

View File

@ -0,0 +1,403 @@
mnemonic: Mnemonic,
data: Data,
/// Assembler mnemonics this backend can emit, grouped by the RISC-V
/// instruction format they encode to (see `InstEnc.fromMnemonic`).
pub const Mnemonic = enum {
    // I Type
    ld,
    lw,
    lwu,
    lh,
    lhu,
    lb,
    lbu,
    sltiu,
    xori,
    andi,
    slli,
    srli,
    srai,
    addi,
    jalr,

    // U Type
    lui,

    // S Type
    sd,
    sw,
    sh,
    sb,

    // J Type
    jal,

    // B Type
    beq,

    // R Type
    add,
    @"and",
    sub,
    slt,
    mul,
    sltu,
    xor,

    // System
    ecall,
    ebreak,
    unimp,

    /// Returns the fixed opcode/funct fields for this mnemonic.
    /// `funct3`/`funct7` are null for formats that do not carry them;
    /// `offset` is added into the I-type immediate (used by `srai` to set
    /// the distinguishing high bit — see `Data.construct`).
    pub fn encoding(mnem: Mnemonic) Enc {
        return switch (mnem) {
            // zig fmt: off
            .add    => .{ .opcode = 0b0110011, .funct3 = 0b000, .funct7 = 0b0000000 },
            .sltu   => .{ .opcode = 0b0110011, .funct3 = 0b011, .funct7 = 0b0000000 },
            .@"and" => .{ .opcode = 0b0110011, .funct3 = 0b111, .funct7 = 0b0000000 },
            .sub    => .{ .opcode = 0b0110011, .funct3 = 0b000, .funct7 = 0b0100000 },

            .ld     => .{ .opcode = 0b0000011, .funct3 = 0b011, .funct7 = null },
            .lw     => .{ .opcode = 0b0000011, .funct3 = 0b010, .funct7 = null },
            .lwu    => .{ .opcode = 0b0000011, .funct3 = 0b110, .funct7 = null },
            .lh     => .{ .opcode = 0b0000011, .funct3 = 0b001, .funct7 = null },
            .lhu    => .{ .opcode = 0b0000011, .funct3 = 0b101, .funct7 = null },
            .lb     => .{ .opcode = 0b0000011, .funct3 = 0b000, .funct7 = null },
            .lbu    => .{ .opcode = 0b0000011, .funct3 = 0b100, .funct7 = null },

            .sltiu  => .{ .opcode = 0b0010011, .funct3 = 0b011, .funct7 = null },
            .addi   => .{ .opcode = 0b0010011, .funct3 = 0b000, .funct7 = null },
            .andi   => .{ .opcode = 0b0010011, .funct3 = 0b111, .funct7 = null },
            .xori   => .{ .opcode = 0b0010011, .funct3 = 0b100, .funct7 = null },
            .jalr   => .{ .opcode = 0b1100111, .funct3 = 0b000, .funct7 = null },

            .slli   => .{ .opcode = 0b0010011, .funct3 = 0b001, .funct7 = null },
            .srli   => .{ .opcode = 0b0010011, .funct3 = 0b101, .funct7 = null },
            // srai shares srli's opcode/funct3; bit 10 of the immediate
            // (applied via `offset`) selects the arithmetic shift.
            .srai   => .{ .opcode = 0b0010011, .funct3 = 0b101, .funct7 = null, .offset = 1 << 10 },

            .lui    => .{ .opcode = 0b0110111, .funct3 = null,  .funct7 = null },

            .sd     => .{ .opcode = 0b0100011, .funct3 = 0b011, .funct7 = null },
            .sw     => .{ .opcode = 0b0100011, .funct3 = 0b010, .funct7 = null },
            .sh     => .{ .opcode = 0b0100011, .funct3 = 0b001, .funct7 = null },
            .sb     => .{ .opcode = 0b0100011, .funct3 = 0b000, .funct7 = null },

            .jal    => .{ .opcode = 0b1101111, .funct3 = null,  .funct7 = null },

            .beq    => .{ .opcode = 0b1100011, .funct3 = 0b000, .funct7 = null },

            .slt    => .{ .opcode = 0b0110011, .funct3 = 0b010, .funct7 = 0b0000000 },
            .xor    => .{ .opcode = 0b0110011, .funct3 = 0b100, .funct7 = 0b0000000 },
            .mul    => .{ .opcode = 0b0110011, .funct3 = 0b000, .funct7 = 0b0000001 },

            // ecall/ebreak share opcode+funct3; `Data.construct` tells them
            // apart via the immediate field.
            .ecall  => .{ .opcode = 0b1110011, .funct3 = 0b000, .funct7 = null },
            .ebreak => .{ .opcode = 0b1110011, .funct3 = 0b000, .funct7 = null },
            // opcode 0 is not a defined RISC-V opcode, so this word decodes
            // as an illegal ("unimplemented") instruction.
            .unimp  => .{ .opcode = 0b0000000, .funct3 = 0b000, .funct7 = null },
            // zig fmt: on
        };
    }
};
/// The RISC-V base instruction formats an instruction can encode with.
pub const InstEnc = enum {
    R,
    I,
    S,
    B,
    U,
    J,
    /// Extras that have unusual op counts (ecall/ebreak/unimp take none).
    system,

    /// Maps a mnemonic to the instruction format it encodes with.
    pub fn fromMnemonic(mnem: Mnemonic) InstEnc {
        return switch (mnem) {
            .addi,
            .ld,
            .lw,
            .lwu,
            .lh,
            .lhu,
            .lb,
            .lbu,
            .jalr,
            .sltiu,
            .xori,
            .andi,
            .slli,
            .srli,
            .srai,
            => .I,

            .lui,
            => .U,

            .sd,
            .sw,
            .sh,
            .sb,
            => .S,

            .jal,
            => .J,

            .beq,
            => .B,

            .slt,
            .sltu,
            .mul,
            .xor,
            .add,
            .sub,
            .@"and",
            => .R,

            .ecall,
            .ebreak,
            .unimp,
            => .system,
        };
    }

    /// The operand kinds expected by this format, in order; `.none` pads
    /// formats with fewer than three operands (see `verifyOps`, which
    /// slices up to the first `.none`).
    pub fn opsList(enc: InstEnc) [3]std.meta.FieldEnum(Operand) {
        return switch (enc) {
            // zig fmt: off
            .R      => .{ .reg,  .reg,  .reg,  },
            .I      => .{ .reg,  .reg,  .imm,  },
            .S      => .{ .reg,  .reg,  .imm,  },
            .B      => .{ .reg,  .reg,  .imm,  },
            .U      => .{ .reg,  .imm,  .none, },
            .J      => .{ .reg,  .imm,  .none, },
            .system => .{ .none, .none, .none, },
            // zig fmt: on
        };
    }
};
/// The bit-level form of an instruction, as a union over the RISC-V base
/// instruction formats. Packed-struct fields are declared LSB-first, so a
/// `@bitCast` to `u32` yields the format's exact bit layout.
pub const Data = union(InstEnc) {
    R: packed struct {
        opcode: u7,
        rd: u5,
        funct3: u3,
        rs1: u5,
        rs2: u5,
        funct7: u7,
    },
    I: packed struct {
        opcode: u7,
        rd: u5,
        funct3: u3,
        rs1: u5,
        imm0_11: u12,
    },
    S: packed struct {
        opcode: u7,
        imm0_4: u5,
        funct3: u3,
        rs1: u5,
        rs2: u5,
        imm5_11: u7,
    },
    // Branch immediates are scrambled across the word per the spec; the
    // field names give the immediate bit ranges each field holds.
    B: packed struct {
        opcode: u7,
        imm11: u1,
        imm1_4: u4,
        funct3: u3,
        rs1: u5,
        rs2: u5,
        imm5_10: u6,
        imm12: u1,
    },
    U: packed struct {
        opcode: u7,
        rd: u5,
        imm12_31: u20,
    },
    J: packed struct {
        opcode: u7,
        rd: u5,
        imm12_19: u8,
        imm11: u1,
        imm1_10: u10,
        imm20: u1,
    },
    system: void,

    /// Returns the 32-bit machine-code word for this instruction.
    pub fn toU32(self: Data) u32 {
        return switch (self) {
            .R => |v| @as(u32, @bitCast(v)),
            .I => |v| @as(u32, @bitCast(v)),
            .S => |v| @as(u32, @bitCast(v)),
            // B is assembled field-by-field with explicit shifts rather
            // than a bitCast.
            .B => |v| @as(u32, @intCast(v.opcode)) + (@as(u32, @intCast(v.imm11)) << 7) + (@as(u32, @intCast(v.imm1_4)) << 8) + (@as(u32, @intCast(v.funct3)) << 12) + (@as(u32, @intCast(v.rs1)) << 15) + (@as(u32, @intCast(v.rs2)) << 20) + (@as(u32, @intCast(v.imm5_10)) << 25) + (@as(u32, @intCast(v.imm12)) << 31),
            .U => |v| @as(u32, @bitCast(v)),
            .J => |v| @as(u32, @bitCast(v)),
            .system => unreachable, // system mnemonics are built as I-type in construct
        };
    }

    /// Builds the encoded form of `mnem` applied to `ops`. Asserts the
    /// operand count matches the format (callers go through
    /// `findByMnemonic`, which verifies this first).
    pub fn construct(mnem: Mnemonic, ops: []const Operand) !Data {
        const inst_enc = InstEnc.fromMnemonic(mnem);
        const enc = mnem.encoding();

        // Special mnemonics: the system instructions take no operands and
        // are encoded as I-type, distinguished only by the immediate.
        switch (mnem) {
            .ecall,
            .ebreak,
            .unimp,
            => {
                assert(ops.len == 0);
                return .{
                    .I = .{
                        .rd = Register.zero.id(),
                        .rs1 = Register.zero.id(),
                        .imm0_11 = switch (mnem) {
                            .ecall => 0x000,
                            .ebreak => 0x001,
                            .unimp => 0,
                            else => unreachable,
                        },
                        .opcode = enc.opcode,
                        .funct3 = enc.funct3.?,
                    },
                };
            },
            else => {},
        }

        switch (inst_enc) {
            .R => {
                assert(ops.len == 3);
                return .{
                    .R = .{
                        .rd = ops[0].reg.id(),
                        .rs1 = ops[1].reg.id(),
                        .rs2 = ops[2].reg.id(),
                        .opcode = enc.opcode,
                        .funct3 = enc.funct3.?,
                        .funct7 = enc.funct7.?,
                    },
                };
            },
            .S => {
                assert(ops.len == 3);
                // Store immediate is split across two fields.
                const umm = ops[2].imm.asBits(u12);
                return .{
                    .S = .{
                        .imm0_4 = @truncate(umm),
                        .rs1 = ops[0].reg.id(),
                        .rs2 = ops[1].reg.id(),
                        .imm5_11 = @truncate(umm >> 5),
                        .opcode = enc.opcode,
                        .funct3 = enc.funct3.?,
                    },
                };
            },
            .I => {
                assert(ops.len == 3);
                return .{
                    .I = .{
                        .rd = ops[0].reg.id(),
                        .rs1 = ops[1].reg.id(),
                        // enc.offset distinguishes srai from srli (bit 10).
                        .imm0_11 = ops[2].imm.asBits(u12) + enc.offset,
                        .opcode = enc.opcode,
                        .funct3 = enc.funct3.?,
                    },
                };
            },
            .U => {
                assert(ops.len == 2);
                return .{
                    .U = .{
                        .rd = ops[0].reg.id(),
                        .imm12_31 = ops[1].imm.asBits(u20),
                        .opcode = enc.opcode,
                    },
                };
            },
            .J => {
                assert(ops.len == 2);
                // NOTE(review): 4-byte alignment is asserted; targets using
                // the compressed (C) extension allow 2-byte alignment —
                // confirm that is not needed here.
                const umm = ops[1].imm.asBits(u21);
                assert(umm % 4 == 0); // misaligned jump target
                return .{
                    .J = .{
                        .rd = ops[0].reg.id(),
                        .imm1_10 = @truncate(umm >> 1),
                        .imm11 = @truncate(umm >> 11),
                        .imm12_19 = @truncate(umm >> 12),
                        .imm20 = @truncate(umm >> 20),
                        .opcode = enc.opcode,
                    },
                };
            },
            .B => {
                assert(ops.len == 3);
                const umm = ops[2].imm.asBits(u13);
                assert(umm % 4 == 0); // misaligned branch target
                return .{
                    .B = .{
                        .rs1 = ops[0].reg.id(),
                        .rs2 = ops[1].reg.id(),
                        .imm1_4 = @truncate(umm >> 1),
                        .imm5_10 = @truncate(umm >> 5),
                        .imm11 = @truncate(umm >> 11),
                        .imm12 = @truncate(umm >> 12),
                        .opcode = enc.opcode,
                        .funct3 = enc.funct3.?,
                    },
                };
            },
            // Only .system remains, and it was fully handled above.
            else => std.debug.panic("TODO: construct {s}", .{@tagName(inst_enc)}),
        }
    }
};
/// Builds an `Encoding` for `mnem` applied to `ops`, or returns null when
/// the operands do not match the shape the mnemonic's format expects.
/// Errors propagate from `Data.construct`.
pub fn findByMnemonic(mnem: Mnemonic, ops: []const Operand) !?Encoding {
    if (!verifyOps(mnem, ops)) return null;
    return .{
        .mnemonic = mnem,
        .data = try Data.construct(mnem, ops),
    };
}
const Enc = struct {
opcode: u7,
funct3: ?u3,
funct7: ?u7,
offset: u12 = 0,
};
/// Returns true when `ops` matches the operand kinds the mnemonic's
/// format expects: both the count and the active tag of each operand.
fn verifyOps(mnem: Mnemonic, ops: []const Operand) bool {
    const inst_enc = InstEnc.fromMnemonic(mnem);
    // opsList pads with `.none`; slice down to the real operand count.
    const list = std.mem.sliceTo(&inst_enc.opsList(), .none);
    // Reject a wrong operand count explicitly: the multi-object `for`
    // below requires equal lengths and would otherwise trip a runtime
    // safety check instead of returning false.
    if (list.len != ops.len) return false;
    for (list, ops) |l, o| if (l != std.meta.activeTag(o)) return false;
    return true;
}
const std = @import("std");
const assert = std.debug.assert;
const log = std.log.scoped(.encoding);
const Encoding = @This();
const bits = @import("bits.zig");
const Register = bits.Register;
const encoder = @import("encoder.zig");
const Instruction = encoder.Instruction;
const Operand = Instruction.Operand;
const OperandEnum = std.meta.FieldEnum(Operand);

360
src/arch/riscv64/Lower.zig Normal file
View File

@ -0,0 +1,360 @@
//! This file contains the functionality for lowering RISC-V MIR to Instructions
bin_file: *link.File,
output_mode: std.builtin.OutputMode,
link_mode: std.builtin.LinkMode,
pic: bool,
allocator: Allocator,
mir: Mir,
cc: std.builtin.CallingConvention,
err_msg: ?*ErrorMsg = null,
src_loc: Module.SrcLoc,
result_insts_len: u8 = undefined,
result_relocs_len: u8 = undefined,
result_insts: [
@max(
1, // non-pseudo instruction
abi.callee_preserved_regs.len, // spill / restore regs,
)
]Instruction = undefined,
result_relocs: [1]Reloc = undefined,
pub const Error = error{
OutOfMemory,
LowerFail,
InvalidInstruction,
};
pub const Reloc = struct {
lowered_inst_index: u8,
target: Target,
const Target = union(enum) {
inst: Mir.Inst.Index,
/// Relocates both the instruction at lowered_inst_index and the one immediately after it.
load_symbol_reloc: bits.Symbol,
};
};
/// Lowers the MIR instruction at `index` into zero or more machine
/// `Instruction`s plus the relocations they need.
/// The returned slices alias `lower.result_insts` / `lower.result_relocs`
/// and are overwritten by the next call to lowerMir.
pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct {
    insts: []const Instruction,
    relocs: []const Reloc,
} {
    lower.result_insts = undefined;
    lower.result_relocs = undefined;
    errdefer lower.result_insts = undefined;
    errdefer lower.result_relocs = undefined;
    lower.result_insts_len = 0;
    lower.result_relocs_len = 0;
    defer lower.result_insts_len = undefined;
    defer lower.result_relocs_len = undefined;

    const inst = lower.mir.instructions.get(index);
    log.debug("lowerMir {}", .{inst});
    switch (inst.tag) {
        else => try lower.generic(inst),
        .pseudo => switch (inst.ops) {
            // These pseudos emit no machine code; Emit consumes them for
            // debug-info bookkeeping.
            .pseudo_dbg_line_column,
            .pseudo_dbg_epilogue_begin,
            .pseudo_dbg_prologue_end,
            .pseudo_dead,
            => {},

            // Frame-relative load/store: pick the width-appropriate
            // mnemonic and address the slot as base + displacement.
            .pseudo_load_rm, .pseudo_store_rm => {
                const rm = inst.data.rm;
                const frame_loc = rm.m.toFrameLoc(lower.mir);

                switch (inst.ops) {
                    .pseudo_load_rm => {
                        // NOTE(review): loads are always the sign-extending
                        // variants (lb/lh/lw); confirm zero-extending loads
                        // are not needed for unsigned types.
                        const tag: Encoding.Mnemonic = switch (rm.m.mod.rm.size) {
                            .byte => .lb,
                            .hword => .lh,
                            .word => .lw,
                            .dword => .ld,
                        };

                        try lower.emit(tag, &.{
                            .{ .reg = rm.r },
                            .{ .reg = frame_loc.base },
                            .{ .imm = Immediate.s(frame_loc.disp) },
                        });
                    },
                    .pseudo_store_rm => {
                        const tag: Encoding.Mnemonic = switch (rm.m.mod.rm.size) {
                            .byte => .sb,
                            .hword => .sh,
                            .word => .sw,
                            .dword => .sd,
                        };

                        try lower.emit(tag, &.{
                            .{ .reg = frame_loc.base },
                            .{ .reg = rm.r },
                            .{ .imm = Immediate.s(frame_loc.disp) },
                        });
                    },
                    else => unreachable,
                }
            },

            // mv rd, rs  ==  addi rd, rs, 0
            .pseudo_mv => {
                const rr = inst.data.rr;
                try lower.emit(.addi, &.{
                    .{ .reg = rr.rd },
                    .{ .reg = rr.rs },
                    .{ .imm = Immediate.s(0) },
                });
            },

            // ret  ==  jalr zero, 0(ra)
            .pseudo_ret => {
                try lower.emit(.jalr, &.{
                    .{ .reg = .zero },
                    .{ .reg = .ra },
                    .{ .imm = Immediate.s(0) },
                });
            },

            // Unconditional jump to another MIR instruction, resolved via
            // a relocation at emit time.
            .pseudo_j => {
                try lower.emit(.jal, &.{
                    .{ .reg = .zero },
                    .{ .imm = lower.reloc(.{ .inst = inst.data.inst }) },
                });
            },

            .pseudo_spill_regs => try lower.pushPopRegList(true, inst.data.reg_list),
            .pseudo_restore_regs => try lower.pushPopRegList(false, inst.data.reg_list),

            // Materialize a symbol address as a lui+addi pair; both
            // instructions are patched by the relocation recorded on the
            // lui (it covers the following instruction as well), so no
            // second reloc is recorded for the addi.
            .pseudo_load_symbol => {
                const payload = inst.data.payload;
                const data = lower.mir.extraData(Mir.LoadSymbolPayload, payload).data;

                try lower.emit(.lui, &.{
                    .{ .reg = @enumFromInt(data.register) },
                    .{ .imm = lower.reloc(.{ .load_symbol_reloc = .{
                        .atom_index = data.atom_index,
                        .sym_index = data.sym_index,
                    } }) },
                });

                try lower.emit(.addi, &.{
                    .{ .reg = @enumFromInt(data.register) },
                    .{ .reg = @enumFromInt(data.register) },
                    .{ .imm = Immediate.s(0) },
                });
            },

            // lea rd, frame slot  ==  addi rd, base, disp
            .pseudo_lea_rm => {
                const rm = inst.data.rm;
                const frame = rm.m.toFrameLoc(lower.mir);

                try lower.emit(.addi, &.{
                    .{ .reg = rm.r },
                    .{ .reg = frame.base },
                    .{ .imm = Immediate.s(frame.disp) },
                });
            },

            // Materialize the comparison result (0 or 1) in rd.
            // NOTE(review): lt/lte use signed slt while gt/gte use unsigned
            // sltu — confirm whether signedness should instead follow the
            // compared operand type.
            .pseudo_compare => {
                const compare = inst.data.compare;
                const op = compare.op;
                const rd = compare.rd;
                const rs1 = compare.rs1;
                const rs2 = compare.rs2;

                switch (op) {
                    // rd = ((rs1 ^ rs2) <u 1), i.e. rs1 == rs2
                    .eq => {
                        try lower.emit(.xor, &.{
                            .{ .reg = rd },
                            .{ .reg = rs1 },
                            .{ .reg = rs2 },
                        });
                        try lower.emit(.sltiu, &.{
                            .{ .reg = rd },
                            .{ .reg = rd },
                            .{ .imm = Immediate.s(1) },
                        });
                    },
                    // rd = (0 <u (rs1 ^ rs2)), i.e. rs1 != rs2
                    .neq => {
                        try lower.emit(.xor, &.{
                            .{ .reg = rd },
                            .{ .reg = rs1 },
                            .{ .reg = rs2 },
                        });
                        try lower.emit(.sltu, &.{
                            .{ .reg = rd },
                            .{ .reg = .zero },
                            .{ .reg = rd },
                        });
                    },
                    // rd = (rs2 < rs1), i.e. rs1 > rs2.
                    // Bug fix: the operands were previously emitted in
                    // (rs1, rs2) order, which computed rs1 < rs2 instead —
                    // compare with the swapped-operand pattern in `.lte`.
                    .gt => {
                        try lower.emit(.sltu, &.{
                            .{ .reg = rd },
                            .{ .reg = rs2 },
                            .{ .reg = rs1 },
                        });
                    },
                    // rd = !(rs1 < rs2), i.e. rs1 >= rs2
                    .gte => {
                        try lower.emit(.sltu, &.{
                            .{ .reg = rd },
                            .{ .reg = rs1 },
                            .{ .reg = rs2 },
                        });
                        try lower.emit(.xori, &.{
                            .{ .reg = rd },
                            .{ .reg = rd },
                            .{ .imm = Immediate.s(1) },
                        });
                    },
                    // rd = (rs1 < rs2)
                    .lt => {
                        try lower.emit(.slt, &.{
                            .{ .reg = rd },
                            .{ .reg = rs1 },
                            .{ .reg = rs2 },
                        });
                    },
                    // rd = !(rs2 < rs1), i.e. rs1 <= rs2
                    .lte => {
                        try lower.emit(.slt, &.{
                            .{ .reg = rd },
                            .{ .reg = rs2 },
                            .{ .reg = rs1 },
                        });
                        try lower.emit(.xori, &.{
                            .{ .reg = rd },
                            .{ .reg = rd },
                            .{ .imm = Immediate.s(1) },
                        });
                    },
                }
            },

            // Boolean not via xori with 1; assumes rs holds 0 or 1.
            .pseudo_not => {
                const rr = inst.data.rr;

                try lower.emit(.xori, &.{
                    .{ .reg = rr.rd },
                    .{ .reg = rr.rs },
                    .{ .imm = Immediate.s(1) },
                });
            },

            else => return lower.fail("TODO lower: pseudo {s}", .{@tagName(inst.ops)}),
        },
    }

    return .{
        .insts = lower.result_insts[0..lower.result_insts_len],
        .relocs = lower.result_relocs[0..lower.result_relocs_len],
    };
}
/// Lowers a non-pseudo MIR instruction whose tag maps 1:1 onto an
/// assembler mnemonic, translating its MIR operand payload (selected by
/// `inst.ops`) into encoder operands.
fn generic(lower: *Lower, inst: Mir.Inst) Error!void {
    const mnemonic = std.meta.stringToEnum(Encoding.Mnemonic, @tagName(inst.tag)) orelse {
        return lower.fail("generic inst name '{s}' with op {s} doesn't match with a mnemonic", .{
            @tagName(inst.tag),
            @tagName(inst.ops),
        });
    };
    try lower.emit(mnemonic, switch (inst.ops) {
        .none => &.{},
        // register + 20-bit immediate (U-type payload)
        .ri => &.{
            .{ .reg = inst.data.u_type.rd },
            .{ .imm = inst.data.u_type.imm20 },
        },
        // register pair
        .rr => &.{
            .{ .reg = inst.data.rr.rd },
            .{ .reg = inst.data.rr.rs },
        },
        // register, register, 12-bit immediate (I-type payload)
        .rri => &.{
            .{ .reg = inst.data.i_type.rd },
            .{ .reg = inst.data.i_type.rs1 },
            .{ .imm = inst.data.i_type.imm12 },
        },
        // branch: two registers plus a reloc against the target MIR inst
        .rr_inst => &.{
            .{ .reg = inst.data.b_type.rs1 },
            .{ .reg = inst.data.b_type.rs2 },
            .{ .imm = lower.reloc(.{ .inst = inst.data.b_type.inst }) },
        },
        // three registers (R-type payload)
        .rrr => &.{
            .{ .reg = inst.data.r_type.rd },
            .{ .reg = inst.data.r_type.rs1 },
            .{ .reg = inst.data.r_type.rs2 },
        },
        else => return lower.fail("TODO: generic lower ops {s}", .{@tagName(inst.ops)}),
    });
}
/// Encodes `mnemonic` with `ops` and appends the resulting instruction to
/// the fixed-size result buffer.
fn emit(lower: *Lower, mnemonic: Encoding.Mnemonic, ops: []const Instruction.Operand) !void {
    const slot = lower.result_insts_len;
    lower.result_insts[slot] = try Instruction.new(mnemonic, ops);
    lower.result_insts_len = slot + 1;
}
/// Records a relocation against the instruction about to be emitted (the
/// one at the current `result_insts_len`) and returns a zero placeholder
/// immediate to stand in for the not-yet-known value.
fn reloc(lower: *Lower, target: Reloc.Target) Immediate {
    const slot = lower.result_relocs_len;
    lower.result_relocs[slot] = .{
        .lowered_inst_index = lower.result_insts_len,
        .target = target,
    };
    lower.result_relocs_len = slot + 1;
    return Immediate.s(0);
}
/// Emits stores (`spilling == true`) or loads (`spilling == false`) for
/// every register in `reg_list`, using consecutive 8-byte slots in the
/// spill frame.
fn pushPopRegList(lower: *Lower, comptime spilling: bool, reg_list: Mir.RegisterList) !void {
    // The spill-frame location is loop-invariant; look it up once instead
    // of on every iteration.
    const frame = lower.mir.frame_locs.get(@intFromEnum(bits.FrameIndex.spill_frame));
    var it = reg_list.iterator(.{ .direction = .forward });
    var reg_i: u31 = 0;
    while (it.next()) |i| {
        if (spilling) {
            try lower.emit(.sd, &.{
                .{ .reg = frame.base },
                .{ .reg = abi.callee_preserved_regs[i] },
                .{ .imm = Immediate.s(frame.disp + reg_i) },
            });
        } else {
            try lower.emit(.ld, &.{
                .{ .reg = abi.callee_preserved_regs[i] },
                .{ .reg = frame.base },
                .{ .imm = Immediate.s(frame.disp + reg_i) },
            });
        }
        // Each saved register occupies one 8-byte doubleword slot.
        reg_i += 8;
    }
}
/// Records a formatted lowering error on `lower` and returns
/// `error.LowerFail`; the stored message is surfaced by the caller.
pub fn fail(lower: *Lower, comptime format: []const u8, args: anytype) Error {
    @setCold(true); // error paths are expected to be rare
    assert(lower.err_msg == null); // only one error may be recorded per lowering
    lower.err_msg = try ErrorMsg.create(lower.allocator, lower.src_loc, format, args);
    return error.LowerFail;
}
const Lower = @This();
const abi = @import("abi.zig");
const assert = std.debug.assert;
const bits = @import("bits.zig");
const encoder = @import("encoder.zig");
const link = @import("../../link.zig");
const Encoding = @import("Encoding.zig");
const std = @import("std");
const log = std.log.scoped(.lower);
const Air = @import("../../Air.zig");
const Allocator = std.mem.Allocator;
const ErrorMsg = Module.ErrorMsg;
const Mir = @import("Mir.zig");
const Module = @import("../../Module.zig");
const Instruction = encoder.Instruction;
const Immediate = bits.Immediate;

View File

@ -6,50 +6,111 @@
//! The main purpose of MIR is to postpone the assignment of offsets until Isel,
//! so that, for example, the smaller encodings of jump instructions can be used.
const Mir = @This();
const std = @import("std");
const builtin = @import("builtin");
const assert = std.debug.assert;
const bits = @import("bits.zig");
const Register = bits.Register;
instructions: std.MultiArrayList(Inst).Slice,
/// The meaning of this data is determined by `Inst.Tag` value.
extra: []const u32,
frame_locs: std.MultiArrayList(FrameLoc).Slice,
pub const Inst = struct {
tag: Tag,
/// The meaning of this depends on `tag`.
data: Data,
pub const Tag = enum(u16) {
add,
addi,
/// Pseudo-instruction: End of prologue
dbg_prologue_end,
/// Pseudo-instruction: Beginning of epilogue
dbg_epilogue_begin,
/// Pseudo-instruction: Update debug line
dbg_line,
unimp,
ebreak,
ecall,
jalr,
ld,
lui,
mv,
nop,
ret,
sd,
sub,
};
ops: Ops,
/// The position of an MIR instruction within the `Mir` instructions array.
pub const Index = u32;
pub const Tag = enum(u16) {
/// Add immediate. Uses i_type payload.
addi,
/// Add immediate and produce a sign-extended result.
///
/// Uses i-type payload.
addiw,
jalr,
lui,
mv,
@"and",
xor,
ebreak,
ecall,
unimp,
/// OR instruction. Uses r_type payload.
@"or",
/// Addition
add,
/// Subtraction
sub,
/// Multiply, uses r_type. Needs the M extension.
mul,
/// Absolute Value, uses i_type payload.
abs,
sltu,
slt,
/// Immediate Logical Right Shift, uses i_type payload
srli,
/// Immediate Logical Left Shift, uses i_type payload
slli,
/// Immediate Arithmetic Right Shift, uses i_type payload.
srai,
/// Register Logical Left Shift, uses r_type payload
sllw,
/// Register Logical Right Shift, uses r_type payload
srlw,
/// Jumps, but stores the address of the instruction following the
/// jump in `rd`.
///
/// Uses j_type payload.
jal,
/// Immediate AND, uses i_type payload
andi,
/// Branch if equal, Uses b_type
beq,
/// Branch if not equal, Uses b_type
bne,
/// Boolean NOT, Uses rr payload
not,
/// Generates a NO-OP, uses nop payload
nop,
/// Load double (64 bits), uses i_type payload
ld,
/// Load word (32 bits), uses i_type payload
lw,
/// Load half (16 bits), uses i_type payload
lh,
/// Load byte (8 bits), uses i_type payload
lb,
/// Store double (64 bits), uses s_type payload
sd,
/// Store word (32 bits), uses s_type payload
sw,
/// Store half (16 bits), uses s_type payload
sh,
/// Store byte (8 bits), uses s_type payload
sb,
/// A pseudo-instruction. Used for anything that isn't 1:1 with an
/// assembly instruction.
pseudo,
};
/// All instructions have a 4-byte payload, which is contained within
/// this union. `Tag` determines which union field is active, as well as
/// this union. `Ops` determines which union field is active, as well as
/// how to interpret the data within.
pub const Data = union {
/// No additional data
@ -60,18 +121,69 @@ pub const Inst = struct {
///
/// Used by e.g. b
inst: Index,
/// A 16-bit immediate value.
///
/// Used by e.g. svc
imm16: u16,
/// Index into `extra`. Meaning of what can be found there is context-dependent.
///
/// Used by e.g. load_memory
payload: u32,
r_type: struct {
rd: Register,
rs1: Register,
rs2: Register,
},
i_type: struct {
rd: Register,
rs1: Register,
imm12: Immediate,
},
s_type: struct {
rs1: Register,
rs2: Register,
imm5: Immediate,
imm7: Immediate,
},
b_type: struct {
rs1: Register,
rs2: Register,
inst: Inst.Index,
},
u_type: struct {
rd: Register,
imm20: Immediate,
},
j_type: struct {
rd: Register,
inst: Inst.Index,
},
/// Debug info: line and column
///
/// Used by e.g. pseudo_dbg_line
pseudo_dbg_line_column: struct {
line: u32,
column: u32,
},
// Custom types to be lowered
/// Register + Memory
rm: struct {
r: Register,
m: Memory,
},
reg_list: Mir.RegisterList,
/// A register
///
/// Used by e.g. blr
reg: Register,
/// Two registers
///
/// Used by e.g. mv
@ -79,53 +191,136 @@ pub const Inst = struct {
rd: Register,
rs: Register,
},
/// I-Type
///
/// Used by e.g. jalr
i_type: struct {
rd: Register,
rs1: Register,
imm12: i12,
},
/// R-Type
///
/// Used by e.g. add
r_type: struct {
compare: struct {
rd: Register,
rs1: Register,
rs2: Register,
},
/// U-Type
///
/// Used by e.g. lui
u_type: struct {
rd: Register,
imm20: i20,
},
/// Debug info: line and column
///
/// Used by e.g. dbg_line
dbg_line_column: struct {
line: u32,
column: u32,
op: enum {
eq,
neq,
gt,
gte,
lt,
lte,
},
},
};
pub const Ops = enum {
/// No data associated with this instruction (only mnemonic is used).
none,
/// Two registers
rr,
/// Three registers
rrr,
/// Two registers + immediate, uses the i_type payload.
rri,
/// Two registers + Two Immediates
rrii,
/// Two registers + another instruction.
rr_inst,
/// Register + Memory
rm,
/// Register + Immediate
ri,
/// Another instruction.
inst,
/// Pseudo-instruction that will generate a backpatched
/// function prologue.
pseudo_prologue,
/// Pseudo-instruction that will generate a backpatched
/// function epilogue
pseudo_epilogue,
/// Pseudo-instruction: End of prologue
pseudo_dbg_prologue_end,
/// Pseudo-instruction: Beginning of epilogue
pseudo_dbg_epilogue_begin,
/// Pseudo-instruction: Update debug line
pseudo_dbg_line_column,
/// Pseudo-instruction that loads from memory into a register.
///
/// Uses `rm` payload.
pseudo_load_rm,
/// Pseudo-instruction that stores from a register into memory
///
/// Uses `rm` payload.
pseudo_store_rm,
/// Pseudo-instruction that loads the address of memory into a register.
///
/// Uses `rm` payload.
pseudo_lea_rm,
/// Shorthand for returning, aka jumping to ra register.
///
/// Uses nop payload.
pseudo_ret,
/// Jumps. Uses `inst` payload.
pseudo_j,
/// Dead inst, ignored by the emitter.
pseudo_dead,
/// Loads the address of a value that hasn't yet been allocated in memory.
///
/// uses the Mir.LoadSymbolPayload payload.
pseudo_load_symbol,
/// Moves the value of rs1 to rd.
///
/// uses the `rr` payload.
pseudo_mv,
pseudo_restore_regs,
pseudo_spill_regs,
pseudo_compare,
pseudo_not,
};
// Make sure we don't accidentally make instructions bigger than expected.
// Note that in safety builds, Zig is allowed to insert a secret field for safety checks.
// Note that in Debug builds, Zig is allowed to insert a secret field for safety checks.
// comptime {
// if (!std.debug.runtime_safety) {
// if (builtin.mode != .Debug) {
// assert(@sizeOf(Inst) == 8);
// }
// }
/// Formats an instruction as "Tag: <tag>, Ops: <ops>".
/// Only the empty format specifier is supported.
pub fn format(
    inst: Inst,
    comptime fmt: []const u8,
    options: std.fmt.FormatOptions,
    writer: anytype,
) !void {
    _ = options;
    assert(fmt.len == 0);
    const tag_name = @tagName(inst.tag);
    const ops_name = @tagName(inst.ops);
    try writer.print("Tag: {s}, Ops: {s}", .{ tag_name, ops_name });
}
};
/// Releases all memory owned by this `Mir` and poisons it against reuse.
pub fn deinit(mir: *Mir, gpa: std.mem.Allocator) void {
    gpa.free(mir.extra);
    mir.frame_locs.deinit(gpa);
    mir.instructions.deinit(gpa);
    mir.* = undefined;
}
/// A resolved stack location: a base register plus a byte displacement.
pub const FrameLoc = struct {
    /// Register the displacement is relative to.
    base: Register,
    /// Signed byte offset from `base`.
    disp: i32,
};
/// Returns the requested data, as well as the new index which is at the start of the
/// trailers for the object.
pub fn extraData(mir: Mir, comptime T: type, index: usize) struct { data: T, end: usize } {
@ -145,3 +340,59 @@ pub fn extraData(mir: Mir, comptime T: type, index: usize) struct { data: T, end
.end = i,
};
}
/// Trailing payload for `pseudo_load_symbol`: names the destination register
/// and the linker symbol whose address should be loaded.
pub const LoadSymbolPayload = struct {
    /// Destination register, stored as an integer (see `bits.Register`) —
    /// TODO confirm against the emitter's decoding.
    register: u32,
    /// Index of the containing atom in the linker.
    atom_index: u32,
    /// Index into the linker's symbol table.
    sym_index: u32,
};
/// Used in conjunction with payload to transfer a list of used registers in a compact manner.
pub const RegisterList = struct {
    bitset: BitSet = BitSet.initEmpty(),

    const BitSet = IntegerBitSet(32);
    const Self = @This();

    /// Returns the position of `reg` within `registers`.
    /// Asserts that `reg` is actually present in the list.
    fn getIndexForReg(registers: []const Register, reg: Register) BitSet.MaskInt {
        const wanted_id = reg.id();
        for (registers, 0..) |candidate, index| {
            if (candidate.id() == wanted_id) return @intCast(index);
        }
        unreachable; // register not in input register list!
    }

    /// Marks `reg` (looked up in `registers`) as used.
    pub fn push(self: *Self, registers: []const Register, reg: Register) void {
        self.bitset.set(getIndexForReg(registers, reg));
    }

    /// Reports whether `reg` (looked up in `registers`) has been pushed.
    pub fn isSet(self: Self, registers: []const Register, reg: Register) bool {
        return self.bitset.isSet(getIndexForReg(registers, reg));
    }

    /// Iterates the indices of all recorded registers.
    pub fn iterator(self: Self, comptime options: std.bit_set.IteratorOptions) BitSet.Iterator(options) {
        return self.bitset.iterator(options);
    }

    /// Number of registers recorded in the list.
    pub fn count(self: Self) i32 {
        return @intCast(self.bitset.count());
    }

    /// Total spill size in bytes, at 8 bytes per recorded register.
    pub fn size(self: Self) i32 {
        const register_count = self.bitset.count();
        return @intCast(register_count * 8);
    }
};
const Mir = @This();
const std = @import("std");
const builtin = @import("builtin");
const assert = std.debug.assert;
const bits = @import("bits.zig");
const Register = bits.Register;
const Immediate = bits.Immediate;
const Memory = bits.Memory;
const FrameIndex = bits.FrameIndex;
const FrameAddr = @import("CodeGen.zig").FrameAddr;
const IntegerBitSet = std.bit_set.IntegerBitSet;

View File

@ -3,9 +3,11 @@ const bits = @import("bits.zig");
const Register = bits.Register;
const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager;
const Type = @import("../../type.zig").Type;
const InternPool = @import("../../InternPool.zig");
const Module = @import("../../Module.zig");
const assert = std.debug.assert;
pub const Class = enum { memory, byval, integer, double_integer, fields };
pub const Class = enum { memory, byval, integer, double_integer, fields, none };
pub fn classifyType(ty: Type, mod: *Module) Class {
const target = mod.getTarget();
@ -91,11 +93,161 @@ pub fn classifyType(ty: Type, mod: *Module) Class {
}
}
/// Classifies how a value of type `ty` is passed/returned for the RISC-V
/// calling convention used by this backend.
/// There are a maximum of 8 possible return slots. Returned values are in
/// the beginning of the array; unused slots are filled with .none.
pub fn classifySystem(ty: Type, zcu: *Module) [8]Class {
    var result = [1]Class{.none} ** 8;
    // Canonical "pass in memory" answer: only the first slot is used.
    const memory_class = [_]Class{
        .memory, .none, .none, .none,
        .none,   .none, .none, .none,
    };
    switch (ty.zigTypeTag(zcu)) {
        .Bool, .Void, .NoReturn => {
            result[0] = .integer;
            return result;
        },
        .Pointer => switch (ty.ptrSize(zcu)) {
            // A slice is a (pointer, length) pair: two integer slots.
            .Slice => {
                result[0] = .integer;
                result[1] = .integer;
                return result;
            },
            else => {
                result[0] = .integer;
                return result;
            },
        },
        .Optional => {
            // Pointer-like optionals fit in a single integer slot.
            if (ty.isPtrLikeOptional(zcu)) {
                result[0] = .integer;
                return result;
            }
            // Other optionals occupy two integer slots.
            result[0] = .integer;
            result[1] = .integer;
            return result;
        },
        .Int, .Enum, .ErrorSet => {
            const int_bits = ty.intInfo(zcu).bits;
            if (int_bits <= 64) {
                result[0] = .integer;
                return result;
            }
            // 65..128-bit integers are split across two integer slots.
            if (int_bits <= 128) {
                result[0] = .integer;
                result[1] = .integer;
                return result;
            }
            unreachable; // support > 128 bit int arguments
        },
        .ErrorUnion => {
            const payload_ty = ty.errorUnionPayload(zcu);
            const payload_bits = payload_ty.bitSize(zcu);
            // the error union itself
            result[0] = .integer;
            // anyerror!void can fit into one register
            if (payload_bits == 0) return result;
            if (payload_bits <= 64) {
                result[1] = .integer;
                return result;
            }
            std.debug.panic("TODO: classifySystem ErrorUnion > 64 bit payload", .{});
        },
        .Struct => {
            const layout = ty.containerLayout(zcu);
            const ty_size = ty.abiSize(zcu);
            if (layout == .@"packed") {
                // Packed structs are integer-backed; at most 16 bytes supported.
                assert(ty_size <= 16);
                result[0] = .integer;
                if (ty_size > 8) result[1] = .integer;
                return result;
            }
            // Non-packed structs are currently always passed in memory.
            return memory_class;
        },
        else => |bad_ty| std.debug.panic("classifySystem {s}", .{@tagName(bad_ty)}),
    }
}
/// Recursively classifies the runtime fields of a (non-packed) struct into
/// `result`, merging each field's classes into the 8-byte-granular slots
/// starting at `byte_offset`. Packed nested structs are classified whole.
fn classifyStruct(
    result: *[8]Class,
    byte_offset: *u64,
    loaded_struct: InternPool.LoadedStructType,
    zcu: *Module,
) void {
    const ip = &zcu.intern_pool;
    var field_it = loaded_struct.iterateRuntimeOrder(ip);
    while (field_it.next()) |field_index| {
        const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
        const field_align = loaded_struct.fieldAlign(ip, field_index);
        // Advance to this field's aligned offset before classifying it.
        byte_offset.* = std.mem.alignForward(
            u64,
            byte_offset.*,
            field_align.toByteUnits() orelse field_ty.abiAlignment(zcu).toByteUnits().?,
        );
        if (zcu.typeToStruct(field_ty)) |field_loaded_struct| {
            if (field_loaded_struct.layout != .@"packed") {
                // Flatten non-packed nested structs into the same slot array.
                classifyStruct(result, byte_offset, field_loaded_struct, zcu);
                continue;
            }
        }
        // Classes of this field, trimmed at the first unused (.none) slot.
        const field_class = std.mem.sliceTo(&classifySystem(field_ty, zcu), .none);
        const field_size = field_ty.abiSize(zcu);
        // Merge the field's first class into the slot it lands in.
        combine: {
            const result_class = &result[@intCast(byte_offset.* / 8)];
            if (result_class.* == field_class[0]) {
                break :combine;
            }
            if (result_class.* == .none) {
                result_class.* = field_class[0];
                break :combine;
            }
            assert(field_class[0] != .none);
            // "If one of the classes is MEMORY, the result is the MEMORY class."
            if (result_class.* == .memory or field_class[0] == .memory) {
                result_class.* = .memory;
                break :combine;
            }
            // "If one of the classes is INTEGER, the result is the INTEGER."
            if (result_class.* == .integer or field_class[0] == .integer) {
                result_class.* = .integer;
                break :combine;
            }
            // Fallback: any remaining combination degrades to INTEGER.
            result_class.* = .integer;
        }
        // Remaining classes of a multi-slot field fill the following slots.
        @memcpy(result[@intCast(byte_offset.* / 8 + 1)..][0 .. field_class.len - 1], field_class[1..]);
        byte_offset.* += field_size;
    }
}
pub const callee_preserved_regs = [_]Register{
.s0, .s1, .s2, .s3, .s4, .s5, .s6, .s7, .s8, .s9, .s10, .s11,
// .s0 is omitted to be used as a frame pointer
.s1, .s2, .s3, .s4, .s5, .s6, .s7, .s8, .s9, .s10, .s11,
};
const allocatable_registers = callee_preserved_regs;
pub const function_arg_regs = [_]Register{
.a0, .a1, .a2, .a3, .a4, .a5, .a6, .a7,
};
pub const function_ret_regs = [_]Register{
.a0, .a1,
};
pub const temporary_regs = [_]Register{
.t0, .t1, .t2, .t3, .t4, .t5, .t6,
};
const allocatable_registers = callee_preserved_regs ++ function_arg_regs ++ temporary_regs;
pub const RegisterManager = RegisterManagerFn(@import("CodeGen.zig"), Register, &allocatable_registers);
// Register classes
@ -109,4 +261,31 @@ pub const RegisterClass = struct {
}, true);
break :blk set;
};
/// Function-argument registers (a0-a7) within the allocatable set.
pub const fa: RegisterBitSet = blk: {
    var set = RegisterBitSet.initEmpty();
    set.setRangeValue(.{
        .start = callee_preserved_regs.len,
        .end = callee_preserved_regs.len + function_arg_regs.len,
    }, true);
    break :blk set;
};
/// Function-return registers (a0-a1). This range overlaps the start of
/// `fa`, since the return registers are the first two argument registers.
pub const fr: RegisterBitSet = blk: {
    var set = RegisterBitSet.initEmpty();
    set.setRangeValue(.{
        .start = callee_preserved_regs.len,
        .end = callee_preserved_regs.len + function_ret_regs.len,
    }, true);
    break :blk set;
};
/// Temporary registers (t0-t6), laid out after the argument registers.
pub const tp: RegisterBitSet = blk: {
    var set = RegisterBitSet.initEmpty();
    set.setRangeValue(.{
        .start = callee_preserved_regs.len + function_arg_regs.len,
        .end = callee_preserved_regs.len + function_arg_regs.len + temporary_regs.len,
    }, true);
    break :blk set;
};
};

View File

@ -2,385 +2,149 @@ const std = @import("std");
const DW = std.dwarf;
const assert = std.debug.assert;
const testing = std.testing;
const Encoding = @import("Encoding.zig");
const Mir = @import("Mir.zig");
// TODO: this is only tagged to facilitate the monstrosity.
// Once packed structs work make it packed.
pub const Instruction = union(enum) {
R: packed struct {
opcode: u7,
rd: u5,
funct3: u3,
rs1: u5,
rs2: u5,
funct7: u7,
},
I: packed struct {
opcode: u7,
rd: u5,
funct3: u3,
rs1: u5,
imm0_11: u12,
},
S: packed struct {
opcode: u7,
imm0_4: u5,
funct3: u3,
rs1: u5,
rs2: u5,
imm5_11: u7,
},
B: packed struct {
opcode: u7,
imm11: u1,
imm1_4: u4,
funct3: u3,
rs1: u5,
rs2: u5,
imm5_10: u6,
imm12: u1,
},
U: packed struct {
opcode: u7,
rd: u5,
imm12_31: u20,
},
J: packed struct {
opcode: u7,
rd: u5,
imm12_19: u8,
imm11: u1,
imm1_10: u10,
imm20: u1,
},
pub const Memory = struct {
base: Base,
mod: Mod,
// TODO: once packed structs work we can remove this monstrosity.
pub fn toU32(self: Instruction) u32 {
return switch (self) {
.R => |v| @as(u32, @bitCast(v)),
.I => |v| @as(u32, @bitCast(v)),
.S => |v| @as(u32, @bitCast(v)),
.B => |v| @as(u32, @intCast(v.opcode)) + (@as(u32, @intCast(v.imm11)) << 7) + (@as(u32, @intCast(v.imm1_4)) << 8) + (@as(u32, @intCast(v.funct3)) << 12) + (@as(u32, @intCast(v.rs1)) << 15) + (@as(u32, @intCast(v.rs2)) << 20) + (@as(u32, @intCast(v.imm5_10)) << 25) + (@as(u32, @intCast(v.imm12)) << 31),
.U => |v| @as(u32, @bitCast(v)),
.J => |v| @as(u32, @bitCast(v)),
pub const Base = union(enum) {
reg: Register,
frame: FrameIndex,
reloc: Symbol,
};
pub const Mod = union(enum(u1)) {
rm: struct {
size: Size,
disp: i32 = 0,
},
off: i32,
};
/// Access width of a memory operand.
pub const Size = enum(u4) {
    /// Byte, 1 byte
    byte,
    /// Half word, 2 bytes
    hword,
    /// Word, 4 bytes
    word,
    /// Double word, 8 Bytes
    dword,

    /// Maps a byte count (1/2/4/8) to its `Size`; other counts are illegal.
    pub fn fromByteSize(n_bytes: u64) Size {
        return switch (n_bytes) {
            1 => .byte,
            2 => .hword,
            4 => .word,
            8 => .dword,
            else => unreachable,
        };
    }

    /// Maps a bit count (8/16/32/64) to its `Size`; other counts are illegal.
    pub fn fromBitSize(n_bits: u64) Size {
        return switch (n_bits) {
            8 => .byte,
            16 => .hword,
            32 => .word,
            64 => .dword,
            else => unreachable,
        };
    }

    /// Width of this size in bits: 8 << tag, relying on the tags being
    /// declared in order byte, hword, word, dword (values 0..3).
    pub fn bitSize(s: Size) u64 {
        return @as(u64, 8) << @intFromEnum(s);
    }
};
/// Asserts `mem` can be represented as a `FrameLoc`.
/// Resolves this memory operand into a concrete base register plus
/// displacement, looking frame indices up in `mir.frame_locs`.
pub fn toFrameLoc(mem: Memory, mir: Mir) Mir.FrameLoc {
    const disp: i32 = switch (mem.mod) {
        .off => |off| off,
        .rm => |rm| rm.disp,
    };
    return switch (mem.base) {
        .reg => |reg| .{
            .base = reg,
            .disp = disp,
        },
        .frame => |index| frame: {
            const base_loc = mir.frame_locs.get(@intFromEnum(index));
            break :frame .{
                .base = base_loc.base,
                .disp = base_loc.disp + disp,
            };
        },
        // Unallocated symbols have no frame location.
        .reloc => unreachable,
    };
}
};
pub const Immediate = union(enum) {
signed: i32,
unsigned: u32,
pub fn u(x: u64) Immediate {
return .{ .unsigned = x };
}
fn rType(op: u7, fn3: u3, fn7: u7, rd: Register, r1: Register, r2: Register) Instruction {
return Instruction{
.R = .{
.opcode = op,
.funct3 = fn3,
.funct7 = fn7,
.rd = rd.id(),
.rs1 = r1.id(),
.rs2 = r2.id(),
pub fn s(x: i32) Immediate {
return .{ .signed = x };
}
/// Returns the immediate sign-extended to 64 bits, asserting that the value
/// fits in `bit_size` bits. A `bit_size` of 1 is treated like 8.
pub fn asSigned(imm: Immediate, bit_size: u64) i64 {
    return switch (imm) {
        .signed => |x| switch (bit_size) {
            // @intCast asserts the value fits the narrower width.
            1, 8 => @as(i8, @intCast(x)),
            16 => @as(i16, @intCast(x)),
            32, 64 => x,
            else => unreachable,
        },
        .unsigned => |x| switch (bit_size) {
            // Reinterpret the (checked) low bits as signed at the requested
            // width; widening to i64 then sign-extends.
            1, 8 => @as(i8, @bitCast(@as(u8, @intCast(x)))),
            16 => @as(i16, @bitCast(@as(u16, @intCast(x)))),
            32 => @as(i32, @bitCast(@as(u32, @intCast(x)))),
            64 => @bitCast(x),
            else => unreachable,
        },
    };
}
// RISC-V is all signed all the time -- convert immediates to unsigned for processing
fn iType(op: u7, fn3: u3, rd: Register, r1: Register, imm: i12) Instruction {
const umm = @as(u12, @bitCast(imm));
return Instruction{
.I = .{
.opcode = op,
.funct3 = fn3,
.rd = rd.id(),
.rs1 = r1.id(),
.imm0_11 = umm,
/// Returns the immediate's bit pattern zero-extended to 64 bits, asserting
/// the value fits in `bit_size` bits. A `bit_size` of 1 is treated like 8.
pub fn asUnsigned(imm: Immediate, bit_size: u64) u64 {
    return switch (imm) {
        .signed => |x| switch (bit_size) {
            // Reinterpret the (checked) narrow value as its bit pattern.
            1, 8 => @as(u8, @bitCast(@as(i8, @intCast(x)))),
            16 => @as(u16, @bitCast(@as(i16, @intCast(x)))),
            // NOTE(review): for bit_size == 64 this yields the 32-bit pattern
            // of the signed immediate zero-extended, not sign-extended to 64
            // bits — confirm callers only rely on the low 32 bits here.
            32, 64 => @as(u32, @bitCast(x)),
            else => unreachable,
        },
        .unsigned => |x| switch (bit_size) {
            1, 8 => @as(u8, @intCast(x)),
            16 => @as(u16, @intCast(x)),
            32 => @as(u32, @intCast(x)),
            64 => x,
            else => unreachable,
        },
    };
}
fn sType(op: u7, fn3: u3, r1: Register, r2: Register, imm: i12) Instruction {
const umm = @as(u12, @bitCast(imm));
return Instruction{
.S = .{
.opcode = op,
.funct3 = fn3,
.rs1 = r1.id(),
.rs2 = r2.id(),
.imm0_4 = @as(u5, @truncate(umm)),
.imm5_11 = @as(u7, @truncate(umm >> 5)),
},
/// Returns the immediate's bit pattern as the unsigned integer type `T`,
/// asserting that the value fits in `T`'s bit width.
pub fn asBits(imm: Immediate, comptime T: type) T {
    const int_info = @typeInfo(T).Int;
    if (int_info.signedness != .unsigned) @compileError("Immediate.asBits needs unsigned T");
    return switch (imm) {
        // Narrow (checked) to a signed int of T's width, then reinterpret.
        .signed => |x| @bitCast(@as(std.meta.Int(.signed, int_info.bits), @intCast(x))),
        .unsigned => |x| @intCast(x),
    };
}
// Use significance value rather than bit value, same for J-type
// -- less burden on callsite, bonus semantic checking
/// Builds a B-type (conditional branch) instruction. `imm` is the signed
/// byte offset to the branch target; bit 0 must be zero (2-byte alignment).
fn bType(op: u7, fn3: u3, r1: Register, r2: Register, imm: i13) Instruction {
    const umm = @as(u13, @bitCast(imm));
    assert(umm % 2 == 0); // misaligned branch target
    return Instruction{
        .B = .{
            .opcode = op,
            .funct3 = fn3,
            .rs1 = r1.id(),
            .rs2 = r2.id(),
            // The immediate's bits are scattered across the encoding fields.
            .imm1_4 = @as(u4, @truncate(umm >> 1)),
            .imm5_10 = @as(u6, @truncate(umm >> 5)),
            .imm11 = @as(u1, @truncate(umm >> 11)),
            .imm12 = @as(u1, @truncate(umm >> 12)),
        },
    };
}
// We have to extract the 20 bits anyway -- let's not make it more painful
/// Builds a U-type instruction (e.g. lui/auipc): `imm` fills bits 12..31.
fn uType(op: u7, rd: Register, imm: i20) Instruction {
    const umm = @as(u20, @bitCast(imm));
    return Instruction{
        .U = .{
            .opcode = op,
            .rd = rd.id(),
            .imm12_31 = umm,
        },
    };
}
/// Builds a J-type (jal) instruction. `imm` is the signed byte offset to the
/// jump target; bit 0 must be zero (2-byte alignment).
fn jType(op: u7, rd: Register, imm: i21) Instruction {
    const umm = @as(u21, @bitCast(imm));
    assert(umm % 2 == 0); // misaligned jump target
    return Instruction{
        .J = .{
            .opcode = op,
            .rd = rd.id(),
            // The immediate's bits are scattered across the encoding fields.
            .imm1_10 = @as(u10, @truncate(umm >> 1)),
            .imm11 = @as(u1, @truncate(umm >> 11)),
            .imm12_19 = @as(u8, @truncate(umm >> 12)),
            .imm20 = @as(u1, @truncate(umm >> 20)),
        },
    };
}
// The meat and potatoes. Arguments are in the order in which they would appear in assembly code.
// Arithmetic/Logical, Register-Register
pub fn add(rd: Register, r1: Register, r2: Register) Instruction {
return rType(0b0110011, 0b000, 0b0000000, rd, r1, r2);
}
pub fn sub(rd: Register, r1: Register, r2: Register) Instruction {
return rType(0b0110011, 0b000, 0b0100000, rd, r1, r2);
}
pub fn @"and"(rd: Register, r1: Register, r2: Register) Instruction {
return rType(0b0110011, 0b111, 0b0000000, rd, r1, r2);
}
pub fn @"or"(rd: Register, r1: Register, r2: Register) Instruction {
return rType(0b0110011, 0b110, 0b0000000, rd, r1, r2);
}
pub fn xor(rd: Register, r1: Register, r2: Register) Instruction {
return rType(0b0110011, 0b100, 0b0000000, rd, r1, r2);
}
pub fn sll(rd: Register, r1: Register, r2: Register) Instruction {
return rType(0b0110011, 0b001, 0b0000000, rd, r1, r2);
}
pub fn srl(rd: Register, r1: Register, r2: Register) Instruction {
return rType(0b0110011, 0b101, 0b0000000, rd, r1, r2);
}
pub fn sra(rd: Register, r1: Register, r2: Register) Instruction {
return rType(0b0110011, 0b101, 0b0100000, rd, r1, r2);
}
pub fn slt(rd: Register, r1: Register, r2: Register) Instruction {
return rType(0b0110011, 0b010, 0b0000000, rd, r1, r2);
}
pub fn sltu(rd: Register, r1: Register, r2: Register) Instruction {
return rType(0b0110011, 0b011, 0b0000000, rd, r1, r2);
}
// Arithmetic/Logical, Register-Register (32-bit)
/// ADDW: 32-bit addition (RV64), with the result sign-extended to 64 bits.
pub fn addw(rd: Register, r1: Register, r2: Register) Instruction {
    // Fix: rType takes (opcode, funct3, funct7, rd, rs1, rs2); the funct7
    // argument (0b0000000 for ADDW) was missing, so this could not compile.
    return rType(0b0111011, 0b000, 0b0000000, rd, r1, r2);
}
pub fn subw(rd: Register, r1: Register, r2: Register) Instruction {
return rType(0b0111011, 0b000, 0b0100000, rd, r1, r2);
}
pub fn sllw(rd: Register, r1: Register, r2: Register) Instruction {
return rType(0b0111011, 0b001, 0b0000000, rd, r1, r2);
}
pub fn srlw(rd: Register, r1: Register, r2: Register) Instruction {
return rType(0b0111011, 0b101, 0b0000000, rd, r1, r2);
}
pub fn sraw(rd: Register, r1: Register, r2: Register) Instruction {
return rType(0b0111011, 0b101, 0b0100000, rd, r1, r2);
}
// Arithmetic/Logical, Register-Immediate
pub fn addi(rd: Register, r1: Register, imm: i12) Instruction {
return iType(0b0010011, 0b000, rd, r1, imm);
}
pub fn andi(rd: Register, r1: Register, imm: i12) Instruction {
return iType(0b0010011, 0b111, rd, r1, imm);
}
pub fn ori(rd: Register, r1: Register, imm: i12) Instruction {
return iType(0b0010011, 0b110, rd, r1, imm);
}
pub fn xori(rd: Register, r1: Register, imm: i12) Instruction {
return iType(0b0010011, 0b100, rd, r1, imm);
}
pub fn slli(rd: Register, r1: Register, shamt: u6) Instruction {
return iType(0b0010011, 0b001, rd, r1, shamt);
}
pub fn srli(rd: Register, r1: Register, shamt: u6) Instruction {
return iType(0b0010011, 0b101, rd, r1, shamt);
}
/// SRAI: shift right arithmetic immediate. Setting bit 10 of the I-type
/// immediate distinguishes the arithmetic from the logical right shift.
pub fn srai(rd: Register, r1: Register, shamt: u6) Instruction {
    return iType(0b0010011, 0b101, rd, r1, (1 << 10) + shamt);
}
pub fn slti(rd: Register, r1: Register, imm: i12) Instruction {
return iType(0b0010011, 0b010, rd, r1, imm);
}
pub fn sltiu(rd: Register, r1: Register, imm: u12) Instruction {
return iType(0b0010011, 0b011, rd, r1, @as(i12, @bitCast(imm)));
}
// Arithmetic/Logical, Register-Immediate (32-bit)
pub fn addiw(rd: Register, r1: Register, imm: i12) Instruction {
return iType(0b0011011, 0b000, rd, r1, imm);
}
pub fn slliw(rd: Register, r1: Register, shamt: u5) Instruction {
return iType(0b0011011, 0b001, rd, r1, shamt);
}
pub fn srliw(rd: Register, r1: Register, shamt: u5) Instruction {
return iType(0b0011011, 0b101, rd, r1, shamt);
}
pub fn sraiw(rd: Register, r1: Register, shamt: u5) Instruction {
return iType(0b0011011, 0b101, rd, r1, (1 << 10) + shamt);
}
// Upper Immediate
pub fn lui(rd: Register, imm: i20) Instruction {
return uType(0b0110111, rd, imm);
}
pub fn auipc(rd: Register, imm: i20) Instruction {
return uType(0b0010111, rd, imm);
}
// Load
pub fn ld(rd: Register, offset: i12, base: Register) Instruction {
return iType(0b0000011, 0b011, rd, base, offset);
}
pub fn lw(rd: Register, offset: i12, base: Register) Instruction {
return iType(0b0000011, 0b010, rd, base, offset);
}
pub fn lwu(rd: Register, offset: i12, base: Register) Instruction {
return iType(0b0000011, 0b110, rd, base, offset);
}
pub fn lh(rd: Register, offset: i12, base: Register) Instruction {
return iType(0b0000011, 0b001, rd, base, offset);
}
pub fn lhu(rd: Register, offset: i12, base: Register) Instruction {
return iType(0b0000011, 0b101, rd, base, offset);
}
pub fn lb(rd: Register, offset: i12, base: Register) Instruction {
return iType(0b0000011, 0b000, rd, base, offset);
}
pub fn lbu(rd: Register, offset: i12, base: Register) Instruction {
return iType(0b0000011, 0b100, rd, base, offset);
}
// Store
pub fn sd(rs: Register, offset: i12, base: Register) Instruction {
return sType(0b0100011, 0b011, base, rs, offset);
}
pub fn sw(rs: Register, offset: i12, base: Register) Instruction {
return sType(0b0100011, 0b010, base, rs, offset);
}
pub fn sh(rs: Register, offset: i12, base: Register) Instruction {
return sType(0b0100011, 0b001, base, rs, offset);
}
pub fn sb(rs: Register, offset: i12, base: Register) Instruction {
return sType(0b0100011, 0b000, base, rs, offset);
}
// Fence
// TODO: implement fence
// Branch
pub fn beq(r1: Register, r2: Register, offset: i13) Instruction {
return bType(0b1100011, 0b000, r1, r2, offset);
}
pub fn bne(r1: Register, r2: Register, offset: i13) Instruction {
return bType(0b1100011, 0b001, r1, r2, offset);
}
pub fn blt(r1: Register, r2: Register, offset: i13) Instruction {
return bType(0b1100011, 0b100, r1, r2, offset);
}
pub fn bge(r1: Register, r2: Register, offset: i13) Instruction {
return bType(0b1100011, 0b101, r1, r2, offset);
}
pub fn bltu(r1: Register, r2: Register, offset: i13) Instruction {
return bType(0b1100011, 0b110, r1, r2, offset);
}
pub fn bgeu(r1: Register, r2: Register, offset: i13) Instruction {
return bType(0b1100011, 0b111, r1, r2, offset);
}
// Jump
pub fn jal(link: Register, offset: i21) Instruction {
return jType(0b1101111, link, offset);
}
pub fn jalr(link: Register, offset: i12, base: Register) Instruction {
return iType(0b1100111, 0b000, link, base, offset);
}
// System
pub const ecall = iType(0b1110011, 0b000, .zero, .zero, 0x000);
pub const ebreak = iType(0b1110011, 0b000, .zero, .zero, 0x001);
pub const unimp = iType(0, 0, .zero, .zero, 0);
};
pub const Register = enum(u6) {
@ -404,50 +168,63 @@ pub const Register = enum(u6) {
t3, t4, t5, t6, // caller saved
// zig fmt: on
/// Returns the unique 4-bit ID of this register which is used in
/// Returns the unique 5-bit ID of this register which is used in
/// the machine code
pub fn id(self: Register) u5 {
return @as(u5, @truncate(@intFromEnum(self)));
}
pub fn dwarfLocOp(reg: Register) u8 {
return @as(u8, reg.id()) + DW.OP.reg0;
return @as(u8, reg.id());
}
};
// zig fmt: on
pub const FrameIndex = enum(u32) {
/// This index refers to the return address.
ret_addr,
/// This index refers to the frame pointer.
base_ptr,
/// This index refers to the entire stack frame.
stack_frame,
/// This index refers to where in the stack frame the args are spilled to.
args_frame,
/// This index refers to a frame dedicated to setting up args for functions called
/// in this function. Useful for aligning args separately.
call_frame,
/// This index refers to the frame where callee-saved registers are spilled and
/// restored from.
spill_frame,
/// Other indices are used for local variable stack slots
_,
test "serialize instructions" {
const Testcase = struct {
inst: Instruction,
expected: u32,
};
pub const named_count = @typeInfo(FrameIndex).Enum.fields.len;
const testcases = [_]Testcase{
.{ // add t6, zero, zero
.inst = Instruction.add(.t6, .zero, .zero),
.expected = 0b0000000_00000_00000_000_11111_0110011,
},
.{ // sd s0, 0x7f(s0)
.inst = Instruction.sd(.s0, 0x7f, .s0),
.expected = 0b0000011_01000_01000_011_11111_0100011,
},
.{ // bne s0, s1, 0x42
.inst = Instruction.bne(.s0, .s1, 0x42),
.expected = 0b0_000010_01001_01000_001_0001_0_1100011,
},
.{ // j 0x1a
.inst = Instruction.jal(.zero, 0x1a),
.expected = 0b0_0000001101_0_00000000_00000_1101111,
},
.{ // ebreak
.inst = Instruction.ebreak,
.expected = 0b000000000001_00000_000_00000_1110011,
},
};
for (testcases) |case| {
const actual = case.inst.toU32();
try testing.expectEqual(case.expected, actual);
pub fn isNamed(fi: FrameIndex) bool {
return @intFromEnum(fi) < named_count;
}
}
/// Formats a frame index as `FrameIndex.<name>` for named indices, or as
/// `FrameIndex(<n>)` for anonymous local stack slots.
pub fn format(
    fi: FrameIndex,
    comptime fmt: []const u8,
    options: std.fmt.FormatOptions,
    writer: anytype,
) @TypeOf(writer).Error!void {
    try writer.writeAll("FrameIndex");
    if (fi.isNamed()) {
        try writer.writeByte('.');
        try writer.writeAll(@tagName(fi));
    } else {
        try writer.writeByte('(');
        // Forward the caller's format spec for the numeric index.
        try std.fmt.formatType(@intFromEnum(fi), fmt, options, writer, 0);
        try writer.writeByte(')');
    }
}
};
/// A reference to a linker symbol whose address has not yet been allocated
/// in virtual memory; resolved later via a relocation.
pub const Symbol = struct {
    /// Index of the containing atom.
    atom_index: u32,
    /// Index into the linker's symbol table.
    sym_index: u32,
};

View File

@ -0,0 +1,49 @@
/// A fully specified machine instruction: a resolved encoding plus up to
/// three operands.
pub const Instruction = struct {
    encoding: Encoding,
    ops: [3]Operand = .{.none} ** 3,

    /// One operand of an instruction; `.none` marks an unused slot.
    pub const Operand = union(enum) {
        none,
        reg: Register,
        mem: Memory,
        imm: Immediate,
    };

    /// Looks up the encoding matching `mnemonic` and `ops` and returns the
    /// assembled instruction; logs and errors when no encoding fits.
    /// NOTE(review): the log line defensively prints a 4th operand, but
    /// `result_ops` holds only 3 — `ops.len > 3` would trip the @memcpy
    /// length check below; confirm callers never pass more than 3.
    pub fn new(mnemonic: Encoding.Mnemonic, ops: []const Operand) !Instruction {
        const encoding = (try Encoding.findByMnemonic(mnemonic, ops)) orelse {
            std.log.err("no encoding found for: {s} {s} {s} {s} {s}", .{
                @tagName(mnemonic),
                @tagName(if (ops.len > 0) ops[0] else .none),
                @tagName(if (ops.len > 1) ops[1] else .none),
                @tagName(if (ops.len > 2) ops[2] else .none),
                @tagName(if (ops.len > 3) ops[3] else .none),
            });
            return error.InvalidInstruction;
        };
        var result_ops: [3]Operand = .{.none} ** 3;
        @memcpy(result_ops[0..ops.len], ops);
        return .{
            .encoding = encoding,
            .ops = result_ops,
        };
    }

    /// Writes the instruction's 4-byte little-endian encoding to `writer`.
    pub fn encode(inst: Instruction, writer: anytype) !void {
        try writer.writeInt(u32, inst.encoding.data.toU32(), .little);
    }
};
const std = @import("std");
const Lower = @import("Lower.zig");
const Mir = @import("Mir.zig");
const bits = @import("bits.zig");
const Encoding = @import("Encoding.zig");
const Register = bits.Register;
const Memory = bits.Memory;
const Immediate = bits.Immediate;
const log = std.log.scoped(.encode);

View File

@ -11132,6 +11132,7 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Bu
}
return o.builder.structType(.normal, types[0..types_len]);
},
.none => unreachable,
}
},
// TODO investigate C ABI for other architectures
@ -11389,6 +11390,7 @@ const ParamTypeIterator = struct {
it.llvm_index += it.types_len - 1;
return .multiple_llvm_types;
},
.none => unreachable,
}
},
// TODO investigate C ABI for other architectures

View File

@ -6409,6 +6409,8 @@ const RelaSectionTable = std.AutoArrayHashMapUnmanaged(u32, RelaSection);
// TODO: add comptime check we don't clobber any reloc for any ISA
pub const R_ZIG_GOT32: u32 = 0xff00;
pub const R_ZIG_GOTPCREL: u32 = 0xff01;
pub const R_ZIG_GOT_HI20: u32 = 0xff02;
pub const R_ZIG_GOT_LO12: u32 = 0xff03;
fn defaultEntrySymbolName(cpu_arch: std.Target.Cpu.Arch) []const u8 {
return switch (cpu_arch) {

View File

@ -2025,7 +2025,15 @@ const riscv = struct {
.SUB32,
=> {},
else => try atom.reportUnhandledRelocError(rel, elf_file),
else => |x| switch (@intFromEnum(x)) {
Elf.R_ZIG_GOT_HI20,
Elf.R_ZIG_GOT_LO12,
=> {
assert(symbol.flags.has_zig_got);
},
else => try atom.reportUnhandledRelocError(rel, elf_file),
},
}
}
@ -2046,7 +2054,6 @@ const riscv = struct {
const P, const A, const S, const GOT, const G, const TP, const DTP, const ZIG_GOT = args;
_ = TP;
_ = DTP;
_ = ZIG_GOT;
switch (r_type) {
.NONE => unreachable,
@ -2136,7 +2143,22 @@ const riscv = struct {
}
},
else => try atom.reportUnhandledRelocError(rel, elf_file),
else => |x| switch (@intFromEnum(x)) {
// Zig custom relocations
Elf.R_ZIG_GOT_HI20 => {
assert(target.flags.has_zig_got);
const disp: u32 = @bitCast(math.cast(i32, G + ZIG_GOT + A) orelse return error.Overflow);
riscv_util.writeInstU(code[r_offset..][0..4], disp);
},
Elf.R_ZIG_GOT_LO12 => {
assert(target.flags.has_zig_got);
const value: u32 = @bitCast(math.cast(i32, G + ZIG_GOT + A) orelse return error.Overflow);
riscv_util.writeInstI(code[r_offset..][0..4], value);
},
else => try atom.reportUnhandledRelocError(rel, elf_file),
},
}
}

View File

@ -25,38 +25,66 @@ pub fn writeAddend(
}
pub fn writeInstU(code: *[4]u8, value: u32) void {
var inst = Instruction{
var data = Encoding.Data{
.U = mem.bytesToValue(std.meta.TagPayload(
Instruction,
Instruction.U,
Encoding.Data,
Encoding.Data.U,
), code),
};
const compensated: u32 = @bitCast(@as(i32, @bitCast(value)) + 0x800);
inst.U.imm12_31 = bitSlice(compensated, 31, 12);
mem.writeInt(u32, code, inst.toU32(), .little);
data.U.imm12_31 = bitSlice(compensated, 31, 12);
mem.writeInt(u32, code, data.toU32(), .little);
}
pub fn writeInstI(code: *[4]u8, value: u32) void {
var inst = Instruction{
var data = Encoding.Data{
.I = mem.bytesToValue(std.meta.TagPayload(
Instruction,
Instruction.I,
Encoding.Data,
Encoding.Data.I,
), code),
};
inst.I.imm0_11 = bitSlice(value, 11, 0);
mem.writeInt(u32, code, inst.toU32(), .little);
data.I.imm0_11 = bitSlice(value, 11, 0);
mem.writeInt(u32, code, data.toU32(), .little);
}
pub fn writeInstS(code: *[4]u8, value: u32) void {
var inst = Instruction{
var data = Encoding.Data{
.S = mem.bytesToValue(std.meta.TagPayload(
Instruction,
Instruction.S,
Encoding.Data,
Encoding.Data.S,
), code),
};
inst.S.imm0_4 = bitSlice(value, 4, 0);
inst.S.imm5_11 = bitSlice(value, 11, 5);
mem.writeInt(u32, code, inst.toU32(), .little);
data.S.imm0_4 = bitSlice(value, 4, 0);
data.S.imm5_11 = bitSlice(value, 11, 5);
mem.writeInt(u32, code, data.toU32(), .little);
}
/// Patches the J-type immediate fields of the 4-byte instruction at `code`
/// in place with `value`, rewriting the word little-endian.
pub fn writeInstJ(code: *[4]u8, value: u32) void {
    const Payload = std.meta.TagPayload(
        Encoding.Data,
        Encoding.Data.J,
    );
    var encoded = Encoding.Data{ .J = mem.bytesToValue(Payload, code) };
    // The J-type immediate is scattered across four fields of the
    // instruction word; slice each group of bits out of `value`.
    encoded.J.imm20 = bitSlice(value, 20, 20);
    encoded.J.imm12_19 = bitSlice(value, 19, 12);
    encoded.J.imm11 = bitSlice(value, 11, 11);
    encoded.J.imm1_10 = bitSlice(value, 10, 1);
    mem.writeInt(u32, code, encoded.toU32(), .little);
}
/// Patches the B-type immediate fields of the 4-byte instruction at `code`
/// in place with `value`, rewriting the word little-endian.
pub fn writeInstB(code: *[4]u8, value: u32) void {
    const Payload = std.meta.TagPayload(
        Encoding.Data,
        Encoding.Data.B,
    );
    var encoded = Encoding.Data{ .B = mem.bytesToValue(Payload, code) };
    // The B-type immediate is scattered across four fields of the
    // instruction word; slice each group of bits out of `value`.
    encoded.B.imm12 = bitSlice(value, 12, 12);
    encoded.B.imm11 = bitSlice(value, 11, 11);
    encoded.B.imm5_10 = bitSlice(value, 10, 5);
    encoded.B.imm1_4 = bitSlice(value, 4, 1);
    mem.writeInt(u32, code, encoded.toU32(), .little);
}
fn bitSlice(
@ -67,8 +95,9 @@ fn bitSlice(
return @truncate((value >> low) & (1 << (high - low + 1)) - 1);
}
const bits = @import("../arch/riscv64/bits.zig");
const encoder = @import("../arch/riscv64/encoder.zig");
const Encoding = @import("../arch/riscv64/Encoding.zig");
const mem = std.mem;
const std = @import("std");
pub const Instruction = bits.Instruction;
pub const Instruction = encoder.Instruction;

View File

@ -102,7 +102,7 @@ pub fn RegisterManager(
}
const OptionalIndex = std.math.IntFittingRange(0, set.len);
comptime var map = [1]OptionalIndex{set.len} ** (max_id + 1 - min_id);
comptime var map = [1]OptionalIndex{set.len} ** (max_id - min_id + 1);
inline for (set, 0..) |elem, elem_index| map[comptime elem.id() - min_id] = elem_index;
const id_index = reg.id() -% min_id;
@ -360,6 +360,7 @@ pub fn RegisterManager(
} else self.getRegIndexAssumeFree(tracked_index, inst);
}
pub fn getReg(self: *Self, reg: Register, inst: ?Air.Inst.Index) AllocateRegistersError!void {
log.debug("getting reg: {}", .{reg});
return self.getRegIndex(indexOfRegIntoTracked(reg) orelse return, inst);
}
pub fn getKnownReg(

View File

@ -507,7 +507,7 @@ pub fn zigBackend(target: std.Target, use_llvm: bool) std.builtin.CompilerBacken
if (use_llvm) return .stage2_llvm;
if (target.ofmt == .c) return .stage2_c;
return switch (target.cpu.arch) {
.wasm32, .wasm64 => std.builtin.CompilerBackend.stage2_wasm,
.wasm32, .wasm64 => .stage2_wasm,
.arm, .armeb, .thumb, .thumbeb => .stage2_arm,
.x86_64 => .stage2_x86_64,
.x86 => .stage2_x86,
@ -526,7 +526,7 @@ pub fn backendSupportsFeature(
feature: Feature,
) bool {
return switch (feature) {
.panic_fn => ofmt == .c or use_llvm or cpu_arch == .x86_64,
.panic_fn => ofmt == .c or use_llvm or cpu_arch == .x86_64 or cpu_arch == .riscv64,
.panic_unwrap_error => ofmt == .c or use_llvm,
.safety_check_formatted => ofmt == .c or use_llvm,
.error_return_trace => use_llvm,

View File

@ -5,8 +5,8 @@ const expect = std.testing.expect;
test "@abs integers" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try comptime testAbsIntegers();
try testAbsIntegers();
@ -51,7 +51,6 @@ fn testAbsIntegers() !void {
test "@abs unsigned integers" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try comptime testAbsUnsignedIntegers();
@ -92,9 +91,9 @@ fn testAbsUnsignedIntegers() !void {
test "@abs floats" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try comptime testAbsFloats(f16);
try testAbsFloats(f16);
@ -151,8 +150,8 @@ test "@abs int vectors" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try comptime testAbsIntVectors(1);
try testAbsIntVectors(1);
@ -218,8 +217,8 @@ test "@abs unsigned int vectors" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try comptime testAbsUnsignedIntVectors(1);
try testAbsUnsignedIntVectors(1);
@ -277,9 +276,9 @@ test "@abs float vectors" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
// https://github.com/ziglang/zig/issues/12827
if (builtin.zig_backend == .stage2_llvm and

View File

@ -16,6 +16,7 @@ test "global variable alignment" {
}
test "large alignment of local constant" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // flaky
@ -25,6 +26,7 @@ test "large alignment of local constant" {
}
test "slicing array of length 1 can not assume runtime index is always zero" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // flaky
@ -52,6 +54,7 @@ fn addUnaligned(a: *align(1) const u32, b: *align(1) const u32) u32 {
}
test "@alignCast pointers" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
var x: u32 align(4) = 1;
expectsOnly1(&x);
try expect(x == 2);
@ -235,6 +238,7 @@ fn addUnalignedSlice(a: []align(1) const u32, b: []align(1) const u32) u32 {
}
test "specifying alignment allows pointer cast" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -247,6 +251,7 @@ fn testBytesAlign(b: u8) !void {
}
test "@alignCast slices" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -265,6 +270,7 @@ fn sliceExpects4(slice: []align(4) u32) void {
}
test "return error union with 128-bit integer" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -277,6 +283,7 @@ fn give() anyerror!u128 {
}
test "page aligned array on stack" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -305,6 +312,7 @@ test "function alignment" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
// function alignment is a compile error on wasm32/wasm64
if (native_arch == .wasm32 or native_arch == .wasm64) return error.SkipZigTest;
@ -418,6 +426,7 @@ test "function callconv expression depends on generic parameter" {
}
test "runtime-known array index has best alignment possible" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
// take full advantage of over-alignment
@ -478,6 +487,7 @@ const DefaultAligned = struct {
};
test "read 128-bit field from default aligned struct in stack memory" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -497,6 +507,7 @@ var default_aligned_global = DefaultAligned{
};
test "read 128-bit field from default aligned struct in global memory" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -506,6 +517,7 @@ test "read 128-bit field from default aligned struct in global memory" {
}
test "struct field explicit alignment" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -550,6 +562,7 @@ test "align(@alignOf(T)) T does not force resolution of T" {
}
test "align(N) on functions" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -595,6 +608,7 @@ test "comptime alloc alignment" {
}
test "@alignCast null" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -610,6 +624,7 @@ test "alignment of slice element" {
}
test "sub-aligned pointer field access" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
@ -658,6 +673,7 @@ test "alignment of zero-bit types is respected" {
}
test "zero-bit fields in extern struct pad fields appropriately" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;

View File

@ -29,6 +29,8 @@ test "comparison of @alignOf(T) against zero" {
}
test "correct alignment for elements and slices of aligned array" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var buf: [1024]u8 align(64) = undefined;
var start: usize = 1;
var end: usize = undefined;

View File

@ -22,6 +22,7 @@ test "arrays" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var array: [5]u32 = undefined;
@ -49,6 +50,7 @@ fn getArrayLen(a: []const u32) usize {
test "array concat with undefined" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -87,6 +89,7 @@ test "array concat with tuple" {
test "array init with concat" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const a = 'a';
var i: [4]u8 = [2]u8{ a, 'b' } ++ [2]u8{ 'c', 'd' };
@ -96,6 +99,7 @@ test "array init with concat" {
test "array init with mult" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const a = 'a';
var i: [8]u8 = [2]u8{ a, 'b' } ** 4;
@ -159,6 +163,7 @@ test "array len field" {
test "array with sentinels" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest(is_ct: bool) !void {
@ -226,6 +231,7 @@ test "nested arrays of integers" {
test "implicit comptime in array type size" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var arr: [plusOne(10)]bool = undefined;
_ = &arr;
@ -239,6 +245,7 @@ fn plusOne(x: u32) u32 {
test "single-item pointer to array indexing and slicing" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testSingleItemPtrArrayIndexSlice();
try comptime testSingleItemPtrArrayIndexSlice();
@ -264,6 +271,7 @@ fn doSomeMangling(array: *[4]u8) void {
test "implicit cast zero sized array ptr to slice" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
{
var b = "".*;
@ -302,6 +310,7 @@ const Str = struct { a: []Sub };
test "set global var array via slice embedded in struct" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var s = Str{ .a = s_array[0..] };
@ -338,6 +347,7 @@ test "read/write through global variable array of struct fields initialized via
test "implicit cast single-item pointer" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testImplicitCastSingleItemPtr();
try comptime testImplicitCastSingleItemPtr();
@ -368,6 +378,7 @@ test "comptime evaluating function that takes array by value" {
test "runtime initialize array elem and then implicit cast to slice" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var two: i32 = 2;
_ = &two;
@ -378,6 +389,7 @@ test "runtime initialize array elem and then implicit cast to slice" {
test "array literal as argument to function" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn entry(two: i32) !void {
@ -406,6 +418,7 @@ test "double nested array to const slice cast in array literal" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn entry(two: i32) !void {
@ -492,6 +505,7 @@ test "anonymous literal in array" {
test "access the null element of a null terminated array" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -511,6 +525,7 @@ test "type deduction for array subscript expression" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -530,6 +545,7 @@ test "sentinel element count towards the ABI size calculation" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -577,6 +593,7 @@ test "type coercion of anon struct literal to array" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
const U = union {
@ -611,6 +628,7 @@ test "type coercion of pointer to anon struct literal to pointer to array" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
const U = union {
@ -663,6 +681,7 @@ test "array init of container level array variable" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
var pair: [2]usize = .{ 1, 2 };
@ -683,6 +702,8 @@ test "array init of container level array variable" {
}
test "runtime initialized sentinel-terminated array literal" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var c: u16 = 300;
_ = &c;
const f = &[_:0x9999]u16{c};
@ -695,6 +716,7 @@ test "array of array agregate init" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var a = [1]u32{11} ** 10;
var b = [1][10]u32{a} ** 2;
@ -745,6 +767,7 @@ test "slicing array of zero-sized values" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var arr: [32]u0 = undefined;
for (arr[0..]) |*zero|
@ -754,6 +777,8 @@ test "slicing array of zero-sized values" {
}
test "array init with no result pointer sets field result types" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
// A function parameter has a result type, but no result pointer.
fn f(arr: [1]u32) u32 {
@ -768,6 +793,8 @@ test "array init with no result pointer sets field result types" {
}
test "runtime side-effects in comptime-known array init" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var side_effects: u4 = 0;
const init = [4]u4{
blk: {
@ -792,6 +819,8 @@ test "runtime side-effects in comptime-known array init" {
}
test "slice initialized through reference to anonymous array init provides result types" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var my_u32: u32 = 123;
var my_u64: u64 = 456;
_ = .{ &my_u32, &my_u64 };
@ -805,6 +834,8 @@ test "slice initialized through reference to anonymous array init provides resul
}
test "sentinel-terminated slice initialized through reference to anonymous array init provides result types" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var my_u32: u32 = 123;
var my_u64: u64 = 456;
_ = .{ &my_u32, &my_u64 };
@ -818,6 +849,8 @@ test "sentinel-terminated slice initialized through reference to anonymous array
}
test "many-item pointer initialized through reference to anonymous array init provides result types" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var my_u32: u32 = 123;
var my_u64: u64 = 456;
_ = .{ &my_u32, &my_u64 };
@ -834,6 +867,8 @@ test "many-item pointer initialized through reference to anonymous array init pr
}
test "many-item sentinel-terminated pointer initialized through reference to anonymous array init provides result types" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var my_u32: u32 = 123;
var my_u64: u64 = 456;
_ = .{ &my_u32, &my_u64 };
@ -851,6 +886,8 @@ test "many-item sentinel-terminated pointer initialized through reference to ano
}
test "pointer to array initialized through reference to anonymous array init provides result types" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var my_u32: u32 = 123;
var my_u64: u64 = 456;
_ = .{ &my_u32, &my_u64 };
@ -864,6 +901,8 @@ test "pointer to array initialized through reference to anonymous array init pro
}
test "pointer to sentinel-terminated array initialized through reference to anonymous array init provides result types" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var my_u32: u32 = 123;
var my_u64: u64 = 456;
_ = .{ &my_u32, &my_u64 };
@ -890,6 +929,7 @@ test "copied array element doesn't alias source" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var x: [10][10]u32 = undefined;
@ -902,6 +942,7 @@ test "copied array element doesn't alias source" {
test "array initialized with string literal" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
a: u32,
@ -922,6 +963,7 @@ test "array initialized with string literal" {
test "array initialized with array with sentinel" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
a: u32,
@ -969,6 +1011,7 @@ test "accessing multidimensional global array at comptime" {
if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
const array = [_][]const []const u8{
@ -985,6 +1028,7 @@ test "union that needs padding bytes inside an array" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const B = union(enum) {
D: u8,
@ -1006,6 +1050,7 @@ test "union that needs padding bytes inside an array" {
test "runtime index of array of zero-bit values" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var runtime: struct { array: [1]void, index: usize } = undefined;
runtime = .{ .array = .{{}}, .index = 0 };

View File

@ -46,6 +46,7 @@ test "output constraint modifiers" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) return error.SkipZigTest; // MSVC doesn't support inline assembly
@ -69,6 +70,7 @@ test "alternative constraints" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) return error.SkipZigTest; // MSVC doesn't support inline assembly
@ -87,6 +89,7 @@ test "sized integer/float in asm input" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) return error.SkipZigTest; // MSVC doesn't support inline assembly
@ -137,6 +140,7 @@ test "struct/array/union types as input values" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) return error.SkipZigTest; // MSVC doesn't support inline assembly

View File

@ -15,6 +15,7 @@ test "cmpxchg" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testCmpxchg();
try comptime testCmpxchg();
@ -41,6 +42,7 @@ test "fence" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var x: i32 = 1234;
@fence(.seq_cst);
@ -52,6 +54,7 @@ test "atomicrmw and atomicload" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var data: u8 = 200;
try testAtomicRmw(&data);
@ -80,6 +83,7 @@ test "cmpxchg with ptr" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var data1: i32 = 1234;
var data2: i32 = 5678;
@ -105,6 +109,7 @@ test "cmpxchg with ignored result" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var x: i32 = 1234;
@ -149,6 +154,7 @@ test "cmpxchg on a global variable" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64) {
// https://github.com/ziglang/zig/issues/10627
@ -164,6 +170,7 @@ test "atomic load and rmw with enum" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const Value = enum(u8) { a, b, c };
var x = Value.a;
@ -181,6 +188,7 @@ test "atomic store" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var x: u32 = 0;
@atomicStore(u32, &x, 1, .seq_cst);
@ -194,6 +202,7 @@ test "atomic store comptime" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try comptime testAtomicStore();
try testAtomicStore();
@ -212,6 +221,7 @@ test "atomicrmw with floats" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64) {
// https://github.com/ziglang/zig/issues/10627
@ -241,6 +251,7 @@ test "atomicrmw with ints" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch.isMIPS()) {
// https://github.com/ziglang/zig/issues/16846
@ -390,6 +401,7 @@ test "atomics with different types" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testAtomicsWithType(bool, true, false);
@ -419,6 +431,7 @@ test "return @atomicStore, using it as a void value" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
const A = struct {

View File

@ -16,6 +16,8 @@ test "empty function with comments" {
}
test "truncate" {
    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

    // Verify @truncate (via the helper) keeps only the low byte:
    // 0x10fd -> 0xfd, checked both at compile time and at runtime.
    comptime assert(testTruncate(0x10fd) == 0xfd);
    try expect(testTruncate(0x10fd) == 0xfd);
}
@ -25,6 +27,7 @@ fn testTruncate(x: u32) u8 {
test "truncate to non-power-of-two integers" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testTrunc(u32, u1, 0b10101, 0b1);
try testTrunc(u32, u1, 0b10110, 0b0);
@ -42,6 +45,7 @@ test "truncate to non-power-of-two integers from 128-bit" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testTrunc(u128, u1, 0xffffffff_ffffffff_ffffffff_01010101, 0x01);
try testTrunc(u128, u1, 0xffffffff_ffffffff_ffffffff_01010110, 0x00);
@ -220,6 +224,7 @@ const OpaqueB = opaque {};
test "opaque types" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try expect(*OpaqueA != *OpaqueB);
@ -371,6 +376,7 @@ test "take address of parameter" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testTakeAddressOfParameter(12.34);
}
@ -395,6 +401,7 @@ test "array 2D const double ptr" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const rect_2d_vertexes = [_][1]f32{
[_]f32{1.0},
@ -407,6 +414,7 @@ test "array 2D const double ptr with offset" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const rect_2d_vertexes = [_][2]f32{
[_]f32{ 3.0, 4.239 },
@ -419,6 +427,7 @@ test "array 3D const double ptr with offset" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const rect_3d_vertexes = [_][2][2]f32{
[_][2]f32{
@ -474,6 +483,7 @@ fn testStructInFn() !void {
test "fn call returning scalar optional in equality expression" {
    // Self-hosted backends that cannot run this test yet. // TODO
    switch (builtin.zig_backend) {
        .stage2_sparc64, .stage2_riscv64 => return error.SkipZigTest,
        else => {},
    }

    // The optional returned by a function call compares directly against null.
    try expect(getNull() == null);
}
@ -484,6 +494,7 @@ fn getNull() ?*i32 {
test "global variable assignment with optional unwrapping with var initialized to undefined" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
var data: i32 = 1234;
@ -502,6 +513,7 @@ var global_foo: *i32 = undefined;
test "peer result location with typed parent, runtime condition, comptime prongs" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest(arg: i32) i32 {
@ -581,6 +593,7 @@ test "equality compare fn ptrs" {
test "self reference through fn ptr field" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
const A = struct {
@ -613,6 +626,7 @@ var global_ptr = &gdt[0];
test "global constant is loaded with a runtime-known index" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -631,6 +645,7 @@ test "global constant is loaded with a runtime-known index" {
test "multiline string literal is null terminated" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const s1 =
\\one
@ -645,6 +660,7 @@ test "string escapes" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try expectEqualStrings("\"", "\x22");
try expectEqualStrings("\'", "\x27");
@ -674,6 +690,7 @@ test "string concatenation" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const a = "OK" ++ " IT " ++ "WORKED";
const b = "OK IT WORKED";
@ -697,6 +714,7 @@ test "result location is optional inside error union" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const x = maybe(true) catch unreachable;
try expect(x.? == 42);
@ -712,6 +730,7 @@ fn maybe(x: bool) anyerror!?u32 {
test "auto created variables have correct alignment" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn foo(str: [*]const u8) u32 {
@ -733,6 +752,7 @@ test "extern variable with non-pointer opaque type" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@export(var_to_export, .{ .name = "opaque_extern_var" });
try expect(@as(*align(1) u32, @ptrCast(&opaque_extern_var)).* == 42);
@ -776,6 +796,7 @@ test "discarding the result of various expressions" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn foo() !u32 {
@ -817,6 +838,7 @@ test "labeled block implicitly ends in a break" {
test "catch in block has correct result location" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn open() error{A}!@This() {
@ -848,6 +870,7 @@ test "labeled block with runtime branch forwards its result location type to bre
test "try in labeled block doesn't cast to wrong type" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
a: u32,
@ -874,6 +897,7 @@ test "weird array and tuple initializations" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const E = enum { a, b };
const S = struct { e: E };
@ -992,6 +1016,7 @@ comptime {
test "switch inside @as gets correct type" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var a: u32 = 0;
_ = &a;
@ -1058,6 +1083,7 @@ test "returning an opaque type from a function" {
test "orelse coercion as function argument" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const Loc = struct { start: i32 = -1 };
const Container = struct {
@ -1075,6 +1101,8 @@ test "orelse coercion as function argument" {
}
test "runtime-known globals initialized with undefined" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
var array: [10]u32 = [_]u32{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
var vp: [*]u32 = undefined;
@ -1095,6 +1123,7 @@ test "arrays and vectors with big integers" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
// TODO: only aarch64-windows didn't pass in the PR that added this code.
// figure out why if you can run this target.
@ -1119,6 +1148,8 @@ test "pointer to struct literal with runtime field is constant" {
}
test "integer compare" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTestSigned(comptime T: type) !void {
var z: T = 0;
@ -1215,6 +1246,8 @@ test "pointer to tuple field can be dereferenced at comptime" {
}
test "proper value is returned from labeled block" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn hash(v: *u32, key: anytype) void {
const Key = @TypeOf(key);
@ -1281,6 +1314,7 @@ test "break out of block based on comptime known values" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
const source = "A-";
@ -1317,6 +1351,7 @@ test "allocation and looping over 3-byte integer" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.os.tag == .macos) {
return error.SkipZigTest; // TODO
@ -1350,6 +1385,8 @@ test "allocation and looping over 3-byte integer" {
}
test "loading array from struct is not optimized away" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
arr: [1]u32 = .{0},
fn doTheTest(self: *@This()) !void {

View File

@ -65,6 +65,7 @@ test "sharded table" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
// realistic 16-way sharding
try testShardedTable(u32, 4, 8);
@ -116,6 +117,7 @@ test "Saturating Shift Left where lhs is of a computed type" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn getIntShiftType(comptime T: type) type {

View File

@ -10,6 +10,7 @@ const native_endian = builtin.target.cpu.arch.endian();
test "@bitCast iX -> uX (32, 64)" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const bit_values = [_]usize{ 32, 64 };
@ -24,6 +25,7 @@ test "@bitCast iX -> uX (8, 16, 128)" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const bit_values = [_]usize{ 8, 16, 128 };
@ -39,6 +41,7 @@ test "@bitCast iX -> uX exotic integers" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const bit_values = [_]usize{ 1, 48, 27, 512, 493, 293, 125, 204, 112 };
@ -83,6 +86,7 @@ test "bitcast uX to bytes" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const bit_values = [_]usize{ 1, 48, 27, 512, 493, 293, 125, 204, 112 };
inline for (bit_values) |bits| {
@ -161,6 +165,7 @@ test "@bitCast packed structs at runtime and comptime" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const Full = packed struct {
number: u16,
@ -187,6 +192,7 @@ test "@bitCast packed structs at runtime and comptime" {
test "@bitCast extern structs at runtime and comptime" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const Full = extern struct {
number: u16,
@ -221,6 +227,7 @@ test "bitcast packed struct to integer and back" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const LevelUpMove = packed struct {
move_id: u9,
@ -243,6 +250,7 @@ test "bitcast packed struct to integer and back" {
test "implicit cast to error union by returning" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn entry() !void {
@ -272,6 +280,8 @@ test "comptime bitcast used in expression has the correct type" {
}
test "bitcast passed as tuple element" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn foo(args: anytype) !void {
comptime assert(@TypeOf(args[0]) == f32);
@ -282,6 +292,8 @@ test "bitcast passed as tuple element" {
}
test "triple level result location with bitcast sandwich passed as tuple element" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn foo(args: anytype) !void {
comptime assert(@TypeOf(args[0]) == f64);
@ -299,6 +311,7 @@ test "@bitCast packed struct of floats" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const Foo = packed struct {
a: f16 = 0,
@ -337,6 +350,7 @@ test "comptime @bitCast packed struct to int and back" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and native_endian == .big) {
// https://github.com/ziglang/zig/issues/13782
@ -401,6 +415,7 @@ test "bitcast vector to integer and back" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const arr: [16]bool = [_]bool{ true, false } ++ [_]bool{true} ** 14;
var x: @Vector(16, bool) = @splat(true);
@ -426,6 +441,7 @@ test "bitcast nan float does not modify signaling bit" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
// TODO: https://github.com/ziglang/zig/issues/14366
if (builtin.zig_backend == .stage2_llvm and comptime builtin.cpu.arch.isArmOrThumb()) return error.SkipZigTest;
@ -482,6 +498,7 @@ test "@bitCast of packed struct of bools all true" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const P = packed struct {
b0: bool,

View File

@ -16,6 +16,7 @@ test "@bitReverse" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try comptime testBitReverse();
try testBitReverse();
@ -121,6 +122,7 @@ test "bitReverse vectors u8" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try comptime vector8();
try vector8();
@ -141,6 +143,7 @@ test "bitReverse vectors u16" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try comptime vector16();
try vector16();
@ -161,6 +164,7 @@ test "bitReverse vectors u24" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try comptime vector24();
try vector24();

View File

@ -9,6 +9,8 @@ test "bool literals" {
}
test "cast bool to int" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const t = true;
const f = false;
try expectEqual(@as(u32, 1), @intFromBool(t));

View File

@ -10,8 +10,8 @@ test {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var val: u8 = undefined;
try testing.expectEqual({}, @atomicStore(u8, &val, 0, .unordered));

View File

@ -3,6 +3,8 @@ const builtin = @import("builtin");
const expect = std.testing.expect;
test "@byteSwap integers" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) {
// TODO: Remove when self-hosted wasm supports more types for byteswap
const ByteSwapIntTest = struct {
@ -118,6 +120,7 @@ test "@byteSwap vectors u16" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try comptime vector16();
try vector16();
@ -138,6 +141,7 @@ test "@byteSwap vectors u24" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try comptime vector24();
try vector24();

View File

@ -5,6 +5,7 @@ var result: []const u8 = "wrong";
test "pass string literal byvalue to a generic var param" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
start();
blowUpStack(10);

View File

@ -60,6 +60,7 @@ test "tuple parameters" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const add = struct {
fn add(a: i32, b: i32) i32 {
@ -93,6 +94,7 @@ test "result location of function call argument through runtime condition and st
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const E = enum { a, b };
const S = struct {
@ -112,6 +114,7 @@ test "result location of function call argument through runtime condition and st
test "function call with 40 arguments" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest(thirty_nine: i32) !void {
@ -271,6 +274,7 @@ test "forced tail call" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm) {
// Only attempt this test on targets we know have tail call support in LLVM.
@ -306,6 +310,7 @@ test "inline call preserves tail call" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm) {
// Only attempt this test on targets we know have tail call support in LLVM.
@ -339,6 +344,7 @@ test "inline call preserves tail call" {
test "inline call doesn't re-evaluate non generic struct" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn foo(f: struct { a: u8, b: u8 }) !void {
@ -405,6 +411,7 @@ test "recursive inline call with comptime known argument" {
test "inline while with @call" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn inc(a: *u32) void {
@ -420,6 +427,8 @@ test "inline while with @call" {
}
test "method call as parameter type" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn foo(x: anytype, y: @TypeOf(x).Inner()) @TypeOf(y) {
return y;
@ -437,6 +446,7 @@ test "non-anytype generic parameters provide result type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn f(comptime T: type, y: T) !void {
@ -467,6 +477,7 @@ test "argument to generic function has correct result type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn foo(_: anytype, e: enum { a, b }) bool {
@ -502,6 +513,8 @@ test "call inline fn through pointer" {
}
test "call coerced function" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const T = struct {
x: f64,
const T = @This();

View File

@ -31,6 +31,8 @@ noinline fn insertionSort(data: []u64) void {
}
test "arguments pointed to on stack into tailcall" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
switch (builtin.cpu.arch) {
.wasm32,
.mips,

View File

@ -24,6 +24,7 @@ test "peer type resolution: ?T and T" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try expect(peerTypeTAndOptionalT(true, false).? == 0);
try expect(peerTypeTAndOptionalT(false, false).? == 3);
@ -56,6 +57,8 @@ test "@intCast to comptime_int" {
}
test "implicit cast comptime numbers to any type when the value fits" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const a: u64 = 255;
var b: u8 = a;
_ = &b;
@ -103,6 +106,7 @@ test "@floatFromInt" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -126,6 +130,7 @@ test "@floatFromInt(f80)" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArmOrThumb()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest(comptime Int: type) !void {
@ -160,6 +165,7 @@ test "@intFromFloat" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testIntFromFloats();
try comptime testIntFromFloats();
@ -182,6 +188,7 @@ fn expectIntFromFloat(comptime F: type, f: F, comptime I: type, i: I) !void {
test "implicitly cast indirect pointer to maybe-indirect pointer" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
const Self = @This();
@ -242,6 +249,7 @@ test "coerce undefined to optional" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try expect(MakeType(void).getNull() == null);
try expect(MakeType(void).getNonNull() != null);
@ -262,6 +270,7 @@ fn MakeType(comptime T: type) type {
test "implicit cast from *[N]T to [*c]T" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var x: [4]u16 = [4]u16{ 0, 1, 2, 3 };
var y: [*c]u16 = &x;
@ -299,6 +308,7 @@ test "peer result null and comptime_int" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn blah(n: i32) ?i32 {
@ -323,6 +333,7 @@ test "peer result null and comptime_int" {
test "*const ?[*]const T to [*c]const [*c]const T" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var array = [_]u8{ 'o', 'k' };
const opt_array_ptr: ?[*]const u8 = &array;
@ -336,6 +347,7 @@ test "array coercion to undefined at runtime" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@setRuntimeSafety(true);
@ -366,6 +378,7 @@ test "return u8 coercing into ?u32 return type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -400,6 +413,7 @@ test "peer type unsigned int to signed" {
test "expected [*c]const u8, found [*:0]const u8" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var a: [*:0]const u8 = "hello";
_ = &a;
@ -413,6 +427,7 @@ test "explicit cast from integer to error type" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testCastIntToErr(error.ItBroke);
try comptime testCastIntToErr(error.ItBroke);
@ -442,6 +457,7 @@ test "implicitly cast from T to anyerror!?T" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try castToOptionalTypeError(1);
try comptime castToOptionalTypeError(1);
@ -467,6 +483,7 @@ fn castToOptionalTypeError(z: i32) !void {
test "implicitly cast from [0]T to anyerror![]T" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testCastZeroArrayToErrSliceMut();
try comptime testCastZeroArrayToErrSliceMut();
@ -484,6 +501,7 @@ test "peer type resolution: [0]u8, []const u8, and anyerror![]u8" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() anyerror!void {
@ -540,6 +558,7 @@ fn testCastConstArrayRefToConstSlice() !void {
test "peer type resolution: error and [N]T" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try expect(mem.eql(u8, try testPeerErrorAndArray(0), "OK"));
comptime assert(mem.eql(u8, try testPeerErrorAndArray(0), "OK"));
@ -564,6 +583,7 @@ fn testPeerErrorAndArray2(x: u8) anyerror![]const u8 {
test "single-item pointer of array to slice to unknown length pointer" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testCastPtrOfArrayToSliceAndPtr();
try comptime testCastPtrOfArrayToSliceAndPtr();
@ -593,6 +613,7 @@ fn testCastPtrOfArrayToSliceAndPtr() !void {
test "cast *[1][*]const u8 to [*]const ?[*]const u8" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const window_name = [1][*]const u8{"window name"};
const x: [*]const ?[*]const u8 = &window_name;
@ -605,6 +626,7 @@ test "@intCast on vector" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -639,6 +661,7 @@ test "@floatCast cast down" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
{
var double: f64 = 0.001534;
@ -656,6 +679,7 @@ test "@floatCast cast down" {
test "peer type resolution: unreachable, error set, unreachable" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const Error = error{
FileDescriptorAlreadyPresentInSet,
@ -750,6 +774,7 @@ test "peer type resolution: error union and error set" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const a: error{Three} = undefined;
const b: error{ One, Two }!u32 = undefined;
@ -816,6 +841,7 @@ test "peer cast *[0]T to E![]const T" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var buffer: [5]u8 = "abcde".*;
const buf: anyerror![]const u8 = buffer[0..];
@ -831,6 +857,7 @@ test "peer cast *[0]T to []const T" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var buffer: [5]u8 = "abcde".*;
const buf: []const u8 = buffer[0..];
@ -854,6 +881,7 @@ test "peer resolution of string literals" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
const E = enum { a, b, c, d };
@ -875,6 +903,7 @@ test "peer resolution of string literals" {
test "peer cast [:x]T to []T" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -891,6 +920,7 @@ test "peer cast [:x]T to []T" {
test "peer cast [N:x]T to [N]T" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -907,6 +937,7 @@ test "peer cast [N:x]T to [N]T" {
test "peer cast *[N:x]T to *[N]T" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -922,6 +953,7 @@ test "peer cast *[N:x]T to *[N]T" {
test "peer cast [*:x]T to [*]T" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -942,6 +974,7 @@ test "peer cast [:x]T to [*:x]T" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -962,6 +995,7 @@ test "peer cast [:x]T to [*:x]T" {
test "peer type resolution implicit cast to return type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -982,6 +1016,7 @@ test "peer type resolution implicit cast to return type" {
test "peer type resolution implicit cast to variable type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -1007,6 +1042,7 @@ test "variable initialization uses result locations properly with regards to the
test "cast between C pointer with different but compatible types" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn foo(arg: [*]c_ushort) u16 {
@ -1024,6 +1060,7 @@ test "cast between C pointer with different but compatible types" {
test "peer type resolve string lit with sentinel-terminated mutable slice" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var array: [4:0]u8 = undefined;
array[4] = 0; // TODO remove this when #4372 is solved
@ -1090,6 +1127,7 @@ test "implicit cast from [*]T to ?*anyopaque" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var a = [_]u8{ 3, 2, 1 };
var runtime_zero: usize = 0;
@ -1120,6 +1158,7 @@ fn foobar(func: PFN_void) !void {
test "cast function with an opaque parameter" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) {
// https://github.com/ziglang/zig/issues/16845
@ -1152,6 +1191,7 @@ test "implicit ptr to *anyopaque" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var a: u32 = 1;
const ptr: *align(@alignOf(u32)) anyopaque = &a;
@ -1165,6 +1205,7 @@ test "implicit ptr to *anyopaque" {
test "return null from fn () anyerror!?&T" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const a = returnNullFromOptionalTypeErrorRef();
const b = returnNullLitFromOptionalTypeErrorRef();
@ -1181,6 +1222,7 @@ fn returnNullLitFromOptionalTypeErrorRef() anyerror!?*A {
test "peer type resolution: [0]u8 and []const u8" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try expect(peerTypeEmptyArrayAndSlice(true, "hi").len == 0);
try expect(peerTypeEmptyArrayAndSlice(false, "hi").len == 1);
@ -1201,6 +1243,7 @@ test "implicitly cast from [N]T to ?[]const T" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try expect(mem.eql(u8, castToOptionalSlice().?, "hi"));
comptime assert(mem.eql(u8, castToOptionalSlice().?, "hi"));
@ -1215,6 +1258,7 @@ test "cast u128 to f128 and back" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try comptime testCast128();
try testCast128();
@ -1236,6 +1280,7 @@ test "implicit cast from *[N]T to ?[*]T" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var x: ?[*]u16 = null;
var y: [4]u16 = [4]u16{ 0, 1, 2, 3 };
@ -1251,6 +1296,7 @@ test "implicit cast from *T to ?*anyopaque" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var a: u8 = 1;
incrementVoidPtrValue(&a);
@ -1264,6 +1310,7 @@ fn incrementVoidPtrValue(value: ?*anyopaque) void {
test "implicit cast *[0]T to E![]const u8" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var x = @as(anyerror![]const u8, &[0]u8{});
_ = &x;
@ -1285,6 +1332,7 @@ test "*const [N]null u8 to ?[]const u8" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -1321,6 +1369,7 @@ test "assignment to optional pointer result loc" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var foo: struct { ptr: ?*anyopaque } = .{ .ptr = &global_struct };
_ = &foo;
@ -1328,6 +1377,8 @@ test "assignment to optional pointer result loc" {
}
test "cast between *[N]void and []void" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var a: [4]void = undefined;
const b: []void = &a;
try expect(b.len == 4);
@ -1353,6 +1404,7 @@ test "cast f16 to wider types" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArmOrThumb()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -1373,6 +1425,7 @@ test "cast f128 to narrower types" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -1391,6 +1444,7 @@ test "peer type resolution: unreachable, null, slice" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest(num: usize, word: []const u8) !void {
@ -1431,6 +1485,7 @@ test "cast compatible optional types" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var a: ?[:0]const u8 = null;
_ = &a;
@ -1441,6 +1496,7 @@ test "cast compatible optional types" {
test "coerce undefined single-item pointer of array to error union of slice" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const a = @as([*]u8, undefined)[0..0];
var b: error{a}![]const u8 = a;
@ -1464,6 +1520,7 @@ test "coerce between pointers of compatible differently-named floats" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.os.tag == .windows) {
// https://github.com/ziglang/zig/issues/12396
@ -1518,6 +1575,7 @@ test "cast typed undefined to int" {
test "implicit cast from [:0]T to [*c]T" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var a: [:0]const u8 = "foo";
_ = &a;
@ -1541,6 +1599,7 @@ test "bitcast packed struct with u0" {
test "optional pointer coerced to optional allowzero pointer" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var p: ?*u32 = undefined;
var q: ?*allowzero u32 = undefined;
@ -1557,6 +1616,8 @@ test "optional slice coerced to allowzero many pointer" {
}
test "optional slice passed as parameter coerced to allowzero many pointer" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const ns = struct {
const Color = struct {
r: u8,
@ -1576,6 +1637,8 @@ test "optional slice passed as parameter coerced to allowzero many pointer" {
}
test "single item pointer to pointer to array to slice" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var x: i32 = 1234;
try expect(@as([]const i32, @as(*[1]i32, &x))[0] == 1234);
const z1 = @as([]const i32, @as(*[1]i32, &x));
@ -1583,6 +1646,8 @@ test "single item pointer to pointer to array to slice" {
}
test "peer type resolution forms error union" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var foo: i32 = 123;
_ = &foo;
const result = if (foo < 0) switch (-foo) {
@ -1616,6 +1681,8 @@ test "@volatileCast without a result location" {
}
test "coercion from single-item pointer to @as to slice" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var x: u32 = 1;
// Why the following line gets a compile error?
@ -1628,6 +1695,7 @@ test "peer type resolution: const sentinel slice and mutable non-sentinel slice"
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest(comptime T: type, comptime s: T) !void {
@ -1658,6 +1726,7 @@ test "peer type resolution: float and comptime-known fixed-width integer" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const i: u8 = 100;
var f: f32 = 1.234;
@ -1680,6 +1749,7 @@ test "peer type resolution: same array type with sentinel" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var a: [2:0]u32 = .{ 0, 1 };
var b: [2:0]u32 = .{ 2, 3 };
@ -1702,6 +1772,7 @@ test "peer type resolution: array with sentinel and array without sentinel" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var a: [2:0]u32 = .{ 0, 1 };
var b: [2]u32 = .{ 2, 3 };
@ -1724,6 +1795,7 @@ test "peer type resolution: array and vector with same child type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var arr: [2]u32 = .{ 0, 1 };
var vec: @Vector(2, u32) = .{ 2, 3 };
@ -1747,6 +1819,7 @@ test "peer type resolution: array with smaller child type and vector with larger
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var arr: [2]u8 = .{ 0, 1 };
var vec: @Vector(2, u64) = .{ 2, 3 };
@ -1769,6 +1842,7 @@ test "peer type resolution: error union and optional of same type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const E = error{Foo};
var a: E!*u8 = error.Foo;
@ -1792,6 +1866,7 @@ test "peer type resolution: C pointer and @TypeOf(null)" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var a: [*c]c_int = 0x1000;
_ = &a;
@ -1814,6 +1889,7 @@ test "peer type resolution: three-way resolution combines error set and optional
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const E = error{Foo};
var a: E = error.Foo;
@ -1858,6 +1934,7 @@ test "peer type resolution: vector and optional vector" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var a: ?@Vector(3, u32) = .{ 0, 1, 2 };
var b: @Vector(3, u32) = .{ 3, 4, 5 };
@ -1880,6 +1957,7 @@ test "peer type resolution: optional fixed-width int and comptime_int" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var a: ?i32 = 42;
_ = &a;
@ -1902,6 +1980,7 @@ test "peer type resolution: array and tuple" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var arr: [3]i32 = .{ 1, 2, 3 };
_ = &arr;
@ -1926,6 +2005,7 @@ test "peer type resolution: vector and tuple" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var vec: @Vector(3, i32) = .{ 1, 2, 3 };
_ = &vec;
@ -1950,6 +2030,7 @@ test "peer type resolution: vector and array and tuple" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var vec: @Vector(2, i8) = .{ 10, 20 };
var arr: [2]i8 = .{ 30, 40 };
@ -2034,6 +2115,7 @@ test "peer type resolution: tuple pointer and optional slice" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
// Miscompilation on Intel's OpenCL CPU runtime.
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // flaky
@ -2058,6 +2140,7 @@ test "peer type resolution: many compatible pointers" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var buf = "foo-3".*;
@ -2125,6 +2208,7 @@ test "peer type resolution: tuples with comptime fields" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const a = .{ 1, 2 };
const b = .{ @as(u32, 3), @as(i16, 4) };
@ -2156,6 +2240,7 @@ test "peer type resolution: C pointer and many pointer" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var buf = "hello".*;
@ -2179,6 +2264,7 @@ test "peer type resolution: pointer attributes are combined correctly" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var buf_a align(4) = "foo".*;
var buf_b align(4) = "bar".*;
@ -2222,6 +2308,7 @@ test "peer type resolution: arrays of compatible types" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var e0: u8 = 3;
var e1: u8 = 2;
@ -2239,6 +2326,7 @@ test "cast builtins can wrap result in optional" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
const MyEnum = enum(u32) { _ };
@ -2276,6 +2364,7 @@ test "cast builtins can wrap result in error union" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
const MyEnum = enum(u32) { _ };
@ -2314,6 +2403,7 @@ test "cast builtins can wrap result in error union and optional" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
const MyEnum = enum(u32) { _ };
@ -2354,6 +2444,7 @@ test "@floatCast on vector" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -2394,6 +2485,7 @@ test "@ptrFromInt on vector" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -2417,6 +2509,7 @@ test "@intFromPtr on vector" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -2441,6 +2534,7 @@ test "@floatFromInt on vector" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -2460,6 +2554,7 @@ test "@intFromFloat on vector" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -2480,6 +2575,7 @@ test "@intFromBool on vector" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and
builtin.cpu.arch == .aarch64 and builtin.os.tag == .windows)
@ -2503,6 +2599,7 @@ test "@intFromBool on vector" {
test "numeric coercions with undefined" {
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const from: i32 = undefined;
var to: f32 = from;
@ -2513,6 +2610,7 @@ test "numeric coercions with undefined" {
test "15-bit int to float" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var a: u15 = 42;
_ = &a;
@ -2525,6 +2623,7 @@ test "@as does not corrupt values with incompatible representations" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const x: f32 = @as(f16, blk: {
if (false) {
@ -2540,6 +2639,7 @@ test "result information is preserved through many nested structures" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -2566,6 +2666,7 @@ test "@intCast vector of signed integer" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var x: @Vector(4, i32) = .{ 1, 2, 3, 4 };
_ = &x;
@ -2586,6 +2687,7 @@ test "implicit cast from ptr to tuple to ptr to struct" {
if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const ComptimeReason = union(enum) {
c_import: struct {

View File

@ -9,6 +9,7 @@ test "@intCast i32 to u7" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var x: u128 = maxInt(u128);
var y: i32 = 120;
@ -34,6 +35,8 @@ test "coerce i8 to i32 and @intCast back" {
}
test "coerce non byte-sized integers accross 32bits boundary" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
{
var v: u21 = 6417;
_ = &v;
@ -163,6 +166,8 @@ const Piece = packed struct {
};
test "load non byte-sized optional value" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
// Originally reported at https://github.com/ziglang/zig/issues/14200
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
@ -178,6 +183,8 @@ test "load non byte-sized optional value" {
}
test "load non byte-sized value in struct" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.cpu.arch.endian() != .little) return error.SkipZigTest; // packed struct TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
@ -215,6 +222,7 @@ test "load non byte-sized value in union" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
// note: this bug is triggered by the == operator, expectEqual will hide it
// using ptrCast not to depend on unitialised memory state

View File

@ -408,6 +408,8 @@ test "mutate entire slice at comptime" {
}
test "dereference undefined pointer to zero-bit type" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const p0: *void = undefined;
try testing.expectEqual({}, p0.*);
@ -513,5 +515,7 @@ fn fieldPtrTest() u32 {
return a.value;
}
test "pointer in aggregate field can mutate comptime state" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try comptime std.testing.expect(fieldPtrTest() == 2);
}

View File

@ -10,6 +10,7 @@ test "const slice child" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const strs = [_][*]const u8{ "one", "two", "three" };
argv = &strs;

View File

@ -53,6 +53,7 @@ test "return variable while defer expression in scope to modify it" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -94,6 +95,7 @@ test "mixing normal and error defers" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try expect(runSomeErrorDefers(true) catch unreachable);
try expect(result[0] == 'c');
@ -114,6 +116,7 @@ test "errdefer with payload" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn foo() !i32 {
@ -136,6 +139,7 @@ test "reference to errdefer payload" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn foo() !i32 {
@ -158,6 +162,7 @@ test "reference to errdefer payload" {
test "simple else prong doesn't emit an error for unreachable else prong" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn foo() error{Foo}!void {

View File

@ -23,6 +23,8 @@ test "simple destructure" {
}
test "destructure with comptime syntax" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
{

View File

@ -16,6 +16,7 @@ test "thingy" {}
test thingy {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (thingy(1, 2) != 3) unreachable;
}

View File

@ -5,6 +5,7 @@ test "empty file level struct" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const T = @import("empty_file_level_struct.zig");
const info = @typeInfo(T);
@ -17,6 +18,7 @@ test "empty file level union" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const T = @import("empty_file_level_union.zig");
const info = @typeInfo(T);

View File

@ -48,6 +48,8 @@ test "empty extern union" {
}
test "empty union passed as argument" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const U = union(enum) {
fn f(u: @This()) void {
switch (u) {}
@ -57,6 +59,8 @@ test "empty union passed as argument" {
}
test "empty enum passed as argument" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const E = enum {
fn f(e: @This()) void {
switch (e) {}

View File

@ -610,6 +610,7 @@ fn testEnumWithSpecifiedTagValues(x: MultipleChoice) !void {
test "enum with specified tag values" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testEnumWithSpecifiedTagValues(MultipleChoice.C);
try comptime testEnumWithSpecifiedTagValues(MultipleChoice.C);
@ -618,6 +619,7 @@ test "enum with specified tag values" {
test "non-exhaustive enum" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
const E = enum(u8) { a, b, _ };
@ -682,6 +684,7 @@ test "empty non-exhaustive enum" {
test "single field non-exhaustive enum" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
const E = enum(u8) { a, _ };
@ -746,6 +749,7 @@ test "cast integer literal to enum" {
test "enum with specified and unspecified tag values" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testEnumWithSpecifiedAndUnspecifiedTagValues(MultipleChoice2.D);
try comptime testEnumWithSpecifiedAndUnspecifiedTagValues(MultipleChoice2.D);
@ -854,6 +858,8 @@ fn doALoopThing(id: EnumWithOneMember) void {
}
test "comparison operator on enum with one member is comptime-known" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
doALoopThing(EnumWithOneMember.Eof);
}
@ -907,6 +913,7 @@ test "enum literal casting to tagged union" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const Arch = union(enum) {
x86_64,
@ -933,6 +940,7 @@ const Bar = enum { A, B, C, D };
test "enum literal casting to error union with payload enum" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var bar: error{B}!Bar = undefined;
bar = .B; // should never cast to the error set
@ -944,6 +952,7 @@ test "constant enum initialization with differing sizes" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try test3_1(test3_foo);
try test3_2(test3_bar);
@ -987,6 +996,7 @@ test "@tagName" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try expect(mem.eql(u8, testEnumTagNameBare(BareNumber.Three), "Three"));
comptime assert(mem.eql(u8, testEnumTagNameBare(BareNumber.Three), "Three"));
@ -1003,6 +1013,7 @@ test "@tagName non-exhaustive enum" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try expect(mem.eql(u8, testEnumTagNameBare(NonExhaustive.B), "B"));
comptime assert(mem.eql(u8, testEnumTagNameBare(NonExhaustive.B), "B"));
@ -1014,6 +1025,7 @@ test "@tagName is null-terminated" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest(n: BareNumber) !void {
@ -1029,6 +1041,7 @@ test "tag name with assigned enum values" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const LocalFoo = enum(u8) {
A = 1,
@ -1052,6 +1065,7 @@ test "tag name with signed enum values" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const LocalFoo = enum(isize) {
alfa = 62,
@ -1068,6 +1082,7 @@ test "enum literal casting to optional" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var bar: ?Bar = undefined;
bar = .B;
@ -1096,6 +1111,7 @@ test "bit field access with enum fields" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var data = bit_field_1;
try expect(getA(&data) == A.Two);
@ -1136,6 +1152,7 @@ test "tag name functions are unique" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
{
const E = enum { a, b };
@ -1212,6 +1229,8 @@ test "enum tag from a local variable" {
}
test "auto-numbered enum with signed tag type" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const E = enum(i32) { a, b };
try std.testing.expectEqual(@as(i32, 0), @intFromEnum(E.a));
@ -1266,6 +1285,7 @@ test "matching captures causes enum equivalence" {
test "large enum field values" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
{
const E = enum(u64) { min = std.math.minInt(u64), max = std.math.maxInt(u64) };

View File

@ -31,6 +31,7 @@ fn shouldBeNotEqual(a: anyerror, b: anyerror) void {
test "error binary operator" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const a = errBinaryOperatorG(true) catch 3;
const b = errBinaryOperatorG(false) catch 3;
@ -62,12 +63,14 @@ pub fn baz() anyerror!i32 {
test "error wrapping" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try expect((baz() catch unreachable) == 15);
}
test "unwrap simple value from error" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const i = unwrapSimpleValueFromErrorDo() catch unreachable;
try expect(i == 13);
@ -78,6 +81,7 @@ fn unwrapSimpleValueFromErrorDo() anyerror!isize {
test "error return in assignment" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
doErrReturnInAssignment() catch unreachable;
}
@ -100,6 +104,7 @@ test "syntax: optional operator in front of error union operator" {
test "widen cast integer payload of error union function call" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn errorable() !u64 {
@ -124,6 +129,7 @@ test "debug info for optional error set" {
test "implicit cast to optional to error union to return result loc" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn entry() !void {
@ -235,6 +241,8 @@ fn testExplicitErrorSetCast(set1: Set1) !void {
}
test "@errorCast on error unions" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
{
@ -262,6 +270,7 @@ test "@errorCast on error unions" {
test "comptime test error for empty error set" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testComptimeTestErrorEmptySet(1234);
try comptime testComptimeTestErrorEmptySet(1234);
@ -297,6 +306,8 @@ test "inferred empty error set comptime catch" {
}
test "error inference with an empty set" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
const Struct = struct {
pub fn func() (error{})!usize {
@ -319,6 +330,7 @@ test "error inference with an empty set" {
test "error union peer type resolution" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testErrorUnionPeerTypeResolution(1);
}
@ -350,6 +362,7 @@ fn quux_1() !i32 {
test "error: Zero sized error set returned with value payload crash" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
_ = try foo3(0);
_ = try comptime foo3(0);
@ -363,6 +376,7 @@ fn foo3(b: usize) Error!usize {
test "error: Infer error set from literals" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
_ = nullLiteral("n") catch |err| handleErrors(err);
_ = floatLiteral("n") catch |err| handleErrors(err);
@ -402,6 +416,7 @@ test "nested error union function call in optional unwrap" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
const Foo = struct {
@ -448,6 +463,7 @@ test "nested error union function call in optional unwrap" {
test "return function call to error set from error union function" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn errorable() anyerror!i32 {
@ -466,6 +482,7 @@ test "optional error set is the same size as error set" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
comptime assert(@sizeOf(?anyerror) == @sizeOf(anyerror));
comptime assert(@alignOf(?anyerror) == @alignOf(anyerror));
@ -481,6 +498,7 @@ test "optional error set is the same size as error set" {
test "nested catch" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn entry() !void {
@ -506,6 +524,7 @@ test "nested catch" {
test "function pointer with return type that is error union with payload which is pointer of parent struct" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
const Foo = struct {
@ -531,6 +550,7 @@ test "return result loc as peer result loc in inferred error set function" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -562,6 +582,7 @@ test "error payload type is correctly resolved" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const MyIntWrapper = struct {
const Self = @This();
@ -592,6 +613,7 @@ test "@errorName" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try expect(mem.eql(u8, @errorName(error.AnError), "AnError"));
try expect(mem.eql(u8, @errorName(error.ALongerErrorName), "ALongerErrorName"));
@ -606,6 +628,7 @@ test "@errorName sentinel length matches slice length" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const name = testBuiltinErrorName(error.FooBar);
const length: usize = 6;
@ -700,6 +723,7 @@ test "error union payload is properly aligned" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
a: u128,
@ -731,6 +755,7 @@ test "ret_ptr doesn't cause own inferred error set to be resolved" {
test "simple else prong allowed even when all errors handled" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn foo() !u8 {
@ -759,6 +784,7 @@ test "pointer to error union payload" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var err_union: anyerror!u8 = 15;
@ -792,6 +818,7 @@ test "error union of noreturn used with if" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
NoReturn.a = 64;
if (NoReturn.loop()) {
@ -806,6 +833,7 @@ test "error union of noreturn used with try" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
NoReturn.a = 64;
const err = NoReturn.testTry();
@ -817,6 +845,7 @@ test "error union of noreturn used with catch" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
NoReturn.a = 64;
const err = NoReturn.testCatch();
@ -828,6 +857,7 @@ test "alignment of wrapping an error union payload" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
const I = extern struct { x: i128 };
@ -843,6 +873,7 @@ test "alignment of wrapping an error union payload" {
test "compare error union and error set" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var a: anyerror = error.Foo;
var b: anyerror!u32 = error.Bar;
@ -881,6 +912,7 @@ test "error from comptime string" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const name = "Weird error name!";
const S = struct {
@ -904,6 +936,7 @@ test "field access of anyerror results in smaller error set" {
test "optional error union return type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn foo() ?anyerror!u32 {
@ -918,6 +951,7 @@ test "optional error union return type" {
test "optional error set return type" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const E = error{ A, B };
const S = struct {
@ -931,6 +965,8 @@ test "optional error set return type" {
}
test "optional error set function parameter" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest(a: ?anyerror) !void {
try std.testing.expect(a.? == error.OutOfMemory);
@ -960,6 +996,7 @@ test "returning an error union containing a type with no runtime bits" {
test "try used in recursive function with inferred error set" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const Value = union(enum) {
values: []const @This(),
@ -1001,6 +1038,7 @@ test "function called at runtime is properly analyzed for inferred error set" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn foo() !void {
@ -1024,6 +1062,7 @@ test "generic type constructed from inferred error set of unresolved function" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn write(_: void, bytes: []const u8) !usize {
@ -1039,6 +1078,8 @@ test "generic type constructed from inferred error set of unresolved function" {
}
test "errorCast to adhoc inferred error set" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
inline fn baz() !i32 {
return @errorCast(err());
@ -1051,6 +1092,8 @@ test "errorCast to adhoc inferred error set" {
}
test "errorCast from error sets to error unions" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const err_union: Set1!void = @errorCast(error.A);
try expectError(error.A, err_union);
}
@ -1059,6 +1102,7 @@ test "result location initialization of error union with OPV payload" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
x: u0,
@ -1080,6 +1124,7 @@ test "result location initialization of error union with OPV payload" {
test "return error union with i65" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try expect(try add(1000, 234) == 1234);
}

View File

@ -73,6 +73,7 @@ fn constExprEvalOnSingleExprBlocksFn(x: i32, b: bool) i32 {
test "constant expressions" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var array: [array_size]u8 = undefined;
_ = &array;
@ -142,6 +143,7 @@ test "pointer to type" {
test "a type constructed in a global expression" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var l: List = undefined;
l.array[0] = 10;
@ -394,6 +396,7 @@ test "return 0 from function that has u0 return type" {
test "statically initialized struct" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
st_init_str_foo.x += 1;
try expect(st_init_str_foo.x == 14);
@ -444,6 +447,7 @@ test "binary math operator in partially inlined function" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var s: [4]u32 = undefined;
var b: [16]u8 = undefined;
@ -489,6 +493,7 @@ test "comptime bitwise operators" {
test "comptime shlWithOverflow" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const ct_shifted = @shlWithOverflow(~@as(u64, 0), 16)[0];
var a = ~@as(u64, 0);
@ -501,6 +506,7 @@ test "comptime shlWithOverflow" {
test "const ptr to variable data changes at runtime" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try expect(foo_ref.name[0] == 'a');
foo_ref.name = "b";
@ -522,6 +528,7 @@ test "runtime 128 bit integer division" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArmOrThumb()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var a: u128 = 152313999999999991610955792383;
var b: u128 = 10000000000000000000;
@ -542,6 +549,7 @@ test "static eval list init" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try expect(static_vec3.data[2] == 1.0);
try expect(vec3(0.0, 0.0, 3.0).data[2] == 3.0);
@ -713,6 +721,8 @@ fn loopNTimes(comptime n: usize) void {
}
test "variable inside inline loop that has different types on different iterations" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testVarInsideInlineLoop(.{ true, @as(u32, 42) });
}
@ -736,6 +746,7 @@ test "array concatenation of function calls" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var a = oneItem(3) ++ oneItem(4);
try expect(std.mem.eql(i32, &a, &[_]i32{ 3, 4 }));
@ -745,6 +756,7 @@ test "array multiplication of function calls" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var a = oneItem(3) ** scalar(2);
try expect(std.mem.eql(i32, &a, &[_]i32{ 3, 3 }));
@ -762,6 +774,7 @@ test "array concatenation peer resolves element types - value" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var a = [2]u3{ 1, 7 };
var b = [3]u8{ 200, 225, 255 };
@ -779,6 +792,7 @@ test "array concatenation peer resolves element types - pointer" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var a = [2]u3{ 1, 7 };
var b = [3]u8{ 200, 225, 255 };
@ -795,6 +809,7 @@ test "array concatenation sets the sentinel - value" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var a = [2]u3{ 1, 7 };
var b = [3:69]u8{ 200, 225, 255 };
@ -813,6 +828,7 @@ test "array concatenation sets the sentinel - value" {
test "array concatenation sets the sentinel - pointer" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var a = [2]u3{ 1, 7 };
var b = [3:69]u8{ 200, 225, 255 };
@ -831,6 +847,7 @@ test "array multiplication sets the sentinel - value" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var a = [2:7]u3{ 1, 6 };
_ = &a;
@ -848,6 +865,7 @@ test "array multiplication sets the sentinel - pointer" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var a = [2:7]u3{ 1, 6 };
const b = &a ** 2;
@ -1070,6 +1088,7 @@ test "comptime break operand passing through runtime condition converted to runt
test "comptime break operand passing through runtime switch converted to runtime break" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest(runtime: u8) !void {
@ -1090,6 +1109,7 @@ test "comptime break operand passing through runtime switch converted to runtime
test "no dependency loop for alignment of self struct" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -1127,6 +1147,7 @@ test "no dependency loop for alignment of self struct" {
test "no dependency loop for alignment of self bare union" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -1164,6 +1185,7 @@ test "no dependency loop for alignment of self bare union" {
test "no dependency loop for alignment of self tagged union" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -1354,6 +1376,7 @@ test "lazy value is resolved as slice operand" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const A = struct { a: u32 };
var a: [512]u64 = undefined;
@ -1533,6 +1556,7 @@ test "non-optional and optional array elements concatenated" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const array = [1]u8{'A'} ++ [1]?u8{null};
var index: usize = 0;
@ -1607,6 +1631,8 @@ test "struct in comptime false branch is not evaluated" {
}
test "result of nested switch assigned to variable" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var zds: u32 = 0;
zds = switch (zds) {
0 => switch (zds) {
@ -1621,6 +1647,8 @@ test "result of nested switch assigned to variable" {
}
test "inline for loop of functions returning error unions" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const T1 = struct {
fn v() error{}!usize {
return 1;
@ -1639,6 +1667,8 @@ test "inline for loop of functions returning error unions" {
}
test "if inside a switch" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var condition = true;
var wave_type: u32 = 0;
_ = .{ &condition, &wave_type };

View File

@ -48,6 +48,7 @@ test "exporting using field access" {
test "exporting comptime-known value" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and
(builtin.target.ofmt != .elf and
builtin.target.ofmt != .macho and
@ -67,6 +68,7 @@ test "exporting comptime-known value" {
test "exporting comptime var" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and
(builtin.target.ofmt != .elf and
builtin.target.ofmt != .macho and

View File

@ -9,6 +9,8 @@ const builtin = @import("builtin");
// and generates code
const vram = @as([*]volatile u8, @ptrFromInt(0x20000000))[0..0x8000];
export fn writeToVRam() void {
if (builtin.zig_backend == .stage2_riscv64) return;
vram[0] = 'X';
}

View File

@ -7,6 +7,7 @@ test "anyopaque extern symbol" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const a = @extern(*anyopaque, .{ .name = "a_mystery_symbol" });
const b: *i32 = @alignCast(@ptrCast(a));
@ -19,6 +20,7 @@ test "function extern symbol" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const a = @extern(*const fn () callconv(.C) i32, .{ .name = "a_mystery_function" });
try expect(a() == 4567);
@ -32,6 +34,7 @@ test "function extern symbol matches extern decl" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
extern fn another_mystery_function() u32;

View File

@ -2,6 +2,8 @@ const expect = @import("std").testing.expect;
const builtin = @import("builtin");
test "@fieldParentPtr struct" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const C = struct {
a: bool = true,
b: f32 = 3.14,
@ -135,6 +137,8 @@ test "@fieldParentPtr struct" {
}
test "@fieldParentPtr extern struct" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const C = extern struct {
a: bool = true,
b: f32 = 3.14,
@ -269,6 +273,7 @@ test "@fieldParentPtr extern struct" {
test "@fieldParentPtr extern struct first zero-bit field" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const C = extern struct {
a: u0 = 0,
@ -372,6 +377,7 @@ test "@fieldParentPtr extern struct first zero-bit field" {
test "@fieldParentPtr extern struct middle zero-bit field" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const C = extern struct {
a: f32 = 3.14,
@ -475,6 +481,7 @@ test "@fieldParentPtr extern struct middle zero-bit field" {
test "@fieldParentPtr extern struct last zero-bit field" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const C = extern struct {
a: f32 = 3.14,
@ -581,6 +588,7 @@ test "@fieldParentPtr unaligned packed struct" {
if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const C = packed struct {
a: bool = true,
@ -719,6 +727,7 @@ test "@fieldParentPtr aligned packed struct" {
if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const C = packed struct {
a: f32 = 3.14,
@ -856,6 +865,7 @@ test "@fieldParentPtr nested packed struct" {
if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
{
const C = packed struct {
@ -1018,6 +1028,7 @@ test "@fieldParentPtr packed struct first zero-bit field" {
if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const C = packed struct {
a: u0 = 0,
@ -1123,6 +1134,7 @@ test "@fieldParentPtr packed struct middle zero-bit field" {
if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const C = packed struct {
a: f32 = 3.14,
@ -1228,6 +1240,7 @@ test "@fieldParentPtr packed struct last zero-bit field" {
if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const C = packed struct {
a: f32 = 3.14,
@ -1330,6 +1343,8 @@ test "@fieldParentPtr packed struct last zero-bit field" {
}
test "@fieldParentPtr tagged union" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const C = union(enum) {
a: bool,
b: f32,
@ -1463,6 +1478,8 @@ test "@fieldParentPtr tagged union" {
}
test "@fieldParentPtr untagged union" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const C = union {
a: bool,
b: f32,
@ -1596,6 +1613,8 @@ test "@fieldParentPtr untagged union" {
}
test "@fieldParentPtr extern union" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const C = extern union {
a: bool,
b: f32,
@ -1731,6 +1750,7 @@ test "@fieldParentPtr extern union" {
test "@fieldParentPtr packed union" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.target.cpu.arch.endian() == .big) return error.SkipZigTest; // TODO
const C = packed union {
@ -1868,6 +1888,7 @@ test "@fieldParentPtr packed union" {
test "@fieldParentPtr tagged union all zero-bit fields" {
if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const C = union(enum) {
a: u0,

View File

@ -15,12 +15,15 @@ fn epsForType(comptime T: type) T {
test "add f16" {
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testAdd(f16);
try comptime testAdd(f16);
}
test "add f32/f64" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testAdd(f32);
try comptime testAdd(f32);
try testAdd(f64);
@ -30,6 +33,7 @@ test "add f32/f64" {
test "add f80/f128/c_longdouble" {
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testAdd(f80);
try comptime testAdd(f80);
@ -49,12 +53,15 @@ fn testAdd(comptime T: type) !void {
test "sub f16" {
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testSub(f16);
try comptime testSub(f16);
}
test "sub f32/f64" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testSub(f32);
try comptime testSub(f32);
try testSub(f64);
@ -64,6 +71,7 @@ test "sub f32/f64" {
test "sub f80/f128/c_longdouble" {
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testSub(f80);
try comptime testSub(f80);
@ -83,12 +91,15 @@ fn testSub(comptime T: type) !void {
test "mul f16" {
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testMul(f16);
try comptime testMul(f16);
}
test "mul f32/f64" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testMul(f32);
try comptime testMul(f32);
try testMul(f64);
@ -98,6 +109,7 @@ test "mul f32/f64" {
test "mul f80/f128/c_longdouble" {
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testMul(f80);
try comptime testMul(f80);
@ -119,6 +131,7 @@ test "cmp f16" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testCmp(f16);
try comptime testCmp(f16);
@ -127,6 +140,7 @@ test "cmp f16" {
test "cmp f32/f64" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testCmp(f32);
try comptime testCmp(f32);
@ -140,6 +154,7 @@ test "cmp f128" {
if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArmOrThumb()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testCmp(f128);
try comptime testCmp(f128);
@ -213,6 +228,7 @@ test "different sized float comparisons" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testDifferentSizedFloatComparisons();
try comptime testDifferentSizedFloatComparisons();
@ -261,6 +277,7 @@ test "@sqrt f16" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testSqrt(f16);
try comptime testSqrt(f16);
@ -272,6 +289,7 @@ test "@sqrt f32/f64" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testSqrt(f32);
try comptime testSqrt(f32);
@ -285,6 +303,7 @@ test "@sqrt f80/f128/c_longdouble" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.os.tag == .freebsd) {
// TODO https://github.com/ziglang/zig/issues/10875
@ -371,6 +390,7 @@ test "@sqrt with vectors" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testSqrtWithVectors();
try comptime testSqrtWithVectors();
@ -392,6 +412,7 @@ test "@sin f16" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testSin(f16);
try comptime testSin(f16);
@ -403,6 +424,7 @@ test "@sin f32/f64" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testSin(f32);
comptime try testSin(f32);
@ -416,6 +438,7 @@ test "@sin f80/f128/c_longdouble" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testSin(f80);
comptime try testSin(f80);
@ -443,6 +466,7 @@ test "@sin with vectors" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testSinWithVectors();
try comptime testSinWithVectors();
@ -464,6 +488,7 @@ test "@cos f16" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testCos(f16);
try comptime testCos(f16);
@ -475,6 +500,7 @@ test "@cos f32/f64" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testCos(f32);
try comptime testCos(f32);
@ -488,6 +514,7 @@ test "@cos f80/f128/c_longdouble" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testCos(f80);
try comptime testCos(f80);
@ -515,6 +542,7 @@ test "@cos with vectors" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testCosWithVectors();
try comptime testCosWithVectors();
@ -536,6 +564,7 @@ test "@tan f16" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testTan(f16);
try comptime testTan(f16);
@ -547,6 +576,7 @@ test "@tan f32/f64" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testTan(f32);
try comptime testTan(f32);
@ -560,6 +590,7 @@ test "@tan f80/f128/c_longdouble" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testTan(f80);
try comptime testTan(f80);
@ -587,6 +618,7 @@ test "@tan with vectors" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testTanWithVectors();
try comptime testTanWithVectors();
@ -608,6 +640,7 @@ test "@exp f16" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testExp(f16);
try comptime testExp(f16);
@ -619,6 +652,7 @@ test "@exp f32/f64" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testExp(f32);
try comptime testExp(f32);
@ -632,6 +666,7 @@ test "@exp f80/f128/c_longdouble" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testExp(f80);
try comptime testExp(f80);
@ -663,6 +698,7 @@ test "@exp with vectors" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testExpWithVectors();
try comptime testExpWithVectors();
@ -684,6 +720,7 @@ test "@exp2 f16" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testExp2(f16);
try comptime testExp2(f16);
@ -695,6 +732,7 @@ test "@exp2 f32/f64" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testExp2(f32);
try comptime testExp2(f32);
@ -708,6 +746,7 @@ test "@exp2 f80/f128/c_longdouble" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testExp2(f80);
try comptime testExp2(f80);
@ -734,6 +773,7 @@ test "@exp2 with @vectors" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testExp2WithVectors();
try comptime testExp2WithVectors();
@ -755,6 +795,7 @@ test "@log f16" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testLog(f16);
try comptime testLog(f16);
@ -766,6 +807,7 @@ test "@log f32/f64" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testLog(f32);
try comptime testLog(f32);
@ -779,6 +821,7 @@ test "@log f80/f128/c_longdouble" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testLog(f80);
try comptime testLog(f80);
@ -806,6 +849,7 @@ test "@log with @vectors" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
{
var v: @Vector(4, f32) = [_]f32{ 1.1, 2.2, 0.3, 0.4 };
@ -824,6 +868,7 @@ test "@log2 f16" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testLog2(f16);
try comptime testLog2(f16);
@ -835,6 +880,7 @@ test "@log2 f32/f64" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testLog2(f32);
try comptime testLog2(f32);
@ -848,6 +894,7 @@ test "@log2 f80/f128/c_longdouble" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testLog2(f80);
try comptime testLog2(f80);
@ -873,6 +920,7 @@ test "@log2 with vectors" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
// https://github.com/ziglang/zig/issues/13681
if (builtin.zig_backend == .stage2_llvm and
builtin.cpu.arch == .aarch64 and
@ -899,6 +947,7 @@ test "@log10 f16" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testLog10(f16);
try comptime testLog10(f16);
@ -910,6 +959,7 @@ test "@log10 f32/f64" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testLog10(f32);
try comptime testLog10(f32);
@ -923,6 +973,7 @@ test "@log10 f80/f128/c_longdouble" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testLog10(f80);
try comptime testLog10(f80);
@ -949,6 +1000,7 @@ test "@log10 with vectors" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testLog10WithVectors();
try comptime testLog10WithVectors();
@ -969,6 +1021,7 @@ test "@abs f16" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testFabs(f16);
try comptime testFabs(f16);
@ -978,6 +1031,7 @@ test "@abs f32/f64" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testFabs(f32);
try comptime testFabs(f32);
@ -992,6 +1046,7 @@ test "@abs f80/f128/c_longdouble" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testFabs(f80);
try comptime testFabs(f80);
@ -1069,6 +1124,7 @@ test "@abs with vectors" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testFabsWithVectors();
try comptime testFabsWithVectors();
@ -1089,6 +1145,7 @@ test "@floor f16" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testFloor(f16);
try comptime testFloor(f16);
@ -1099,6 +1156,7 @@ test "@floor f32/f64" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testFloor(f32);
try comptime testFloor(f32);
@ -1113,6 +1171,7 @@ test "@floor f80/f128/c_longdouble" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.os.tag == .windows) {
// https://github.com/ziglang/zig/issues/12602
@ -1161,6 +1220,7 @@ test "@floor with vectors" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sse4_1)) return error.SkipZigTest;
@ -1184,6 +1244,7 @@ test "@ceil f16" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testCeil(f16);
try comptime testCeil(f16);
@ -1195,6 +1256,7 @@ test "@ceil f32/f64" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testCeil(f32);
try comptime testCeil(f32);
@ -1209,6 +1271,7 @@ test "@ceil f80/f128/c_longdouble" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.os.tag == .windows) {
// https://github.com/ziglang/zig/issues/12602
@ -1258,6 +1321,7 @@ test "@ceil with vectors" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sse4_1)) return error.SkipZigTest;
@ -1281,6 +1345,7 @@ test "@trunc f16" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch.isMIPS()) {
// https://github.com/ziglang/zig/issues/16846
@ -1297,6 +1362,7 @@ test "@trunc f32/f64" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch.isMIPS()) {
// https://github.com/ziglang/zig/issues/16846
@ -1316,6 +1382,7 @@ test "@trunc f80/f128/c_longdouble" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.os.tag == .windows) {
// https://github.com/ziglang/zig/issues/12602
@ -1365,6 +1432,7 @@ test "@trunc with vectors" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sse4_1)) return error.SkipZigTest;
@ -1389,6 +1457,7 @@ test "neg f16" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.os.tag == .freebsd) {
// TODO file issue to track this failure
@ -1405,6 +1474,7 @@ test "neg f32/f64" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testNeg(f32);
try comptime testNeg(f32);
@ -1419,6 +1489,7 @@ test "neg f80/f128/c_longdouble" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testNeg(f80);
try comptime testNeg(f80);
@ -1525,6 +1596,7 @@ test "comptime fixed-width float zero divided by zero produces NaN" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
inline for (.{ f16, f32, f64, f80, f128 }) |F| {
try expect(math.isNan(@as(F, 0) / @as(F, 0)));
@ -1584,6 +1656,7 @@ test "comptime inf >= runtime 1" {
test "comptime isNan(nan * 1)" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const nan_times_one = comptime std.math.nan(f64) * 1;
try std.testing.expect(std.math.isNan(nan_times_one));
@ -1591,6 +1664,7 @@ test "comptime isNan(nan * 1)" {
test "runtime isNan(nan * 1)" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const nan_times_one = std.math.nan(f64) * 1;
try std.testing.expect(std.math.isNan(nan_times_one));
@ -1598,6 +1672,7 @@ test "runtime isNan(nan * 1)" {
test "comptime isNan(nan * 0)" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const nan_times_zero = comptime std.math.nan(f64) * 0;
try std.testing.expect(std.math.isNan(nan_times_zero));
@ -1607,6 +1682,7 @@ test "comptime isNan(nan * 0)" {
test "runtime isNan(nan * 0)" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const nan_times_zero = std.math.nan(f64) * 0;
try std.testing.expect(std.math.isNan(nan_times_zero));
@ -1616,6 +1692,7 @@ test "runtime isNan(nan * 0)" {
test "comptime isNan(inf * 0)" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const inf_times_zero = comptime std.math.inf(f64) * 0;
try std.testing.expect(std.math.isNan(inf_times_zero));
@ -1625,6 +1702,7 @@ test "comptime isNan(inf * 0)" {
test "runtime isNan(inf * 0)" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const inf_times_zero = std.math.inf(f64) * 0;
try std.testing.expect(std.math.isNan(inf_times_zero));

View File

@ -71,6 +71,7 @@ fn outer(y: u32) *const fn (u32) u32 {
test "return inner function which references comptime variable of outer function" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const func = outer(10);
try expect(func(3) == 7);
@ -80,6 +81,7 @@ test "discard the result of a function that returns a struct" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn entry() void {
@ -104,6 +106,7 @@ test "inline function call that calls optional function pointer, return pointer
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
field: u32,
@ -181,12 +184,14 @@ test "function with complex callconv and return type expressions" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try expect(fComplexCallconvRet(3).x == 9);
}
test "pass by non-copying value" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try expect(addPointCoords(Point{ .x = 1, .y = 2 }) == 3);
}
@ -202,6 +207,7 @@ fn addPointCoords(pt: Point) i32 {
test "pass by non-copying value through var arg" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try expect((try addPointCoordsVar(Point{ .x = 1, .y = 2 })) == 3);
}
@ -213,6 +219,7 @@ fn addPointCoordsVar(pt: anytype) !i32 {
test "pass by non-copying value as method" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var pt = Point2{ .x = 1, .y = 2 };
try expect(pt.addPointCoords() == 3);
@ -229,6 +236,7 @@ const Point2 = struct {
test "pass by non-copying value as method, which is generic" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var pt = Point3{ .x = 1, .y = 2 };
try expect(pt.addPointCoords(i32) == 3);
@ -257,6 +265,7 @@ test "implicit cast fn call result to optional in field result" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn entry() !void {
@ -283,6 +292,7 @@ test "implicit cast fn call result to optional in field result" {
test "void parameters" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try voidFun(1, void{}, 2, {});
}
@ -346,6 +356,7 @@ test "function call with anon list literal" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -366,6 +377,7 @@ test "function call with anon list literal - 2D" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -402,6 +414,8 @@ test "ability to give comptime types and non comptime types to same parameter" {
}
test "function with inferred error set but returning no error" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn foo() !void {}
};
@ -412,6 +426,7 @@ test "function with inferred error set but returning no error" {
test "import passed byref to function in return type" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn get() @import("std").ArrayListUnmanaged(i32) {
@ -429,6 +444,7 @@ test "implicit cast function to function ptr" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S1 = struct {
export fn someFunctionThatReturnsAValue() c_int {
@ -449,6 +465,7 @@ test "implicit cast function to function ptr" {
test "method call with optional and error union first param" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
x: i32 = 1234,
@ -468,6 +485,7 @@ test "method call with optional and error union first param" {
test "method call with optional pointer first param" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
x: i32 = 1234,
@ -487,6 +505,7 @@ test "using @ptrCast on function pointers" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
const A = struct { data: [4]u8 };
@ -524,6 +543,7 @@ test "function returns function returning type" {
test "peer type resolution of inferred error set with non-void payload" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn openDataFile(mode: enum { read, write }) !u32 {
@ -566,6 +586,8 @@ test "lazy values passed to anytype parameter" {
}
test "pass and return comptime-only types" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn returnNull(comptime x: @Type(.Null)) @Type(.Null) {
return x;

View File

@ -34,6 +34,7 @@ fn custom(comptime T: type, comptime num: u64) fn (T) u64 {
test "fn delegation" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const foo = Foo{};
try expect(foo.one() == 11);

View File

@ -69,6 +69,7 @@ test "basic for loop" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const expected_result = [_]u8{ 9, 8, 7, 6, 0, 1, 2, 3 } ** 3;
@ -112,6 +113,7 @@ test "for with null and T peer types and inferred result location type" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest(slice: []const u8) !void {
@ -132,6 +134,7 @@ test "for with null and T peer types and inferred result location type" {
test "2 break statements and an else" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn entry(t: bool, f: bool) !void {
@ -153,6 +156,7 @@ test "for loop with pointer elem var" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const source = "abcdefg";
var target: [source.len]u8 = undefined;
@ -179,6 +183,7 @@ fn mangleString(s: []u8) void {
test "for copies its payload" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -198,6 +203,7 @@ test "for on slice with allowzero ptr" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest(slice: []const u8) !void {
@ -213,6 +219,7 @@ test "for on slice with allowzero ptr" {
test "else continue outer for" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var i: usize = 6;
var buf: [5]u8 = undefined;
@ -226,6 +233,7 @@ test "else continue outer for" {
test "for loop with else branch" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
{
var x = [_]u32{ 1, 2 };
@ -275,6 +283,7 @@ test "two counters" {
test "1-based counter and ptr to array" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var ok: usize = 0;
@ -308,6 +317,7 @@ test "slice and two counters, one is offset and one is runtime" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const slice: []const u8 = "blah";
var start: usize = 0;
@ -337,6 +347,7 @@ test "two slices, one captured by-ref" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var buf: [10]u8 = undefined;
const slice1: []const u8 = "blah";
@ -356,6 +367,7 @@ test "raw pointer and slice" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var buf: [10]u8 = undefined;
const slice: []const u8 = "blah";
@ -375,6 +387,7 @@ test "raw pointer and counter" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var buf: [10]u8 = undefined;
const ptr: [*]u8 = &buf;
@ -393,6 +406,7 @@ test "inline for with slice as the comptime-known" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const comptime_slice = "hello";
var runtime_i: usize = 3;
@ -424,6 +438,7 @@ test "inline for with counter as the comptime-known" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var runtime_slice = "hello";
var runtime_i: usize = 3;
@ -456,6 +471,7 @@ test "inline for on tuple pointer" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct { u32, u32, u32 };
var s: S = .{ 100, 200, 300 };
@ -471,6 +487,7 @@ test "ref counter that starts at zero" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
for ([_]usize{ 0, 1, 2 }, 0..) |i, j| {
try expectEqual(i, j);
@ -486,6 +503,7 @@ test "inferred alloc ptr of for loop" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
{
var cond = false;

View File

@ -19,6 +19,7 @@ fn checkSize(comptime T: type) usize {
test "simple generic fn" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try expect(max(i32, 3, -1) == 3);
try expect(max(u8, 1, 100) == 100);
@ -55,6 +56,7 @@ test "fn with comptime args" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try expect(gimmeTheBigOne(1234, 5678) == 5678);
try expect(shouldCallSameInstance(34, 12) == 34);
@ -65,6 +67,7 @@ test "anytype params" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try expect(max_i32(12, 34) == 34);
try expect(max_f64(1.2, 3.4) == 3.4);
@ -89,6 +92,7 @@ fn max_f64(a: f64, b: f64) f64 {
test "type constructed by comptime function call" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var l: SimpleList(10) = undefined;
l.array[0] = 10;
@ -113,6 +117,7 @@ test "function with return type type" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var list: List(i32) = undefined;
var list2: List(i32) = undefined;
@ -154,6 +159,7 @@ test "generic fn with implicit cast" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try expect(getFirstByte(u8, &[_]u8{13}) == 13);
try expect(getFirstByte(u16, &[_]u16{
@ -172,6 +178,7 @@ test "generic fn keeps non-generic parameter types" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const A = 128;
@ -280,6 +287,7 @@ test "generic function instantiation turns into comptime call" {
test "generic function with void and comptime parameter" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct { x: i32 };
const namespace = struct {
@ -296,6 +304,7 @@ test "generic function with void and comptime parameter" {
test "anonymous struct return type referencing comptime parameter" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
pub fn extraData(comptime T: type, index: usize) struct { data: T, end: usize } {
@ -314,6 +323,7 @@ test "generic function instantiation non-duplicates" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.os.tag == .wasi) return error.SkipZigTest;
const S = struct {
@ -385,6 +395,7 @@ test "extern function used as generic parameter" {
test "generic struct as parameter type" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest(comptime Int: type, thing: struct { int: Int }) !void {
@ -399,6 +410,8 @@ test "generic struct as parameter type" {
}
test "slice as parameter type" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn internComptimeString(comptime str: []const u8) *const []const u8 {
return &struct {
@ -423,6 +436,7 @@ test "null sentinel pointer passed as generic argument" {
test "generic function passed as comptime argument" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doMath(comptime f: fn (type, i32, i32) error{Overflow}!i32, a: i32, b: i32) !void {
@ -435,6 +449,7 @@ test "generic function passed as comptime argument" {
test "return type of generic function is function pointer" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn b(comptime T: type) ?*const fn () error{}!T {
@ -447,6 +462,7 @@ test "return type of generic function is function pointer" {
test "coerced function body has inequal value with its uncoerced body" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
const A = B(i32, c);
@ -496,6 +512,7 @@ test "union in struct captures argument" {
test "function argument tuple used as struct field" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn DeleagateWithContext(comptime Function: type) type {
@ -530,6 +547,7 @@ test "call generic function with from function called by the generic function" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and
builtin.cpu.arch == .aarch64 and builtin.os.tag == .windows) return error.SkipZigTest;
@ -572,6 +590,7 @@ fn StructCapture(comptime T: type) type {
test "call generic function that uses capture from function declaration's scope" {
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = StructCapture(f64);
const s = S.foo(123);

View File

@ -7,6 +7,7 @@ test "store to global array" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try expect(pos[1] == 0.0);
pos = [2]f32{ 0.0, 1.0 };
@ -18,6 +19,7 @@ test "store to global vector" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try expect(vpos[1] == 0.0);
vpos = @Vector(2, f32){ 0.0, 1.0 };
@ -28,6 +30,7 @@ test "slices pointing at the same address as global array." {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
const a = [_]u8{ 1, 2, 3 };

View File

@ -12,6 +12,8 @@ const Bar = struct {
};
test "@hasDecl" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try expect(@hasDecl(Foo, "public_thing"));
try expect(!@hasDecl(Foo, "private_thing"));
try expect(!@hasDecl(Foo, "no_thing"));
@ -22,6 +24,8 @@ test "@hasDecl" {
}
test "@hasDecl using a sliced string literal" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try expect(@hasDecl(@This(), "std") == true);
try expect(@hasDecl(@This(), "std"[0..0]) == false);
try expect(@hasDecl(@This(), "std"[0..1]) == false);

View File

@ -45,6 +45,7 @@ var global_with_err: anyerror!u32 = error.SomeError;
test "unwrap mutable global var" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (global_with_val) |v| {
try expect(v == 0);
@ -82,6 +83,7 @@ test "const result loc, runtime if cond, else unreachable" {
test "if copies its payload" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -118,6 +120,7 @@ test "if peer expressions inferred optional type" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var self: []const u8 = "abcdef";
var index: usize = 0;
@ -136,6 +139,7 @@ test "if-else expression with runtime condition result location is inferred opti
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const A = struct { b: u64, c: u64 };
var d: bool = true;
@ -145,6 +149,8 @@ test "if-else expression with runtime condition result location is inferred opti
}
test "result location with inferred type ends up being pointer to comptime_int" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var a: ?u32 = 1234;
var b: u32 = 2000;
_ = .{ &a, &b };
@ -190,6 +196,8 @@ test "if value shouldn't be load-elided if used later (structs)" {
}
test "if value shouldn't be load-elided if used later (optionals)" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var a: ?i32 = 1;
var b: ?i32 = 1;

View File

@ -6,18 +6,21 @@ const a_namespace = @import("import/a_namespace.zig");
test "call fn via namespace lookup" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try expect(@as(i32, 1234) == a_namespace.foo());
}
test "importing the same thing gives the same import" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try expect(@import("std") == @import("std"));
}
test "import in non-toplevel scope" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
usingnamespace @import("import/a_namespace.zig");
@ -27,6 +30,7 @@ test "import in non-toplevel scope" {
test "import empty file" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
_ = @import("import/empty.zig");
}

View File

@ -33,6 +33,7 @@ test "import c keywords" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try std.testing.expect(int == .c_keyword_variable);
try std.testing.expect(long == .c_keyword_variable);

View File

@ -23,6 +23,7 @@ fn foo(a: A) i32 {
test "incomplete struct param top level declaration" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const a = A{
.b = B{

View File

@ -5,6 +5,7 @@ const builtin = @import("builtin");
test "inline scalar prongs" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var x: usize = 0;
switch (x) {
@ -20,6 +21,7 @@ test "inline scalar prongs" {
test "inline prong ranges" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var x: usize = 0;
_ = &x;
@ -35,6 +37,7 @@ const E = enum { a, b, c, d };
test "inline switch enums" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var x: E = .a;
_ = &x;
@ -49,6 +52,7 @@ test "inline switch unions" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var x: U = .a;
_ = &x;
@ -75,6 +79,7 @@ test "inline switch unions" {
test "inline else bool" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var a = true;
_ = &a;
@ -87,6 +92,7 @@ test "inline else bool" {
test "inline else error" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const Err = error{ a, b, c };
var a = Err.a;
@ -100,6 +106,7 @@ test "inline else error" {
test "inline else enum" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const E2 = enum(u8) { a = 2, b = 3, c = 4, d = 5 };
var a: E2 = .a;
@ -113,6 +120,7 @@ test "inline else enum" {
test "inline else int with gaps" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var a: u8 = 0;
_ = &a;
@ -131,6 +139,7 @@ test "inline else int with gaps" {
test "inline else int all values" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var a: u2 = 0;
_ = &a;

View File

@ -9,6 +9,7 @@ test "uint128" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var buff: u128 = maxInt(u128);
try expect(buff == maxInt(u128));
@ -28,6 +29,7 @@ test "undefined 128 bit int" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@setRuntimeSafety(true);
@ -47,6 +49,7 @@ test "int128" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var buff: i128 = -1;
try expect(buff < 0 and (buff + 1) == 0);
@ -70,6 +73,7 @@ test "truncate int128" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
{
var buff: u128 = maxInt(u128);
@ -93,6 +97,7 @@ test "shift int128" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const types = .{ u128, i128 };
inline for (types) |t| {

View File

@ -4,6 +4,8 @@ const maxInt = std.math.maxInt;
const builtin = @import("builtin");
test "int comparison elision" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
testIntEdges(u0);
testIntEdges(i0);
testIntEdges(u1);

View File

@ -6,6 +6,7 @@ test "integer division" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testDivision();
try comptime testDivision();
@ -97,6 +98,7 @@ test "large integer division" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
{
var numerator: u256 = 99999999999999999997315645440;

View File

@ -21,6 +21,7 @@ test "ir block deps" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try expect((foo(1) catch unreachable) == 0);
try expect((foo(2) catch unreachable) == 0);

View File

@ -6,6 +6,7 @@ test "strlit to vector" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const strlit = "0123456789abcdef0123456789ABCDEF";
const vec_from_strlit: @Vector(32, u8) = strlit.*;

View File

@ -12,6 +12,7 @@ const math = std.math;
test "assignment operators" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var i: u32 = 0;
i += 5;
@ -64,6 +65,7 @@ test "@clz" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testClz();
try comptime testClz();
@ -82,6 +84,7 @@ test "@clz big ints" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testClzBigInts();
try comptime testClzBigInts();
@ -103,6 +106,7 @@ test "@clz vectors" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testClzVectors();
try comptime testClzVectors();
@ -146,6 +150,7 @@ test "@ctz" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testCtz();
try comptime testCtz();
@ -169,6 +174,7 @@ test "@ctz 128-bit integers" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testCtz128();
try comptime testCtz128();
@ -187,6 +193,7 @@ test "@ctz vectors" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64) {
// This regressed with LLVM 14:
@ -229,6 +236,7 @@ test "float equality" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const x: f64 = 0.012;
const y: f64 = x + 1.0;
@ -343,6 +351,8 @@ test "comptime_int multi-limb partial shift right" {
}
test "xor" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try test_xor();
try comptime test_xor();
}
@ -385,6 +395,8 @@ fn comptimeAdd(comptime a: comptime_int, comptime b: comptime_int) comptime_int
}
test "binary not" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try expect(comptime x: {
break :x ~@as(u16, 0b1010101010101010) == 0b0101010101010101;
});
@ -407,6 +419,7 @@ test "binary not 128-bit" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try expect(comptime x: {
break :x ~@as(u128, 0x55555555_55555555_55555555_55555555) == 0xaaaaaaaa_aaaaaaaa_aaaaaaaa_aaaaaaaa;
@ -430,6 +443,7 @@ test "division" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch.isMIPS()) {
// https://github.com/ziglang/zig/issues/16846
@ -518,6 +532,7 @@ test "division half-precision floats" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testDivisionFP16();
try comptime testDivisionFP16();
@ -554,6 +569,8 @@ fn mod(comptime T: type, a: T, b: T) T {
}
test "unsigned wrapping" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testUnsignedWrappingEval(maxInt(u32));
try comptime testUnsignedWrappingEval(maxInt(u32));
}
@ -565,6 +582,8 @@ fn testUnsignedWrappingEval(x: u32) !void {
}
test "signed wrapping" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testSignedWrappingEval(maxInt(i32));
try comptime testSignedWrappingEval(maxInt(i32));
}
@ -576,6 +595,8 @@ fn testSignedWrappingEval(x: i32) !void {
}
test "signed negation wrapping" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testSignedNegationWrappingEval(minInt(i16));
try comptime testSignedNegationWrappingEval(minInt(i16));
}
@ -586,6 +607,8 @@ fn testSignedNegationWrappingEval(x: i16) !void {
}
test "unsigned negation wrapping" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testUnsignedNegationWrappingEval(1);
try comptime testUnsignedNegationWrappingEval(1);
}
@ -598,6 +621,7 @@ fn testUnsignedNegationWrappingEval(x: u16) !void {
test "negation wrapping" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try expectEqual(@as(u1, 1), negateWrap(u1, 1));
}
@ -611,6 +635,7 @@ fn negateWrap(comptime T: type, x: T) T {
test "unsigned 64-bit division" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch.isMIPS()) {
// https://github.com/ziglang/zig/issues/16846
@ -644,6 +669,8 @@ test "bit shift a u1" {
}
test "truncating shift right" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testShrTrunc(maxInt(u16));
try comptime testShrTrunc(maxInt(u16));
}
@ -658,6 +685,7 @@ test "f128" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try test_f128();
try comptime test_f128();
@ -689,6 +717,7 @@ test "128-bit multiplication" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArmOrThumb()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
{
var a: i128 = 3;
@ -715,6 +744,7 @@ test "@addWithOverflow" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
{
var a: u8 = 250;
@ -765,6 +795,7 @@ test "@addWithOverflow" {
test "small int addition" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var x: u2 = 0;
try expect(x == 0);
@ -786,6 +817,7 @@ test "small int addition" {
test "basic @mulWithOverflow" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
{
var a: u8 = 86;
@ -818,6 +850,7 @@ test "basic @mulWithOverflow" {
test "extensive @mulWithOverflow" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
{
var a: u5 = 3;
@ -989,6 +1022,8 @@ test "extensive @mulWithOverflow" {
}
test "@mulWithOverflow bitsize > 32" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
// aarch64 fails on a release build of the compiler.
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@ -1056,6 +1091,7 @@ test "@mulWithOverflow u256" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
{
const const_lhs: u256 = 8035709466408580321693645878924206181189;
@ -1091,6 +1127,7 @@ test "@subWithOverflow" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
{
var a: u8 = 1;
@ -1143,6 +1180,7 @@ test "@shlWithOverflow" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
{
var a: u4 = 2;
@ -1250,6 +1288,7 @@ test "quad hex float literal parsing accurate" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const a: f128 = 0x1.1111222233334444555566667777p+0;
@ -1345,6 +1384,8 @@ test "quad hex float literal parsing accurate" {
}
test "truncating shift left" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testShlTrunc(maxInt(u16));
try comptime testShlTrunc(maxInt(u16));
}
@ -1354,6 +1395,8 @@ fn testShlTrunc(x: u16) !void {
}
test "exact shift left" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testShlExact(0b00110101);
try comptime testShlExact(0b00110101);
@ -1365,6 +1408,8 @@ fn testShlExact(x: u8) !void {
}
test "exact shift right" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testShrExact(0b10110100);
try comptime testShrExact(0b10110100);
}
@ -1374,6 +1419,8 @@ fn testShrExact(x: u8) !void {
}
test "shift left/right on u0 operand" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
var x: u0 = 0;
@ -1408,6 +1455,7 @@ test "remainder division" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArmOrThumb()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.os.tag == .windows) {
// https://github.com/ziglang/zig/issues/12602
@ -1446,6 +1494,7 @@ test "float remainder division using @rem" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try comptime frem(f16);
try comptime frem(f32);
@ -1489,6 +1538,7 @@ test "float modulo division using @mod" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try comptime fmod(f16);
try comptime fmod(f32);
@ -1531,6 +1581,7 @@ test "@round f16" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testRound(f16, 12.0);
try comptime testRound(f16, 12.0);
@ -1542,6 +1593,7 @@ test "@round f32/f64" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testRound(f64, 12.0);
try comptime testRound(f64, 12.0);
@ -1561,6 +1613,7 @@ test "@round f80" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArmOrThumb()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testRound(f80, 12.0);
try comptime testRound(f80, 12.0);
@ -1573,6 +1626,7 @@ test "@round f128" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArmOrThumb()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testRound(f128, 12.0);
try comptime testRound(f128, 12.0);
@ -1590,6 +1644,7 @@ test "vector integer addition" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -1612,6 +1667,7 @@ test "NaN comparison" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testNanEqNan(f16);
try testNanEqNan(f32);
@ -1629,6 +1685,7 @@ test "NaN comparison f80" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testNanEqNan(f80);
try comptime testNanEqNan(f80);
@ -1651,6 +1708,7 @@ test "vector comparison" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .avx2)) return error.SkipZigTest;
@ -1683,6 +1741,7 @@ test "signed zeros are represented properly" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -1712,6 +1771,7 @@ test "absFloat" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testAbsFloat();
try comptime testAbsFloat();
@ -1745,6 +1805,7 @@ test "@clz works on both vector and scalar inputs" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var x: u32 = 0x1;
_ = &x;

View File

@ -9,6 +9,7 @@ test "@max" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -31,6 +32,7 @@ test "@max on vectors" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sse4_1)) return error.SkipZigTest;
@ -63,6 +65,7 @@ test "@min" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -85,6 +88,7 @@ test "@min for vectors" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sse4_1)) return error.SkipZigTest;
@ -120,6 +124,7 @@ test "@min/max for floats" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArmOrThumb()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest(comptime T: type) !void {
@ -167,6 +172,7 @@ test "@min/@max more than two vector arguments" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const x: @Vector(2, u32) = .{ 3, 2 };
const y: @Vector(2, u32) = .{ 4, 1 };
@ -198,6 +204,7 @@ test "@min/@max notices vector bounds" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var x: @Vector(2, u16) = .{ 140, 40 };
const y: @Vector(2, u64) = .{ 5, 100 };
@ -251,6 +258,7 @@ test "@min/@max notices bounds from vector types" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var x: @Vector(2, u16) = .{ 30, 67 };
var y: @Vector(2, u32) = .{ 20, 500 };
@ -271,6 +279,7 @@ test "@min/@max notices bounds from types when comptime-known value is undef" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var x: u32 = 1_000_000;
_ = &x;
@ -291,6 +300,7 @@ test "@min/@max notices bounds from vector types when element of comptime-known
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .avx)) return error.SkipZigTest;

View File

@ -31,6 +31,7 @@ test "standard field calls" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try expect(HasFuncs.one(0) == 1);
try expect(HasFuncs.two(0) == 2);
@ -75,6 +76,7 @@ test "@field field calls" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try expect(@field(HasFuncs, "one")(0) == 1);
try expect(@field(HasFuncs, "two")(0) == 2);

View File

@ -7,6 +7,7 @@ test "memcpy and memset intrinsics" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testMemcpyMemset();
try comptime testMemcpyMemset();
@ -28,6 +29,7 @@ test "@memcpy with both operands single-ptr-to-array, one is null-terminated" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testMemcpyBothSinglePtrArrayOneIsNullTerminated();
try comptime testMemcpyBothSinglePtrArrayOneIsNullTerminated();
@ -48,6 +50,7 @@ test "@memcpy dest many pointer" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testMemcpyDestManyPtr();
try comptime testMemcpyDestManyPtr();
@ -70,6 +73,7 @@ test "@memcpy slice" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testMemcpySlice();
try comptime testMemcpySlice();

View File

@ -7,6 +7,7 @@ test "@memset on array pointers" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testMemsetArray();
try comptime testMemsetArray();
@ -36,6 +37,7 @@ test "@memset on slices" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testMemsetSlice();
try comptime testMemsetSlice();
@ -71,6 +73,7 @@ test "memset with bool element" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var buf: [5]bool = undefined;
@memset(&buf, true);
@ -83,6 +86,7 @@ test "memset with 1-byte struct element" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct { x: bool };
var buf: [5]S = undefined;
@ -96,6 +100,7 @@ test "memset with 1-byte array element" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const A = [1]bool;
var buf: [5]A = undefined;
@ -109,6 +114,7 @@ test "memset with large array element, runtime known" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const A = [128]u64;
var buf: [5]A = undefined;
@ -127,6 +133,7 @@ test "memset with large array element, comptime known" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const A = [128]u64;
var buf: [5]A = undefined;
@ -144,6 +151,7 @@ test "@memset provides result type" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct { x: u32 };
@ -162,6 +170,7 @@ test "zero keys with @memset" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const Keys = struct {
up: bool,

View File

@ -13,6 +13,7 @@ fn foo() C!void {
test "merge error sets" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (foo()) {
@panic("unexpected");

View File

@ -6,6 +6,8 @@ const no_x86_64_hardware_fma_support = builtin.zig_backend == .stage2_x86_64 and
!std.Target.x86.featureSetHas(builtin.cpu.features, .fma);
test "@mulAdd" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (no_x86_64_hardware_fma_support) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@ -37,6 +39,7 @@ test "@mulAdd f16" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try comptime testMulAdd16();
try testMulAdd16();
@ -57,6 +60,7 @@ test "@mulAdd f80" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArmOrThumb()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try comptime testMulAdd80();
try testMulAdd80();
@ -77,6 +81,7 @@ test "@mulAdd f128" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArmOrThumb()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try comptime testMulAdd128();
try testMulAdd128();
@ -109,6 +114,7 @@ test "vector f16" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try comptime vector16();
try vector16();
@ -129,6 +135,7 @@ fn vector32() !void {
test "vector f32" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (no_x86_64_hardware_fma_support) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@ -153,6 +160,7 @@ fn vector64() !void {
test "vector f64" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (no_x86_64_hardware_fma_support) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@ -182,6 +190,7 @@ test "vector f80" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArmOrThumb()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try comptime vector80();
try vector80();
@ -208,6 +217,7 @@ test "vector f128" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArmOrThumb()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try comptime vector128();
try vector128();

View File

@ -16,6 +16,7 @@ test "call extern function defined with conflicting type" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@import("conflicting_externs/a.zig").issue529(null);
issue529(null);

View File

@ -4,6 +4,7 @@ const expect = std.testing.expect;
test "namespace depends on compile var" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (some_namespace.a_bool) {
try expect(some_namespace.a_bool);

View File

@ -26,6 +26,7 @@ test "nan memory equality" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
// signaled
try testing.expect(mem.eql(u8, mem.asBytes(&snan_u16), mem.asBytes(&snan_f16)));

View File

@ -32,6 +32,7 @@ test "test maybe object and get a pointer to the inner value" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var maybe_bool: ?bool = true;
@ -52,6 +53,7 @@ test "maybe return" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try maybeReturnImpl();
try comptime maybeReturnImpl();
@ -71,6 +73,7 @@ fn foo(x: ?i32) ?bool {
test "test null runtime" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testTestNullRuntime(null);
}
@ -82,6 +85,7 @@ fn testTestNullRuntime(x: ?i32) !void {
test "optional void" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try optionalVoidImpl();
try comptime optionalVoidImpl();
@ -105,6 +109,7 @@ const Empty = struct {};
test "optional struct{}" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
_ = try optionalEmptyStructImpl();
_ = try comptime optionalEmptyStructImpl();
@ -130,6 +135,7 @@ test "null with default unwrap" {
test "optional pointer to 0 bit type null value at runtime" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const EmptyStruct = struct {};
var x: ?*EmptyStruct = null;
@ -141,6 +147,7 @@ test "if var maybe pointer" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try expect(shouldBeAPlus1(Particle{
.a = 14,
@ -184,6 +191,7 @@ test "unwrap optional which is field of global var" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
struct_with_optional.field = null;
if (struct_with_optional.field) |payload| {

View File

@ -9,6 +9,7 @@ const expectEqualStrings = std.testing.expectEqualStrings;
test "passing an optional integer as a parameter" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn entry() bool {
@ -28,6 +29,7 @@ pub const EmptyStruct = struct {};
test "optional pointer to size zero struct" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var e = EmptyStruct{};
const o: ?*EmptyStruct = &e;
@ -58,6 +60,7 @@ fn testNullPtrsEql() !void {
test "optional with zero-bit type" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest(comptime ZeroBit: type, comptime zero_bit: ZeroBit) !void {
@ -110,6 +113,7 @@ test "address of unwrap optional" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
const Foo = struct {
@ -131,6 +135,7 @@ test "nested optional field in struct" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S2 = struct {
y: u8,
@ -149,6 +154,7 @@ test "equality compare optionals and non-optionals" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -206,6 +212,7 @@ test "equality compare optionals and non-optionals" {
test "compare optionals with modified payloads" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var lhs: ?bool = false;
const lhs_payload = &lhs.?;
@ -233,6 +240,7 @@ test "compare optionals with modified payloads" {
test "unwrap function call with optional pointer return value" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn entry() !void {
@ -254,6 +262,7 @@ test "unwrap function call with optional pointer return value" {
test "nested orelse" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn entry() !void {
@ -280,6 +289,7 @@ test "nested orelse" {
test "self-referential struct through a slice of optional" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
const Node = struct {
@ -316,6 +326,7 @@ test "coerce an anon struct literal to optional struct" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
const Struct = struct {
@ -335,6 +346,7 @@ test "0-bit child type coerced to optional return ptr result location" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -360,6 +372,7 @@ test "0-bit child type coerced to optional return ptr result location" {
test "0-bit child type coerced to optional" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -387,6 +400,7 @@ test "array of optional unaligned types" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const Enum = enum { one, two, three };
@ -423,6 +437,7 @@ test "optional pointer to zero bit optional payload" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const B = struct {
fn foo(_: *@This()) void {}
@ -442,6 +457,7 @@ test "optional pointer to zero bit error union payload" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const B = struct {
fn foo(_: *@This()) void {}
@ -475,6 +491,7 @@ const NoReturn = struct {
test "optional of noreturn used with if" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
NoReturn.a = 64;
if (NoReturn.loop()) |_| {
@ -486,6 +503,7 @@ test "optional of noreturn used with if" {
test "optional of noreturn used with orelse" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
NoReturn.a = 64;
const val = NoReturn.testOrelse();
@ -505,6 +523,7 @@ test "alignment of wrapping an optional payload" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
const I = extern struct { x: i128 };
@ -522,6 +541,7 @@ test "Optional slice size is optimized" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try expect(@sizeOf(?[]u8) == @sizeOf([]u8));
var a: ?[]const u8 = null;
@ -535,6 +555,7 @@ test "Optional slice passed to function" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn foo(a: ?[]const u8) !void {
@ -551,6 +572,7 @@ test "Optional slice passed to function" {
test "peer type resolution in nested if expressions" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const Thing = struct { n: i32 };
var a = false;
@ -578,6 +600,7 @@ test "cast slice to const slice nested in error union and optional" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn inner() !?[]u8 {
@ -591,6 +614,8 @@ test "cast slice to const slice nested in error union and optional" {
}
test "variable of optional of noreturn" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var null_opv: ?noreturn = null;
_ = &null_opv;
try std.testing.expectEqual(@as(?noreturn, null), null_opv);
@ -600,6 +625,7 @@ test "copied optional doesn't alias source" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var opt_x: ?[3]f32 = [_]f32{0.0} ** 3;
@ -614,6 +640,7 @@ test "result location initialization of optional with OPV payload" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
x: u0,

View File

@ -238,6 +238,7 @@ test "regular in irregular packed struct" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const Irregular = packed struct {
bar: Regular = Regular{},
@ -258,6 +259,7 @@ test "nested packed struct unaligned" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (native_endian != .little) return error.SkipZigTest; // Byte aligned packed struct field pointers have not been implemented yet
const S1 = packed struct {
@ -330,6 +332,7 @@ test "byte-aligned field pointer offsets" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
const A = packed struct {
@ -432,6 +435,7 @@ test "nested packed struct field pointers" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // ubsan unaligned pointer access
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (native_endian != .little) return error.SkipZigTest; // Byte aligned packed struct field pointers have not been implemented yet
const S2 = packed struct {
@ -469,6 +473,7 @@ test "load pointer from packed struct" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const A = struct {
index: u16,
@ -489,6 +494,7 @@ test "@intFromPtr on a packed struct field" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (native_endian != .little) return error.SkipZigTest;
const S = struct {
@ -512,6 +518,7 @@ test "@intFromPtr on a packed struct field unaligned and nested" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (native_endian != .little) return error.SkipZigTest; // Byte aligned packed struct field pointers have not been implemented yet
const S1 = packed struct {
@ -618,6 +625,8 @@ test "@intFromPtr on a packed struct field unaligned and nested" {
}
test "packed struct fields modification" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
// Originally reported at https://github.com/ziglang/zig/issues/16615
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
@ -646,6 +655,7 @@ test "optional pointer in packed struct" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const T = packed struct { ptr: ?*const u8 };
var n: u8 = 0;
@ -661,6 +671,7 @@ test "nested packed struct field access test" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const Vec2 = packed struct {
x: f32,
@ -777,6 +788,7 @@ test "nested packed struct field access test" {
test "nested packed struct at non-zero offset" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const Pair = packed struct(u24) {
a: u16 = 0,
@ -810,6 +822,7 @@ test "nested packed struct at non-zero offset 2" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO packed structs larger than 64 bits
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
const Pair = packed struct(u40) {
@ -890,6 +903,7 @@ test "packed struct passed to callconv(.C) function" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
const Packed = packed struct {
@ -938,6 +952,7 @@ test "packed struct initialized in bitcast" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const T = packed struct { val: u8 };
var val: u8 = 123;
@ -951,6 +966,7 @@ test "pointer to container level packed struct field" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = packed struct(u32) {
test_bit: bool,
@ -975,6 +991,7 @@ test "store undefined to packed result location" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var x: u4 = 0;
_ = &x;
@ -995,6 +1012,8 @@ test "bitcast back and forth" {
}
test "field access of packed struct smaller than its abi size inside struct initialized with rls" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
// Originally reported at https://github.com/ziglang/zig/issues/14200
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
@ -1013,6 +1032,8 @@ test "field access of packed struct smaller than its abi size inside struct init
}
test "modify nested packed struct aligned field" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
// Originally reported at https://github.com/ziglang/zig/issues/14632
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
@ -1043,6 +1064,8 @@ test "modify nested packed struct aligned field" {
}
test "assigning packed struct inside another packed struct" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
// Originally reported at https://github.com/ziglang/zig/issues/9674
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
@ -1077,6 +1100,7 @@ test "packed struct used as part of anon decl name" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = packed struct { a: u0 = 0 };
var a: u8 = 0;
@ -1099,6 +1123,8 @@ test "packed struct acts as a namespace" {
}
test "pointer loaded correctly from packed struct" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const RAM = struct {
data: [0xFFFF + 1]u8,
fn new() !@This() {
@ -1142,6 +1168,7 @@ test "assignment to non-byte-aligned field in packed struct" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const Frame = packed struct {
num: u20,
@ -1164,6 +1191,7 @@ test "packed struct field pointer aligned properly" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const Foo = packed struct {
a: i32,
@ -1183,6 +1211,7 @@ test "load flag from packed struct in union" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const A = packed struct {
a: bool,
@ -1251,6 +1280,7 @@ test "2-byte packed struct argument in C calling convention" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = packed struct(u16) {
x: u15 = 0,

View File

@ -8,6 +8,7 @@ test "flags in packed union" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testFlagsInPackedUnion();
try comptime testFlagsInPackedUnion();
@ -50,6 +51,7 @@ test "flags in packed union at offset" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testFlagsInPackedUnionAtOffset();
try comptime testFlagsInPackedUnionAtOffset();
@ -98,6 +100,8 @@ fn testFlagsInPackedUnionAtOffset() !void {
}
test "packed union in packed struct" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
// Originally reported at https://github.com/ziglang/zig/issues/16581
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
@ -137,6 +141,7 @@ test "packed union initialized with a runtime value" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const Fields = packed struct {
timestamp: u50,

View File

@ -20,6 +20,7 @@ fn testDerefPtr() !void {
test "pointer arithmetic" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var ptr: [*]const u8 = "abcd";
@ -68,6 +69,7 @@ test "initialize const optional C pointer to null" {
test "assigning integer to C pointer" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var x: i32 = 0;
var y: i32 = 1;
@ -85,6 +87,7 @@ test "assigning integer to C pointer" {
test "C pointer comparison and arithmetic" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -154,6 +157,7 @@ test "implicit casting between C pointer and optional non-C pointer" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var slice: []const u8 = "aoeu";
_ = &slice;
@ -170,6 +174,7 @@ test "implicit cast error unions with non-optional to optional pointer" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -197,6 +202,7 @@ test "allowzero pointer and slice" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var ptr: [*]allowzero i32 = @ptrFromInt(0);
const opt_ptr: ?[*]allowzero i32 = ptr;
@ -216,6 +222,7 @@ test "assign null directly to C pointer and test null equality" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var x: [*c]i32 = null;
_ = &x;
@ -283,6 +290,7 @@ test "array initialization types" {
test "null terminated pointer" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -300,6 +308,7 @@ test "null terminated pointer" {
test "allow any sentinel" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -315,6 +324,7 @@ test "allow any sentinel" {
test "pointer sentinel with enums" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
const Number = enum {
@ -337,6 +347,7 @@ test "pointer sentinel with optional element" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -353,6 +364,7 @@ test "pointer sentinel with +inf" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -430,6 +442,7 @@ test "indexing array with sentinel returns correct type" {
test "element pointer to slice" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -452,6 +465,7 @@ test "element pointer to slice" {
test "element pointer arithmetic to slice" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -476,6 +490,7 @@ test "element pointer arithmetic to slice" {
test "array slicing to slice" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -506,6 +521,7 @@ test "ptrCast comptime known slice to C pointer" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const s: [:0]const u8 = "foo";
var p: [*c]const u8 = @ptrCast(s);
@ -525,6 +541,7 @@ test "pointer alignment and element type include call expression" {
test "pointer to array has explicit alignment" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
const Base = extern struct { a: u8 };

View File

@ -8,6 +8,7 @@ test "@popCount integers" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try comptime testPopCountIntegers();
try testPopCountIntegers();
@ -18,6 +19,7 @@ test "@popCount 128bit integer" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
comptime {
try expect(@popCount(@as(u128, 0b11111111000110001100010000100001000011000011100101010001)) == 24);
@ -81,6 +83,7 @@ test "@popCount vectors" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try comptime testPopCountVectors();
try testPopCountVectors();

View File

@ -3,6 +3,7 @@ const std = @import("std");
test "@prefetch()" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var a: [2]u32 = .{ 42, 42 };
var a_len = a.len;

View File

@ -58,6 +58,7 @@ fn testReinterpretStructWrappedBytesAsInteger() !void {
test "reinterpret bytes of an array into an extern struct" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testReinterpretBytesAsExternStruct();
try comptime testReinterpretBytesAsExternStruct();
@ -232,6 +233,7 @@ test "implicit optional pointer to optional anyopaque pointer" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var buf: [4]u8 = "aoeu".*;
const x: ?[*]u8 = &buf;
@ -244,6 +246,7 @@ test "@ptrCast slice to slice" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn foo(slice: []u32) []i32 {

View File

@ -3,6 +3,8 @@ const builtin = @import("builtin");
const expectEqual = std.testing.expectEqual;
test "casting integer address to function pointer" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
addressToFunction();
comptime addressToFunction();
}
@ -17,6 +19,7 @@ test "mutate through ptr initialized with constant ptrFromInt value" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
forceCompilerAnalyzeBranchHardCodedPtrDereference(false);
}
@ -34,6 +37,7 @@ test "@ptrFromInt creates null pointer" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const ptr = @as(?*u32, @ptrFromInt(0));
try expectEqual(@as(?*u32, null), ptr);
@ -43,6 +47,7 @@ test "@ptrFromInt creates allowzero zero pointer" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const ptr = @as(*allowzero u32, @ptrFromInt(0));
try expectEqual(@as(usize, 0), @intFromPtr(ptr));

View File

@ -4,6 +4,7 @@ const expect = @import("std").testing.expect;
test "pub enum" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try pubEnumTest(other.APubEnum.Two);
}
@ -13,6 +14,7 @@ fn pubEnumTest(foo: other.APubEnum) !void {
test "cast with imported symbol" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try expect(@as(other.size_t, 42) == 42);
}

View File

@ -8,6 +8,7 @@ test "reference a variable in an if after an if in the 2nd switch prong" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try foo(true, Num.Two, false, "aoeu");
try expect(!ok);

View File

@ -28,6 +28,7 @@ fn dummy(a: bool, b: i32, c: f32) i32 {
test "reflection: @field" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var f = Foo{
.one = 42,

View File

@ -10,6 +10,7 @@ test "return address" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
_ = retAddr();
// TODO: #14938

View File

@ -9,6 +9,7 @@ test "saturating add" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -57,6 +58,7 @@ test "saturating add 128bit" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -82,6 +84,7 @@ test "saturating subtraction" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -129,6 +132,7 @@ test "saturating subtraction 128bit" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -158,6 +162,7 @@ test "saturating multiplication" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArmOrThumb()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .wasm32) {
// https://github.com/ziglang/zig/issues/9660
@ -203,6 +208,7 @@ test "saturating shift-left" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -243,6 +249,7 @@ test "saturating shl uses the LHS type" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const lhs_const: u8 = 1;
var lhs_var: u8 = 1;

View File

@ -9,6 +9,7 @@ test "@select vectors" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try comptime selectVectors();
try selectVectors();
@ -39,6 +40,7 @@ test "@select arrays" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .avx2)) return error.SkipZigTest;

View File

@ -7,6 +7,7 @@ test "@shuffle int" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .ssse3)) return error.SkipZigTest;
@ -54,6 +55,7 @@ test "@shuffle bool 1" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and
builtin.cpu.arch == .aarch64 and builtin.os.tag == .windows)
@ -83,6 +85,7 @@ test "@shuffle bool 2" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm) {
// https://github.com/ziglang/zig/issues/3246

View File

@ -81,6 +81,7 @@ const P = packed struct {
test "@offsetOf" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
// Packed structs have fixed memory layout
try expect(@offsetOf(P, "a") == 0);
@ -157,6 +158,7 @@ test "@TypeOf() has no runtime side effects" {
test "branching logic inside @TypeOf" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
var data: i32 = 0;
@ -271,6 +273,7 @@ test "runtime instructions inside typeof in comptime only scope" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
{
var y: i8 = 2;
@ -327,6 +330,7 @@ test "peer type resolution with @TypeOf doesn't trigger dependency loop check" {
if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const T = struct {
next: @TypeOf(null, @as(*const @This(), undefined)),
@ -408,6 +412,7 @@ test "Extern function calls, dereferences and field access in @TypeOf" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const Test = struct {
fn test_fn_1(a: c_long) @TypeOf(c_fopen("test", "r").*) {

View File

@ -30,6 +30,7 @@ comptime {
test "slicing" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var array: [20]i32 = undefined;
@ -66,6 +67,7 @@ test "comptime slice of undefined pointer of length 0" {
test "implicitly cast array of size 0 to slice" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var msg = [_]u8{};
try assertLenIsZero(&msg);
@ -122,6 +124,7 @@ test "slice of type" {
test "generic malloc free" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const a = memAlloc(u8, 10) catch unreachable;
memFree(u8, a);
@ -183,6 +186,8 @@ test "slicing zero length array" {
}
test "slicing pointer by length" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const array = [_]u8{ 1, 2, 3, 4, 5, 6, 7, 8 };
const ptr: [*]const u8 = @as([*]const u8, @ptrCast(&array));
const slice = ptr[1..][0..5];
@ -231,6 +236,7 @@ test "runtime safety lets us slice from len..len" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var an_array = [_]u8{ 1, 2, 3 };
try expect(mem.eql(u8, sliceFromLenToLen(an_array[0..], 3, 3), ""));
@ -243,6 +249,7 @@ fn sliceFromLenToLen(a_slice: []u8, start: usize, end: usize) []u8 {
test "C pointer" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var buf: [*c]const u8 = "kjdhfkjdhfdkjhfkfjhdfkjdhfkdjhfdkjhf";
var len: u32 = 10;
@ -255,6 +262,7 @@ test "C pointer slice access" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var buf: [10]u32 = [1]u32{42} ** 10;
const c_ptr = @as([*c]const u32, @ptrCast(&buf));
@ -285,6 +293,7 @@ fn sliceSum(comptime q: []const u8) i32 {
test "slice type with custom alignment" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const LazilyResolvedType = struct {
anything: i32,
@ -298,6 +307,7 @@ test "slice type with custom alignment" {
test "obtaining a null terminated slice" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
// here we have a normal array
var buf: [50]u8 = undefined;
@ -342,6 +352,7 @@ test "empty array to slice" {
test "@ptrCast slice to pointer" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -357,6 +368,8 @@ test "@ptrCast slice to pointer" {
}
test "slice multi-pointer without end" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
try testPointer();
@ -394,6 +407,7 @@ test "slice syntax resulting in pointer-to-array" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -612,6 +626,7 @@ test "slice syntax resulting in pointer-to-array" {
test "slice pointer-to-array null terminated" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
comptime {
var array = [5:0]u8{ 1, 2, 3, 4, 5 };
@ -630,6 +645,7 @@ test "slice pointer-to-array null terminated" {
test "slice pointer-to-array zero length" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
comptime {
{
@ -664,6 +680,7 @@ test "type coercion of pointer to anon struct literal to pointer to slice" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
const U = union {
@ -755,6 +772,7 @@ test "slice sentinel access at comptime" {
test "slicing array with sentinel as end index" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn do() !void {
@ -773,6 +791,7 @@ test "slicing array with sentinel as end index" {
test "slicing slice with sentinel as end index" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn do() !void {
@ -831,6 +850,7 @@ test "global slice field access" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
var slice: []const u8 = undefined;
@ -842,6 +862,8 @@ test "global slice field access" {
}
test "slice of void" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var n: usize = 10;
_ = &n;
var arr: [12]void = undefined;
@ -850,6 +872,8 @@ test "slice of void" {
}
test "slice with dereferenced value" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var a: usize = 0;
const idx: *usize = &a;
_ = blk: {
@ -884,6 +908,7 @@ test "empty slice ptr is non null" {
test "slice decays to many pointer" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var buf: [8]u8 = "abcdefg\x00".*;
const p: [*:0]const u8 = buf[0..7 :0];
@ -894,6 +919,7 @@ test "write through pointer to optional slice arg" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn bar(foo: *?[]const u8) !void {
@ -930,6 +956,7 @@ test "slicing zero length array field of struct" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
a: [0]usize,
@ -945,6 +972,7 @@ test "slicing slices gives correct result" {
if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const foo = "1234";
const bar = foo[0..4];
@ -959,6 +987,7 @@ test "get address of element of zero-sized slice" {
if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn destroy(_: *void) void {}
@ -972,6 +1001,7 @@ test "sentinel-terminated 0-length slices" {
if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const u32s: [4]u32 = [_]u32{ 0, 1, 2, 3 };

View File

@ -17,6 +17,7 @@ test "@src" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try doTheTest();
}
@ -37,6 +38,8 @@ test "@src used as a comptime parameter" {
}
test "@src in tuple passed to anytype function" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn Foo(a: anytype) u32 {
return a[0].line;

View File

@ -34,6 +34,7 @@ const ptr_type_name: [*:0]const u8 = type_name;
test "@typeName() returns a string literal" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try std.testing.expect(*const [type_name.len:0]u8 == @TypeOf(type_name));
try std.testing.expect(std.mem.eql(u8, "behavior.string_literals.TestType", type_name));
@ -47,6 +48,7 @@ const expected_contents = "hello zig\n";
test "@embedFile() returns a string literal" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try std.testing.expect(*const [expected_contents.len:0]u8 == @TypeOf(actual_contents));
try std.testing.expect(std.mem.eql(u8, expected_contents, actual_contents));
@ -61,6 +63,7 @@ fn testFnForSrc() std.builtin.SourceLocation {
test "@src() returns a struct containing 0-terminated string slices" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const src = testFnForSrc();
try std.testing.expect([:0]const u8 == @TypeOf(src.file));

Some files were not shown because too many files have changed in this diff Show More