mirror of https://github.com/ziglang/zig.git
riscv: implement more operators

we can run `std.debug.print` now, with both run-time strings and integers!

commit 8d30fc45c4 (parent 9766b68c47)
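As a quick illustration of the commit message (this snippet is not part of the diff, and the exact flags for selecting the self-hosted riscv64 backend are an assumption), a program along these lines now runs under the stage2_riscv64 backend:

const std = @import("std");

pub fn main() void {
    var buf: [32]u8 = undefined;
    // build the string at run time so neither argument is comptime-known
    const name = std.fmt.bufPrint(&buf, "riscv{d}", .{64}) catch unreachable;
    std.debug.print("hello from {s}, name is {d} bytes\n", .{ name, name.len });
}

The diff below relies on this in two places: the simple test runner now prints PASS/FAIL/SKIP lines plus a summary, and the riscv64 panic handler switches from a raw write syscall to std.debug.print.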
@@ -271,6 +271,7 @@ pub fn mainSimple() anyerror!void {
     };
     // is the backend capable of using std.fmt.format to print a summary at the end?
     const print_summary = switch (builtin.zig_backend) {
+        .stage2_riscv64 => true,
         else => false,
     };

@@ -282,11 +283,13 @@ pub fn mainSimple() anyerror!void {
     const stderr = if (comptime enable_print) std.io.getStdErr() else {};

     for (builtin.test_functions) |test_fn| {
+        if (test_fn.func()) |_| {
             if (enable_print) {
                 stderr.writeAll(test_fn.name) catch {};
                 stderr.writeAll("... ") catch {};
+                stderr.writeAll("PASS\n") catch {};
             }
-        test_fn.func() catch |err| {
+        } else |err| if (enable_print) {
             if (enable_print) {
                 stderr.writeAll(test_fn.name) catch {};
                 stderr.writeAll("... ") catch {};
@@ -300,8 +303,7 @@ pub fn mainSimple() anyerror!void {
             if (enable_print) stderr.writeAll("SKIP\n") catch {};
             skipped += 1;
             continue;
-        };
-        if (enable_print) stderr.writeAll("PASS\n") catch {};
+        }
         passed += 1;
     }
     if (enable_print and print_summary) {
@@ -775,14 +775,7 @@ pub fn default_panic(msg: []const u8, error_return_trace: ?*StackTrace, ret_addr
     }

     if (builtin.zig_backend == .stage2_riscv64) {
-        asm volatile ("ecall"
-            :
-            : [number] "{a7}" (64),
-              [arg1] "{a0}" (1),
-              [arg2] "{a1}" (@intFromPtr(msg.ptr)),
-              [arg3] "{a2}" (msg.len),
-            : "memory"
-        );
+        std.debug.print("panic: {s}\n", .{msg});
         std.posix.exit(127);
     }

@@ -159,7 +159,7 @@ pub fn create(arena: Allocator, options: CreateOptions) !*Package.Module {

         if (options.inherited.single_threaded) |x| break :b x;
         if (options.parent) |p| break :b p.single_threaded;
-        break :b target_util.defaultSingleThreaded(target);
+        break :b target_util.defaultSingleThreaded(target, zig_backend);
     };

     const error_tracing = b: {
@@ -51,7 +51,6 @@ const InnerError = CodeGenError || error{OutOfRegisters};
 pt: Zcu.PerThread,
 air: Air,
 liveness: Liveness,
-zcu: *Zcu,
 bin_file: *link.File,
 gpa: Allocator,

@@ -264,13 +263,13 @@ const MCValue = union(enum) {
             .register_pair,
             .memory,
             .indirect,
-            .load_frame,
             .load_symbol,
             .lea_symbol,
             => switch (off) {
                 0 => mcv,
-                else => unreachable, // not offsettable
+                else => unreachable,
             },
+            .load_frame => |frame| .{ .load_frame = .{ .index = frame.index, .off = frame.off + off } },
             .immediate => |imm| .{ .immediate = @bitCast(@as(i64, @bitCast(imm)) +% off) },
             .register => |reg| .{ .register_offset = .{ .reg = reg, .off = off } },
             .register_offset => |reg_off| .{ .register_offset = .{ .reg = reg_off.reg, .off = reg_off.off + off } },
@@ -737,7 +736,6 @@ pub fn generate(
         .air = air,
         .pt = pt,
         .mod = mod,
-        .zcu = zcu,
         .bin_file = bin_file,
         .liveness = liveness,
         .target = target,
@@ -946,7 +944,7 @@ fn formatDecl(
 }
 fn fmtDecl(func: *Func, decl_index: InternPool.DeclIndex) std.fmt.Formatter(formatDecl) {
     return .{ .data = .{
-        .zcu = func.zcu,
+        .zcu = func.pt.zcu,
         .decl_index = decl_index,
     } };
 }
@@ -1325,6 +1323,7 @@ fn genBody(func: *Func, body: []const Air.Inst.Index) InnerError!void {
            .mul,
            .mul_wrap,
            .div_trunc,
+           .rem,

            .shl, .shl_exact,
            .shr, .shr_exact,
@@ -1344,7 +1343,6 @@ fn genBody(func: *Func, body: []const Air.Inst.Index) InnerError!void {
            .ptr_add,
            .ptr_sub => try func.airPtrArithmetic(inst, tag),

-           .rem,
            .mod,
            .div_float,
            .div_floor,
@@ -2151,11 +2149,16 @@ fn airTrunc(func: *Func, inst: Air.Inst.Index) !void {
     const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
     if (func.liveness.isUnused(inst))
         return func.finishAir(inst, .unreach, .{ ty_op.operand, .none, .none });
+    // we assume no zeroext in the "Zig ABI", so it's fine to just not truncate it.
     const operand = try func.resolveInst(ty_op.operand);
-    _ = operand;
-    return func.fail("TODO implement trunc for {}", .{func.target.cpu.arch});
-    // return func.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+    // we can do it just to be safe, but this shouldn't be needed for no-runtime safety modes
+    switch (operand) {
+        .register => |reg| try func.truncateRegister(func.typeOf(ty_op.operand), reg),
+        else => {},
+    }

+    return func.finishAir(inst, operand, .{ ty_op.operand, .none, .none });
 }

 fn airIntFromBool(func: *Func, inst: Air.Inst.Index) !void {
@@ -2305,10 +2308,7 @@ fn binOp(
             80, 128 => true,
             else => unreachable,
         };
-        switch (air_tag) {
-            .rem, .mod => {},
-            else => if (!type_needs_libcall) break :libcall,
-        }
+        if (!type_needs_libcall) break :libcall;
         return func.fail("binOp libcall runtime-float ops", .{});
     }

@@ -2384,6 +2384,7 @@ fn genBinOp(
         .sub_wrap,
         .mul,
         .mul_wrap,
+        .rem,
         => {
             if (!math.isPowerOfTwo(bit_size))
                 return func.fail(
@@ -2391,6 +2392,15 @@ fn genBinOp(
                     .{ @tagName(tag), bit_size },
                 );

+            switch (tag) {
+                .rem,
+                => {
+                    try func.truncateRegister(lhs_ty, lhs_reg);
+                    try func.truncateRegister(rhs_ty, rhs_reg);
+                },
+                else => {},
+            }
+
             switch (lhs_ty.zigTypeTag(zcu)) {
                 .Int => {
                     const mir_tag: Mir.Inst.Tag = switch (tag) {
@@ -2409,6 +2419,10 @@ fn genBinOp(
                             32 => .mulw,
                             else => unreachable,
                         },
+                        .rem => switch (bit_size) {
+                            64 => if (is_unsigned) .remu else .rem,
+                            else => if (is_unsigned) .remuw else .remu,
+                        },
                         else => unreachable,
                     };

@@ -2423,14 +2437,6 @@ fn genBinOp(
                             },
                         },
                     });
-
-                    // truncate when the instruction is larger than the bit size.
-                    switch (bit_size) {
-                        8, 16 => try func.truncateRegister(lhs_ty, dst_reg),
-                        32 => {}, // addw/subw affects the first 32-bits
-                        64 => {}, // add/sub affects the entire register
-                        else => unreachable,
-                    }
                 },
                 .Float => {
                     const mir_tag: Mir.Inst.Tag = switch (tag) {
@@ -2627,23 +2633,17 @@ fn genBinOp(
         .shl,
         .shl_exact,
         => {
-            if (!math.isPowerOfTwo(bit_size))
-                return func.fail(
-                    "TODO: genBinOp {s} non-pow 2, found {}",
-                    .{ @tagName(tag), bit_size },
-                );
-
-            // it's important that the shift amount is exact
+            if (bit_size > 64) return func.fail("TODO: genBinOp shift > 64 bits, {}", .{bit_size});
             try func.truncateRegister(rhs_ty, rhs_reg);

             const mir_tag: Mir.Inst.Tag = switch (tag) {
                 .shl, .shl_exact => switch (bit_size) {
-                    8, 16, 64 => .sll,
+                    1...31, 33...64 => .sll,
                     32 => .sllw,
                     else => unreachable,
                 },
                 .shr, .shr_exact => switch (bit_size) {
-                    8, 16, 64 => .srl,
+                    1...31, 33...64 => .srl,
                     32 => .srlw,
                     else => unreachable,
                 },
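A side note on the truncateRegister(rhs_ty, rhs_reg) call kept in the hunk above (an illustrative sketch, not backend code): RISC-V sll/srl read the shift amount from the low bits of rs2, so a narrow shift-count type sitting in a 64-bit register must have its stale high bits cleared first. Modeled at the language level, assuming a u3 shift count for a u8 operand:

const std = @import("std");

fn shlWithDirtyRegister(lhs: u8, shift_reg: u64) u8 {
    // model of truncateRegister: keep only the bits of the shift-amount type
    const amount: u3 = @truncate(shift_reg);
    return lhs << amount;
}

test "stale high bits do not change the shift count" {
    // 66 == 0b100_0010: only the low three bits (2) survive truncation
    try std.testing.expectEqual(@as(u8, 4), shlWithDirtyRegister(1, 66));
}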
@@ -2659,13 +2659,6 @@ fn genBinOp(
                     .rs2 = rhs_reg,
                 } },
             });
-
-            switch (bit_size) {
-                8, 16 => try func.truncateRegister(lhs_ty, dst_reg),
-                32 => {},
-                64 => {},
-                else => unreachable,
-            }
         },

         // TODO: move the isel logic out of lower and into here.
@@ -2810,10 +2803,6 @@ fn airAddWithOverflow(func: *Func, inst: Air.Inst.Index) !void {
             if (int_info.bits >= 8 and math.isPowerOfTwo(int_info.bits)) {
                 const add_result = try func.binOp(null, .add, extra.lhs, extra.rhs);
-
-                const add_result_reg = try func.copyToTmpRegister(ty, add_result);
-                const add_result_reg_lock = func.register_manager.lockRegAssumeUnused(add_result_reg);
-                defer func.register_manager.unlockReg(add_result_reg_lock);

                 try func.genSetMem(
                     .{ .frame = offset.index },
                     offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(0, pt))),
@@ -2821,14 +2810,21 @@ fn airAddWithOverflow(func: *Func, inst: Air.Inst.Index) !void {
                     add_result,
                 );

+                const trunc_reg = try func.copyToTmpRegister(ty, add_result);
+                const trunc_reg_lock = func.register_manager.lockRegAssumeUnused(trunc_reg);
+                defer func.register_manager.unlockReg(trunc_reg_lock);
+
                 const overflow_reg, const overflow_lock = try func.allocReg(.int);
                 defer func.register_manager.unlockReg(overflow_lock);

+                // if the result isn't equal after truncating it to the given type,
+                // an overflow must have happened.
+                try func.truncateRegister(func.typeOf(extra.lhs), trunc_reg);
                 try func.genBinOp(
                     .cmp_neq,
-                    .{ .register = add_result_reg },
+                    add_result,
                     ty,
-                    .{ .register = add_result_reg },
+                    .{ .register = trunc_reg },
                     ty,
                     overflow_reg,
                 );
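The comment in this hunk describes the whole overflow strategy: compute the full-width result, truncate a copy back to the operand type, and flag overflow when the two differ. A minimal stand-alone sketch of the same idea (addOverflows is hypothetical, not part of the commit):

const std = @import("std");

fn addOverflows(a: u8, b: u8) struct { result: u8, overflow: bool } {
    const wide = @as(u16, a) + @as(u16, b); // full-width result
    const trunc: u8 = @truncate(wide); // model of truncateRegister
    return .{ .result = trunc, .overflow = wide != trunc };
}

test "truncate-and-compare overflow detection" {
    try std.testing.expect(addOverflows(250, 100).overflow); // 350 exceeds maxInt(u8)
    try std.testing.expect(!addOverflows(100, 150).overflow); // 250 fits
}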
@@ -3022,38 +3018,24 @@ fn airMulWithOverflow(func: *Func, inst: Air.Inst.Index) !void {
         switch (lhs_ty.zigTypeTag(zcu)) {
             else => |x| return func.fail("TODO: airMulWithOverflow {s}", .{@tagName(x)}),
             .Int => {
-                assert(lhs_ty.eql(rhs_ty, zcu));
-                const int_info = lhs_ty.intInfo(zcu);
-                switch (int_info.bits) {
-                    1...32 => {
-                        if (int_info.bits >= 8 and math.isPowerOfTwo(int_info.bits)) {
-                            if (int_info.signedness == .unsigned) {
-                                switch (int_info.bits) {
-                                    1...8 => {
-                                        const max_val = std.math.pow(u16, 2, int_info.bits) - 1;
+                if (std.debug.runtime_safety) assert(lhs_ty.eql(rhs_ty, zcu));

-                                        const add_reg, const add_lock = try func.promoteReg(lhs_ty, lhs);
-                                        defer if (add_lock) |lock| func.register_manager.unlockReg(lock);
+                const trunc_reg = try func.copyToTmpRegister(lhs_ty, .{ .register = dest_reg });
+                const trunc_reg_lock = func.register_manager.lockRegAssumeUnused(trunc_reg);
+                defer func.register_manager.unlockReg(trunc_reg_lock);

                 const overflow_reg, const overflow_lock = try func.allocReg(.int);
                 defer func.register_manager.unlockReg(overflow_lock);

-                                        _ = try func.addInst(.{
-                                            .tag = .andi,
-                                            .ops = .rri,
-                                            .data = .{ .i_type = .{
-                                                .rd = overflow_reg,
-                                                .rs1 = add_reg,
-                                                .imm12 = Immediate.s(max_val),
-                                            } },
-                                        });
-
+                // if the result isn't equal after truncating it to the given type,
+                // an overflow must have happened.
+                try func.truncateRegister(func.typeOf(extra.lhs), trunc_reg);
                 try func.genBinOp(
                     .cmp_neq,
-                    .{ .register = overflow_reg },
-                    lhs_ty,
-                    .{ .register = add_reg },
+                    .{ .register = dest_reg },
                     lhs_ty,
+                    .{ .register = trunc_reg },
+                    rhs_ty,
                     overflow_reg,
                 );

@@ -3065,19 +3047,6 @@ fn airMulWithOverflow(func: *Func, inst: Air.Inst.Index) !void {

                 break :result result_mcv;
             },
-
-                                    else => return func.fail("TODO: airMulWithOverflow check for size {d}", .{int_info.bits}),
-                                }
-                            } else {
-                                return func.fail("TODO: airMulWithOverflow calculate carry for signed addition", .{});
-                            }
-                        } else {
-                            return func.fail("TODO: airMulWithOverflow with < 8 bits or non-pow of 2", .{});
-                        }
-                    },
-                    else => return func.fail("TODO: airMulWithOverflow larger than 32-bit mul", .{}),
-                }
-            },
         }
     };

@@ -3317,7 +3286,17 @@ fn airWrapOptional(func: *Func, inst: Air.Inst.Index) !void {
             Type.u8,
             .{ .immediate = 1 },
         ),
-        .register => return func.fail("TODO: airWrapOption opt_mcv register", .{}),
+        .register => |opt_reg| {
+            try func.genBinOp(
+                .shl,
+                .{ .immediate = 1 },
+                Type.u64,
+                .{ .immediate = 32 },
+                Type.u64,
+                opt_reg,
+            );
+        },
         else => unreachable,
     }
 }
@@ -4059,7 +4038,7 @@ fn airLoad(func: *Func, inst: Air.Inst.Index) !void {
     const elem_size = elem_ty.abiSize(pt);

     const dst_mcv: MCValue = blk: {
-        // Pointer is 8 bytes, and if the element is more than that, we cannot reuse it.
+        // "ptr" is 8 bytes, and if the element is more than that, we cannot reuse it.
         if (elem_size <= 8 and func.reuseOperand(inst, ty_op.operand, 0, ptr)) {
             // The MCValue that holds the pointer can be re-used as the value.
             break :blk ptr;
@@ -4970,7 +4949,7 @@ fn isNull(func: *Func, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC
         .lea_symbol,
         .reserved_frame,
         .air_ref,
-        => return func.fail("TODO: hmm {}", .{opt_mcv}),
+        => unreachable,

         .register => |opt_reg| {
             if (some_info.off == 0) {
@@ -4993,9 +4972,27 @@ fn isNull(func: *Func, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC
                 return return_mcv;
             }
             assert(some_info.ty.ip_index == .bool_type);
-            const opt_abi_size: u32 = @intCast(opt_ty.abiSize(pt));
-            _ = opt_abi_size;
-            return func.fail("TODO: isNull some_info.off != 0 register", .{});
+            const bit_offset: u7 = @intCast(some_info.off * 8);
+            try func.genBinOp(
+                .shr,
+                .{ .register = opt_reg },
+                Type.u64,
+                .{ .immediate = bit_offset },
+                Type.u8,
+                return_reg,
+            );
+            try func.truncateRegister(Type.u8, return_reg);
+            try func.genBinOp(
+                .cmp_eq,
+                .{ .register = return_reg },
+                Type.u64,
+                .{ .immediate = 0 },
+                Type.u8,
+                return_reg,
+            );
+
+            return return_mcv;
         },

         .load_frame => {
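What the added shr/truncate/cmp_eq sequence computes, sketched in plain Zig under an assumed layout (the optional's "some" flag stored as one byte at byte offset some_info.off inside the register; isNullFromRegister is hypothetical, not backend code):

const std = @import("std");

fn isNullFromRegister(opt_reg: u64, byte_off: u3) bool {
    // shift the flag byte down to bit 0, then truncate to a single byte
    const shift: u6 = @as(u6, byte_off) * 8;
    const flag: u8 = @truncate(opt_reg >> shift);
    return flag == 0; // a zero flag byte means "null"
}

test "flag byte extraction" {
    // null optional: payload bytes set, flag byte (offset 4) is zero
    try std.testing.expect(isNullFromRegister(0x0000_0000_DEAD_BEEF, 4));
    // non-null optional: flag byte is one
    try std.testing.expect(!isNullFromRegister(0x0000_0001_DEAD_BEEF, 4));
}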
@@ -6556,7 +6553,8 @@ fn airAtomicRmw(func: *Func, inst: Air.Inst.Index) !void {
 }

 fn airAtomicLoad(func: *Func, inst: Air.Inst.Index) !void {
-    const zcu = func.pt.zcu;
+    const pt = func.pt;
+    const zcu = pt.zcu;
     const atomic_load = func.air.instructions.items(.data)[@intFromEnum(inst)].atomic_load;
     const order: std.builtin.AtomicOrder = atomic_load.order;

@@ -6564,6 +6562,9 @@ fn airAtomicLoad(func: *Func, inst: Air.Inst.Index) !void {
     const elem_ty = ptr_ty.childType(zcu);
     const ptr_mcv = try func.resolveInst(atomic_load.ptr);

+    const bit_size = elem_ty.bitSize(pt);
+    if (bit_size > 64) return func.fail("TODO: airAtomicStore > 64 bits", .{});
+
     const result_mcv = try func.allocRegOrMem(elem_ty, inst, true);
     assert(result_mcv == .register); // should be less than 8 bytes

@@ -6616,6 +6617,9 @@ fn airAtomicStore(func: *Func, inst: Air.Inst.Index, order: std.builtin.AtomicOr
     const val_ty = func.typeOf(bin_op.rhs);
     const val_mcv = try func.resolveInst(bin_op.rhs);

+    const bit_size = val_ty.bitSize(func.pt);
+    if (bit_size > 64) return func.fail("TODO: airAtomicStore > 64 bits", .{});
+
     switch (order) {
         .unordered, .monotonic => {},
         .release, .seq_cst => {
@@ -60,9 +60,10 @@ pub fn alwaysSingleThreaded(target: std.Target) bool {
     return false;
 }

-pub fn defaultSingleThreaded(target: std.Target) bool {
+pub fn defaultSingleThreaded(target: std.Target, backend: std.builtin.CompilerBackend) bool {
     switch (target.cpu.arch) {
         .wasm32, .wasm64 => return true,
+        .riscv64 => if (backend == .stage2_riscv64) return true,
         else => {},
     }
     switch (target.os.tag) {
@@ -16,7 +16,6 @@ test "global variable alignment" {
 }

 test "large alignment of local constant" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // flaky
@@ -16,8 +16,6 @@ test "empty function with comments" {
 }

 test "truncate" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     try expect(testTruncate(0x10fd) == 0xfd);
     comptime assert(testTruncate(0x10fd) == 0xfd);
 }
@@ -441,7 +441,6 @@ test "non-anytype generic parameters provide result type" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     const S = struct {
         fn f(comptime T: type, y: T) !void {
@@ -1845,7 +1845,6 @@ test "peer type resolution: three-way resolution combines error set and optional
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     const E = error{Foo};
     var a: E = error.Foo;
@@ -1960,7 +1959,6 @@ test "peer type resolution: vector and tuple" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     var vec: @Vector(3, i32) = .{ 1, 2, 3 };
     _ = &vec;
@@ -23,8 +23,6 @@ test "simple destructure" {
 }

 test "destructure with comptime syntax" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     const S = struct {
         fn doTheTest() !void {
             {
@@ -1076,7 +1076,6 @@ test "enum literal casting to optional" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     var bar: ?Bar = undefined;
     bar = .B;
@@ -181,7 +181,6 @@ test "function with complex callconv and return type expressions" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     try expect(fComplexCallconvRet(3).x == 9);
 }
@@ -112,7 +112,6 @@ test "for with null and T peer types and inferred result location type" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     const S = struct {
         fn doTheTest(slice: []const u8) !void {
@@ -228,7 +227,6 @@ test "else continue outer for" {

 test "for loop with else branch" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     {
         var x = [_]u32{ 1, 2 };
@@ -82,7 +82,6 @@ test "const result loc, runtime if cond, else unreachable" {
 test "if copies its payload" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     const S = struct {
         fn doTheTest() !void {
@@ -147,8 +146,6 @@ test "if-else expression with runtime condition result location is inferred opti
 }

 test "result location with inferred type ends up being pointer to comptime_int" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     var a: ?u32 = 1234;
     var b: u32 = 2000;
     _ = .{ &a, &b };
@@ -689,6 +689,8 @@ fn testSignedWrappingEval(x: i32) !void {
 }

 test "signed negation wrapping" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     try testSignedNegationWrappingEval(minInt(i16));
     try comptime testSignedNegationWrappingEval(minInt(i16));
 }
@@ -699,6 +701,8 @@ fn testSignedNegationWrappingEval(x: i16) !void {
 }

 test "unsigned negation wrapping" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     try testUnsignedNegationWrappingEval(1);
     try comptime testUnsignedNegationWrappingEval(1);
 }
@@ -725,7 +729,6 @@ fn negateWrap(comptime T: type, x: T) T {
 test "unsigned 64-bit division" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch.isMIPS()) {
         // https://github.com/ziglang/zig/issues/16846
@@ -838,7 +841,6 @@ test "@addWithOverflow" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     try testAddWithOverflow(u8, 250, 100, 94, 1);
     try testAddWithOverflow(u8, 100, 150, 250, 0);
@@ -927,7 +929,6 @@ fn testMulWithOverflow(comptime T: type, a: T, b: T, mul: T, bit: u1) !void {
 test "basic @mulWithOverflow" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     try testMulWithOverflow(u8, 86, 3, 2, 1);
     try testMulWithOverflow(u8, 85, 3, 255, 0);
@@ -1330,6 +1331,8 @@ test "quad hex float literal parsing accurate" {
 }

 test "truncating shift left" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     try testShlTrunc(maxInt(u16));
     try comptime testShlTrunc(maxInt(u16));
 }
|||||||
@ -188,7 +188,6 @@ test "unwrap optional which is field of global var" {
|
|||||||
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
|
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
|
||||||
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
|
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
|
||||||
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
|
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
|
||||||
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
|
|
||||||
|
|
||||||
struct_with_optional.field = null;
|
struct_with_optional.field = null;
|
||||||
if (struct_with_optional.field) |payload| {
|
if (struct_with_optional.field) |payload| {
|
||||||
|
|||||||
@ -134,7 +134,6 @@ test "nested optional field in struct" {
|
|||||||
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
|
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
|
||||||
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
|
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
|
||||||
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
|
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
|
||||||
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
|
|
||||||
|
|
||||||
const S2 = struct {
|
const S2 = struct {
|
||||||
y: u8,
|
y: u8,
|
||||||
@@ -287,7 +286,6 @@ test "nested orelse" {
 test "self-referential struct through a slice of optional" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     const S = struct {
         const Node = struct {
@@ -566,7 +564,6 @@ test "Optional slice passed to function" {
 test "peer type resolution in nested if expressions" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     const Thing = struct { n: i32 };
     var a = false;
|||||||
@ -1096,7 +1096,6 @@ test "packed struct used as part of anon decl name" {
|
|||||||
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
|
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
|
||||||
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
|
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
|
||||||
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
|
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
|
||||||
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
|
|
||||||
|
|
||||||
const S = packed struct { a: u0 = 0 };
|
const S = packed struct { a: u0 = 0 };
|
||||||
var a: u8 = 0;
|
var a: u8 = 0;
|
||||||
|
|||||||
@ -1573,7 +1573,6 @@ test "no dependency loop on optional field wrapped in generic function" {
|
|||||||
test "optional field init with tuple" {
|
test "optional field init with tuple" {
|
||||||
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
|
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
|
||||||
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
|
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
|
||||||
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
|
|
||||||
|
|
||||||
const S = struct {
|
const S = struct {
|
||||||
a: ?struct { b: u32 },
|
a: ?struct { b: u32 },
|
||||||
|
|||||||
@ -516,7 +516,6 @@ test "switch with null and T peer types and inferred result location type" {
|
|||||||
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
|
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
|
||||||
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
|
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
|
||||||
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
|
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
|
||||||
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
|
|
||||||
|
|
||||||
const S = struct {
|
const S = struct {
|
||||||
fn doTheTest(c: u8) !void {
|
fn doTheTest(c: u8) !void {
|
||||||
|
|||||||
@ -6,7 +6,6 @@ test "thread local variable" {
|
|||||||
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
|
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
|
||||||
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
|
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
|
||||||
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
|
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
|
||||||
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
|
|
||||||
if (builtin.zig_backend == .stage2_llvm) switch (builtin.cpu.arch) {
|
if (builtin.zig_backend == .stage2_llvm) switch (builtin.cpu.arch) {
|
||||||
.x86_64, .x86 => {},
|
.x86_64, .x86 => {},
|
||||||
else => return error.SkipZigTest,
|
else => return error.SkipZigTest,
|
||||||
@@ -47,7 +46,6 @@ test "reference a global threadlocal variable" {
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_llvm) switch (builtin.cpu.arch) {
         .x86_64, .x86 => {},
         else => return error.SkipZigTest,
|||||||
@ -106,7 +106,6 @@ fn testBreakOuter() void {
|
|||||||
test "while copies its payload" {
|
test "while copies its payload" {
|
||||||
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
|
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
|
||||||
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
|
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
|
||||||
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
|
|
||||||
|
|
||||||
const S = struct {
|
const S = struct {
|
||||||
fn doTheTest() !void {
|
fn doTheTest() !void {
|
||||||
|
|||||||